Home | History | Annotate | Line # | Download | only in libevent
      1  1.6  christos /*	$NetBSD: event.c,v 1.7 2024/08/18 20:47:21 christos Exp $	*/
      2  1.1  christos 
      3  1.1  christos /*
      4  1.1  christos  * Copyright (c) 2000-2007 Niels Provos <provos (at) citi.umich.edu>
      5  1.1  christos  * Copyright (c) 2007-2012 Niels Provos and Nick Mathewson
      6  1.1  christos  *
      7  1.1  christos  * Redistribution and use in source and binary forms, with or without
      8  1.1  christos  * modification, are permitted provided that the following conditions
      9  1.1  christos  * are met:
     10  1.1  christos  * 1. Redistributions of source code must retain the above copyright
     11  1.1  christos  *    notice, this list of conditions and the following disclaimer.
     12  1.1  christos  * 2. Redistributions in binary form must reproduce the above copyright
     13  1.1  christos  *    notice, this list of conditions and the following disclaimer in the
     14  1.1  christos  *    documentation and/or other materials provided with the distribution.
     15  1.1  christos  * 3. The name of the author may not be used to endorse or promote products
     16  1.1  christos  *    derived from this software without specific prior written permission.
     17  1.1  christos  *
     18  1.1  christos  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     19  1.1  christos  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     20  1.1  christos  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     21  1.1  christos  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     22  1.1  christos  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     23  1.1  christos  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     24  1.1  christos  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     25  1.1  christos  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     26  1.1  christos  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     27  1.1  christos  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     28  1.1  christos  */
     29  1.1  christos #include "event2/event-config.h"
     30  1.1  christos #include "evconfig-private.h"
     31  1.1  christos 
     32  1.1  christos #ifdef _WIN32
     33  1.1  christos #include <winsock2.h>
     34  1.1  christos #define WIN32_LEAN_AND_MEAN
     35  1.1  christos #include <windows.h>
     36  1.1  christos #undef WIN32_LEAN_AND_MEAN
     37  1.1  christos #endif
     38  1.1  christos #include <sys/types.h>
     39  1.1  christos #if !defined(_WIN32) && defined(EVENT__HAVE_SYS_TIME_H)
     40  1.1  christos #include <sys/time.h>
     41  1.1  christos #endif
     42  1.1  christos #include <sys/queue.h>
     43  1.1  christos #ifdef EVENT__HAVE_SYS_SOCKET_H
     44  1.1  christos #include <sys/socket.h>
     45  1.1  christos #endif
     46  1.1  christos #include <stdio.h>
     47  1.1  christos #include <stdlib.h>
     48  1.1  christos #ifdef EVENT__HAVE_UNISTD_H
     49  1.1  christos #include <unistd.h>
     50  1.1  christos #endif
     51  1.1  christos #include <ctype.h>
     52  1.1  christos #include <errno.h>
     53  1.1  christos #include <signal.h>
     54  1.1  christos #include <string.h>
     55  1.1  christos #include <time.h>
     56  1.1  christos #include <limits.h>
     57  1.7  christos #ifdef EVENT__HAVE_FCNTL_H
     58  1.7  christos #include <fcntl.h>
     59  1.7  christos #endif
     60  1.1  christos 
     61  1.1  christos #include "event2/event.h"
     62  1.1  christos #include "event2/event_struct.h"
     63  1.1  christos #include "event2/event_compat.h"
     64  1.1  christos #include "event-internal.h"
     65  1.1  christos #include "defer-internal.h"
     66  1.1  christos #include "evthread-internal.h"
     67  1.1  christos #include "event2/thread.h"
     68  1.1  christos #include "event2/util.h"
     69  1.1  christos #include "log-internal.h"
     70  1.1  christos #include "evmap-internal.h"
     71  1.1  christos #include "iocp-internal.h"
     72  1.1  christos #include "changelist-internal.h"
     73  1.1  christos #define HT_NO_CACHE_HASH_VALUES
     74  1.1  christos #include "ht-internal.h"
     75  1.1  christos #include "util-internal.h"
     76  1.1  christos 
     77  1.1  christos 
     78  1.1  christos #ifdef EVENT__HAVE_WORKING_KQUEUE
     79  1.1  christos #include "kqueue-internal.h"
     80  1.1  christos #endif
     81  1.1  christos 
     82  1.1  christos #ifdef EVENT__HAVE_EVENT_PORTS
     83  1.1  christos extern const struct eventop evportops;
     84  1.1  christos #endif
     85  1.1  christos #ifdef EVENT__HAVE_SELECT
     86  1.1  christos extern const struct eventop selectops;
     87  1.1  christos #endif
     88  1.1  christos #ifdef EVENT__HAVE_POLL
     89  1.1  christos extern const struct eventop pollops;
     90  1.1  christos #endif
     91  1.1  christos #ifdef EVENT__HAVE_EPOLL
     92  1.1  christos extern const struct eventop epollops;
     93  1.1  christos #endif
     94  1.1  christos #ifdef EVENT__HAVE_WORKING_KQUEUE
     95  1.1  christos extern const struct eventop kqops;
     96  1.1  christos #endif
     97  1.1  christos #ifdef EVENT__HAVE_DEVPOLL
     98  1.1  christos extern const struct eventop devpollops;
     99  1.1  christos #endif
    100  1.1  christos #ifdef _WIN32
    101  1.1  christos extern const struct eventop win32ops;
    102  1.1  christos #endif
    103  1.1  christos 
    104  1.1  christos /* Array of backends in order of preference. */
    105  1.1  christos static const struct eventop *eventops[] = {
    106  1.1  christos #ifdef EVENT__HAVE_EVENT_PORTS
    107  1.1  christos 	&evportops,
    108  1.1  christos #endif
    109  1.1  christos #ifdef EVENT__HAVE_WORKING_KQUEUE
    110  1.1  christos 	&kqops,
    111  1.1  christos #endif
    112  1.1  christos #ifdef EVENT__HAVE_EPOLL
    113  1.1  christos 	&epollops,
    114  1.1  christos #endif
    115  1.1  christos #ifdef EVENT__HAVE_DEVPOLL
    116  1.1  christos 	&devpollops,
    117  1.1  christos #endif
    118  1.1  christos #ifdef EVENT__HAVE_POLL
    119  1.1  christos 	&pollops,
    120  1.1  christos #endif
    121  1.1  christos #ifdef EVENT__HAVE_SELECT
    122  1.1  christos 	&selectops,
    123  1.1  christos #endif
    124  1.1  christos #ifdef _WIN32
    125  1.1  christos 	&win32ops,
    126  1.1  christos #endif
    127  1.1  christos 	NULL
    128  1.1  christos };
    129  1.1  christos 
    130  1.1  christos /* Global state; deprecated */
    131  1.7  christos EVENT2_EXPORT_SYMBOL
    132  1.1  christos struct event_base *event_global_current_base_ = NULL;
    133  1.1  christos #define current_base event_global_current_base_
    134  1.1  christos 
    135  1.1  christos /* Global state */
    136  1.1  christos 
    137  1.1  christos static void *event_self_cbarg_ptr_ = NULL;
    138  1.1  christos 
    139  1.1  christos /* Prototypes */
    140  1.1  christos static void	event_queue_insert_active(struct event_base *, struct event_callback *);
    141  1.1  christos static void	event_queue_insert_active_later(struct event_base *, struct event_callback *);
    142  1.1  christos static void	event_queue_insert_timeout(struct event_base *, struct event *);
    143  1.1  christos static void	event_queue_insert_inserted(struct event_base *, struct event *);
    144  1.1  christos static void	event_queue_remove_active(struct event_base *, struct event_callback *);
    145  1.1  christos static void	event_queue_remove_active_later(struct event_base *, struct event_callback *);
    146  1.1  christos static void	event_queue_remove_timeout(struct event_base *, struct event *);
    147  1.1  christos static void	event_queue_remove_inserted(struct event_base *, struct event *);
    148  1.1  christos static void event_queue_make_later_events_active(struct event_base *base);
    149  1.1  christos 
    150  1.1  christos static int evthread_make_base_notifiable_nolock_(struct event_base *base);
    151  1.2  christos static int event_del_(struct event *ev, int blocking);
    152  1.1  christos 
    153  1.1  christos #ifdef USE_REINSERT_TIMEOUT
    154  1.1  christos /* This code seems buggy; only turn it on if we find out what the trouble is. */
    155  1.1  christos static void	event_queue_reinsert_timeout(struct event_base *,struct event *, int was_common, int is_common, int old_timeout_idx);
    156  1.1  christos #endif
    157  1.1  christos 
    158  1.1  christos static int	event_haveevents(struct event_base *);
    159  1.1  christos 
    160  1.1  christos static int	event_process_active(struct event_base *);
    161  1.1  christos 
    162  1.1  christos static int	timeout_next(struct event_base *, struct timeval **);
    163  1.1  christos static void	timeout_process(struct event_base *);
    164  1.1  christos 
    165  1.1  christos static inline void	event_signal_closure(struct event_base *, struct event *ev);
    166  1.1  christos static inline void	event_persist_closure(struct event_base *, struct event *ev);
    167  1.1  christos 
    168  1.1  christos static int	evthread_notify_base(struct event_base *base);
    169  1.1  christos 
    170  1.1  christos static void insert_common_timeout_inorder(struct common_timeout_list *ctl,
    171  1.1  christos     struct event *ev);
    172  1.1  christos 
    173  1.1  christos #ifndef EVENT__DISABLE_DEBUG_MODE
    174  1.1  christos /* These functions implement a hashtable of which 'struct event *' structures
    175  1.1  christos  * have been setup or added.  We don't want to trust the content of the struct
    176  1.1  christos  * event itself, since we're trying to work through cases where an event gets
    177  1.1  christos  * clobbered or freed.  Instead, we keep a hashtable indexed by the pointer.
    178  1.1  christos  */
    179  1.1  christos 
    180  1.1  christos struct event_debug_entry {
    181  1.1  christos 	HT_ENTRY(event_debug_entry) node;
    182  1.1  christos 	const struct event *ptr;
    183  1.1  christos 	unsigned added : 1;
    184  1.1  christos };
    185  1.1  christos 
    186  1.1  christos static inline unsigned
    187  1.1  christos hash_debug_entry(const struct event_debug_entry *e)
    188  1.1  christos {
    189  1.1  christos 	/* We need to do this silliness to convince compilers that we
    190  1.1  christos 	 * honestly mean to cast e->ptr to an integer, and discard any
    191  1.1  christos 	 * part of it that doesn't fit in an unsigned.
    192  1.1  christos 	 */
    193  1.1  christos 	unsigned u = (unsigned) ((ev_uintptr_t) e->ptr);
    194  1.1  christos 	/* Our hashtable implementation is pretty sensitive to low bits,
    195  1.1  christos 	 * and every struct event is over 64 bytes in size, so we can
    196  1.1  christos 	 * just say >>6. */
    197  1.1  christos 	return (u >> 6);
    198  1.1  christos }
    199  1.1  christos 
    200  1.1  christos static inline int
    201  1.1  christos eq_debug_entry(const struct event_debug_entry *a,
    202  1.1  christos     const struct event_debug_entry *b)
    203  1.1  christos {
    204  1.1  christos 	return a->ptr == b->ptr;
    205  1.1  christos }
    206  1.1  christos 
    207  1.1  christos int event_debug_mode_on_ = 0;
    208  1.7  christos 
    209  1.7  christos 
    210  1.7  christos #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
    211  1.7  christos /**
    212  1.7  christos  * @brief debug mode variable which is set for any function/structure that needs
    213  1.7  christos  *        to be shared across threads (if thread support is enabled).
    214  1.7  christos  *
    215  1.7  christos  *        When and if evthreads are initialized, this variable will be evaluated,
    216  1.7  christos  *        and if set to something other than zero, this means the evthread setup
    217  1.7  christos  *        functions were called out of order.
    218  1.7  christos  *
    219  1.7  christos  *        See: "Locks and threading" in the documentation.
    220  1.7  christos  */
    221  1.7  christos int event_debug_created_threadable_ctx_ = 0;
    222  1.7  christos #endif
    223  1.7  christos 
    224  1.1  christos /* Set if it's too late to enable event_debug_mode. */
    225  1.1  christos static int event_debug_mode_too_late = 0;
    226  1.1  christos #ifndef EVENT__DISABLE_THREAD_SUPPORT
    227  1.1  christos static void *event_debug_map_lock_ = NULL;
    228  1.1  christos #endif
    229  1.1  christos static HT_HEAD(event_debug_map, event_debug_entry) global_debug_map =
    230  1.1  christos 	HT_INITIALIZER();
    231  1.1  christos 
    232  1.1  christos HT_PROTOTYPE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    233  1.1  christos     eq_debug_entry)
    234  1.1  christos HT_GENERATE(event_debug_map, event_debug_entry, node, hash_debug_entry,
    235  1.1  christos     eq_debug_entry, 0.5, mm_malloc, mm_realloc, mm_free)
    236  1.1  christos 
/* record that ev is now setup (that is, ready for an add) */
static void event_debug_note_setup_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	/* Even with debug mode off we must fall through to mark that it
	 * is now too late to turn it on (see event_debug_mode_too_late). */
	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent) {
		/* Already tracked: a re-setup clears the "added" flag. */
		dent->added = 0;
	} else {
		/* First time we see this event: start tracking it. */
		dent = mm_malloc(sizeof(*dent));
		if (!dent)
			event_err(1,
			    "Out of memory in debugging code");
		dent->ptr = ev;
		dent->added = 0;
		HT_INSERT(event_debug_map, &global_debug_map, dent);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* record that ev is no longer setup */
static void event_debug_note_teardown_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	/* Remove and free the tracking entry, if any.  A teardown of an
	 * untracked event is silently ignored here (HT_REMOVE returns
	 * NULL and mm_free is skipped). */
	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_REMOVE(event_debug_map, &global_debug_map, &find);
	if (dent)
		mm_free(dent);
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* record that ev is now added (was "Macro:" before these became
 * functions; kept as a plain function note) */
static void event_debug_note_add_(const struct event *ev)
{
	struct event_debug_entry *dent,find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent) {
		dent->added = 1;
	} else {
		/* Adding an event that was never set up is a caller bug;
		 * abort with as much context as we can print. */
		event_errx(EVENT_ERR_ABORT_,
		    "%s: noting an add on a non-setup event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT
		    ", flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* record that ev is no longer added */
static void event_debug_note_del_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		goto out;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (dent) {
		dent->added = 0;
	} else {
		/* Deleting an event that was never set up is a caller
		 * bug; abort with diagnostic context. */
		event_errx(EVENT_ERR_ABORT_,
		    "%s: noting a del on a non-setup event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT
		    ", flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);

out:
	event_debug_mode_too_late = 1;
}
/* assert that ev is setup (i.e., okay to add or inspect) */
static void event_debug_assert_is_setup_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	/* Pure assertion: unlike the note_* hooks, this does not touch
	 * event_debug_mode_too_late, so it may return early. */
	if (!event_debug_mode_on_)
		return;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	if (!dent) {
		event_errx(EVENT_ERR_ABORT_,
		    "%s called on a non-initialized event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT
		    ", flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
}
/* assert that ev is not added (i.e., okay to tear down or set up again) */
static void event_debug_assert_not_added_(const struct event *ev)
{
	struct event_debug_entry *dent, find;

	if (!event_debug_mode_on_)
		return;

	find.ptr = ev;
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	dent = HT_FIND(event_debug_map, &global_debug_map, &find);
	/* An untracked event is fine here; only a tracked AND added
	 * event is an error. */
	if (dent && dent->added) {
		event_errx(EVENT_ERR_ABORT_,
		    "%s called on an already added event %p"
		    " (events: 0x%x, fd: "EV_SOCK_FMT", "
		    "flags: 0x%x)",
		    __func__, ev, ev->ev_events,
		    EV_SOCK_ARG(ev->ev_fd), ev->ev_flags);
	}
	EVLOCK_UNLOCK(event_debug_map_lock_, 0);
}
/* In debug mode, assert that fd (if valid) has O_NONBLOCK set.
 * No-op on Windows, where fcntl() is unavailable. */
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd)
{
	if (!event_debug_mode_on_)
		return;
	if (fd < 0)
		return;

#ifndef _WIN32
	{
		int flags;
		/* Only assert when fcntl() itself succeeds; a failing
		 * F_GETFL (e.g. bad fd) is not this check's concern. */
		if ((flags = fcntl(fd, F_GETFL, NULL)) >= 0) {
			EVUTIL_ASSERT(flags & O_NONBLOCK);
		}
	}
#endif
}
    392  1.1  christos #else
/* Debug mode compiled out: all bookkeeping/assertion hooks become
 * no-ops so callers need no #ifdefs.  The (void) casts silence
 * unused-parameter warnings. */
static void event_debug_note_setup_(const struct event *ev) { (void)ev; }
static void event_debug_note_teardown_(const struct event *ev) { (void)ev; }
static void event_debug_note_add_(const struct event *ev) { (void)ev; }
static void event_debug_note_del_(const struct event *ev) { (void)ev; }
static void event_debug_assert_is_setup_(const struct event *ev) { (void)ev; }
static void event_debug_assert_not_added_(const struct event *ev) { (void)ev; }
static void event_debug_assert_socket_nonblocking_(evutil_socket_t fd) { (void)fd; }
    400  1.1  christos #endif
    401  1.1  christos 
    402  1.1  christos #define EVENT_BASE_ASSERT_LOCKED(base)		\
    403  1.1  christos 	EVLOCK_ASSERT_LOCKED((base)->th_base_lock)
    404  1.1  christos 
    405  1.1  christos /* How often (in seconds) do we check for changes in wall clock time relative
    406  1.1  christos  * to monotonic time?  Set this to -1 for 'never.' */
    407  1.1  christos #define CLOCK_SYNC_INTERVAL 5
    408  1.1  christos 
    409  1.1  christos /** Set 'tp' to the current time according to 'base'.  We must hold the lock
    410  1.1  christos  * on 'base'.  If there is a cached time, return it.  Otherwise, use
    411  1.1  christos  * clock_gettime or gettimeofday as appropriate to find out the right time.
    412  1.1  christos  * Return 0 on success, -1 on failure.
    413  1.1  christos  */
static int
gettime(struct event_base *base, struct timeval *tp)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	/* Fast path: use the time cached for this loop iteration.
	 * tv_sec == 0 is the "no cached time" sentinel. */
	if (base->tv_cache.tv_sec) {
		*tp = base->tv_cache;
		return (0);
	}

	if (evutil_gettime_monotonic_(&base->monotonic_timer, tp) == -1) {
		return -1;
	}

	/* Every CLOCK_SYNC_INTERVAL seconds, refresh the monotonic ->
	 * wall-clock offset used by event_base_gettimeofday_cached(). */
	if (base->last_updated_clock_diff + CLOCK_SYNC_INTERVAL
	    < tp->tv_sec) {
		struct timeval tv;
		evutil_gettimeofday(&tv,NULL);
		evutil_timersub(&tv, tp, &base->tv_clock_diff);
		base->last_updated_clock_diff = tp->tv_sec;
	}

	return 0;
}
    438  1.1  christos 
int
event_base_gettimeofday_cached(struct event_base *base, struct timeval *tv)
{
	int r;
	/* NULL means "the deprecated global base"; with no global base
	 * either, just report the real wall-clock time. */
	if (!base) {
		base = current_base;
		if (!current_base)
			return evutil_gettimeofday(tv, NULL);
	}

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	if (base->tv_cache.tv_sec == 0) {
		/* No cached time available; ask the system. */
		r = evutil_gettimeofday(tv, NULL);
	} else {
		/* The cache holds monotonic time; add the stored offset
		 * (maintained by gettime()) to get wall-clock time. */
		evutil_timeradd(&base->tv_cache, &base->tv_clock_diff, tv);
		r = 0;
	}
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return r;
}
    459  1.1  christos 
/** Make 'base' have no current cached time.  (tv_sec == 0 is the
 * sentinel gettime() checks for.) */
static inline void
clear_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
}
    466  1.1  christos 
/** Replace the cached time in 'base' with the current time.  Clears the
 * cache first so gettime() fetches a fresh value rather than returning
 * the stale cache; skipped entirely when time caching is disabled. */
static inline void
update_time_cache(struct event_base *base)
{
	base->tv_cache.tv_sec = 0;
	if (!(base->flags & EVENT_BASE_FLAG_NO_CACHE_TIME))
	    gettime(base, &base->tv_cache);
}
    475  1.1  christos 
    476  1.1  christos int
    477  1.1  christos event_base_update_cache_time(struct event_base *base)
    478  1.1  christos {
    479  1.1  christos 
    480  1.1  christos 	if (!base) {
    481  1.1  christos 		base = current_base;
    482  1.1  christos 		if (!current_base)
    483  1.1  christos 			return -1;
    484  1.1  christos 	}
    485  1.1  christos 
    486  1.1  christos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    487  1.2  christos 	if (base->running_loop)
    488  1.2  christos 		update_time_cache(base);
    489  1.1  christos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
    490  1.1  christos 	return 0;
    491  1.1  christos }
    492  1.1  christos 
/* Recover the enclosing struct event from its embedded callback.
 * Only valid for callbacks that belong to a real event, which is
 * what the EVLIST_INIT assertion enforces. */
static inline struct event *
event_callback_to_event(struct event_callback *evcb)
{
	EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_INIT));
	return EVUTIL_UPCAST(evcb, struct event, ev_evcallback);
}
    499  1.1  christos 
/* Return the callback structure embedded in 'ev'.  Inverse of
 * event_callback_to_event(). */
static inline struct event_callback *
event_to_event_callback(struct event *ev)
{
	return &ev->ev_evcallback;
}
    505  1.1  christos 
    506  1.1  christos struct event_base *
    507  1.1  christos event_init(void)
    508  1.1  christos {
    509  1.1  christos 	struct event_base *base = event_base_new_with_config(NULL);
    510  1.1  christos 
    511  1.1  christos 	if (base == NULL) {
    512  1.1  christos 		event_errx(1, "%s: Unable to construct event_base", __func__);
    513  1.1  christos 		return NULL;
    514  1.1  christos 	}
    515  1.1  christos 
    516  1.1  christos 	current_base = base;
    517  1.1  christos 
    518  1.1  christos 	return (base);
    519  1.1  christos }
    520  1.1  christos 
    521  1.1  christos struct event_base *
    522  1.1  christos event_base_new(void)
    523  1.1  christos {
    524  1.1  christos 	struct event_base *base = NULL;
    525  1.1  christos 	struct event_config *cfg = event_config_new();
    526  1.1  christos 	if (cfg) {
    527  1.1  christos 		base = event_base_new_with_config(cfg);
    528  1.1  christos 		event_config_free(cfg);
    529  1.1  christos 	}
    530  1.1  christos 	return base;
    531  1.1  christos }
    532  1.1  christos 
    533  1.1  christos /** Return true iff 'method' is the name of a method that 'cfg' tells us to
    534  1.1  christos  * avoid. */
    535  1.1  christos static int
    536  1.1  christos event_config_is_avoided_method(const struct event_config *cfg,
    537  1.1  christos     const char *method)
    538  1.1  christos {
    539  1.1  christos 	struct event_config_entry *entry;
    540  1.1  christos 
    541  1.1  christos 	TAILQ_FOREACH(entry, &cfg->entries, next) {
    542  1.1  christos 		if (entry->avoid_method != NULL &&
    543  1.1  christos 		    strcmp(entry->avoid_method, method) == 0)
    544  1.1  christos 			return (1);
    545  1.1  christos 	}
    546  1.1  christos 
    547  1.1  christos 	return (0);
    548  1.1  christos }
    549  1.1  christos 
/** Return true iff 'method' is disabled according to the environment,
 * i.e. an EVENT_NO<METHOD> environment variable is set. */
static int
event_is_method_disabled(const char *name)
{
	char environment[64];
	int i;

	evutil_snprintf(environment, sizeof(environment), "EVENT_NO%s", name);
	/* Upper-case the method name in place; index 8 skips the
	 * "EVENT_NO" prefix (strlen("EVENT_NO") == 8). */
	for (i = 8; environment[i] != '\0'; ++i)
		environment[i] = EVUTIL_TOUPPER_(environment[i]);
	/* Note that evutil_getenv_() ignores the environment entirely if
	 * we're setuid */
	return (evutil_getenv_(environment) != NULL);
}
    564  1.1  christos 
/* Return the feature bitmask advertised by the backend selected for
 * 'base'. */
int
event_base_get_features(const struct event_base *base)
{
	return base->evsel->features;
}
    570  1.1  christos 
void
event_enable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_mode_on_)
		event_errx(1, "%s was called twice!", __func__);
	/* Debug mode cannot be enabled after any event or base exists:
	 * those earlier objects would be missing from the debug map. */
	if (event_debug_mode_too_late)
		event_errx(1, "%s must be called *before* creating any events "
		    "or event_bases",__func__);

	event_debug_mode_on_ = 1;

	HT_INIT(event_debug_map, &global_debug_map);
#endif
}
    586  1.1  christos 
void
event_disable_debug_mode(void)
{
#ifndef EVENT__DISABLE_DEBUG_MODE
	struct event_debug_entry **ent, *victim;

	/* Free every tracked entry.  HT_NEXT_RMV unlinks the current
	 * element while advancing the iterator, so removal during
	 * iteration is safe here. */
	EVLOCK_LOCK(event_debug_map_lock_, 0);
	for (ent = HT_START(event_debug_map, &global_debug_map); ent; ) {
		victim = *ent;
		ent = HT_NEXT_RMV(event_debug_map, &global_debug_map, ent);
		mm_free(victim);
	}
	HT_CLEAR(event_debug_map, &global_debug_map);
	EVLOCK_UNLOCK(event_debug_map_lock_ , 0);

	event_debug_mode_on_  = 0;
#endif
}
    605  1.1  christos 
    606  1.1  christos struct event_base *
    607  1.1  christos event_base_new_with_config(const struct event_config *cfg)
    608  1.1  christos {
    609  1.1  christos 	int i;
    610  1.1  christos 	struct event_base *base;
    611  1.1  christos 	int should_check_environment;
    612  1.1  christos 
    613  1.1  christos #ifndef EVENT__DISABLE_DEBUG_MODE
    614  1.1  christos 	event_debug_mode_too_late = 1;
    615  1.1  christos #endif
    616  1.1  christos 
    617  1.1  christos 	if ((base = mm_calloc(1, sizeof(struct event_base))) == NULL) {
    618  1.1  christos 		event_warn("%s: calloc", __func__);
    619  1.1  christos 		return NULL;
    620  1.1  christos 	}
    621  1.1  christos 
    622  1.1  christos 	if (cfg)
    623  1.1  christos 		base->flags = cfg->flags;
    624  1.1  christos 
    625  1.1  christos 	should_check_environment =
    626  1.1  christos 	    !(cfg && (cfg->flags & EVENT_BASE_FLAG_IGNORE_ENV));
    627  1.1  christos 
    628  1.1  christos 	{
    629  1.1  christos 		struct timeval tmp;
    630  1.1  christos 		int precise_time =
    631  1.1  christos 		    cfg && (cfg->flags & EVENT_BASE_FLAG_PRECISE_TIMER);
    632  1.1  christos 		int flags;
    633  1.1  christos 		if (should_check_environment && !precise_time) {
    634  1.1  christos 			precise_time = evutil_getenv_("EVENT_PRECISE_TIMER") != NULL;
    635  1.7  christos 			if (precise_time) {
    636  1.7  christos 				base->flags |= EVENT_BASE_FLAG_PRECISE_TIMER;
    637  1.7  christos 			}
    638  1.1  christos 		}
    639  1.1  christos 		flags = precise_time ? EV_MONOT_PRECISE : 0;
    640  1.1  christos 		evutil_configure_monotonic_time_(&base->monotonic_timer, flags);
    641  1.1  christos 
    642  1.1  christos 		gettime(base, &tmp);
    643  1.1  christos 	}
    644  1.1  christos 
    645  1.1  christos 	min_heap_ctor_(&base->timeheap);
    646  1.1  christos 
    647  1.1  christos 	base->sig.ev_signal_pair[0] = -1;
    648  1.1  christos 	base->sig.ev_signal_pair[1] = -1;
    649  1.1  christos 	base->th_notify_fd[0] = -1;
    650  1.1  christos 	base->th_notify_fd[1] = -1;
    651  1.1  christos 
    652  1.1  christos 	TAILQ_INIT(&base->active_later_queue);
    653  1.1  christos 
    654  1.1  christos 	evmap_io_initmap_(&base->io);
    655  1.1  christos 	evmap_signal_initmap_(&base->sigmap);
    656  1.1  christos 	event_changelist_init_(&base->changelist);
    657  1.1  christos 
    658  1.1  christos 	base->evbase = NULL;
    659  1.1  christos 
    660  1.1  christos 	if (cfg) {
    661  1.1  christos 		memcpy(&base->max_dispatch_time,
    662  1.1  christos 		    &cfg->max_dispatch_interval, sizeof(struct timeval));
    663  1.1  christos 		base->limit_callbacks_after_prio =
    664  1.1  christos 		    cfg->limit_callbacks_after_prio;
    665  1.1  christos 	} else {
    666  1.1  christos 		base->max_dispatch_time.tv_sec = -1;
    667  1.1  christos 		base->limit_callbacks_after_prio = 1;
    668  1.1  christos 	}
    669  1.1  christos 	if (cfg && cfg->max_dispatch_callbacks >= 0) {
    670  1.1  christos 		base->max_dispatch_callbacks = cfg->max_dispatch_callbacks;
    671  1.1  christos 	} else {
    672  1.1  christos 		base->max_dispatch_callbacks = INT_MAX;
    673  1.1  christos 	}
    674  1.1  christos 	if (base->max_dispatch_callbacks == INT_MAX &&
    675  1.1  christos 	    base->max_dispatch_time.tv_sec == -1)
    676  1.1  christos 		base->limit_callbacks_after_prio = INT_MAX;
    677  1.1  christos 
    678  1.1  christos 	for (i = 0; eventops[i] && !base->evbase; i++) {
    679  1.1  christos 		if (cfg != NULL) {
    680  1.1  christos 			/* determine if this backend should be avoided */
    681  1.1  christos 			if (event_config_is_avoided_method(cfg,
    682  1.1  christos 				eventops[i]->name))
    683  1.1  christos 				continue;
    684  1.1  christos 			if ((eventops[i]->features & cfg->require_features)
    685  1.1  christos 			    != cfg->require_features)
    686  1.1  christos 				continue;
    687  1.1  christos 		}
    688  1.1  christos 
    689  1.1  christos 		/* also obey the environment variables */
    690  1.1  christos 		if (should_check_environment &&
    691  1.1  christos 		    event_is_method_disabled(eventops[i]->name))
    692  1.1  christos 			continue;
    693  1.1  christos 
    694  1.1  christos 		base->evsel = eventops[i];
    695  1.1  christos 
    696  1.1  christos 		base->evbase = base->evsel->init(base);
    697  1.1  christos 	}
    698  1.1  christos 
    699  1.1  christos 	if (base->evbase == NULL) {
    700  1.1  christos 		event_warnx("%s: no event mechanism available",
    701  1.1  christos 		    __func__);
    702  1.1  christos 		base->evsel = NULL;
    703  1.1  christos 		event_base_free(base);
    704  1.1  christos 		return NULL;
    705  1.1  christos 	}
    706  1.1  christos 
    707  1.1  christos 	if (evutil_getenv_("EVENT_SHOW_METHOD"))
    708  1.1  christos 		event_msgx("libevent using: %s", base->evsel->name);
    709  1.1  christos 
    710  1.1  christos 	/* allocate a single active event queue */
    711  1.1  christos 	if (event_base_priority_init(base, 1) < 0) {
    712  1.1  christos 		event_base_free(base);
    713  1.1  christos 		return NULL;
    714  1.1  christos 	}
    715  1.1  christos 
    716  1.1  christos 	/* prepare for threading */
    717  1.1  christos 
    718  1.7  christos #if !defined(EVENT__DISABLE_THREAD_SUPPORT) && !defined(EVENT__DISABLE_DEBUG_MODE)
    719  1.7  christos 	event_debug_created_threadable_ctx_ = 1;
    720  1.7  christos #endif
    721  1.7  christos 
    722  1.1  christos #ifndef EVENT__DISABLE_THREAD_SUPPORT
    723  1.1  christos 	if (EVTHREAD_LOCKING_ENABLED() &&
    724  1.1  christos 	    (!cfg || !(cfg->flags & EVENT_BASE_FLAG_NOLOCK))) {
    725  1.1  christos 		int r;
    726  1.1  christos 		EVTHREAD_ALLOC_LOCK(base->th_base_lock, 0);
    727  1.1  christos 		EVTHREAD_ALLOC_COND(base->current_event_cond);
    728  1.1  christos 		r = evthread_make_base_notifiable(base);
    729  1.1  christos 		if (r<0) {
    730  1.1  christos 			event_warnx("%s: Unable to make base notifiable.", __func__);
    731  1.1  christos 			event_base_free(base);
    732  1.1  christos 			return NULL;
    733  1.1  christos 		}
    734  1.1  christos 	}
    735  1.1  christos #endif
    736  1.1  christos 
    737  1.1  christos #ifdef _WIN32
    738  1.1  christos 	if (cfg && (cfg->flags & EVENT_BASE_FLAG_STARTUP_IOCP))
    739  1.1  christos 		event_base_start_iocp_(base, cfg->n_cpus_hint);
    740  1.1  christos #endif
    741  1.1  christos 
    742  1.1  christos 	return (base);
    743  1.1  christos }
    744  1.1  christos 
    745  1.1  christos int
    746  1.1  christos event_base_start_iocp_(struct event_base *base, int n_cpus)
    747  1.1  christos {
    748  1.1  christos #ifdef _WIN32
    749  1.1  christos 	if (base->iocp)
    750  1.1  christos 		return 0;
    751  1.1  christos 	base->iocp = event_iocp_port_launch_(n_cpus);
    752  1.1  christos 	if (!base->iocp) {
    753  1.1  christos 		event_warnx("%s: Couldn't launch IOCP", __func__);
    754  1.1  christos 		return -1;
    755  1.1  christos 	}
    756  1.1  christos 	return 0;
    757  1.1  christos #else
    758  1.1  christos 	return -1;
    759  1.1  christos #endif
    760  1.1  christos }
    761  1.1  christos 
    762  1.1  christos void
    763  1.1  christos event_base_stop_iocp_(struct event_base *base)
    764  1.1  christos {
    765  1.1  christos #ifdef _WIN32
    766  1.1  christos 	int rv;
    767  1.1  christos 
    768  1.1  christos 	if (!base->iocp)
    769  1.1  christos 		return;
    770  1.1  christos 	rv = event_iocp_shutdown_(base->iocp, -1);
    771  1.1  christos 	EVUTIL_ASSERT(rv >= 0);
    772  1.1  christos 	base->iocp = NULL;
    773  1.1  christos #endif
    774  1.1  christos }
    775  1.1  christos 
    776  1.2  christos static int
    777  1.2  christos event_base_cancel_single_callback_(struct event_base *base,
    778  1.2  christos     struct event_callback *evcb,
    779  1.2  christos     int run_finalizers)
    780  1.2  christos {
    781  1.2  christos 	int result = 0;
    782  1.2  christos 
    783  1.2  christos 	if (evcb->evcb_flags & EVLIST_INIT) {
    784  1.2  christos 		struct event *ev = event_callback_to_event(evcb);
    785  1.2  christos 		if (!(ev->ev_flags & EVLIST_INTERNAL)) {
    786  1.2  christos 			event_del_(ev, EVENT_DEL_EVEN_IF_FINALIZING);
    787  1.2  christos 			result = 1;
    788  1.2  christos 		}
    789  1.2  christos 	} else {
    790  1.2  christos 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    791  1.2  christos 		event_callback_cancel_nolock_(base, evcb, 1);
    792  1.2  christos 		EVBASE_RELEASE_LOCK(base, th_base_lock);
    793  1.2  christos 		result = 1;
    794  1.2  christos 	}
    795  1.2  christos 
    796  1.2  christos 	if (run_finalizers && (evcb->evcb_flags & EVLIST_FINALIZING)) {
    797  1.2  christos 		switch (evcb->evcb_closure) {
    798  1.2  christos 		case EV_CLOSURE_EVENT_FINALIZE:
    799  1.2  christos 		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
    800  1.2  christos 			struct event *ev = event_callback_to_event(evcb);
    801  1.2  christos 			ev->ev_evcallback.evcb_cb_union.evcb_evfinalize(ev, ev->ev_arg);
    802  1.2  christos 			if (evcb->evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
    803  1.2  christos 				mm_free(ev);
    804  1.2  christos 			break;
    805  1.2  christos 		}
    806  1.2  christos 		case EV_CLOSURE_CB_FINALIZE:
    807  1.2  christos 			evcb->evcb_cb_union.evcb_cbfinalize(evcb, evcb->evcb_arg);
    808  1.2  christos 			break;
    809  1.2  christos 		default:
    810  1.2  christos 			break;
    811  1.2  christos 		}
    812  1.2  christos 	}
    813  1.2  christos 	return result;
    814  1.2  christos }
    815  1.2  christos 
    816  1.7  christos static int event_base_free_queues_(struct event_base *base, int run_finalizers)
    817  1.7  christos {
    818  1.7  christos 	int deleted = 0, i;
    819  1.7  christos 
    820  1.7  christos 	for (i = 0; i < base->nactivequeues; ++i) {
    821  1.7  christos 		struct event_callback *evcb, *next;
    822  1.7  christos 		for (evcb = TAILQ_FIRST(&base->activequeues[i]); evcb; ) {
    823  1.7  christos 			next = TAILQ_NEXT(evcb, evcb_active_next);
    824  1.7  christos 			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
    825  1.7  christos 			evcb = next;
    826  1.7  christos 		}
    827  1.7  christos 	}
    828  1.7  christos 
    829  1.7  christos 	{
    830  1.7  christos 		struct event_callback *evcb;
    831  1.7  christos 		while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
    832  1.7  christos 			deleted += event_base_cancel_single_callback_(base, evcb, run_finalizers);
    833  1.7  christos 		}
    834  1.7  christos 	}
    835  1.7  christos 
    836  1.7  christos 	return deleted;
    837  1.7  christos }
    838  1.7  christos 
    839  1.2  christos static void
    840  1.2  christos event_base_free_(struct event_base *base, int run_finalizers)
    841  1.1  christos {
    842  1.1  christos 	int i, n_deleted=0;
    843  1.1  christos 	struct event *ev;
    844  1.1  christos 	/* XXXX grab the lock? If there is contention when one thread frees
    845  1.1  christos 	 * the base, then the contending thread will be very sad soon. */
    846  1.1  christos 
    847  1.1  christos 	/* event_base_free(NULL) is how to free the current_base if we
    848  1.1  christos 	 * made it with event_init and forgot to hold a reference to it. */
    849  1.1  christos 	if (base == NULL && current_base)
    850  1.1  christos 		base = current_base;
    851  1.1  christos 	/* Don't actually free NULL. */
    852  1.1  christos 	if (base == NULL) {
    853  1.1  christos 		event_warnx("%s: no base to free", __func__);
    854  1.1  christos 		return;
    855  1.1  christos 	}
    856  1.1  christos 	/* XXX(niels) - check for internal events first */
    857  1.1  christos 
    858  1.1  christos #ifdef _WIN32
    859  1.1  christos 	event_base_stop_iocp_(base);
    860  1.1  christos #endif
    861  1.1  christos 
    862  1.1  christos 	/* threading fds if we have them */
    863  1.1  christos 	if (base->th_notify_fd[0] != -1) {
    864  1.1  christos 		event_del(&base->th_notify);
    865  1.1  christos 		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
    866  1.1  christos 		if (base->th_notify_fd[1] != -1)
    867  1.1  christos 			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
    868  1.1  christos 		base->th_notify_fd[0] = -1;
    869  1.1  christos 		base->th_notify_fd[1] = -1;
    870  1.1  christos 		event_debug_unassign(&base->th_notify);
    871  1.1  christos 	}
    872  1.1  christos 
    873  1.1  christos 	/* Delete all non-internal events. */
    874  1.1  christos 	evmap_delete_all_(base);
    875  1.1  christos 
    876  1.1  christos 	while ((ev = min_heap_top_(&base->timeheap)) != NULL) {
    877  1.1  christos 		event_del(ev);
    878  1.1  christos 		++n_deleted;
    879  1.1  christos 	}
    880  1.1  christos 	for (i = 0; i < base->n_common_timeouts; ++i) {
    881  1.1  christos 		struct common_timeout_list *ctl =
    882  1.1  christos 		    base->common_timeout_queues[i];
    883  1.1  christos 		event_del(&ctl->timeout_event); /* Internal; doesn't count */
    884  1.1  christos 		event_debug_unassign(&ctl->timeout_event);
    885  1.1  christos 		for (ev = TAILQ_FIRST(&ctl->events); ev; ) {
    886  1.1  christos 			struct event *next = TAILQ_NEXT(ev,
    887  1.1  christos 			    ev_timeout_pos.ev_next_with_common_timeout);
    888  1.1  christos 			if (!(ev->ev_flags & EVLIST_INTERNAL)) {
    889  1.1  christos 				event_del(ev);
    890  1.1  christos 				++n_deleted;
    891  1.1  christos 			}
    892  1.1  christos 			ev = next;
    893  1.1  christos 		}
    894  1.1  christos 		mm_free(ctl);
    895  1.1  christos 	}
    896  1.1  christos 	if (base->common_timeout_queues)
    897  1.1  christos 		mm_free(base->common_timeout_queues);
    898  1.1  christos 
    899  1.7  christos 	for (;;) {
    900  1.7  christos 		/* For finalizers we can register yet another finalizer out from
    901  1.7  christos 		 * finalizer, and iff finalizer will be in active_later_queue we can
    902  1.7  christos 		 * add finalizer to activequeues, and we will have events in
    903  1.7  christos 		 * activequeues after this function returns, which is not what we want
    904  1.7  christos 		 * (we even have an assertion for this).
    905  1.7  christos 		 *
    906  1.7  christos 		 * A simple case is bufferevent with underlying (i.e. filters).
    907  1.7  christos 		 */
    908  1.7  christos 		int i = event_base_free_queues_(base, run_finalizers);
    909  1.7  christos 		event_debug(("%s: %d events freed", __func__, i));
    910  1.7  christos 		if (!i) {
    911  1.7  christos 			break;
    912  1.1  christos 		}
    913  1.7  christos 		n_deleted += i;
    914  1.1  christos 	}
    915  1.1  christos 
    916  1.1  christos 	if (n_deleted)
    917  1.1  christos 		event_debug(("%s: %d events were still set in base",
    918  1.1  christos 			__func__, n_deleted));
    919  1.1  christos 
    920  1.1  christos 	while (LIST_FIRST(&base->once_events)) {
    921  1.1  christos 		struct event_once *eonce = LIST_FIRST(&base->once_events);
    922  1.1  christos 		LIST_REMOVE(eonce, next_once);
    923  1.1  christos 		mm_free(eonce);
    924  1.1  christos 	}
    925  1.1  christos 
    926  1.1  christos 	if (base->evsel != NULL && base->evsel->dealloc != NULL)
    927  1.1  christos 		base->evsel->dealloc(base);
    928  1.1  christos 
    929  1.1  christos 	for (i = 0; i < base->nactivequeues; ++i)
    930  1.1  christos 		EVUTIL_ASSERT(TAILQ_EMPTY(&base->activequeues[i]));
    931  1.1  christos 
    932  1.1  christos 	EVUTIL_ASSERT(min_heap_empty_(&base->timeheap));
    933  1.1  christos 	min_heap_dtor_(&base->timeheap);
    934  1.1  christos 
    935  1.1  christos 	mm_free(base->activequeues);
    936  1.1  christos 
    937  1.1  christos 	evmap_io_clear_(&base->io);
    938  1.1  christos 	evmap_signal_clear_(&base->sigmap);
    939  1.1  christos 	event_changelist_freemem_(&base->changelist);
    940  1.1  christos 
    941  1.1  christos 	EVTHREAD_FREE_LOCK(base->th_base_lock, 0);
    942  1.1  christos 	EVTHREAD_FREE_COND(base->current_event_cond);
    943  1.1  christos 
    944  1.2  christos 	/* If we're freeing current_base, there won't be a current_base. */
    945  1.2  christos 	if (base == current_base)
    946  1.2  christos 		current_base = NULL;
    947  1.1  christos 	mm_free(base);
    948  1.1  christos }
    949  1.1  christos 
    950  1.2  christos void
    951  1.2  christos event_base_free_nofinalize(struct event_base *base)
    952  1.2  christos {
    953  1.2  christos 	event_base_free_(base, 0);
    954  1.2  christos }
    955  1.2  christos 
    956  1.2  christos void
    957  1.2  christos event_base_free(struct event_base *base)
    958  1.2  christos {
    959  1.2  christos 	event_base_free_(base, 1);
    960  1.2  christos }
    961  1.2  christos 
    962  1.1  christos /* Fake eventop; used to disable the backend temporarily inside event_reinit
    963  1.1  christos  * so that we can call event_del() on an event without telling the backend.
    964  1.1  christos  */
    965  1.1  christos static int
    966  1.1  christos nil_backend_del(struct event_base *b, evutil_socket_t fd, short old,
    967  1.1  christos     short events, void *fdinfo)
    968  1.1  christos {
    969  1.1  christos 	return 0;
    970  1.1  christos }
    971  1.1  christos const struct eventop nil_eventop = {
    972  1.1  christos 	"nil",
    973  1.1  christos 	NULL, /* init: unused. */
    974  1.1  christos 	NULL, /* add: unused. */
    975  1.1  christos 	nil_backend_del, /* del: used, so needs to be killed. */
    976  1.1  christos 	NULL, /* dispatch: unused. */
    977  1.1  christos 	NULL, /* dealloc: unused. */
    978  1.1  christos 	0, 0, 0
    979  1.1  christos };
    980  1.1  christos 
    981  1.1  christos /* reinitialize the event base after a fork */
    982  1.1  christos int
    983  1.1  christos event_reinit(struct event_base *base)
    984  1.1  christos {
    985  1.1  christos 	const struct eventop *evsel;
    986  1.1  christos 	int res = 0;
    987  1.1  christos 	int was_notifiable = 0;
    988  1.1  christos 	int had_signal_added = 0;
    989  1.1  christos 
    990  1.1  christos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
    991  1.1  christos 
    992  1.1  christos 	evsel = base->evsel;
    993  1.1  christos 
    994  1.1  christos 	/* check if this event mechanism requires reinit on the backend */
    995  1.1  christos 	if (evsel->need_reinit) {
    996  1.1  christos 		/* We're going to call event_del() on our notify events (the
    997  1.1  christos 		 * ones that tell about signals and wakeup events).  But we
    998  1.1  christos 		 * don't actually want to tell the backend to change its
    999  1.1  christos 		 * state, since it might still share some resource (a kqueue,
   1000  1.1  christos 		 * an epoll fd) with the parent process, and we don't want to
   1001  1.1  christos 		 * delete the fds from _that_ backend, we temporarily stub out
   1002  1.1  christos 		 * the evsel with a replacement.
   1003  1.1  christos 		 */
   1004  1.1  christos 		base->evsel = &nil_eventop;
   1005  1.1  christos 	}
   1006  1.1  christos 
   1007  1.1  christos 	/* We need to re-create a new signal-notification fd and a new
   1008  1.1  christos 	 * thread-notification fd.  Otherwise, we'll still share those with
   1009  1.1  christos 	 * the parent process, which would make any notification sent to them
   1010  1.1  christos 	 * get received by one or both of the event loops, more or less at
   1011  1.1  christos 	 * random.
   1012  1.1  christos 	 */
   1013  1.1  christos 	if (base->sig.ev_signal_added) {
   1014  1.2  christos 		event_del_nolock_(&base->sig.ev_signal, EVENT_DEL_AUTOBLOCK);
   1015  1.1  christos 		event_debug_unassign(&base->sig.ev_signal);
   1016  1.1  christos 		memset(&base->sig.ev_signal, 0, sizeof(base->sig.ev_signal));
   1017  1.1  christos 		had_signal_added = 1;
   1018  1.1  christos 		base->sig.ev_signal_added = 0;
   1019  1.1  christos 	}
   1020  1.7  christos 	if (base->sig.ev_signal_pair[0] != -1)
   1021  1.7  christos 		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[0]);
   1022  1.7  christos 	if (base->sig.ev_signal_pair[1] != -1)
   1023  1.7  christos 		EVUTIL_CLOSESOCKET(base->sig.ev_signal_pair[1]);
   1024  1.1  christos 	if (base->th_notify_fn != NULL) {
   1025  1.1  christos 		was_notifiable = 1;
   1026  1.1  christos 		base->th_notify_fn = NULL;
   1027  1.1  christos 	}
   1028  1.1  christos 	if (base->th_notify_fd[0] != -1) {
   1029  1.2  christos 		event_del_nolock_(&base->th_notify, EVENT_DEL_AUTOBLOCK);
   1030  1.1  christos 		EVUTIL_CLOSESOCKET(base->th_notify_fd[0]);
   1031  1.1  christos 		if (base->th_notify_fd[1] != -1)
   1032  1.1  christos 			EVUTIL_CLOSESOCKET(base->th_notify_fd[1]);
   1033  1.1  christos 		base->th_notify_fd[0] = -1;
   1034  1.1  christos 		base->th_notify_fd[1] = -1;
   1035  1.1  christos 		event_debug_unassign(&base->th_notify);
   1036  1.1  christos 	}
   1037  1.1  christos 
   1038  1.1  christos 	/* Replace the original evsel. */
   1039  1.1  christos         base->evsel = evsel;
   1040  1.1  christos 
   1041  1.1  christos 	if (evsel->need_reinit) {
   1042  1.1  christos 		/* Reconstruct the backend through brute-force, so that we do
   1043  1.1  christos 		 * not share any structures with the parent process. For some
   1044  1.1  christos 		 * backends, this is necessary: epoll and kqueue, for
   1045  1.1  christos 		 * instance, have events associated with a kernel
   1046  1.1  christos 		 * structure. If didn't reinitialize, we'd share that
   1047  1.1  christos 		 * structure with the parent process, and any changes made by
   1048  1.1  christos 		 * the parent would affect our backend's behavior (and vice
   1049  1.1  christos 		 * versa).
   1050  1.1  christos 		 */
   1051  1.1  christos 		if (base->evsel->dealloc != NULL)
   1052  1.1  christos 			base->evsel->dealloc(base);
   1053  1.1  christos 		base->evbase = evsel->init(base);
   1054  1.1  christos 		if (base->evbase == NULL) {
   1055  1.1  christos 			event_errx(1,
   1056  1.1  christos 			   "%s: could not reinitialize event mechanism",
   1057  1.1  christos 			   __func__);
   1058  1.1  christos 			res = -1;
   1059  1.1  christos 			goto done;
   1060  1.1  christos 		}
   1061  1.1  christos 
   1062  1.1  christos 		/* Empty out the changelist (if any): we are starting from a
   1063  1.1  christos 		 * blank slate. */
   1064  1.1  christos 		event_changelist_freemem_(&base->changelist);
   1065  1.1  christos 
   1066  1.1  christos 		/* Tell the event maps to re-inform the backend about all
   1067  1.1  christos 		 * pending events. This will make the signal notification
   1068  1.1  christos 		 * event get re-created if necessary. */
   1069  1.1  christos 		if (evmap_reinit_(base) < 0)
   1070  1.1  christos 			res = -1;
   1071  1.1  christos 	} else {
   1072  1.7  christos 		res = evsig_init_(base);
   1073  1.7  christos 		if (res == 0 && had_signal_added) {
   1074  1.7  christos 			res = event_add_nolock_(&base->sig.ev_signal, NULL, 0);
   1075  1.7  christos 			if (res == 0)
   1076  1.7  christos 				base->sig.ev_signal_added = 1;
   1077  1.7  christos 		}
   1078  1.1  christos 	}
   1079  1.1  christos 
   1080  1.1  christos 	/* If we were notifiable before, and nothing just exploded, become
   1081  1.1  christos 	 * notifiable again. */
   1082  1.1  christos 	if (was_notifiable && res == 0)
   1083  1.1  christos 		res = evthread_make_base_notifiable_nolock_(base);
   1084  1.1  christos 
   1085  1.1  christos done:
   1086  1.1  christos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   1087  1.1  christos 	return (res);
   1088  1.1  christos }
   1089  1.1  christos 
   1090  1.3  christos /* Get the monotonic time for this event_base' timer */
   1091  1.3  christos int
   1092  1.3  christos event_gettime_monotonic(struct event_base *base, struct timeval *tv)
   1093  1.3  christos {
   1094  1.3  christos   int rv = -1;
   1095  1.3  christos 
   1096  1.3  christos   if (base && tv) {
   1097  1.3  christos     EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   1098  1.3  christos     rv = evutil_gettime_monotonic_(&(base->monotonic_timer), tv);
   1099  1.3  christos     EVBASE_RELEASE_LOCK(base, th_base_lock);
   1100  1.3  christos   }
   1101  1.3  christos 
   1102  1.3  christos   return rv;
   1103  1.3  christos }
   1104  1.3  christos 
   1105  1.1  christos const char **
   1106  1.1  christos event_get_supported_methods(void)
   1107  1.1  christos {
   1108  1.1  christos 	static const char **methods = NULL;
   1109  1.1  christos 	const struct eventop **method;
   1110  1.1  christos 	const char **tmp;
   1111  1.1  christos 	int i = 0, k;
   1112  1.1  christos 
   1113  1.1  christos 	/* count all methods */
   1114  1.1  christos 	for (method = &eventops[0]; *method != NULL; ++method) {
   1115  1.1  christos 		++i;
   1116  1.1  christos 	}
   1117  1.1  christos 
   1118  1.1  christos 	/* allocate one more than we need for the NULL pointer */
   1119  1.1  christos 	tmp = mm_calloc((i + 1), sizeof(char *));
   1120  1.1  christos 	if (tmp == NULL)
   1121  1.1  christos 		return (NULL);
   1122  1.1  christos 
   1123  1.1  christos 	/* populate the array with the supported methods */
   1124  1.1  christos 	for (k = 0, i = 0; eventops[k] != NULL; ++k) {
   1125  1.1  christos 		tmp[i++] = eventops[k]->name;
   1126  1.1  christos 	}
   1127  1.1  christos 	tmp[i] = NULL;
   1128  1.1  christos 
   1129  1.1  christos 	if (methods != NULL)
   1130  1.1  christos 		mm_free((char**)methods);
   1131  1.1  christos 
   1132  1.1  christos 	methods = tmp;
   1133  1.1  christos 
   1134  1.1  christos 	return (methods);
   1135  1.1  christos }
   1136  1.1  christos 
   1137  1.1  christos struct event_config *
   1138  1.1  christos event_config_new(void)
   1139  1.1  christos {
   1140  1.1  christos 	struct event_config *cfg = mm_calloc(1, sizeof(*cfg));
   1141  1.1  christos 
   1142  1.1  christos 	if (cfg == NULL)
   1143  1.1  christos 		return (NULL);
   1144  1.1  christos 
   1145  1.1  christos 	TAILQ_INIT(&cfg->entries);
   1146  1.1  christos 	cfg->max_dispatch_interval.tv_sec = -1;
   1147  1.1  christos 	cfg->max_dispatch_callbacks = INT_MAX;
   1148  1.1  christos 	cfg->limit_callbacks_after_prio = 1;
   1149  1.1  christos 
   1150  1.1  christos 	return (cfg);
   1151  1.1  christos }
   1152  1.1  christos 
   1153  1.1  christos static void
   1154  1.1  christos event_config_entry_free(struct event_config_entry *entry)
   1155  1.1  christos {
   1156  1.1  christos 	if (entry->avoid_method != NULL)
   1157  1.1  christos 		mm_free((char *)entry->avoid_method);
   1158  1.1  christos 	mm_free(entry);
   1159  1.1  christos }
   1160  1.1  christos 
   1161  1.1  christos void
   1162  1.1  christos event_config_free(struct event_config *cfg)
   1163  1.1  christos {
   1164  1.1  christos 	struct event_config_entry *entry;
   1165  1.1  christos 
   1166  1.1  christos 	while ((entry = TAILQ_FIRST(&cfg->entries)) != NULL) {
   1167  1.1  christos 		TAILQ_REMOVE(&cfg->entries, entry, next);
   1168  1.1  christos 		event_config_entry_free(entry);
   1169  1.1  christos 	}
   1170  1.1  christos 	mm_free(cfg);
   1171  1.1  christos }
   1172  1.1  christos 
   1173  1.1  christos int
   1174  1.1  christos event_config_set_flag(struct event_config *cfg, int flag)
   1175  1.1  christos {
   1176  1.1  christos 	if (!cfg)
   1177  1.1  christos 		return -1;
   1178  1.1  christos 	cfg->flags |= flag;
   1179  1.1  christos 	return 0;
   1180  1.1  christos }
   1181  1.1  christos 
   1182  1.1  christos int
   1183  1.1  christos event_config_avoid_method(struct event_config *cfg, const char *method)
   1184  1.1  christos {
   1185  1.1  christos 	struct event_config_entry *entry = mm_malloc(sizeof(*entry));
   1186  1.1  christos 	if (entry == NULL)
   1187  1.1  christos 		return (-1);
   1188  1.1  christos 
   1189  1.1  christos 	if ((entry->avoid_method = mm_strdup(method)) == NULL) {
   1190  1.1  christos 		mm_free(entry);
   1191  1.1  christos 		return (-1);
   1192  1.1  christos 	}
   1193  1.1  christos 
   1194  1.1  christos 	TAILQ_INSERT_TAIL(&cfg->entries, entry, next);
   1195  1.1  christos 
   1196  1.1  christos 	return (0);
   1197  1.1  christos }
   1198  1.1  christos 
   1199  1.1  christos int
   1200  1.1  christos event_config_require_features(struct event_config *cfg,
   1201  1.1  christos     int features)
   1202  1.1  christos {
   1203  1.1  christos 	if (!cfg)
   1204  1.1  christos 		return (-1);
   1205  1.1  christos 	cfg->require_features = features;
   1206  1.1  christos 	return (0);
   1207  1.1  christos }
   1208  1.1  christos 
   1209  1.1  christos int
   1210  1.1  christos event_config_set_num_cpus_hint(struct event_config *cfg, int cpus)
   1211  1.1  christos {
   1212  1.1  christos 	if (!cfg)
   1213  1.1  christos 		return (-1);
   1214  1.1  christos 	cfg->n_cpus_hint = cpus;
   1215  1.1  christos 	return (0);
   1216  1.1  christos }
   1217  1.1  christos 
   1218  1.1  christos int
   1219  1.1  christos event_config_set_max_dispatch_interval(struct event_config *cfg,
   1220  1.1  christos     const struct timeval *max_interval, int max_callbacks, int min_priority)
   1221  1.1  christos {
   1222  1.1  christos 	if (max_interval)
   1223  1.1  christos 		memcpy(&cfg->max_dispatch_interval, max_interval,
   1224  1.1  christos 		    sizeof(struct timeval));
   1225  1.1  christos 	else
   1226  1.1  christos 		cfg->max_dispatch_interval.tv_sec = -1;
   1227  1.1  christos 	cfg->max_dispatch_callbacks =
   1228  1.1  christos 	    max_callbacks >= 0 ? max_callbacks : INT_MAX;
   1229  1.1  christos 	if (min_priority < 0)
   1230  1.1  christos 		min_priority = 0;
   1231  1.1  christos 	cfg->limit_callbacks_after_prio = min_priority;
   1232  1.1  christos 	return (0);
   1233  1.1  christos }
   1234  1.1  christos 
   1235  1.1  christos int
   1236  1.1  christos event_priority_init(int npriorities)
   1237  1.1  christos {
   1238  1.1  christos 	return event_base_priority_init(current_base, npriorities);
   1239  1.1  christos }
   1240  1.1  christos 
   1241  1.1  christos int
   1242  1.1  christos event_base_priority_init(struct event_base *base, int npriorities)
   1243  1.1  christos {
   1244  1.1  christos 	int i, r;
   1245  1.1  christos 	r = -1;
   1246  1.1  christos 
   1247  1.1  christos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   1248  1.1  christos 
   1249  1.1  christos 	if (N_ACTIVE_CALLBACKS(base) || npriorities < 1
   1250  1.1  christos 	    || npriorities >= EVENT_MAX_PRIORITIES)
   1251  1.1  christos 		goto err;
   1252  1.1  christos 
   1253  1.1  christos 	if (npriorities == base->nactivequeues)
   1254  1.1  christos 		goto ok;
   1255  1.1  christos 
   1256  1.1  christos 	if (base->nactivequeues) {
   1257  1.1  christos 		mm_free(base->activequeues);
   1258  1.1  christos 		base->nactivequeues = 0;
   1259  1.1  christos 	}
   1260  1.1  christos 
   1261  1.1  christos 	/* Allocate our priority queues */
   1262  1.1  christos 	base->activequeues = (struct evcallback_list *)
   1263  1.1  christos 	  mm_calloc(npriorities, sizeof(struct evcallback_list));
   1264  1.1  christos 	if (base->activequeues == NULL) {
   1265  1.1  christos 		event_warn("%s: calloc", __func__);
   1266  1.1  christos 		goto err;
   1267  1.1  christos 	}
   1268  1.1  christos 	base->nactivequeues = npriorities;
   1269  1.1  christos 
   1270  1.1  christos 	for (i = 0; i < base->nactivequeues; ++i) {
   1271  1.1  christos 		TAILQ_INIT(&base->activequeues[i]);
   1272  1.1  christos 	}
   1273  1.1  christos 
   1274  1.1  christos ok:
   1275  1.1  christos 	r = 0;
   1276  1.1  christos err:
   1277  1.1  christos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   1278  1.1  christos 	return (r);
   1279  1.1  christos }
   1280  1.1  christos 
   1281  1.1  christos int
   1282  1.1  christos event_base_get_npriorities(struct event_base *base)
   1283  1.1  christos {
   1284  1.1  christos 
   1285  1.1  christos 	int n;
   1286  1.1  christos 	if (base == NULL)
   1287  1.1  christos 		base = current_base;
   1288  1.1  christos 
   1289  1.1  christos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   1290  1.1  christos 	n = base->nactivequeues;
   1291  1.1  christos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   1292  1.1  christos 	return (n);
   1293  1.1  christos }
   1294  1.1  christos 
   1295  1.2  christos int
   1296  1.2  christos event_base_get_num_events(struct event_base *base, unsigned int type)
   1297  1.2  christos {
   1298  1.2  christos 	int r = 0;
   1299  1.2  christos 
   1300  1.2  christos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   1301  1.2  christos 
   1302  1.2  christos 	if (type & EVENT_BASE_COUNT_ACTIVE)
   1303  1.2  christos 		r += base->event_count_active;
   1304  1.2  christos 
   1305  1.2  christos 	if (type & EVENT_BASE_COUNT_VIRTUAL)
   1306  1.2  christos 		r += base->virtual_event_count;
   1307  1.2  christos 
   1308  1.2  christos 	if (type & EVENT_BASE_COUNT_ADDED)
   1309  1.2  christos 		r += base->event_count;
   1310  1.2  christos 
   1311  1.2  christos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   1312  1.2  christos 
   1313  1.2  christos 	return r;
   1314  1.2  christos }
   1315  1.2  christos 
   1316  1.2  christos int
   1317  1.2  christos event_base_get_max_events(struct event_base *base, unsigned int type, int clear)
   1318  1.2  christos {
   1319  1.2  christos 	int r = 0;
   1320  1.2  christos 
   1321  1.2  christos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   1322  1.2  christos 
   1323  1.2  christos 	if (type & EVENT_BASE_COUNT_ACTIVE) {
   1324  1.2  christos 		r += base->event_count_active_max;
   1325  1.2  christos 		if (clear)
   1326  1.2  christos 			base->event_count_active_max = 0;
   1327  1.2  christos 	}
   1328  1.2  christos 
   1329  1.2  christos 	if (type & EVENT_BASE_COUNT_VIRTUAL) {
   1330  1.2  christos 		r += base->virtual_event_count_max;
   1331  1.2  christos 		if (clear)
   1332  1.2  christos 			base->virtual_event_count_max = 0;
   1333  1.2  christos 	}
   1334  1.2  christos 
   1335  1.2  christos 	if (type & EVENT_BASE_COUNT_ADDED) {
   1336  1.2  christos 		r += base->event_count_max;
   1337  1.2  christos 		if (clear)
   1338  1.2  christos 			base->event_count_max = 0;
   1339  1.2  christos 	}
   1340  1.2  christos 
   1341  1.2  christos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   1342  1.2  christos 
   1343  1.2  christos 	return r;
   1344  1.2  christos }
   1345  1.2  christos 
   1346  1.1  christos /* Returns true iff we're currently watching any events. */
   1347  1.1  christos static int
   1348  1.1  christos event_haveevents(struct event_base *base)
   1349  1.1  christos {
   1350  1.1  christos 	/* Caller must hold th_base_lock */
   1351  1.1  christos 	return (base->virtual_event_count > 0 || base->event_count > 0);
   1352  1.1  christos }
   1353  1.1  christos 
/* "closure" function called when processing active signal events.
 *
 * A signal event may have fired several times since the loop last ran;
 * ev_ncalls records how many deliveries are pending, and the callback is
 * invoked once per delivery with the lock dropped.  ev_pncalls lets
 * event_del() (running concurrently or from inside the callback) zero
 * our local counter so the loop stops early. */
static inline void
event_signal_closure(struct event_base *base, struct event *ev)
{
	short ncalls;
	int should_break;

	/* Allows deletes to work: publish a pointer to our local counter
	 * so a delete of this event can cancel the remaining calls. */
	ncalls = ev->ev_ncalls;
	if (ncalls != 0)
		ev->ev_pncalls = &ncalls;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
	while (ncalls) {
		ncalls--;
		ev->ev_ncalls = ncalls;
		/* Clear ev_pncalls before the last call: once the callback
		 * runs, "ncalls" (and possibly "ev") may no longer be
		 * safe to reference through that pointer. */
		if (ncalls == 0)
			ev->ev_pncalls = NULL;
		(*ev->ev_callback)(ev->ev_fd, ev->ev_res, ev->ev_arg);

		/* Re-take the lock only to sample the break flag. */
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		should_break = base->event_break;
		EVBASE_RELEASE_LOCK(base, th_base_lock);

		if (should_break) {
			/* Bailing out early: un-publish the local counter. */
			if (ncalls != 0)
				ev->ev_pncalls = NULL;
			return;
		}
	}
}
   1384  1.1  christos 
   1385  1.1  christos /* Common timeouts are special timeouts that are handled as queues rather than
   1386  1.1  christos  * in the minheap.  This is more efficient than the minheap if we happen to
   1387  1.1  christos  * know that we're going to get several thousands of timeout events all with
   1388  1.1  christos  * the same timeout value.
   1389  1.1  christos  *
   1390  1.1  christos  * Since all our timeout handling code assumes timevals can be copied,
   1391  1.1  christos  * assigned, etc, we can't use "magic pointer" to encode these common
   1392  1.1  christos  * timeouts.  Searching through a list to see if every timeout is common could
   1393  1.1  christos  * also get inefficient.  Instead, we take advantage of the fact that tv_usec
   1394  1.1  christos  * is 32 bits long, but only uses 20 of those bits (since it can never be over
 * 999999.)  We use the top bits to encode 4 bits of magic number, and 8 bits
 * of index into the event_base's array of common timeouts.
   1397  1.1  christos  */
   1398  1.1  christos 
/* Low 20 bits of tv_usec: the real microsecond value (max 999999). */
#define MICROSECONDS_MASK       COMMON_TIMEOUT_MICROSECONDS_MASK
/* Bits 20..27: index into event_base's common_timeout_queues array. */
#define COMMON_TIMEOUT_IDX_MASK 0x0ff00000
#define COMMON_TIMEOUT_IDX_SHIFT 20
/* Top 4 bits: magic tag marking tv_usec as a common-timeout encoding. */
#define COMMON_TIMEOUT_MASK     0xf0000000
#define COMMON_TIMEOUT_MAGIC    0x50000000

/* Extract the common-timeout queue index encoded in tv->tv_usec. */
#define COMMON_TIMEOUT_IDX(tv) \
	(((tv)->tv_usec & COMMON_TIMEOUT_IDX_MASK)>>COMMON_TIMEOUT_IDX_SHIFT)
   1407  1.1  christos 
   1408  1.1  christos /** Return true iff if 'tv' is a common timeout in 'base' */
   1409  1.1  christos static inline int
   1410  1.1  christos is_common_timeout(const struct timeval *tv,
   1411  1.1  christos     const struct event_base *base)
   1412  1.1  christos {
   1413  1.1  christos 	int idx;
   1414  1.1  christos 	if ((tv->tv_usec & COMMON_TIMEOUT_MASK) != COMMON_TIMEOUT_MAGIC)
   1415  1.1  christos 		return 0;
   1416  1.1  christos 	idx = COMMON_TIMEOUT_IDX(tv);
   1417  1.1  christos 	return idx < base->n_common_timeouts;
   1418  1.1  christos }
   1419  1.1  christos 
   1420  1.1  christos /* True iff tv1 and tv2 have the same common-timeout index, or if neither
   1421  1.1  christos  * one is a common timeout. */
   1422  1.1  christos static inline int
   1423  1.1  christos is_same_common_timeout(const struct timeval *tv1, const struct timeval *tv2)
   1424  1.1  christos {
   1425  1.1  christos 	return (tv1->tv_usec & ~MICROSECONDS_MASK) ==
   1426  1.1  christos 	    (tv2->tv_usec & ~MICROSECONDS_MASK);
   1427  1.1  christos }
   1428  1.1  christos 
   1429  1.1  christos /** Requires that 'tv' is a common timeout.  Return the corresponding
   1430  1.1  christos  * common_timeout_list. */
   1431  1.1  christos static inline struct common_timeout_list *
   1432  1.1  christos get_common_timeout_list(struct event_base *base, const struct timeval *tv)
   1433  1.1  christos {
   1434  1.1  christos 	return base->common_timeout_queues[COMMON_TIMEOUT_IDX(tv)];
   1435  1.1  christos }
   1436  1.1  christos 
#if 0
/* NOTE(review): compiled out.  Sanity check that a common-timeout timeval
 * still matches the duration recorded in its queue; kept for reference. */
static inline int
common_timeout_ok(const struct timeval *tv,
    struct event_base *base)
{
	const struct timeval *expect =
	    &get_common_timeout_list(base, tv)->duration;
	return tv->tv_sec == expect->tv_sec &&
	    tv->tv_usec == expect->tv_usec;
}
#endif
   1448  1.1  christos 
   1449  1.1  christos /* Add the timeout for the first event in given common timeout list to the
   1450  1.1  christos  * event_base's minheap. */
   1451  1.1  christos static void
   1452  1.1  christos common_timeout_schedule(struct common_timeout_list *ctl,
   1453  1.1  christos     const struct timeval *now, struct event *head)
   1454  1.1  christos {
   1455  1.1  christos 	struct timeval timeout = head->ev_timeout;
   1456  1.1  christos 	timeout.tv_usec &= MICROSECONDS_MASK;
   1457  1.1  christos 	event_add_nolock_(&ctl->timeout_event, &timeout, 1);
   1458  1.1  christos }
   1459  1.1  christos 
/* Callback: invoked when the timeout for a common timeout queue triggers.
 * This means that (at least) the first event in that queue should be run,
 * and the timeout should be rescheduled if there are more events. */
static void
common_timeout_callback(evutil_socket_t fd, short what, void *arg)
{
	struct timeval now;
	struct common_timeout_list *ctl = arg;
	struct event_base *base = ctl->base;
	struct event *ev = NULL;
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	gettime(base, &now);
	/* Activate every queued event whose deadline has passed.  The
	 * queue is kept in deadline order, so we can stop at the first
	 * event that is still in the future. */
	while (1) {
		ev = TAILQ_FIRST(&ctl->events);
		/* Compare against "now"; mask tv_usec to strip the
		 * common-timeout encoding bits before comparing. */
		if (!ev || ev->ev_timeout.tv_sec > now.tv_sec ||
		    (ev->ev_timeout.tv_sec == now.tv_sec &&
			(ev->ev_timeout.tv_usec&MICROSECONDS_MASK) > now.tv_usec))
			break;
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
		event_active_nolock_(ev, EV_TIMEOUT, 1);
	}
	/* If events remain, re-arm the queue's timer for the new head. */
	if (ev)
		common_timeout_schedule(ctl, &now, ev);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
   1485  1.1  christos 
   1486  1.1  christos #define MAX_COMMON_TIMEOUTS 256
   1487  1.1  christos 
   1488  1.1  christos const struct timeval *
   1489  1.1  christos event_base_init_common_timeout(struct event_base *base,
   1490  1.1  christos     const struct timeval *duration)
   1491  1.1  christos {
   1492  1.1  christos 	int i;
   1493  1.1  christos 	struct timeval tv;
   1494  1.1  christos 	const struct timeval *result=NULL;
   1495  1.1  christos 	struct common_timeout_list *new_ctl;
   1496  1.1  christos 
   1497  1.1  christos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   1498  1.1  christos 	if (duration->tv_usec > 1000000) {
   1499  1.1  christos 		memcpy(&tv, duration, sizeof(struct timeval));
   1500  1.1  christos 		if (is_common_timeout(duration, base))
   1501  1.1  christos 			tv.tv_usec &= MICROSECONDS_MASK;
   1502  1.1  christos 		tv.tv_sec += tv.tv_usec / 1000000;
   1503  1.1  christos 		tv.tv_usec %= 1000000;
   1504  1.1  christos 		duration = &tv;
   1505  1.1  christos 	}
   1506  1.1  christos 	for (i = 0; i < base->n_common_timeouts; ++i) {
   1507  1.1  christos 		const struct common_timeout_list *ctl =
   1508  1.1  christos 		    base->common_timeout_queues[i];
   1509  1.1  christos 		if (duration->tv_sec == ctl->duration.tv_sec &&
   1510  1.1  christos 		    duration->tv_usec ==
   1511  1.1  christos 		    (ctl->duration.tv_usec & MICROSECONDS_MASK)) {
   1512  1.1  christos 			EVUTIL_ASSERT(is_common_timeout(&ctl->duration, base));
   1513  1.1  christos 			result = &ctl->duration;
   1514  1.1  christos 			goto done;
   1515  1.1  christos 		}
   1516  1.1  christos 	}
   1517  1.1  christos 	if (base->n_common_timeouts == MAX_COMMON_TIMEOUTS) {
   1518  1.1  christos 		event_warnx("%s: Too many common timeouts already in use; "
   1519  1.1  christos 		    "we only support %d per event_base", __func__,
   1520  1.1  christos 		    MAX_COMMON_TIMEOUTS);
   1521  1.1  christos 		goto done;
   1522  1.1  christos 	}
   1523  1.1  christos 	if (base->n_common_timeouts_allocated == base->n_common_timeouts) {
   1524  1.1  christos 		int n = base->n_common_timeouts < 16 ? 16 :
   1525  1.1  christos 		    base->n_common_timeouts*2;
   1526  1.1  christos 		struct common_timeout_list **newqueues =
   1527  1.1  christos 		    mm_realloc(base->common_timeout_queues,
   1528  1.1  christos 			n*sizeof(struct common_timeout_queue *));
   1529  1.1  christos 		if (!newqueues) {
   1530  1.1  christos 			event_warn("%s: realloc",__func__);
   1531  1.1  christos 			goto done;
   1532  1.1  christos 		}
   1533  1.1  christos 		base->n_common_timeouts_allocated = n;
   1534  1.1  christos 		base->common_timeout_queues = newqueues;
   1535  1.1  christos 	}
   1536  1.1  christos 	new_ctl = mm_calloc(1, sizeof(struct common_timeout_list));
   1537  1.1  christos 	if (!new_ctl) {
   1538  1.1  christos 		event_warn("%s: calloc",__func__);
   1539  1.1  christos 		goto done;
   1540  1.1  christos 	}
   1541  1.1  christos 	TAILQ_INIT(&new_ctl->events);
   1542  1.1  christos 	new_ctl->duration.tv_sec = duration->tv_sec;
   1543  1.1  christos 	new_ctl->duration.tv_usec =
   1544  1.1  christos 	    duration->tv_usec | COMMON_TIMEOUT_MAGIC |
   1545  1.1  christos 	    (base->n_common_timeouts << COMMON_TIMEOUT_IDX_SHIFT);
   1546  1.1  christos 	evtimer_assign(&new_ctl->timeout_event, base,
   1547  1.1  christos 	    common_timeout_callback, new_ctl);
   1548  1.1  christos 	new_ctl->timeout_event.ev_flags |= EVLIST_INTERNAL;
   1549  1.1  christos 	event_priority_set(&new_ctl->timeout_event, 0);
   1550  1.1  christos 	new_ctl->base = base;
   1551  1.1  christos 	base->common_timeout_queues[base->n_common_timeouts++] = new_ctl;
   1552  1.1  christos 	result = &new_ctl->duration;
   1553  1.1  christos 
   1554  1.1  christos done:
   1555  1.1  christos 	if (result)
   1556  1.1  christos 		EVUTIL_ASSERT(is_common_timeout(result, base));
   1557  1.1  christos 
   1558  1.1  christos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   1559  1.1  christos 	return result;
   1560  1.1  christos }
   1561  1.1  christos 
/* Closure function invoked when we're activating a persistent event.
 *
 * Re-arms the event's periodic timeout (if any) before running the user
 * callback, and copies every field the callback invocation needs out of
 * *ev before dropping the lock, since the callback may delete or free
 * the event. */
static inline void
event_persist_closure(struct event_base *base, struct event *ev)
{
	void (*evcb_callback)(evutil_socket_t, short, void *);

        // Other fields of *ev that must be stored before executing
        evutil_socket_t evcb_fd;
        short evcb_res;
        void *evcb_arg;

	/* reschedule the persistent event if we have a timeout. */
	if (ev->ev_io_timeout.tv_sec || ev->ev_io_timeout.tv_usec) {
		/* If there was a timeout, we want it to run at an interval of
		 * ev_io_timeout after the last time it was _scheduled_ for,
		 * not ev_io_timeout after _now_.  If it fired for another
		 * reason, though, the timeout ought to start ticking _now_. */
		struct timeval run_at, relative_to, delay, now;
		ev_uint32_t usec_mask = 0;
		EVUTIL_ASSERT(is_same_common_timeout(&ev->ev_timeout,
			&ev->ev_io_timeout));
		gettime(base, &now);
		if (is_common_timeout(&ev->ev_timeout, base)) {
			/* Preserve the encoding bits so the rescheduled
			 * timeout lands back on the same common queue. */
			delay = ev->ev_io_timeout;
			usec_mask = delay.tv_usec & ~MICROSECONDS_MASK;
			delay.tv_usec &= MICROSECONDS_MASK;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
				relative_to.tv_usec &= MICROSECONDS_MASK;
			} else {
				relative_to = now;
			}
		} else {
			delay = ev->ev_io_timeout;
			if (ev->ev_res & EV_TIMEOUT) {
				relative_to = ev->ev_timeout;
			} else {
				relative_to = now;
			}
		}
		evutil_timeradd(&relative_to, &delay, &run_at);
		if (evutil_timercmp(&run_at, &now, <)) {
			/* Looks like we missed at least one invocation due to
			 * a clock jump, not running the event loop for a
			 * while, really slow callbacks, or
			 * something. Reschedule relative to now.
			 */
			evutil_timeradd(&now, &delay, &run_at);
		}
		run_at.tv_usec |= usec_mask;
		event_add_nolock_(ev, &run_at, 1);
	}

	// Save our callback before we release the lock
	evcb_callback = ev->ev_callback;
        evcb_fd = ev->ev_fd;
        evcb_res = ev->ev_res;
        evcb_arg = ev->ev_arg;

	// Release the lock
 	EVBASE_RELEASE_LOCK(base, th_base_lock);

	// Execute the callback
        (evcb_callback)(evcb_fd, evcb_res, evcb_arg);
}
   1627  1.1  christos 
   1628  1.1  christos /*
   1629  1.1  christos   Helper for event_process_active to process all the events in a single queue,
   1630  1.1  christos   releasing the lock as we go.  This function requires that the lock be held
   1631  1.1  christos   when it's invoked.  Returns -1 if we get a signal or an event_break that
   1632  1.1  christos   means we should stop processing any active events now.  Otherwise returns
   1633  1.1  christos   the number of non-internal event_callbacks that we processed.
   1634  1.1  christos */
static int
event_process_active_single_queue(struct event_base *base,
    struct evcallback_list *activeq,
    int max_to_process, const struct timeval *endtime)
{
	struct event_callback *evcb;
	int count = 0;	/* non-internal callbacks run so far */

	EVUTIL_ASSERT(activeq != NULL);

	/* Re-fetch the head each iteration: running a callback may have
	 * added or removed entries. */
	for (evcb = TAILQ_FIRST(activeq); evcb; evcb = TAILQ_FIRST(activeq)) {
		struct event *ev=NULL;
		if (evcb->evcb_flags & EVLIST_INIT) {
			ev = event_callback_to_event(evcb);

			/* Persistent (or finalizing) events stay registered;
			 * one-shot events are fully deleted before the
			 * callback runs. */
			if (ev->ev_events & EV_PERSIST || ev->ev_flags & EVLIST_FINALIZING)
				event_queue_remove_active(base, evcb);
			else
				event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
			event_debug((
			    "event_process_active: event: %p, %s%s%scall %p",
			    ev,
			    ev->ev_res & EV_READ ? "EV_READ " : " ",
			    ev->ev_res & EV_WRITE ? "EV_WRITE " : " ",
			    ev->ev_res & EV_CLOSED ? "EV_CLOSED " : " ",
			    ev->ev_callback));
		} else {
			/* A bare event_callback (no struct event behind it). */
			event_queue_remove_active(base, evcb);
			event_debug(("event_process_active: event_callback %p, "
				"closure %d, call %p",
				evcb, evcb->evcb_closure, evcb->evcb_cb_union.evcb_callback));
		}

		if (!(evcb->evcb_flags & EVLIST_INTERNAL))
			++count;


		/* Record what we're running so event_del() from another
		 * thread can wait for the callback to finish. */
		base->current_event = evcb;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		base->current_event_waiters = 0;
#endif

		/* Each closure type copies what it needs out of evcb/ev,
		 * drops the lock, then invokes the user code. */
		switch (evcb->evcb_closure) {
		case EV_CLOSURE_EVENT_SIGNAL:
			EVUTIL_ASSERT(ev != NULL);
			event_signal_closure(base, ev);
			break;
		case EV_CLOSURE_EVENT_PERSIST:
			EVUTIL_ASSERT(ev != NULL);
			event_persist_closure(base, ev);
			break;
		case EV_CLOSURE_EVENT: {
			void (*evcb_callback)(evutil_socket_t, short, void *);
			short res;
			EVUTIL_ASSERT(ev != NULL);
			evcb_callback = *ev->ev_callback;
			res = ev->ev_res;
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_callback(ev->ev_fd, res, ev->ev_arg);
		}
		break;
		case EV_CLOSURE_CB_SELF: {
			void (*evcb_selfcb)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_selfcb;
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_selfcb(evcb, evcb->evcb_arg);
		}
		break;
		case EV_CLOSURE_EVENT_FINALIZE:
		case EV_CLOSURE_EVENT_FINALIZE_FREE: {
			void (*evcb_evfinalize)(struct event *, void *);
			int evcb_closure = evcb->evcb_closure;
			EVUTIL_ASSERT(ev != NULL);
			base->current_event = NULL;
			evcb_evfinalize = ev->ev_evcallback.evcb_cb_union.evcb_evfinalize;
			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			event_debug_note_teardown_(ev);
			evcb_evfinalize(ev, ev->ev_arg);
			/* FINALIZE_FREE means we own the storage too. */
			if (evcb_closure == EV_CLOSURE_EVENT_FINALIZE_FREE)
				mm_free(ev);
		}
		break;
		case EV_CLOSURE_CB_FINALIZE: {
			void (*evcb_cbfinalize)(struct event_callback *, void *) = evcb->evcb_cb_union.evcb_cbfinalize;
			base->current_event = NULL;
			EVUTIL_ASSERT((evcb->evcb_flags & EVLIST_FINALIZING));
			EVBASE_RELEASE_LOCK(base, th_base_lock);
			evcb_cbfinalize(evcb, evcb->evcb_arg);
		}
		break;
		default:
			EVUTIL_ASSERT(0);
		}

		/* Re-take the lock and wake anyone waiting on the callback
		 * we just finished. */
		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
		base->current_event = NULL;
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		if (base->current_event_waiters) {
			base->current_event_waiters = 0;
			EVTHREAD_COND_BROADCAST(base->current_event_cond);
		}
#endif

		/* Early-exit checks: loopbreak, per-pass callback budget,
		 * and wall-clock deadline. */
		if (base->event_break)
			return -1;
		if (count >= max_to_process)
			return count;
		if (count && endtime) {
			struct timeval now;
			update_time_cache(base);
			gettime(base, &now);
			if (evutil_timercmp(&now, endtime, >=))
				return count;
		}
		if (base->event_continue)
			break;
	}
	return count;
}
   1754  1.1  christos 
   1755  1.1  christos /*
 * Active events are stored in priority queues.  Lower priorities are always
 * processed before higher priorities.  Low priority events can starve high
   1758  1.1  christos  * priority ones.
   1759  1.1  christos  */
   1760  1.1  christos 
static int
event_process_active(struct event_base *base)
{
	/* Caller must hold th_base_lock */
	struct evcallback_list *activeq = NULL;
	int i, c = 0;
	const struct timeval *endtime;
	struct timeval tv;
	/* Per-pass limits configured on the base: max callbacks to run,
	 * and the priority above which those limits start applying. */
	const int maxcb = base->max_dispatch_callbacks;
	const int limit_after_prio = base->limit_callbacks_after_prio;
	if (base->max_dispatch_time.tv_sec >= 0) {
		/* Compute an absolute deadline for this dispatch pass. */
		update_time_cache(base);
		gettime(base, &tv);
		evutil_timeradd(&base->max_dispatch_time, &tv, &tv);
		endtime = &tv;
	} else {
		endtime = NULL;
	}

	/* Scan queues from highest priority (lowest index) down; run the
	 * first non-empty one. */
	for (i = 0; i < base->nactivequeues; ++i) {
		if (TAILQ_FIRST(&base->activequeues[i]) != NULL) {
			base->event_running_priority = i;
			activeq = &base->activequeues[i];
			/* Queues at priority below limit_after_prio run
			 * without budget or deadline limits. */
			if (i < limit_after_prio)
				c = event_process_active_single_queue(base, activeq,
				    INT_MAX, NULL);
			else
				c = event_process_active_single_queue(base, activeq,
				    maxcb, endtime);
			if (c < 0) {
				goto done;
			} else if (c > 0)
				break; /* Processed a real event; do not
					* consider lower-priority events */
			/* If we get here, all of the events we processed
			 * were internal.  Continue. */
		}
	}

done:
	base->event_running_priority = -1;

	return c;
}
   1805  1.1  christos 
   1806  1.1  christos /*
   1807  1.1  christos  * Wait continuously for events.  We exit only if no events are left.
   1808  1.1  christos  */
   1809  1.1  christos 
/** Run the global event loop until no events remain or it is stopped. */
int
event_dispatch(void)
{
	return event_loop(0);
}
   1815  1.1  christos 
/** Run the given base's event loop with no flags until it finishes. */
int
event_base_dispatch(struct event_base *event_base)
{
	return event_base_loop(event_base, 0);
}
   1821  1.1  christos 
   1822  1.1  christos const char *
   1823  1.1  christos event_base_get_method(const struct event_base *base)
   1824  1.1  christos {
   1825  1.1  christos 	EVUTIL_ASSERT(base);
   1826  1.1  christos 	return (base->evsel->name);
   1827  1.1  christos }
   1828  1.1  christos 
   1829  1.1  christos /** Callback: used to implement event_base_loopexit by telling the event_base
   1830  1.1  christos  * that it's time to exit its loop. */
   1831  1.1  christos static void
   1832  1.1  christos event_loopexit_cb(evutil_socket_t fd, short what, void *arg)
   1833  1.1  christos {
   1834  1.1  christos 	struct event_base *base = arg;
   1835  1.1  christos 	base->event_gotterm = 1;
   1836  1.1  christos }
   1837  1.1  christos 
   1838  1.1  christos int
   1839  1.1  christos event_loopexit(const struct timeval *tv)
   1840  1.1  christos {
   1841  1.1  christos 	return (event_once(-1, EV_TIMEOUT, event_loopexit_cb,
   1842  1.1  christos 		    current_base, tv));
   1843  1.1  christos }
   1844  1.1  christos 
   1845  1.1  christos int
   1846  1.1  christos event_base_loopexit(struct event_base *event_base, const struct timeval *tv)
   1847  1.1  christos {
   1848  1.1  christos 	return (event_base_once(event_base, -1, EV_TIMEOUT, event_loopexit_cb,
   1849  1.1  christos 		    event_base, tv));
   1850  1.1  christos }
   1851  1.1  christos 
   1852  1.1  christos int
   1853  1.1  christos event_loopbreak(void)
   1854  1.1  christos {
   1855  1.1  christos 	return (event_base_loopbreak(current_base));
   1856  1.1  christos }
   1857  1.1  christos 
   1858  1.1  christos int
   1859  1.1  christos event_base_loopbreak(struct event_base *event_base)
   1860  1.1  christos {
   1861  1.1  christos 	int r = 0;
   1862  1.1  christos 	if (event_base == NULL)
   1863  1.1  christos 		return (-1);
   1864  1.1  christos 
   1865  1.1  christos 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
   1866  1.1  christos 	event_base->event_break = 1;
   1867  1.1  christos 
   1868  1.1  christos 	if (EVBASE_NEED_NOTIFY(event_base)) {
   1869  1.1  christos 		r = evthread_notify_base(event_base);
   1870  1.1  christos 	} else {
   1871  1.1  christos 		r = (0);
   1872  1.1  christos 	}
   1873  1.1  christos 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
   1874  1.1  christos 	return r;
   1875  1.1  christos }
   1876  1.1  christos 
   1877  1.1  christos int
   1878  1.1  christos event_base_loopcontinue(struct event_base *event_base)
   1879  1.1  christos {
   1880  1.1  christos 	int r = 0;
   1881  1.1  christos 	if (event_base == NULL)
   1882  1.1  christos 		return (-1);
   1883  1.1  christos 
   1884  1.1  christos 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
   1885  1.1  christos 	event_base->event_continue = 1;
   1886  1.1  christos 
   1887  1.1  christos 	if (EVBASE_NEED_NOTIFY(event_base)) {
   1888  1.1  christos 		r = evthread_notify_base(event_base);
   1889  1.1  christos 	} else {
   1890  1.1  christos 		r = (0);
   1891  1.1  christos 	}
   1892  1.1  christos 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
   1893  1.1  christos 	return r;
   1894  1.1  christos }
   1895  1.1  christos 
   1896  1.1  christos int
   1897  1.1  christos event_base_got_break(struct event_base *event_base)
   1898  1.1  christos {
   1899  1.1  christos 	int res;
   1900  1.1  christos 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
   1901  1.1  christos 	res = event_base->event_break;
   1902  1.1  christos 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
   1903  1.1  christos 	return res;
   1904  1.1  christos }
   1905  1.1  christos 
   1906  1.1  christos int
   1907  1.1  christos event_base_got_exit(struct event_base *event_base)
   1908  1.1  christos {
   1909  1.1  christos 	int res;
   1910  1.1  christos 	EVBASE_ACQUIRE_LOCK(event_base, th_base_lock);
   1911  1.1  christos 	res = event_base->event_gotterm;
   1912  1.1  christos 	EVBASE_RELEASE_LOCK(event_base, th_base_lock);
   1913  1.1  christos 	return res;
   1914  1.1  christos }
   1915  1.1  christos 
   1916  1.1  christos /* not thread safe */
   1917  1.1  christos 
   1918  1.1  christos int
   1919  1.1  christos event_loop(int flags)
   1920  1.1  christos {
   1921  1.1  christos 	return event_base_loop(current_base, flags);
   1922  1.1  christos }
   1923  1.1  christos 
/*
 * Core event loop.  Repeatedly: compute the next dispatch timeout, wait
 * for I/O via the backend's dispatch function, process expired timeouts,
 * then run active callbacks — until asked to stop (loopexit/loopbreak),
 * the event set becomes empty, or an EVLOOP_ONCE/EVLOOP_NONBLOCK
 * condition is satisfied.
 *
 * Returns 0 on normal termination, 1 if it exited because no events were
 * registered, and -1 on error (reentrant call or dispatch failure).
 */
int
event_base_loop(struct event_base *base, int flags)
{
	const struct eventop *evsel = base->evsel;
	struct timeval tv;
	struct timeval *tv_p;
	int res, done, retval = 0;

	/* Grab the lock.  We will release it inside evsel.dispatch, and again
	 * as we invoke user callbacks. */
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	/* Only one event_base_loop may run on a base at a time. */
	if (base->running_loop) {
		event_warnx("%s: reentrant invocation.  Only one event_base_loop"
		    " can run on each event_base at once.", __func__);
		EVBASE_RELEASE_LOCK(base, th_base_lock);
		return -1;
	}

	base->running_loop = 1;

	/* Invalidate the cached time so the first iteration reads a fresh
	 * clock value. */
	clear_time_cache(base);

	/* If signal events are registered, make this base the one that the
	 * signal handlers report to. */
	if (base->sig.ev_signal_added && base->sig.ev_n_signals_added)
		evsig_set_base_(base);

	done = 0;

#ifndef EVENT__DISABLE_THREAD_SUPPORT
	/* Record which thread is running the loop, for EVBASE_IN_THREAD(). */
	base->th_owner_id = EVTHREAD_GET_ID();
#endif

	/* Reset any stale stop requests from a previous run. */
	base->event_gotterm = base->event_break = 0;

	while (!done) {
		base->event_continue = 0;
		base->n_deferreds_queued = 0;

		/* Terminate the loop if we have been asked to */
		if (base->event_gotterm) {
			break;
		}

		if (base->event_break) {
			break;
		}

		/* Decide how long dispatch may block: until the next timeout
		 * if idle and blocking is allowed, otherwise poll (zero). */
		tv_p = &tv;
		if (!N_ACTIVE_CALLBACKS(base) && !(flags & EVLOOP_NONBLOCK)) {
			timeout_next(base, &tv_p);
		} else {
			/*
			 * if we have active events, we just poll new events
			 * without waiting.
			 */
			evutil_timerclear(&tv);
		}

		/* If we have no events, we just exit */
		if (0==(flags&EVLOOP_NO_EXIT_ON_EMPTY) &&
		    !event_haveevents(base) && !N_ACTIVE_CALLBACKS(base)) {
			event_debug(("%s: no events registered.", __func__));
			retval = 1;
			goto done;
		}

		/* Promote events queued as "active later" before dispatching. */
		event_queue_make_later_events_active(base);

		clear_time_cache(base);

		/* Backend wait (releases/reacquires the base lock internally). */
		res = evsel->dispatch(base, tv_p);

		if (res == -1) {
			event_debug(("%s: dispatch returned unsuccessfully.",
				__func__));
			retval = -1;
			goto done;
		}

		/* Refresh the cached time once, then use it for the whole
		 * timeout sweep below. */
		update_time_cache(base);

		timeout_process(base);

		if (N_ACTIVE_CALLBACKS(base)) {
			int n = event_process_active(base);
			/* EVLOOP_ONCE: stop after at least one callback ran
			 * and nothing further is pending. */
			if ((flags & EVLOOP_ONCE)
			    && N_ACTIVE_CALLBACKS(base) == 0
			    && n != 0)
				done = 1;
		} else if (flags & EVLOOP_NONBLOCK)
			done = 1;
	}
	event_debug(("%s: asked to terminate loop.", __func__));

done:
	/* Time cache is only valid while the loop runs. */
	clear_time_cache(base);
	base->running_loop = 0;

	EVBASE_RELEASE_LOCK(base, th_base_lock);

	return (retval);
}
   2026  1.1  christos 
/* One-time callback to implement event_base_once: invokes the user callback,
 * then deletes the allocated storage */
static void
event_once_cb(evutil_socket_t fd, short events, void *arg)
{
	struct event_once *eonce = arg;

	/* Run the user's callback first, without holding the base lock. */
	(*eonce->cb)(fd, events, eonce->arg);
	/* Then unlink the tracking record under the lock and free it. */
	EVBASE_ACQUIRE_LOCK(eonce->ev.ev_base, th_base_lock);
	LIST_REMOVE(eonce, next_once);
	EVBASE_RELEASE_LOCK(eonce->ev.ev_base, th_base_lock);
	event_debug_unassign(&eonce->ev);
	mm_free(eonce);
}
   2041  1.1  christos 
/* not threadsafe, event scheduled once. */

/* Convenience wrapper: schedule a one-shot callback on the global
 * current_base.  See event_base_once() for the argument contract. */
int
event_once(evutil_socket_t fd, short events,
    void (*callback)(evutil_socket_t, short, void *),
    void *arg, const struct timeval *tv)
{
	return event_base_once(current_base, fd, events, callback, arg, tv);
}
   2050  1.1  christos 
   2051  1.1  christos /* Schedules an event once */
   2052  1.1  christos int
   2053  1.1  christos event_base_once(struct event_base *base, evutil_socket_t fd, short events,
   2054  1.1  christos     void (*callback)(evutil_socket_t, short, void *),
   2055  1.1  christos     void *arg, const struct timeval *tv)
   2056  1.1  christos {
   2057  1.1  christos 	struct event_once *eonce;
   2058  1.1  christos 	int res = 0;
   2059  1.1  christos 	int activate = 0;
   2060  1.1  christos 
   2061  1.7  christos 	if (!base)
   2062  1.7  christos 		return (-1);
   2063  1.7  christos 
   2064  1.1  christos 	/* We cannot support signals that just fire once, or persistent
   2065  1.1  christos 	 * events. */
   2066  1.1  christos 	if (events & (EV_SIGNAL|EV_PERSIST))
   2067  1.1  christos 		return (-1);
   2068  1.1  christos 
   2069  1.1  christos 	if ((eonce = mm_calloc(1, sizeof(struct event_once))) == NULL)
   2070  1.1  christos 		return (-1);
   2071  1.1  christos 
   2072  1.1  christos 	eonce->cb = callback;
   2073  1.1  christos 	eonce->arg = arg;
   2074  1.1  christos 
   2075  1.2  christos 	if ((events & (EV_TIMEOUT|EV_SIGNAL|EV_READ|EV_WRITE|EV_CLOSED)) == EV_TIMEOUT) {
   2076  1.1  christos 		evtimer_assign(&eonce->ev, base, event_once_cb, eonce);
   2077  1.1  christos 
   2078  1.1  christos 		if (tv == NULL || ! evutil_timerisset(tv)) {
   2079  1.1  christos 			/* If the event is going to become active immediately,
   2080  1.1  christos 			 * don't put it on the timeout queue.  This is one
   2081  1.1  christos 			 * idiom for scheduling a callback, so let's make
   2082  1.1  christos 			 * it fast (and order-preserving). */
   2083  1.1  christos 			activate = 1;
   2084  1.1  christos 		}
   2085  1.2  christos 	} else if (events & (EV_READ|EV_WRITE|EV_CLOSED)) {
   2086  1.2  christos 		events &= EV_READ|EV_WRITE|EV_CLOSED;
   2087  1.1  christos 
   2088  1.1  christos 		event_assign(&eonce->ev, base, fd, events, event_once_cb, eonce);
   2089  1.1  christos 	} else {
   2090  1.1  christos 		/* Bad event combination */
   2091  1.1  christos 		mm_free(eonce);
   2092  1.1  christos 		return (-1);
   2093  1.1  christos 	}
   2094  1.1  christos 
   2095  1.1  christos 	if (res == 0) {
   2096  1.1  christos 		EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   2097  1.1  christos 		if (activate)
   2098  1.1  christos 			event_active_nolock_(&eonce->ev, EV_TIMEOUT, 1);
   2099  1.1  christos 		else
   2100  1.1  christos 			res = event_add_nolock_(&eonce->ev, tv, 0);
   2101  1.1  christos 
   2102  1.1  christos 		if (res != 0) {
   2103  1.1  christos 			mm_free(eonce);
   2104  1.1  christos 			return (res);
   2105  1.1  christos 		} else {
   2106  1.1  christos 			LIST_INSERT_HEAD(&base->once_events, eonce, next_once);
   2107  1.1  christos 		}
   2108  1.1  christos 		EVBASE_RELEASE_LOCK(base, th_base_lock);
   2109  1.1  christos 	}
   2110  1.1  christos 
   2111  1.1  christos 	return (0);
   2112  1.1  christos }
   2113  1.1  christos 
   2114  1.1  christos int
   2115  1.1  christos event_assign(struct event *ev, struct event_base *base, evutil_socket_t fd, short events, void (*callback)(evutil_socket_t, short, void *), void *arg)
   2116  1.1  christos {
   2117  1.1  christos 	if (!base)
   2118  1.1  christos 		base = current_base;
   2119  1.1  christos 	if (arg == &event_self_cbarg_ptr_)
   2120  1.1  christos 		arg = ev;
   2121  1.1  christos 
   2122  1.7  christos 	if (!(events & EV_SIGNAL))
   2123  1.7  christos 		event_debug_assert_socket_nonblocking_(fd);
   2124  1.1  christos 	event_debug_assert_not_added_(ev);
   2125  1.1  christos 
   2126  1.1  christos 	ev->ev_base = base;
   2127  1.1  christos 
   2128  1.1  christos 	ev->ev_callback = callback;
   2129  1.1  christos 	ev->ev_arg = arg;
   2130  1.1  christos 	ev->ev_fd = fd;
   2131  1.1  christos 	ev->ev_events = events;
   2132  1.1  christos 	ev->ev_res = 0;
   2133  1.1  christos 	ev->ev_flags = EVLIST_INIT;
   2134  1.1  christos 	ev->ev_ncalls = 0;
   2135  1.1  christos 	ev->ev_pncalls = NULL;
   2136  1.1  christos 
   2137  1.1  christos 	if (events & EV_SIGNAL) {
   2138  1.2  christos 		if ((events & (EV_READ|EV_WRITE|EV_CLOSED)) != 0) {
   2139  1.1  christos 			event_warnx("%s: EV_SIGNAL is not compatible with "
   2140  1.2  christos 			    "EV_READ, EV_WRITE or EV_CLOSED", __func__);
   2141  1.1  christos 			return -1;
   2142  1.1  christos 		}
   2143  1.1  christos 		ev->ev_closure = EV_CLOSURE_EVENT_SIGNAL;
   2144  1.1  christos 	} else {
   2145  1.1  christos 		if (events & EV_PERSIST) {
   2146  1.1  christos 			evutil_timerclear(&ev->ev_io_timeout);
   2147  1.1  christos 			ev->ev_closure = EV_CLOSURE_EVENT_PERSIST;
   2148  1.1  christos 		} else {
   2149  1.1  christos 			ev->ev_closure = EV_CLOSURE_EVENT;
   2150  1.1  christos 		}
   2151  1.1  christos 	}
   2152  1.1  christos 
   2153  1.1  christos 	min_heap_elem_init_(ev);
   2154  1.1  christos 
   2155  1.1  christos 	if (base != NULL) {
   2156  1.1  christos 		/* by default, we put new events into the middle priority */
   2157  1.1  christos 		ev->ev_pri = base->nactivequeues / 2;
   2158  1.1  christos 	}
   2159  1.1  christos 
   2160  1.1  christos 	event_debug_note_setup_(ev);
   2161  1.1  christos 
   2162  1.1  christos 	return 0;
   2163  1.1  christos }
   2164  1.1  christos 
   2165  1.1  christos int
   2166  1.1  christos event_base_set(struct event_base *base, struct event *ev)
   2167  1.1  christos {
   2168  1.1  christos 	/* Only innocent events may be assigned to a different base */
   2169  1.1  christos 	if (ev->ev_flags != EVLIST_INIT)
   2170  1.1  christos 		return (-1);
   2171  1.1  christos 
   2172  1.1  christos 	event_debug_assert_is_setup_(ev);
   2173  1.1  christos 
   2174  1.1  christos 	ev->ev_base = base;
   2175  1.1  christos 	ev->ev_pri = base->nactivequeues/2;
   2176  1.1  christos 
   2177  1.1  christos 	return (0);
   2178  1.1  christos }
   2179  1.1  christos 
   2180  1.1  christos void
   2181  1.1  christos event_set(struct event *ev, evutil_socket_t fd, short events,
   2182  1.1  christos 	  void (*callback)(evutil_socket_t, short, void *), void *arg)
   2183  1.1  christos {
   2184  1.1  christos 	int r;
   2185  1.1  christos 	r = event_assign(ev, current_base, fd, events, callback, arg);
   2186  1.1  christos 	EVUTIL_ASSERT(r == 0);
   2187  1.1  christos }
   2188  1.1  christos 
/* Return the sentinel pointer that event_assign()/event_new() recognize as
 * "pass the event itself as the callback argument". */
void *
event_self_cbarg(void)
{
	return &event_self_cbarg_ptr_;
}
   2194  1.1  christos 
   2195  1.1  christos struct event *
   2196  1.1  christos event_base_get_running_event(struct event_base *base)
   2197  1.1  christos {
   2198  1.1  christos 	struct event *ev = NULL;
   2199  1.1  christos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   2200  1.1  christos 	if (EVBASE_IN_THREAD(base)) {
   2201  1.1  christos 		struct event_callback *evcb = base->current_event;
   2202  1.1  christos 		if (evcb->evcb_flags & EVLIST_INIT)
   2203  1.1  christos 			ev = event_callback_to_event(evcb);
   2204  1.1  christos 	}
   2205  1.1  christos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   2206  1.1  christos 	return ev;
   2207  1.1  christos }
   2208  1.1  christos 
   2209  1.1  christos struct event *
   2210  1.1  christos event_new(struct event_base *base, evutil_socket_t fd, short events, void (*cb)(evutil_socket_t, short, void *), void *arg)
   2211  1.1  christos {
   2212  1.1  christos 	struct event *ev;
   2213  1.1  christos 	ev = mm_malloc(sizeof(struct event));
   2214  1.1  christos 	if (ev == NULL)
   2215  1.1  christos 		return (NULL);
   2216  1.1  christos 	if (event_assign(ev, base, fd, events, cb, arg) < 0) {
   2217  1.1  christos 		mm_free(ev);
   2218  1.1  christos 		return (NULL);
   2219  1.1  christos 	}
   2220  1.1  christos 
   2221  1.1  christos 	return (ev);
   2222  1.1  christos }
   2223  1.1  christos 
/* Release an event allocated with event_new(): remove it from the base,
 * then free its storage. */
void
event_free(struct event *ev)
{
	/* The setup assertion is disabled so that events which have been
	 * finalized remain a valid target for event_free(); asserting here
	 * would reject them even though freeing them is legitimate. */
	// event_debug_assert_is_setup_(ev);

	/* make sure that this event won't be coming back to haunt us. */
	event_del(ev);
	event_debug_note_teardown_(ev);
	mm_free(ev);

}
   2237  1.1  christos 
/* Debug-mode counterpart of event_assign(): record that `ev` is being torn
 * down and clear its initialized flag.  The event must not be added. */
void
event_debug_unassign(struct event *ev)
{
	event_debug_assert_not_added_(ev);
	event_debug_note_teardown_(ev);

	ev->ev_flags &= ~EVLIST_INIT;
}
   2246  1.1  christos 
/* Internal flag ORed into `flags` by event_free_finalize(): also free the
 * struct event after the finalizer callback has run. */
#define EVENT_FINALIZE_FREE_ 0x10000
/* Begin finalizing `ev` (lock already held): remove it without blocking,
 * repoint its closure at the finalizer `cb`, and schedule one last
 * activation that will invoke it.  Always returns 0.
 * Note: `base` is not used here; kept for signature symmetry with the
 * other *_nolock_ finalize helpers. */
static int
event_finalize_nolock_(struct event_base *base, unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	/* Pick the closure kind that tells the loop whether to free `ev`
	 * once the finalizer returns. */
	ev_uint8_t closure = (flags & EVENT_FINALIZE_FREE_) ?
	    EV_CLOSURE_EVENT_FINALIZE_FREE : EV_CLOSURE_EVENT_FINALIZE;

	event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
	ev->ev_closure = closure;
	ev->ev_evcallback.evcb_cb_union.evcb_evfinalize = cb;
	/* One final activation; EVLIST_FINALIZING then blocks re-adding. */
	event_active_nolock_(ev, EV_FINALIZE, 1);
	ev->ev_flags |= EVLIST_FINALIZING;
	return 0;
}
   2261  1.2  christos 
   2262  1.2  christos static int
   2263  1.2  christos event_finalize_impl_(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
   2264  1.2  christos {
   2265  1.2  christos 	int r;
   2266  1.2  christos 	struct event_base *base = ev->ev_base;
   2267  1.2  christos 	if (EVUTIL_FAILURE_CHECK(!base)) {
   2268  1.2  christos 		event_warnx("%s: event has no event_base set.", __func__);
   2269  1.2  christos 		return -1;
   2270  1.2  christos 	}
   2271  1.2  christos 
   2272  1.2  christos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   2273  1.2  christos 	r = event_finalize_nolock_(base, flags, ev, cb);
   2274  1.2  christos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   2275  1.2  christos 	return r;
   2276  1.2  christos }
   2277  1.2  christos 
/* Public API: finalize `ev` without freeing its storage; `cb` runs after
 * the event can no longer fire.  Returns 0 on success, -1 on error. */
int
event_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	return event_finalize_impl_(flags, ev, cb);
}
   2283  1.2  christos 
/* Public API: like event_finalize(), but also free the struct event after
 * the finalizer callback has run (EVENT_FINALIZE_FREE_). */
int
event_free_finalize(unsigned flags, struct event *ev, event_finalize_callback_fn cb)
{
	return event_finalize_impl_(flags|EVENT_FINALIZE_FREE_, ev, cb);
}
   2289  1.2  christos 
/* Finalize a raw event_callback (lock already held).  If it is backed by a
 * struct event (EVLIST_INIT), delete the event; otherwise just cancel the
 * callback.  Then repoint it at the finalizer `cb` and schedule one last
 * activation.  Note: `flags` is currently unused here. */
void
event_callback_finalize_nolock_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
{
	struct event *ev = NULL;
	if (evcb->evcb_flags & EVLIST_INIT) {
		ev = event_callback_to_event(evcb);
		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
	} else {
		event_callback_cancel_nolock_(base, evcb, 0); /*XXX can this fail?*/
	}

	evcb->evcb_closure = EV_CLOSURE_CB_FINALIZE;
	evcb->evcb_cb_union.evcb_cbfinalize = cb;
	event_callback_activate_nolock_(base, evcb); /* XXX can this really fail?*/
	evcb->evcb_flags |= EVLIST_FINALIZING;
}
   2306  1.2  christos 
/* Locked wrapper around event_callback_finalize_nolock_(). */
void
event_callback_finalize_(struct event_base *base, unsigned flags, struct event_callback *evcb, void (*cb)(struct event_callback *, void *))
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	event_callback_finalize_nolock_(base, flags, evcb, cb);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
   2314  1.2  christos 
/** Internal: Finalize all of the n_cbs callbacks in evcbs.  The provided
 * callback will be invoked on *one of them*, after they have *all* been
 * finalized. */
int
event_callback_finalize_many_(struct event_base *base, int n_cbs, struct event_callback **evcbs, void (*cb)(struct event_callback *, void *))
{
	int n_pending = 0, i;

	if (base == NULL)
		base = current_base;

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	event_debug(("%s: %d events finalizing", __func__, n_cbs));

	/* At most one can be currently executing; the rest we just
	 * cancel... But we always make sure that the finalize callback
	 * runs. */
	for (i = 0; i < n_cbs; ++i) {
		struct event_callback *evcb = evcbs[i];
		if (evcb == base->current_event) {
			event_callback_finalize_nolock_(base, 0, evcb, cb);
			++n_pending;
		} else {
			event_callback_cancel_nolock_(base, evcb, 0);
		}
	}

	if (n_pending == 0) {
		/* Just do the first one. */
		/* NOTE(review): this dereferences evcbs[0], so callers must
		 * pass n_cbs >= 1 — verify call sites uphold that. */
		event_callback_finalize_nolock_(base, 0, evcbs[0], cb);
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);
	return 0;
}
   2351  1.2  christos 
   2352  1.1  christos /*
   2353  1.1  christos  * Set's the priority of an event - if an event is already scheduled
   2354  1.1  christos  * changing the priority is going to fail.
   2355  1.1  christos  */
   2356  1.1  christos 
   2357  1.1  christos int
   2358  1.1  christos event_priority_set(struct event *ev, int pri)
   2359  1.1  christos {
   2360  1.1  christos 	event_debug_assert_is_setup_(ev);
   2361  1.1  christos 
   2362  1.1  christos 	if (ev->ev_flags & EVLIST_ACTIVE)
   2363  1.1  christos 		return (-1);
   2364  1.1  christos 	if (pri < 0 || pri >= ev->ev_base->nactivequeues)
   2365  1.1  christos 		return (-1);
   2366  1.1  christos 
   2367  1.1  christos 	ev->ev_pri = pri;
   2368  1.1  christos 
   2369  1.1  christos 	return (0);
   2370  1.1  christos }
   2371  1.1  christos 
/*
 * Checks if a specific event is pending or scheduled.
 * Returns the subset of `event`'s flag bits for which `ev` is currently
 * pending (inserted, active, or on the timeout queue); if EV_TIMEOUT is
 * both requested and pending and `tv` is non-NULL, also reports the
 * absolute expiration time through *tv.
 */

int
event_pending(const struct event *ev, short event, struct timeval *tv)
{
	int flags = 0;

	if (EVUTIL_FAILURE_CHECK(ev->ev_base == NULL)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return 0;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
	event_debug_assert_is_setup_(ev);

	/* Accumulate what the event is currently waiting for / signaled with. */
	if (ev->ev_flags & EVLIST_INSERTED)
		flags |= (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL));
	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
		flags |= ev->ev_res;
	if (ev->ev_flags & EVLIST_TIMEOUT)
		flags |= EV_TIMEOUT;

	/* Ignore any bits the caller cannot legitimately ask about. */
	event &= (EV_TIMEOUT|EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL);

	/* See if there is a timeout that we should report */
	if (tv != NULL && (flags & event & EV_TIMEOUT)) {
		struct timeval tmp = ev->ev_timeout;
		/* Strip the common-timeout tag bits from tv_usec. */
		tmp.tv_usec &= MICROSECONDS_MASK;
		/* correctly remap to real time */
		evutil_timeradd(&ev->ev_base->tv_clock_diff, &tmp, tv);
	}

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);

	return (flags & event);
}
   2410  1.1  christos 
   2411  1.1  christos int
   2412  1.1  christos event_initialized(const struct event *ev)
   2413  1.1  christos {
   2414  1.1  christos 	if (!(ev->ev_flags & EVLIST_INIT))
   2415  1.1  christos 		return 0;
   2416  1.1  christos 
   2417  1.1  christos 	return 1;
   2418  1.1  christos }
   2419  1.1  christos 
   2420  1.1  christos void
   2421  1.1  christos event_get_assignment(const struct event *event, struct event_base **base_out, evutil_socket_t *fd_out, short *events_out, event_callback_fn *callback_out, void **arg_out)
   2422  1.1  christos {
   2423  1.1  christos 	event_debug_assert_is_setup_(event);
   2424  1.1  christos 
   2425  1.1  christos 	if (base_out)
   2426  1.1  christos 		*base_out = event->ev_base;
   2427  1.1  christos 	if (fd_out)
   2428  1.1  christos 		*fd_out = event->ev_fd;
   2429  1.1  christos 	if (events_out)
   2430  1.1  christos 		*events_out = event->ev_events;
   2431  1.1  christos 	if (callback_out)
   2432  1.1  christos 		*callback_out = event->ev_callback;
   2433  1.1  christos 	if (arg_out)
   2434  1.1  christos 		*arg_out = event->ev_arg;
   2435  1.1  christos }
   2436  1.1  christos 
/* Size of struct event, so binaries built against the opaque API can
 * allocate event storage without seeing the structure definition. */
size_t
event_get_struct_event_size(void)
{
	return sizeof(struct event);
}
   2442  1.1  christos 
/* Accessor: the fd (or signal number) this event was assigned with. */
evutil_socket_t
event_get_fd(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_fd;
}
   2449  1.1  christos 
/* Accessor: the event_base this event is attached to. */
struct event_base *
event_get_base(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_base;
}
   2456  1.1  christos 
/* Accessor: the EV_* mask this event was assigned with. */
short
event_get_events(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_events;
}
   2463  1.1  christos 
/* Accessor: the callback function this event was assigned with. */
event_callback_fn
event_get_callback(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_callback;
}
   2470  1.1  christos 
/* Accessor: the callback argument this event was assigned with. */
void *
event_get_callback_arg(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_arg;
}
   2477  1.1  christos 
/* Accessor: the event's current priority (see event_priority_set()). */
int
event_get_priority(const struct event *ev)
{
	event_debug_assert_is_setup_(ev);
	return ev->ev_pri;
}
   2484  1.1  christos 
/* Public, thread-safe wrapper around event_add_nolock_(): make `ev`
 * pending, with `tv` treated as a relative timeout (NULL for none).
 * Returns 0 on success, -1 on failure. */
int
event_add(struct event *ev, const struct timeval *tv)
{
	int res;

	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
		event_warnx("%s: event has no event_base set.", __func__);
		return -1;
	}

	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);

	/* Third argument 0: `tv` is an interval, not an absolute time. */
	res = event_add_nolock_(ev, tv, 0);

	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);

	return (res);
}
   2503  1.1  christos 
/* Helper callback: wake an event_base from another thread.  This version
 * works by writing a byte to one end of a socketpair, so that the event_base
 * listening on the other end will wake up as the corresponding event
 * triggers */
static int
evthread_notify_base_default(struct event_base *base)
{
	char buf[1];
	int r;
	buf[0] = (char) 0;
#ifdef _WIN32
	r = send(base->th_notify_fd[1], buf, 1, 0);
#else
	r = write(base->th_notify_fd[1], buf, 1);
#endif
	/* EAGAIN is treated as success: the pipe being full means a wakeup
	 * byte is already queued and the loop will wake regardless. */
	return (r < 0 && ! EVUTIL_ERR_IS_EAGAIN(errno)) ? -1 : 0;
}
   2521  1.1  christos 
#ifdef EVENT__HAVE_EVENTFD
/* Helper callback: wake an event_base from another thread.  This version
 * assumes that you have a working eventfd() implementation. */
static int
evthread_notify_base_eventfd(struct event_base *base)
{
	/* Writing any nonzero value increments the eventfd counter and
	 * makes the notify fd readable; retry on transient EAGAIN. */
	ev_uint64_t msg = 1;
	int r;
	do {
		r = write(base->th_notify_fd[0], (void*) &msg, sizeof(msg));
	} while (r < 0 && errno == EAGAIN);

	return (r < 0) ? -1 : 0;
}
#endif
   2537  1.1  christos 
   2538  1.1  christos 
/** Tell the thread currently running the event_loop for base (if any) that it
 * needs to stop waiting in its dispatch function (if it is) and process all
 * active callbacks. */
static int
evthread_notify_base(struct event_base *base)
{
	/* Caller must already hold th_base_lock. */
	EVENT_BASE_ASSERT_LOCKED(base);
	if (!base->th_notify_fn)
		return -1;
	/* Coalesce: if a wakeup is already in flight, don't send another. */
	if (base->is_notify_pending)
		return 0;
	base->is_notify_pending = 1;
	return base->th_notify_fn(base);
}
   2553  1.1  christos 
/* Implementation function to remove a timeout on a currently pending event.
 * Leaves the event otherwise pending (I/O/signal registration untouched).
 * Caller must hold the base lock.  Always returns 0. */
int
event_remove_timer_nolock_(struct event *ev)
{
	struct event_base *base = ev->ev_base;

	EVENT_BASE_ASSERT_LOCKED(base);
	event_debug_assert_is_setup_(ev);

	event_debug(("event_remove_timer_nolock: event: %p", ev));

	/* If it's not pending on a timeout, we don't need to do anything. */
	if (ev->ev_flags & EVLIST_TIMEOUT) {
		event_queue_remove_timeout(base, ev);
		evutil_timerclear(&ev->ev_.ev_io.ev_timeout);
	}

	return (0);
}
   2574  1.1  christos 
   2575  1.1  christos int
   2576  1.1  christos event_remove_timer(struct event *ev)
   2577  1.1  christos {
   2578  1.1  christos 	int res;
   2579  1.1  christos 
   2580  1.1  christos 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
   2581  1.1  christos 		event_warnx("%s: event has no event_base set.", __func__);
   2582  1.1  christos 		return -1;
   2583  1.1  christos 	}
   2584  1.1  christos 
   2585  1.1  christos 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
   2586  1.1  christos 
   2587  1.1  christos 	res = event_remove_timer_nolock_(ev);
   2588  1.1  christos 
   2589  1.1  christos 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
   2590  1.1  christos 
   2591  1.1  christos 	return (res);
   2592  1.1  christos }
   2593  1.1  christos 
/* Implementation function to add an event.  Works just like event_add,
 * except: 1) it requires that we have the lock.  2) if tv_is_absolute is set,
 * we treat tv as an absolute time, not as an interval to add to the current
 * time.
 *
 * Returns 0 on success and -1 on failure (finalizing event, out of
 * memory, or evmap backend error).  On failure no state is changed.
 */
int
event_add_nolock_(struct event *ev, const struct timeval *tv,
    int tv_is_absolute)
{
	struct event_base *base = ev->ev_base;
	int res = 0;
	int notify = 0;	/* set when the dispatch loop must be woken up */

	EVENT_BASE_ASSERT_LOCKED(base);
	event_debug_assert_is_setup_(ev);

	event_debug((
		 "event_add: event: %p (fd "EV_SOCK_FMT"), %s%s%s%scall %p",
		 ev,
		 EV_SOCK_ARG(ev->ev_fd),
		 ev->ev_events & EV_READ ? "EV_READ " : " ",
		 ev->ev_events & EV_WRITE ? "EV_WRITE " : " ",
		 ev->ev_events & EV_CLOSED ? "EV_CLOSED " : " ",
		 tv ? "EV_TIMEOUT " : " ",
		 ev->ev_callback));

	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));

	/* An event that is being finalized must not be re-added. */
	if (ev->ev_flags & EVLIST_FINALIZING) {
		/* XXXX debug */
		return (-1);
	}

	/*
	 * prepare for timeout insertion further below, if we get a
	 * failure on any step, we should not change any state.
	 */
	if (tv != NULL && !(ev->ev_flags & EVLIST_TIMEOUT)) {
		if (min_heap_reserve_(&base->timeheap,
			1 + min_heap_size_(&base->timeheap)) == -1)
			return (-1);  /* ENOMEM == errno */
	}

	/* If the main thread is currently executing a signal event's
	 * callback, and we are not the main thread, then we want to wait
	 * until the callback is done before we mess with the event, or else
	 * we can race on ev_ncalls and ev_pncalls below. */
#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (base->current_event == event_to_event_callback(ev) &&
	    (ev->ev_events & EV_SIGNAL)
	    && !EVBASE_IN_THREAD(base)) {
		++base->current_event_waiters;
		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
	}
#endif

	/* Register with the I/O or signal backend, unless the event is
	 * already inserted or on an active queue. */
	if ((ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED|EV_SIGNAL)) &&
	    !(ev->ev_flags & (EVLIST_INSERTED|EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
			res = evmap_io_add_(base, ev->ev_fd, ev);
		else if (ev->ev_events & EV_SIGNAL)
			res = evmap_signal_add_(base, (int)ev->ev_fd, ev);
		if (res != -1)
			event_queue_insert_inserted(base, ev);
		if (res == 1) {
			/* evmap says we need to notify the main thread. */
			notify = 1;
			res = 0;
		}
	}

	/*
	 * we should change the timeout state only if the previous event
	 * addition succeeded.
	 */
	if (res != -1 && tv != NULL) {
		struct timeval now;
		int common_timeout;
#ifdef USE_REINSERT_TIMEOUT
		int was_common;
		int old_timeout_idx;
#endif

		/*
		 * for persistent timeout events, we remember the
		 * timeout value and re-add the event.
		 *
		 * If tv_is_absolute, this was already set.
		 */
		if (ev->ev_closure == EV_CLOSURE_EVENT_PERSIST && !tv_is_absolute)
			ev->ev_io_timeout = *tv;

#ifndef USE_REINSERT_TIMEOUT
		if (ev->ev_flags & EVLIST_TIMEOUT) {
			event_queue_remove_timeout(base, ev);
		}
#endif

		/* Check if it is active due to a timeout.  Rescheduling
		 * this timeout before the callback can be executed
		 * removes it from the active list. */
		if ((ev->ev_flags & EVLIST_ACTIVE) &&
		    (ev->ev_res & EV_TIMEOUT)) {
			if (ev->ev_events & EV_SIGNAL) {
				/* See if we are just active executing
				 * this event in a loop
				 */
				if (ev->ev_ncalls && ev->ev_pncalls) {
					/* Abort loop */
					*ev->ev_pncalls = 0;
				}
			}

			event_queue_remove_active(base, event_to_event_callback(ev));
		}

		gettime(base, &now);

		common_timeout = is_common_timeout(tv, base);
#ifdef USE_REINSERT_TIMEOUT
		was_common = is_common_timeout(&ev->ev_timeout, base);
		old_timeout_idx = COMMON_TIMEOUT_IDX(&ev->ev_timeout);
#endif

		if (tv_is_absolute) {
			ev->ev_timeout = *tv;
		} else if (common_timeout) {
			/* Keep the common-timeout tag bits in tv_usec while
			 * adding the real microseconds to "now". */
			struct timeval tmp = *tv;
			tmp.tv_usec &= MICROSECONDS_MASK;
			evutil_timeradd(&now, &tmp, &ev->ev_timeout);
			ev->ev_timeout.tv_usec |=
			    (tv->tv_usec & ~MICROSECONDS_MASK);
		} else {
			evutil_timeradd(&now, tv, &ev->ev_timeout);
		}

		event_debug((
			 "event_add: event %p, timeout in %d seconds %d useconds, call %p",
			 ev, (int)tv->tv_sec, (int)tv->tv_usec, ev->ev_callback));

#ifdef USE_REINSERT_TIMEOUT
		event_queue_reinsert_timeout(base, ev, was_common, common_timeout, old_timeout_idx);
#else
		event_queue_insert_timeout(base, ev);
#endif

		if (common_timeout) {
			/* Only the first event on a common-timeout list needs
			 * a timer scheduled on its behalf. */
			struct common_timeout_list *ctl =
			    get_common_timeout_list(base, &ev->ev_timeout);
			if (ev == TAILQ_FIRST(&ctl->events)) {
				common_timeout_schedule(ctl, &now, ev);
			}
		} else {
			struct event* top = NULL;
			/* See if the earliest timeout is now earlier than it
			 * was before: if so, we will need to tell the main
			 * thread to wake up earlier than it would otherwise.
			 * We double check the timeout of the top element to
			 * handle time distortions due to system suspension.
			 */
			if (min_heap_elt_is_top_(ev))
				notify = 1;
			else if ((top = min_heap_top_(&base->timeheap)) != NULL &&
					 evutil_timercmp(&top->ev_timeout, &now, <))
				notify = 1;
		}
	}

	/* if we are not in the right thread, we need to wake up the loop */
	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	event_debug_note_add_(ev);

	return (res);
}
   2769  1.1  christos 
   2770  1.2  christos static int
   2771  1.2  christos event_del_(struct event *ev, int blocking)
   2772  1.1  christos {
   2773  1.1  christos 	int res;
   2774  1.7  christos 	struct event_base *base = ev->ev_base;
   2775  1.1  christos 
   2776  1.7  christos 	if (EVUTIL_FAILURE_CHECK(!base)) {
   2777  1.1  christos 		event_warnx("%s: event has no event_base set.", __func__);
   2778  1.1  christos 		return -1;
   2779  1.1  christos 	}
   2780  1.1  christos 
   2781  1.7  christos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   2782  1.2  christos 	res = event_del_nolock_(ev, blocking);
   2783  1.7  christos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   2784  1.1  christos 
   2785  1.1  christos 	return (res);
   2786  1.1  christos }
   2787  1.1  christos 
/* Public event_del(): removes the event, deciding automatically
 * (EVENT_DEL_AUTOBLOCK) whether to wait for a concurrently running
 * callback to finish. */
int
event_del(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_AUTOBLOCK);
}
   2793  1.2  christos 
/* Like event_del(), but always waits for a callback for this event that
 * is running in another thread to finish — even when the event was
 * added with EV_FINALIZE (see event_del_nolock_). */
int
event_del_block(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_BLOCK);
}
   2799  1.2  christos 
/* Like event_del(), but never waits for a concurrently running callback
 * to finish; the callback may still be executing when this returns. */
int
event_del_noblock(struct event *ev)
{
	return event_del_(ev, EVENT_DEL_NOBLOCK);
}
   2805  1.2  christos 
/** Helper for event_del: always called with th_base_lock held.
 *
 * "blocking" must be one of the EVENT_DEL_{BLOCK, NOBLOCK, AUTOBLOCK,
 * EVEN_IF_FINALIZING} values. See those for more information.
 *
 * Returns 0 on success (including the no-op cases below) and -1 if the
 * event has no base or the evmap backend reports failure.
 */
int
event_del_nolock_(struct event *ev, int blocking)
{
	struct event_base *base;
	int res = 0, notify = 0;

	event_debug(("event_del: %p (fd "EV_SOCK_FMT"), callback %p",
		ev, EV_SOCK_ARG(ev->ev_fd), ev->ev_callback));

	/* An event without a base has not been added */
	if (ev->ev_base == NULL)
		return (-1);

	EVENT_BASE_ASSERT_LOCKED(ev->ev_base);

	/* An event already being finalized is being torn down elsewhere;
	 * leave it alone unless the caller explicitly overrides. */
	if (blocking != EVENT_DEL_EVEN_IF_FINALIZING) {
		if (ev->ev_flags & EVLIST_FINALIZING) {
			/* XXXX Debug */
			return 0;
		}
	}

	base = ev->ev_base;

	EVUTIL_ASSERT(!(ev->ev_flags & ~EVLIST_ALL));

	/* See if we are just active executing this event in a loop */
	if (ev->ev_events & EV_SIGNAL) {
		if (ev->ev_ncalls && ev->ev_pncalls) {
			/* Abort loop */
			*ev->ev_pncalls = 0;
		}
	}

	if (ev->ev_flags & EVLIST_TIMEOUT) {
		/* NOTE: We never need to notify the main thread because of a
		 * deleted timeout event: all that could happen if we don't is
		 * that the dispatch loop might wake up too early.  But the
		 * point of notifying the main thread _is_ to wake up the
		 * dispatch loop early anyway, so we wouldn't gain anything by
		 * doing it.
		 */
		event_queue_remove_timeout(base, ev);
	}

	if (ev->ev_flags & EVLIST_ACTIVE)
		event_queue_remove_active(base, event_to_event_callback(ev));
	else if (ev->ev_flags & EVLIST_ACTIVE_LATER)
		event_queue_remove_active_later(base, event_to_event_callback(ev));

	if (ev->ev_flags & EVLIST_INSERTED) {
		event_queue_remove_inserted(base, ev);
		if (ev->ev_events & (EV_READ|EV_WRITE|EV_CLOSED))
			res = evmap_io_del_(base, ev->ev_fd, ev);
		else
			res = evmap_signal_del_(base, (int)ev->ev_fd, ev);
		if (res == 1) {
			/* evmap says we need to notify the main thread. */
			notify = 1;
			res = 0;
		}
		/* If we do not have events, let's notify event base so it can
		 * exit without waiting */
		if (!event_haveevents(base) && !N_ACTIVE_CALLBACKS(base))
			notify = 1;
	}

	/* if we are not in the right thread, we need to wake up the loop */
	if (res != -1 && notify && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	event_debug_note_del_(ev);

	/* If the main thread is currently executing this event's callback,
	 * and we are not the main thread, then we want to wait until the
	 * callback is done before returning. That way, when this function
	 * returns, it will be safe to free the user-supplied argument.
	 */
#ifndef EVENT__DISABLE_THREAD_SUPPORT
	if (blocking != EVENT_DEL_NOBLOCK &&
	    base->current_event == event_to_event_callback(ev) &&
	    !EVBASE_IN_THREAD(base) &&
	    (blocking == EVENT_DEL_BLOCK || !(ev->ev_events & EV_FINALIZE))) {
		++base->current_event_waiters;
		EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
	}
#endif

	return (res);
}
   2901  1.1  christos 
   2902  1.1  christos void
   2903  1.1  christos event_active(struct event *ev, int res, short ncalls)
   2904  1.1  christos {
   2905  1.1  christos 	if (EVUTIL_FAILURE_CHECK(!ev->ev_base)) {
   2906  1.1  christos 		event_warnx("%s: event has no event_base set.", __func__);
   2907  1.1  christos 		return;
   2908  1.1  christos 	}
   2909  1.1  christos 
   2910  1.1  christos 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
   2911  1.1  christos 
   2912  1.1  christos 	event_debug_assert_is_setup_(ev);
   2913  1.1  christos 
   2914  1.1  christos 	event_active_nolock_(ev, res, ncalls);
   2915  1.1  christos 
   2916  1.1  christos 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
   2917  1.1  christos }
   2918  1.1  christos 
   2919  1.1  christos 
/* Like event_active(), but the caller already holds th_base_lock.
 * Merges "res" into ev_res if the event is already scheduled; otherwise
 * queues the event's callback.  For signal events, "ncalls" is stored
 * as the number of times to invoke the callback. */
void
event_active_nolock_(struct event *ev, int res, short ncalls)
{
	struct event_base *base;

	event_debug(("event_active: %p (fd "EV_SOCK_FMT"), res %d, callback %p",
		ev, EV_SOCK_ARG(ev->ev_fd), (int)res, ev->ev_callback));

	base = ev->ev_base;
	EVENT_BASE_ASSERT_LOCKED(base);

	/* Events being finalized must not be reactivated. */
	if (ev->ev_flags & EVLIST_FINALIZING) {
		/* XXXX debug */
		return;
	}

	switch ((ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
	default:
	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
		/* Being on both active queues at once violates the queue
		 * invariants. */
		EVUTIL_ASSERT(0);
		break;
	case EVLIST_ACTIVE:
		/* We get different kinds of events, add them together */
		ev->ev_res |= res;
		return;
	case EVLIST_ACTIVE_LATER:
		ev->ev_res |= res;
		break;
	case 0:
		ev->ev_res = res;
		break;
	}

	/* Flag that something more urgent (numerically lower priority)
	 * than the currently running priority has become active. */
	if (ev->ev_pri < base->event_running_priority)
		base->event_continue = 1;

	if (ev->ev_events & EV_SIGNAL) {
#ifndef EVENT__DISABLE_THREAD_SUPPORT
		/* Wait for a concurrently running callback for this event to
		 * finish, so we do not race on ev_ncalls/ev_pncalls below. */
		if (base->current_event == event_to_event_callback(ev) &&
		    !EVBASE_IN_THREAD(base)) {
			++base->current_event_waiters;
			EVTHREAD_COND_WAIT(base->current_event_cond, base->th_base_lock);
		}
#endif
		ev->ev_ncalls = ncalls;
		ev->ev_pncalls = NULL;
	}

	event_callback_activate_nolock_(base, event_to_event_callback(ev));
}
   2970  1.1  christos 
   2971  1.1  christos void
   2972  1.1  christos event_active_later_(struct event *ev, int res)
   2973  1.1  christos {
   2974  1.1  christos 	EVBASE_ACQUIRE_LOCK(ev->ev_base, th_base_lock);
   2975  1.1  christos 	event_active_later_nolock_(ev, res);
   2976  1.1  christos 	EVBASE_RELEASE_LOCK(ev->ev_base, th_base_lock);
   2977  1.1  christos }
   2978  1.1  christos 
   2979  1.1  christos void
   2980  1.1  christos event_active_later_nolock_(struct event *ev, int res)
   2981  1.1  christos {
   2982  1.1  christos 	struct event_base *base = ev->ev_base;
   2983  1.1  christos 	EVENT_BASE_ASSERT_LOCKED(base);
   2984  1.1  christos 
   2985  1.1  christos 	if (ev->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
   2986  1.1  christos 		/* We get different kinds of events, add them together */
   2987  1.1  christos 		ev->ev_res |= res;
   2988  1.1  christos 		return;
   2989  1.1  christos 	}
   2990  1.1  christos 
   2991  1.1  christos 	ev->ev_res = res;
   2992  1.1  christos 
   2993  1.1  christos 	event_callback_activate_later_nolock_(base, event_to_event_callback(ev));
   2994  1.1  christos }
   2995  1.1  christos 
   2996  1.1  christos int
   2997  1.1  christos event_callback_activate_(struct event_base *base,
   2998  1.1  christos     struct event_callback *evcb)
   2999  1.1  christos {
   3000  1.1  christos 	int r;
   3001  1.1  christos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   3002  1.1  christos 	r = event_callback_activate_nolock_(base, evcb);
   3003  1.1  christos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   3004  1.1  christos 	return r;
   3005  1.1  christos }
   3006  1.1  christos 
/* Activate "evcb" for the current loop iteration; caller holds the base
 * lock.  Returns 1 if the callback was newly queued, 0 if it was a
 * no-op (already active, finalizing) or merely promoted from the
 * active-later queue. */
int
event_callback_activate_nolock_(struct event_base *base,
    struct event_callback *evcb)
{
	int r = 1;

	/* Finalizing callbacks must not be (re)activated. */
	if (evcb->evcb_flags & EVLIST_FINALIZING)
		return 0;

	switch (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) {
	default:
		/* Both bits set is an invariant violation; after asserting,
		 * fall through and treat it like active-later. */
		EVUTIL_ASSERT(0);
		EVUTIL_FALLTHROUGH;
	case EVLIST_ACTIVE_LATER:
		/* Promote "run next iteration" to "run now"; not a new
		 * activation, so report 0. */
		event_queue_remove_active_later(base, evcb);
		r = 0;
		break;
	case EVLIST_ACTIVE:
		return 0;
	case 0:
		break;
	}

	event_queue_insert_active(base, evcb);

	/* Wake the dispatch loop if it is waiting in another thread. */
	if (EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);

	return r;
}
   3037  1.1  christos 
   3038  1.7  christos int
   3039  1.1  christos event_callback_activate_later_nolock_(struct event_base *base,
   3040  1.1  christos     struct event_callback *evcb)
   3041  1.1  christos {
   3042  1.1  christos 	if (evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))
   3043  1.7  christos 		return 0;
   3044  1.1  christos 
   3045  1.1  christos 	event_queue_insert_active_later(base, evcb);
   3046  1.1  christos 	if (EVBASE_NEED_NOTIFY(base))
   3047  1.1  christos 		evthread_notify_base(base);
   3048  1.7  christos 	return 1;
   3049  1.1  christos }
   3050  1.1  christos 
/* Zero-initialize a raw event_callback and give it the base's default
 * priority: the lowest one, i.e. the numerically largest queue index. */
void
event_callback_init_(struct event_base *base,
    struct event_callback *cb)
{
	memset(cb, 0, sizeof(*cb));
	cb->evcb_pri = base->nactivequeues - 1;
}
   3058  1.1  christos 
   3059  1.1  christos int
   3060  1.1  christos event_callback_cancel_(struct event_base *base,
   3061  1.1  christos     struct event_callback *evcb)
   3062  1.1  christos {
   3063  1.1  christos 	int r;
   3064  1.1  christos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   3065  1.2  christos 	r = event_callback_cancel_nolock_(base, evcb, 0);
   3066  1.1  christos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   3067  1.1  christos 	return r;
   3068  1.1  christos }
   3069  1.1  christos 
/* Cancel a pending callback; caller holds the base lock.  If the
 * callback belongs to a full struct event (EVLIST_INIT), delegate to
 * event_del_nolock_(); otherwise just remove it from whichever active
 * queue it sits on.  Returns 0, except when event_del_nolock_() fails. */
int
event_callback_cancel_nolock_(struct event_base *base,
    struct event_callback *evcb, int even_if_finalizing)
{
	/* Finalizing callbacks are left to the finalizer unless forced. */
	if ((evcb->evcb_flags & EVLIST_FINALIZING) && !even_if_finalizing)
		return 0;

	if (evcb->evcb_flags & EVLIST_INIT)
		return event_del_nolock_(event_callback_to_event(evcb),
		    even_if_finalizing ? EVENT_DEL_EVEN_IF_FINALIZING : EVENT_DEL_AUTOBLOCK);

	switch ((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER))) {
	default:
	case EVLIST_ACTIVE|EVLIST_ACTIVE_LATER:
		/* Cannot be on both active queues at once. */
		EVUTIL_ASSERT(0);
		break;
	case EVLIST_ACTIVE:
		/* We get different kinds of events, add them together */
		event_queue_remove_active(base, evcb);
		return 0;
	case EVLIST_ACTIVE_LATER:
		event_queue_remove_active_later(base, evcb);
		break;
	case 0:
		break;
	}

	return 0;
}
   3099  1.1  christos 
   3100  1.1  christos void
   3101  1.1  christos event_deferred_cb_init_(struct event_callback *cb, ev_uint8_t priority, deferred_cb_fn fn, void *arg)
   3102  1.1  christos {
   3103  1.1  christos 	memset(cb, 0, sizeof(*cb));
   3104  1.1  christos 	cb->evcb_cb_union.evcb_selfcb = fn;
   3105  1.1  christos 	cb->evcb_arg = arg;
   3106  1.1  christos 	cb->evcb_pri = priority;
   3107  1.1  christos 	cb->evcb_closure = EV_CLOSURE_CB_SELF;
   3108  1.1  christos }
   3109  1.1  christos 
/* Change the priority of an already-initialized deferred callback. */
void
event_deferred_cb_set_priority_(struct event_callback *cb, ev_uint8_t priority)
{
	cb->evcb_pri = priority;
}
   3115  1.1  christos 
   3116  1.1  christos void
   3117  1.1  christos event_deferred_cb_cancel_(struct event_base *base, struct event_callback *cb)
   3118  1.1  christos {
   3119  1.1  christos 	if (!base)
   3120  1.1  christos 		base = current_base;
   3121  1.1  christos 	event_callback_cancel_(base, cb);
   3122  1.1  christos }
   3123  1.1  christos 
   3124  1.1  christos #define MAX_DEFERREDS_QUEUED 32
   3125  1.1  christos int
   3126  1.1  christos event_deferred_cb_schedule_(struct event_base *base, struct event_callback *cb)
   3127  1.1  christos {
   3128  1.1  christos 	int r = 1;
   3129  1.1  christos 	if (!base)
   3130  1.1  christos 		base = current_base;
   3131  1.1  christos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   3132  1.1  christos 	if (base->n_deferreds_queued > MAX_DEFERREDS_QUEUED) {
   3133  1.7  christos 		r = event_callback_activate_later_nolock_(base, cb);
   3134  1.1  christos 	} else {
   3135  1.1  christos 		r = event_callback_activate_nolock_(base, cb);
   3136  1.7  christos 		if (r) {
   3137  1.7  christos 			++base->n_deferreds_queued;
   3138  1.7  christos 		}
   3139  1.1  christos 	}
   3140  1.1  christos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   3141  1.1  christos 	return r;
   3142  1.1  christos }
   3143  1.1  christos 
   3144  1.1  christos static int
   3145  1.1  christos timeout_next(struct event_base *base, struct timeval **tv_p)
   3146  1.1  christos {
   3147  1.1  christos 	/* Caller must hold th_base_lock */
   3148  1.1  christos 	struct timeval now;
   3149  1.1  christos 	struct event *ev;
   3150  1.1  christos 	struct timeval *tv = *tv_p;
   3151  1.1  christos 	int res = 0;
   3152  1.1  christos 
   3153  1.1  christos 	ev = min_heap_top_(&base->timeheap);
   3154  1.1  christos 
   3155  1.1  christos 	if (ev == NULL) {
   3156  1.1  christos 		/* if no time-based events are active wait for I/O */
   3157  1.1  christos 		*tv_p = NULL;
   3158  1.1  christos 		goto out;
   3159  1.1  christos 	}
   3160  1.1  christos 
   3161  1.1  christos 	if (gettime(base, &now) == -1) {
   3162  1.1  christos 		res = -1;
   3163  1.1  christos 		goto out;
   3164  1.1  christos 	}
   3165  1.1  christos 
   3166  1.1  christos 	if (evutil_timercmp(&ev->ev_timeout, &now, <=)) {
   3167  1.1  christos 		evutil_timerclear(tv);
   3168  1.1  christos 		goto out;
   3169  1.1  christos 	}
   3170  1.1  christos 
   3171  1.1  christos 	evutil_timersub(&ev->ev_timeout, &now, tv);
   3172  1.1  christos 
   3173  1.1  christos 	EVUTIL_ASSERT(tv->tv_sec >= 0);
   3174  1.1  christos 	EVUTIL_ASSERT(tv->tv_usec >= 0);
   3175  1.1  christos 	event_debug(("timeout_next: event: %p, in %d seconds, %d useconds", ev, (int)tv->tv_sec, (int)tv->tv_usec));
   3176  1.1  christos 
   3177  1.1  christos out:
   3178  1.1  christos 	return (res);
   3179  1.1  christos }
   3180  1.1  christos 
   3181  1.1  christos /* Activate every event whose timeout has elapsed. */
   3182  1.1  christos static void
   3183  1.1  christos timeout_process(struct event_base *base)
   3184  1.1  christos {
   3185  1.1  christos 	/* Caller must hold lock. */
   3186  1.1  christos 	struct timeval now;
   3187  1.1  christos 	struct event *ev;
   3188  1.1  christos 
   3189  1.1  christos 	if (min_heap_empty_(&base->timeheap)) {
   3190  1.1  christos 		return;
   3191  1.1  christos 	}
   3192  1.1  christos 
   3193  1.1  christos 	gettime(base, &now);
   3194  1.1  christos 
   3195  1.1  christos 	while ((ev = min_heap_top_(&base->timeheap))) {
   3196  1.1  christos 		if (evutil_timercmp(&ev->ev_timeout, &now, >))
   3197  1.1  christos 			break;
   3198  1.1  christos 
   3199  1.1  christos 		/* delete this event from the I/O queues */
   3200  1.2  christos 		event_del_nolock_(ev, EVENT_DEL_NOBLOCK);
   3201  1.1  christos 
   3202  1.1  christos 		event_debug(("timeout_process: event: %p, call %p",
   3203  1.1  christos 			 ev, ev->ev_callback));
   3204  1.1  christos 		event_active_nolock_(ev, EV_TIMEOUT, 1);
   3205  1.1  christos 	}
   3206  1.1  christos }
   3207  1.1  christos 
#ifndef MAX
#define MAX(a,b) (((a)>(b))?(a):(b))
#endif

/* Record the high-water mark of an event-count statistic. */
#define MAX_EVENT_COUNT(var, v) var = MAX(var, v)

/* These are a fancy way to spell
     if (~flags & EVLIST_INTERNAL)
         base->event_count--/++;
   i.e. internal events are excluded from the user-visible count.
*/
#define DECR_EVENT_COUNT(base,flags) \
	((base)->event_count -= !((flags) & EVLIST_INTERNAL))
/* Incrementing also maintains event_count_max, the high-water mark of
 * event_count. */
#define INCR_EVENT_COUNT(base,flags) do {					\
	((base)->event_count += !((flags) & EVLIST_INTERNAL));			\
	MAX_EVENT_COUNT((base)->event_count_max, (base)->event_count);		\
} while (0)
   3224  1.1  christos 
/* Take "ev" off the base's set of inserted (added) events and drop the
 * user-visible event count.  Complains and does nothing if the event
 * was not actually inserted. */
static void
event_queue_remove_inserted(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_INSERTED))) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_INSERTED);
		return;
	}
	/* DECR must read ev_flags before the flag is cleared. */
	DECR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags &= ~EVLIST_INSERTED;
}
/* Remove "evcb" from its priority's active queue and update the event
 * and active-callback counts.  Complains and does nothing if the
 * callback is not active. */
static void
event_queue_remove_active(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE))) {
		event_errx(1, "%s: %p not on queue %x", __func__,
			   evcb, EVLIST_ACTIVE);
		return;
	}
	/* DECR must read evcb_flags before the flag is cleared. */
	DECR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags &= ~EVLIST_ACTIVE;
	base->event_count_active--;

	TAILQ_REMOVE(&base->activequeues[evcb->evcb_pri],
	    evcb, evcb_active_next);
}
/* Remove "evcb" from the run-next-iteration queue and update the event
 * and active-callback counts.  Complains and does nothing if the
 * callback is not on that queue. */
static void
event_queue_remove_active_later(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(evcb->evcb_flags & EVLIST_ACTIVE_LATER))) {
		event_errx(1, "%s: %p not on queue %x", __func__,
			   evcb, EVLIST_ACTIVE_LATER);
		return;
	}
	/* DECR must read evcb_flags before the flag is cleared. */
	DECR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags &= ~EVLIST_ACTIVE_LATER;
	base->event_count_active--;

	TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
}
/* Remove 'ev' from the timeout tracking of 'base'.  An event with a
 * "common timeout" sits on a per-duration list; all others sit in the
 * min-heap.  Caller must hold the base lock. */
static void
event_queue_remove_timeout(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (EVUTIL_FAILURE_CHECK(!(ev->ev_flags & EVLIST_TIMEOUT))) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") not on queue %x", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd), EVLIST_TIMEOUT);
		return;
	}
	/* The macro is passed ev_flags, so keep it before the flag is
	 * cleared. */
	DECR_EVENT_COUNT(base, ev->ev_flags);
	ev->ev_flags &= ~EVLIST_TIMEOUT;

	if (is_common_timeout(&ev->ev_timeout, base)) {
		struct common_timeout_list *ctl =
		    get_common_timeout_list(base, &ev->ev_timeout);
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
	} else {
		min_heap_erase_(&base->timeheap, ev);
	}
}
   3289  1.1  christos 
#ifdef USE_REINSERT_TIMEOUT
/* Remove and reinsert 'ev' into the timeout queue. */
static void
event_queue_reinsert_timeout(struct event_base *base, struct event *ev,
    int was_common, int is_common, int old_timeout_idx)
{
	struct common_timeout_list *ctl;
	/* Not currently scheduled: this degenerates to a plain insert. */
	if (!(ev->ev_flags & EVLIST_TIMEOUT)) {
		event_queue_insert_timeout(base, ev);
		return;
	}

	/* Dispatch on the (old location, new location) pair: bit 1 says the
	 * event WAS on a common-timeout list, bit 0 says it WILL BE. */
	switch ((was_common<<1) | is_common) {
	case 3: /* Changing from one common timeout to another */
		ctl = base->common_timeout_queues[old_timeout_idx];
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
		ctl = get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
		break;
	case 2: /* Was common; is no longer common */
		ctl = base->common_timeout_queues[old_timeout_idx];
		TAILQ_REMOVE(&ctl->events, ev,
		    ev_timeout_pos.ev_next_with_common_timeout);
		min_heap_push_(&base->timeheap, ev);
		break;
	case 1: /* Wasn't common; has become common. */
		min_heap_erase_(&base->timeheap, ev);
		ctl = get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
		break;
	case 0: /* was in heap; is still on heap. */
		min_heap_adjust_(&base->timeheap, ev);
		break;
	default:
		EVUTIL_ASSERT(0); /* unreachable */
		break;
	}
}
#endif
   3330  1.1  christos 
/* Add 'ev' to the common timeout list in 'ctl', keeping the list sorted
 * by expiry time. */
static void
insert_common_timeout_inorder(struct common_timeout_list *ctl,
    struct event *ev)
{
	struct event *e;
	/* By all logic, we should just be able to append 'ev' to the end of
	 * ctl->events, since the timeout on each 'ev' is set to {the common
	 * timeout} + {the time when we add the event}, and so the events
	 * should arrive in order of their timeouts.  But just in case
	 * there's some wacky threading issue going on, we do a search from
	 * the end of the list to find the right insertion point.
	 */
	TAILQ_FOREACH_REVERSE(e, &ctl->events,
	    event_list, ev_timeout_pos.ev_next_with_common_timeout) {
		/* This timercmp is a little sneaky, since both ev and e have
		 * magic values in tv_usec.  Fortunately, they ought to have
		 * the _same_ magic values in tv_usec.  Let's assert for that.
		 */
		EVUTIL_ASSERT(
			is_same_common_timeout(&e->ev_timeout, &ev->ev_timeout));
		if (evutil_timercmp(&ev->ev_timeout, &e->ev_timeout, >=)) {
			TAILQ_INSERT_AFTER(&ctl->events, e, ev,
			    ev_timeout_pos.ev_next_with_common_timeout);
			return;
		}
	}
	/* 'ev' expires before everything in the list (or the list is
	 * empty): it goes at the front. */
	TAILQ_INSERT_HEAD(&ctl->events, ev,
	    ev_timeout_pos.ev_next_with_common_timeout);
}
   3361  1.1  christos 
/* Mark 'ev' as added ("inserted") to 'base'.  Double insertion is an
 * error and is logged without side effects.  Caller must hold the base
 * lock. */
static void
event_queue_insert_inserted(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_INSERTED)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already inserted", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd));
		return;
	}

	INCR_EVENT_COUNT(base, ev->ev_flags);

	ev->ev_flags |= EVLIST_INSERTED;
}
   3377  1.1  christos 
/* Append 'evcb' to the active queue for its priority so that it runs on
 * the next pass through the event loop.  Inserting an already-active
 * callback is a silent no-op.  Caller must hold the base lock. */
static void
event_queue_insert_active(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (evcb->evcb_flags & EVLIST_ACTIVE) {
		/* Double insertion is possible for active events */
		return;
	}

	/* The macro reads evcb_flags, so run it before setting the flag. */
	INCR_EVENT_COUNT(base, evcb->evcb_flags);

	evcb->evcb_flags |= EVLIST_ACTIVE;

	base->event_count_active++;
	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
	TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri],
	    evcb, evcb_active_next);
}
   3398  1.1  christos 
/* Append 'evcb' to the active-later queue so it becomes active on a
 * subsequent loop iteration.  A callback that is already active (now or
 * later) is left alone.  Caller must hold the base lock. */
static void
event_queue_insert_active_later(struct event_base *base, struct event_callback *evcb)
{
	EVENT_BASE_ASSERT_LOCKED(base);
	if (evcb->evcb_flags & (EVLIST_ACTIVE_LATER|EVLIST_ACTIVE)) {
		/* Double insertion is possible */
		return;
	}

	/* The macro reads evcb_flags, so run it before setting the flag. */
	INCR_EVENT_COUNT(base, evcb->evcb_flags);
	evcb->evcb_flags |= EVLIST_ACTIVE_LATER;
	base->event_count_active++;
	MAX_EVENT_COUNT(base->event_count_active_max, base->event_count_active);
	EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
	TAILQ_INSERT_TAIL(&base->active_later_queue, evcb, evcb_active_next);
}
   3415  1.1  christos 
/* Schedule 'ev' for timeout tracking: common-timeout events go onto the
 * sorted per-duration list, all others into the min-heap.  Double
 * insertion is an error and is logged without side effects.  Caller must
 * hold the base lock. */
static void
event_queue_insert_timeout(struct event_base *base, struct event *ev)
{
	EVENT_BASE_ASSERT_LOCKED(base);

	if (EVUTIL_FAILURE_CHECK(ev->ev_flags & EVLIST_TIMEOUT)) {
		event_errx(1, "%s: %p(fd "EV_SOCK_FMT") already on timeout", __func__,
		    ev, EV_SOCK_ARG(ev->ev_fd));
		return;
	}

	INCR_EVENT_COUNT(base, ev->ev_flags);

	ev->ev_flags |= EVLIST_TIMEOUT;

	if (is_common_timeout(&ev->ev_timeout, base)) {
		struct common_timeout_list *ctl =
		    get_common_timeout_list(base, &ev->ev_timeout);
		insert_common_timeout_inorder(ctl, ev);
	} else {
		min_heap_push_(&base->timeheap, ev);
	}
}
   3439  1.1  christos 
/* Move every callback on the active-later queue onto the regular active
 * queue for its priority, swapping EVLIST_ACTIVE_LATER for
 * EVLIST_ACTIVE.  Caller must hold the base lock. */
static void
event_queue_make_later_events_active(struct event_base *base)
{
	struct event_callback *evcb;
	EVENT_BASE_ASSERT_LOCKED(base);

	while ((evcb = TAILQ_FIRST(&base->active_later_queue))) {
		TAILQ_REMOVE(&base->active_later_queue, evcb, evcb_active_next);
		evcb->evcb_flags = (evcb->evcb_flags & ~EVLIST_ACTIVE_LATER) | EVLIST_ACTIVE;
		EVUTIL_ASSERT(evcb->evcb_pri < base->nactivequeues);
		TAILQ_INSERT_TAIL(&base->activequeues[evcb->evcb_pri], evcb, evcb_active_next);
		/* Self-invoking deferred callbacks count toward the
		 * per-iteration deferred limit. */
		base->n_deferreds_queued += (evcb->evcb_closure == EV_CLOSURE_CB_SELF);
	}
}
   3454  1.1  christos 
   3455  1.1  christos /* Functions for debugging */
   3456  1.1  christos 
   3457  1.1  christos const char *
   3458  1.1  christos event_get_version(void)
   3459  1.1  christos {
   3460  1.1  christos 	return (EVENT__VERSION);
   3461  1.1  christos }
   3462  1.1  christos 
   3463  1.1  christos ev_uint32_t
   3464  1.1  christos event_get_version_number(void)
   3465  1.1  christos {
   3466  1.1  christos 	return (EVENT__NUMERIC_VERSION);
   3467  1.1  christos }
   3468  1.1  christos 
   3469  1.1  christos /*
   3470  1.1  christos  * No thread-safe interface needed - the information should be the same
   3471  1.1  christos  * for all threads.
   3472  1.1  christos  */
   3473  1.1  christos 
   3474  1.1  christos const char *
   3475  1.1  christos event_get_method(void)
   3476  1.1  christos {
   3477  1.1  christos 	return (current_base->evsel->name);
   3478  1.1  christos }
   3479  1.1  christos 
   3480  1.1  christos #ifndef EVENT__DISABLE_MM_REPLACEMENT
/* User-replaceable memory-management hooks, installed via
 * event_set_mem_functions().  NULL means "use the C library's
 * malloc/realloc/free". */
static void *(*mm_malloc_fn_)(size_t sz) = NULL;
static void *(*mm_realloc_fn_)(void *p, size_t sz) = NULL;
static void (*mm_free_fn_)(void *p) = NULL;
   3484  1.1  christos 
   3485  1.1  christos void *
   3486  1.1  christos event_mm_malloc_(size_t sz)
   3487  1.1  christos {
   3488  1.1  christos 	if (sz == 0)
   3489  1.1  christos 		return NULL;
   3490  1.1  christos 
   3491  1.1  christos 	if (mm_malloc_fn_)
   3492  1.1  christos 		return mm_malloc_fn_(sz);
   3493  1.1  christos 	else
   3494  1.1  christos 		return malloc(sz);
   3495  1.1  christos }
   3496  1.1  christos 
/* Allocate a zeroed array of 'count' elements of 'size' bytes each,
 * through the user-installed allocator if one is set, otherwise via
 * calloc().  Returns NULL (with errno set to ENOMEM on overflow or
 * allocation failure) on error; a zero count or size yields NULL. */
void *
event_mm_calloc_(size_t count, size_t size)
{
	if (count == 0 || size == 0)
		return NULL;

	if (mm_malloc_fn_) {
		size_t sz = count * size;
		void *p = NULL;
		/* calloc() checks count*size for overflow internally; the
		 * replacement allocator does not, so check here.  (The
		 * unsigned multiply above cannot trap; sz is only used
		 * after this check passes.) */
		if (count > EV_SIZE_MAX / size)
			goto error;
		p = mm_malloc_fn_(sz);
		if (p)
			return memset(p, 0, sz);
		/* Allocation failed: fall through to set errno. */
	} else {
		void *p = calloc(count, size);
#ifdef _WIN32
		/* Windows calloc doesn't reliably set ENOMEM */
		if (p == NULL)
			goto error;
#endif
		return p;
	}

error:
	errno = ENOMEM;
	return NULL;
}
   3525  1.1  christos 
/* Duplicate the NUL-terminated string 'str' into newly allocated memory,
 * using the user-installed allocator if one is set, otherwise the
 * platform strdup()/_strdup().  Returns NULL with errno set (EINVAL for
 * a NULL input, ENOMEM on allocation failure). */
char *
event_mm_strdup_(const char *str)
{
	if (!str) {
		errno = EINVAL;
		return NULL;
	}

	if (mm_malloc_fn_) {
		size_t ln = strlen(str);
		void *p = NULL;
		/* ln+1 below would wrap to 0 if ln == EV_SIZE_MAX. */
		if (ln == EV_SIZE_MAX)
			goto error;
		p = mm_malloc_fn_(ln+1);
		if (p)
			return memcpy(p, str, ln+1);
		/* Allocation failed: fall through to set errno. */
	} else
#ifdef _WIN32
		return _strdup(str);
#else
		return strdup(str);
#endif

error:
	errno = ENOMEM;
	return NULL;
}
   3553  1.1  christos 
   3554  1.1  christos void *
   3555  1.1  christos event_mm_realloc_(void *ptr, size_t sz)
   3556  1.1  christos {
   3557  1.1  christos 	if (mm_realloc_fn_)
   3558  1.1  christos 		return mm_realloc_fn_(ptr, sz);
   3559  1.1  christos 	else
   3560  1.1  christos 		return realloc(ptr, sz);
   3561  1.1  christos }
   3562  1.1  christos 
   3563  1.1  christos void
   3564  1.1  christos event_mm_free_(void *ptr)
   3565  1.1  christos {
   3566  1.1  christos 	if (mm_free_fn_)
   3567  1.1  christos 		mm_free_fn_(ptr);
   3568  1.1  christos 	else
   3569  1.1  christos 		free(ptr);
   3570  1.1  christos }
   3571  1.1  christos 
/* Install replacement malloc/realloc/free functions for all of
 * libevent's internal allocations.  Passing NULLs restores the C
 * library defaults.
 *
 * NOTE(review): the hooks are plain (non-atomic) globals with no
 * locking here; presumably this must be called once, before any other
 * libevent call allocates — confirm against the public API docs. */
void
event_set_mem_functions(void *(*malloc_fn)(size_t sz),
			void *(*realloc_fn)(void *ptr, size_t sz),
			void (*free_fn)(void *ptr))
{
	mm_malloc_fn_ = malloc_fn;
	mm_realloc_fn_ = realloc_fn;
	mm_free_fn_ = free_fn;
}
   3581  1.1  christos #endif
   3582  1.1  christos 
#ifdef EVENT__HAVE_EVENTFD
/* Callback run when the eventfd-based wakeup fd becomes readable: drain
 * the 8-byte eventfd counter and clear the pending-notification flag so
 * the next cross-thread wakeup writes again. */
static void
evthread_notify_drain_eventfd(evutil_socket_t fd, short what, void *arg)
{
	ev_uint64_t msg;
	ev_ssize_t r;
	struct event_base *base = arg;

	/* EAGAIN just means the counter was already drained; anything else
	 * is worth a warning. */
	r = read(fd, (void*) &msg, sizeof(msg));
	if (r<0 && errno != EAGAIN) {
		event_sock_warn(fd, "Error reading from eventfd");
	}
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
#endif
   3600  1.1  christos 
/* Callback run when the pipe/socketpair-based wakeup fd becomes
 * readable: read until the fd is empty, then clear the
 * pending-notification flag.  The read data is discarded; only the
 * wakeup matters. */
static void
evthread_notify_drain_default(evutil_socket_t fd, short what, void *arg)
{
	unsigned char buf[1024];
	struct event_base *base = arg;
#ifdef _WIN32
	/* On Windows the notify fd is a socket, so use recv(). */
	while (recv(fd, (char*)buf, sizeof(buf), 0) > 0)
		;
#else
	while (read(fd, (char*)buf, sizeof(buf)) > 0)
		;
#endif

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->is_notify_pending = 0;
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
   3618  1.1  christos 
   3619  1.1  christos int
   3620  1.1  christos evthread_make_base_notifiable(struct event_base *base)
   3621  1.1  christos {
   3622  1.1  christos 	int r;
   3623  1.1  christos 	if (!base)
   3624  1.1  christos 		return -1;
   3625  1.1  christos 
   3626  1.1  christos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   3627  1.1  christos 	r = evthread_make_base_notifiable_nolock_(base);
   3628  1.1  christos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   3629  1.1  christos 	return r;
   3630  1.1  christos }
   3631  1.1  christos 
/* Set up a mechanism that lets other threads wake this base's event
 * loop, trying the cheapest available option first: the kqueue backend's
 * own wakeup, then an eventfd, then an internal pipe/socketpair.  The
 * read end is registered as an internal, persistent, highest-priority
 * event.  Caller must hold the base lock.  Returns 0 on success, -1 on
 * failure. */
static int
evthread_make_base_notifiable_nolock_(struct event_base *base)
{
	void (*cb)(evutil_socket_t, short, void *);
	int (*notify)(struct event_base *);

	if (base->th_notify_fn != NULL) {
		/* The base is already notifiable: we're doing fine. */
		return 0;
	}

#if defined(EVENT__HAVE_WORKING_KQUEUE)
	if (base->evsel == &kqops && event_kq_add_notify_event_(base) == 0) {
		base->th_notify_fn = event_kq_notify_base_;
		/* No need to add an event here; the backend can wake
		 * itself up just fine. */
		return 0;
	}
#endif

#ifdef EVENT__HAVE_EVENTFD
	base->th_notify_fd[0] = evutil_eventfd_(0,
	    EVUTIL_EFD_CLOEXEC|EVUTIL_EFD_NONBLOCK);
	if (base->th_notify_fd[0] >= 0) {
		/* eventfd needs only one fd; mark the write slot unused. */
		base->th_notify_fd[1] = -1;
		notify = evthread_notify_base_eventfd;
		cb = evthread_notify_drain_eventfd;
	} else
#endif
	if (evutil_make_internal_pipe_(base->th_notify_fd) == 0) {
		notify = evthread_notify_base_default;
		cb = evthread_notify_drain_default;
	} else {
		return -1;
	}

	base->th_notify_fn = notify;

	/* prepare an event that we can use for wakeup */
	event_assign(&base->th_notify, base, base->th_notify_fd[0],
				 EV_READ|EV_PERSIST, cb, base);

	/* we need to mark this as internal event */
	base->th_notify.ev_flags |= EVLIST_INTERNAL;
	event_priority_set(&base->th_notify, 0);

	return event_add_nolock_(&base->th_notify, NULL, 0);
}
   3680  1.1  christos 
/* Invoke 'fn(base, ev, arg)' once for every event attached to 'base',
 * visiting inserted events first, then timeout-only events, then
 * active-only events (each event is visited at most once).  Stops early
 * and returns fn's value as soon as it is nonzero; returns 0 otherwise.
 * Caller must hold the base lock. */
int
event_base_foreach_event_nolock_(struct event_base *base,
    event_base_foreach_event_cb fn, void *arg)
{
	int r, i;
	unsigned u;
	struct event *ev;

	/* Start out with all the EVLIST_INSERTED events. */
	if ((r = evmap_foreach_event_(base, fn, arg)))
		return r;

	/* Okay, now we deal with those events that have timeouts and are in
	 * the min-heap. */
	for (u = 0; u < base->timeheap.n; ++u) {
		ev = base->timeheap.p[u];
		if (ev->ev_flags & EVLIST_INSERTED) {
			/* we already processed this one */
			continue;
		}
		if ((r = fn(base, ev, arg)))
			return r;
	}

	/* Now for the events in one of the common-timeout queues. */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl =
		    base->common_timeout_queues[i];
		TAILQ_FOREACH(ev, &ctl->events,
		    ev_timeout_pos.ev_next_with_common_timeout) {
			if (ev->ev_flags & EVLIST_INSERTED) {
				/* we already processed this one */
				continue;
			}
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	/* Finally, we deal with all the active events that we haven't
	 * touched yet. */
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			if ((evcb->evcb_flags & (EVLIST_INIT|EVLIST_INSERTED|EVLIST_TIMEOUT)) != EVLIST_INIT) {
				/* This isn't an event (EVLIST_INIT clear), or
				 * we already processed it (inserted or
				 * timeout set). */
				continue;
			}
			ev = event_callback_to_event(evcb);
			if ((r = fn(base, ev, arg)))
				return r;
		}
	}

	return 0;
}
   3740  1.1  christos 
   3741  1.1  christos /* Helper for event_base_dump_events: called on each event in the event base;
   3742  1.1  christos  * dumps only the inserted events. */
/* event_base_foreach_event callback for event_base_dump_events: print a
 * one-line description of 'e' to the FILE* passed in 'arg', but only if
 * the event is inserted or has a pending timeout.  Always returns 0 so
 * iteration continues. */
static int
dump_inserted_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	if (! (e->ev_flags & (EVLIST_INSERTED|EVLIST_TIMEOUT)))
		return 0;

	fprintf(output, "  %p [%s "EV_SOCK_FMT"]%s%s%s%s%s%s%s",
	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd),
	    (e->ev_events&EV_READ)?" Read":"",
	    (e->ev_events&EV_WRITE)?" Write":"",
	    (e->ev_events&EV_CLOSED)?" EOF":"",
	    (e->ev_events&EV_SIGNAL)?" Signal":"",
	    (e->ev_events&EV_PERSIST)?" Persist":"",
	    (e->ev_events&EV_ET)?" ET":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" Internal":"");
	if (e->ev_flags & EVLIST_TIMEOUT) {
		struct timeval tv;
		tv.tv_sec = e->ev_timeout.tv_sec;
		/* Mask off the common-timeout magic bits before printing,
		 * and convert from the monotonic clock to wall time. */
		tv.tv_usec = e->ev_timeout.tv_usec & MICROSECONDS_MASK;
		evutil_timeradd(&tv, &base->tv_clock_diff, &tv);
		fprintf(output, " Timeout=%ld.%06d",
		    (long)tv.tv_sec, (int)(tv.tv_usec & MICROSECONDS_MASK));
	}
	fputc('\n', output);

	return 0;
}
   3774  1.1  christos 
   3775  1.1  christos /* Helper for event_base_dump_events: called on each event in the event base;
   3776  1.1  christos  * dumps only the active events. */
/* event_base_foreach_event callback for event_base_dump_events: print a
 * one-line description of 'e' to the FILE* passed in 'arg', but only if
 * the event is active (now or scheduled for later).  Always returns 0 so
 * iteration continues. */
static int
dump_active_event_fn(const struct event_base *base, const struct event *e, void *arg)
{
	FILE *output = arg;
	const char *gloss = (e->ev_events & EV_SIGNAL) ?
	    "sig" : "fd ";

	if (! (e->ev_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)))
		return 0;

	/* Note: this prints ev_res (the triggered result flags), not
	 * ev_events (the requested flags). */
	fprintf(output, "  %p [%s "EV_SOCK_FMT", priority=%d]%s%s%s%s%s active%s%s\n",
	    (void*)e, gloss, EV_SOCK_ARG(e->ev_fd), e->ev_pri,
	    (e->ev_res&EV_READ)?" Read":"",
	    (e->ev_res&EV_WRITE)?" Write":"",
	    (e->ev_res&EV_CLOSED)?" EOF":"",
	    (e->ev_res&EV_SIGNAL)?" Signal":"",
	    (e->ev_res&EV_TIMEOUT)?" Timeout":"",
	    (e->ev_flags&EVLIST_INTERNAL)?" [Internal]":"",
	    (e->ev_flags&EVLIST_ACTIVE_LATER)?" [NextTime]":"");

	return 0;
}
   3799  1.1  christos 
   3800  1.1  christos int
   3801  1.1  christos event_base_foreach_event(struct event_base *base,
   3802  1.1  christos     event_base_foreach_event_cb fn, void *arg)
   3803  1.1  christos {
   3804  1.1  christos 	int r;
   3805  1.1  christos 	if ((!fn) || (!base)) {
   3806  1.1  christos 		return -1;
   3807  1.1  christos 	}
   3808  1.1  christos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   3809  1.1  christos 	r = event_base_foreach_event_nolock_(base, fn, arg);
   3810  1.1  christos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   3811  1.1  christos 	return r;
   3812  1.1  christos }
   3813  1.1  christos 
   3814  1.1  christos 
/* Debugging aid: write a human-readable listing of every inserted and
 * every active event in 'base' to 'output'.  Takes the base lock for the
 * duration.  NOTE(review): neither 'base' nor 'output' is NULL-checked
 * here — callers must pass valid pointers. */
void
event_base_dump_events(struct event_base *base, FILE *output)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	fprintf(output, "Inserted events:\n");
	event_base_foreach_event_nolock_(base, dump_inserted_event_fn, output);

	fprintf(output, "Active events:\n");
	event_base_foreach_event_nolock_(base, dump_active_event_fn, output);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
   3826  1.1  christos 
/* Manually activate events associated with file descriptor 'fd'.  The
 * requested I/O events (read/write/closed) are activated via the fd
 * map; if EV_TIMEOUT is requested, every pending timer event on this fd
 * (in the min-heap and in every common-timeout list) is fired instead. */
void
event_base_active_by_fd(struct event_base *base, evutil_socket_t fd, short events)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	/* Activate any non timer events */
	if (!(events & EV_TIMEOUT)) {
		evmap_io_active_(base, fd, events & (EV_READ|EV_WRITE|EV_CLOSED));
	} else {
		/* If we want to activate timer events, loop and activate each event with
		 * the same fd in both the timeheap and common timeouts list */
		int i;
		unsigned u;
		struct event *ev;

		for (u = 0; u < base->timeheap.n; ++u) {
			ev = base->timeheap.p[u];
			if (ev->ev_fd == fd) {
				event_active_nolock_(ev, EV_TIMEOUT, 1);
			}
		}

		for (i = 0; i < base->n_common_timeouts; ++i) {
			struct common_timeout_list *ctl = base->common_timeout_queues[i];
			TAILQ_FOREACH(ev, &ctl->events,
				ev_timeout_pos.ev_next_with_common_timeout) {
				if (ev->ev_fd == fd) {
					event_active_nolock_(ev, EV_TIMEOUT, 1);
				}
			}
		}
	}

	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
   3862  1.2  christos 
/* Manually activate every event in 'base' that is registered for signal
 * number 'sig', as if the signal had been delivered once. */
void
event_base_active_by_signal(struct event_base *base, int sig)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	evmap_signal_active_(base, sig, 1);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
   3870  1.2  christos 
   3871  1.2  christos 
/* Register one "virtual" event on 'base': a placeholder that keeps the
 * loop alive without a real fd/signal/timer.  Also tracks the high-water
 * mark of virtual events. */
void
event_base_add_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	base->virtual_event_count++;
	MAX_EVENT_COUNT(base->virtual_event_count_max, base->virtual_event_count);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
   3880  1.1  christos 
/* Remove one "virtual" event from 'base'.  When the count drops to zero
 * and another thread is waiting in the loop, wake it so it can notice
 * there is nothing left to wait for. */
void
event_base_del_virtual_(struct event_base *base)
{
	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
	EVUTIL_ASSERT(base->virtual_event_count > 0);
	base->virtual_event_count--;
	if (base->virtual_event_count == 0 && EVBASE_NEED_NOTIFY(base))
		evthread_notify_base(base);
	EVBASE_RELEASE_LOCK(base, th_base_lock);
}
   3891  1.1  christos 
/* Global-shutdown helper: free the debug-map lock (if any) and turn off
 * lock debugging.  Compiled to a no-op when thread support or debug mode
 * is disabled. */
static void
event_free_debug_globals_locks(void)
{
#ifndef EVENT__DISABLE_THREAD_SUPPORT
#ifndef EVENT__DISABLE_DEBUG_MODE
	if (event_debug_map_lock_ != NULL) {
		EVTHREAD_FREE_LOCK(event_debug_map_lock_, 0);
		event_debug_map_lock_ = NULL;
		evthreadimpl_disable_lock_debugging_();
	}
#endif /* EVENT__DISABLE_DEBUG_MODE */
#endif /* EVENT__DISABLE_THREAD_SUPPORT */
	return;
}
   3906  1.1  christos 
/* Global-shutdown helper: release debug-mode global state. */
static void
event_free_debug_globals(void)
{
	event_free_debug_globals_locks();
}
   3912  1.1  christos 
/* Global-shutdown helper: release signal-handling global state. */
static void
event_free_evsig_globals(void)
{
	evsig_free_globals_();
}
   3918  1.1  christos 
/* Global-shutdown helper: release evutil global state. */
static void
event_free_evutil_globals(void)
{
	evutil_free_globals_();
}
   3924  1.1  christos 
/* Release all of libevent's global state, in dependency order: debug
 * machinery first, then signal handling, then evutil. */
static void
event_free_globals(void)
{
	event_free_debug_globals();
	event_free_evsig_globals();
	event_free_evutil_globals();
}
   3932  1.1  christos 
/* Public teardown entry point: disable debug mode, then free every piece
 * of libevent-wide global state.  Intended for process shutdown (e.g. to
 * quiet leak checkers). */
void
libevent_global_shutdown(void)
{
	event_disable_debug_mode();
	event_free_globals();
}
   3939  1.1  christos 
   3940  1.1  christos #ifndef EVENT__DISABLE_THREAD_SUPPORT
   3941  1.1  christos int
   3942  1.1  christos event_global_setup_locks_(const int enable_locks)
   3943  1.1  christos {
   3944  1.1  christos #ifndef EVENT__DISABLE_DEBUG_MODE
   3945  1.1  christos 	EVTHREAD_SETUP_GLOBAL_LOCK(event_debug_map_lock_, 0);
   3946  1.1  christos #endif
   3947  1.1  christos 	if (evsig_global_setup_locks_(enable_locks) < 0)
   3948  1.1  christos 		return -1;
   3949  1.1  christos 	if (evutil_global_setup_locks_(enable_locks) < 0)
   3950  1.1  christos 		return -1;
   3951  1.1  christos 	if (evutil_secure_rng_global_setup_locks_(enable_locks) < 0)
   3952  1.1  christos 		return -1;
   3953  1.1  christos 	return 0;
   3954  1.1  christos }
   3955  1.1  christos #endif
   3956  1.1  christos 
   3957  1.1  christos void
   3958  1.1  christos event_base_assert_ok_(struct event_base *base)
   3959  1.1  christos {
   3960  1.1  christos 	EVBASE_ACQUIRE_LOCK(base, th_base_lock);
   3961  1.1  christos 	event_base_assert_ok_nolock_(base);
   3962  1.1  christos 	EVBASE_RELEASE_LOCK(base, th_base_lock);
   3963  1.1  christos }
   3964  1.1  christos 
/*
 * Verify the internal invariants of an event_base.  The caller must
 * already hold the base lock (or know the base is unshared); any
 * violated invariant aborts via EVUTIL_ASSERT.  Checks, in order:
 * the per-fd/per-signal event maps, the timeout min-heap, the
 * common-timeout queues, and the active/active-later queues.
 */
void
event_base_assert_ok_nolock_(struct event_base *base)
{
	int i;
	int count;

	/* First do checks on the per-fd and per-signal lists */
	evmap_check_integrity_(base);

	/* Check the heap property */
	for (i = 1; i < (int)base->timeheap.n; ++i) {
		/* Binary min-heap stored in an array: parent of slot i is
		 * slot (i-1)/2.  Slot 0 has no parent, so start at i == 1. */
		int parent = (i - 1) / 2;
		struct event *ev, *p_ev;
		ev = base->timeheap.p[i];
		p_ev = base->timeheap.p[parent];
		/* Every heap member must be flagged as a pending timeout. */
		EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
		/* Min-heap order: a parent never fires after its child. */
		EVUTIL_ASSERT(evutil_timercmp(&p_ev->ev_timeout, &ev->ev_timeout, <=));
		/* The event's cached heap index must match its actual slot. */
		EVUTIL_ASSERT(ev->ev_timeout_pos.min_heap_idx == i);
	}

	/* Check that the common timeouts are fine */
	for (i = 0; i < base->n_common_timeouts; ++i) {
		struct common_timeout_list *ctl = base->common_timeout_queues[i];
		struct event *last=NULL, *ev;

		/* Structural check of the tail queue's links. */
		EVUTIL_ASSERT_TAILQ_OK(&ctl->events, event, ev_timeout_pos.ev_next_with_common_timeout);

		TAILQ_FOREACH(ev, &ctl->events, ev_timeout_pos.ev_next_with_common_timeout) {
			/* Each queue must be sorted by ascending timeout. */
			if (last)
				EVUTIL_ASSERT(evutil_timercmp(&last->ev_timeout, &ev->ev_timeout, <=));
			EVUTIL_ASSERT(ev->ev_flags & EVLIST_TIMEOUT);
			/* The timeout must be tagged as a common timeout, and
			 * its encoded index must point back at this queue. */
			EVUTIL_ASSERT(is_common_timeout(&ev->ev_timeout,base));
			EVUTIL_ASSERT(COMMON_TIMEOUT_IDX(&ev->ev_timeout) == i);
			last = ev;
		}
	}

	/* Check the active queues. */
	count = 0;
	for (i = 0; i < base->nactivequeues; ++i) {
		struct event_callback *evcb;
		EVUTIL_ASSERT_TAILQ_OK(&base->activequeues[i], event_callback, evcb_active_next);
		TAILQ_FOREACH(evcb, &base->activequeues[i], evcb_active_next) {
			/* ACTIVE set, ACTIVE_LATER clear: a callback may sit
			 * on exactly one of the two kinds of active queue. */
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE);
			/* Queue index doubles as the callback's priority. */
			EVUTIL_ASSERT(evcb->evcb_pri == i);
			++count;
		}
	}

	{
		struct event_callback *evcb;
		TAILQ_FOREACH(evcb, &base->active_later_queue, evcb_active_next) {
			/* Converse of the above: ACTIVE_LATER set, ACTIVE clear. */
			EVUTIL_ASSERT((evcb->evcb_flags & (EVLIST_ACTIVE|EVLIST_ACTIVE_LATER)) == EVLIST_ACTIVE_LATER);
			++count;
		}
	}
	/* The running tally over both queue kinds must equal the cached
	 * active-event count maintained by the base. */
	EVUTIL_ASSERT(count == base->event_count_active);
}
   4023