Home | History | Annotate | Line # | Download | only in dist
kqueue.c revision 1.2
      1  1.2   kamil /*	$NetBSD: kqueue.c,v 1.2 2019/10/03 22:16:52 kamil Exp $	*/
      2  1.1  plunky /*	$OpenBSD: kqueue.c,v 1.5 2002/07/10 14:41:31 art Exp $	*/
      3  1.1  plunky 
      4  1.1  plunky /*
      5  1.2   kamil  * Copyright 2000-2007 Niels Provos <provos (at) citi.umich.edu>
      6  1.2   kamil  * Copyright 2007-2012 Niels Provos and Nick Mathewson
      7  1.1  plunky  *
      8  1.1  plunky  * Redistribution and use in source and binary forms, with or without
      9  1.1  plunky  * modification, are permitted provided that the following conditions
     10  1.1  plunky  * are met:
     11  1.1  plunky  * 1. Redistributions of source code must retain the above copyright
     12  1.1  plunky  *    notice, this list of conditions and the following disclaimer.
     13  1.1  plunky  * 2. Redistributions in binary form must reproduce the above copyright
     14  1.1  plunky  *    notice, this list of conditions and the following disclaimer in the
     15  1.1  plunky  *    documentation and/or other materials provided with the distribution.
     16  1.1  plunky  * 3. The name of the author may not be used to endorse or promote products
     17  1.1  plunky  *    derived from this software without specific prior written permission.
     18  1.1  plunky  *
     19  1.1  plunky  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
     20  1.1  plunky  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
     21  1.1  plunky  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
     22  1.1  plunky  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
     23  1.1  plunky  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
     24  1.1  plunky  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     25  1.1  plunky  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     26  1.1  plunky  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     27  1.1  plunky  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
     28  1.1  plunky  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     29  1.1  plunky  */
     30  1.2   kamil #include "event2/event-config.h"
     31  1.2   kamil #include <sys/cdefs.h>
     32  1.2   kamil __RCSID("$NetBSD: kqueue.c,v 1.2 2019/10/03 22:16:52 kamil Exp $");
     33  1.2   kamil #include "evconfig-private.h"
     34  1.2   kamil 
     35  1.2   kamil #ifdef EVENT__HAVE_KQUEUE
     36  1.1  plunky 
     37  1.1  plunky #include <sys/types.h>
     38  1.2   kamil #ifdef EVENT__HAVE_SYS_TIME_H
     39  1.1  plunky #include <sys/time.h>
     40  1.1  plunky #endif
     41  1.1  plunky #include <sys/queue.h>
     42  1.1  plunky #include <sys/event.h>
     43  1.1  plunky #include <signal.h>
     44  1.1  plunky #include <stdio.h>
     45  1.1  plunky #include <stdlib.h>
     46  1.1  plunky #include <string.h>
     47  1.1  plunky #include <unistd.h>
     48  1.1  plunky #include <errno.h>
     49  1.2   kamil #ifdef EVENT__HAVE_INTTYPES_H
     50  1.1  plunky #include <inttypes.h>
     51  1.1  plunky #endif
     52  1.1  plunky 
     53  1.1  plunky /* Some platforms apparently define the udata field of struct kevent as
     54  1.1  plunky  * intptr_t, whereas others define it as void*.  There doesn't seem to be an
     55  1.1  plunky  * easy way to tell them apart via autoconf, so we need to use OS macros. */
     56  1.2   kamil #if defined(__NetBSD__)
     57  1.2   kamil #define PTR_TO_UDATA(x) ((typeof(((struct kevent *)0)->udata))(x))
     58  1.2   kamil #define INT_TO_UDATA(x) ((typeof(((struct kevent *)0)->udata))(intptr_t)(x))
     59  1.2   kamil #elif defined(EVENT__HAVE_INTTYPES_H) && !defined(__OpenBSD__) && !defined(__FreeBSD__) && !defined(__darwin__) && !defined(__APPLE__) && !defined(__CloudABI__)
     60  1.1  plunky #define PTR_TO_UDATA(x)	((intptr_t)(x))
     61  1.2   kamil #define INT_TO_UDATA(x) ((intptr_t)(x))
     62  1.1  plunky #else
     63  1.1  plunky #define PTR_TO_UDATA(x)	(x)
     64  1.2   kamil #define INT_TO_UDATA(x) ((void*)(x))
     65  1.1  plunky #endif
     66  1.1  plunky 
     67  1.1  plunky #include "event-internal.h"
     68  1.2   kamil #include "log-internal.h"
     69  1.2   kamil #include "evmap-internal.h"
     70  1.2   kamil #include "event2/thread.h"
     71  1.2   kamil #include "evthread-internal.h"
     72  1.2   kamil #include "changelist-internal.h"
     73  1.1  plunky 
     74  1.2   kamil #include "kqueue-internal.h"
     75  1.1  plunky 
/* Initial allocation size (in entries) for both the 'changes' and the
 * 'events' arrays below; both are grown on demand. */
#define NEVENT		64

/* Per-event_base state for the kqueue backend. */
struct kqop {
	struct kevent *changes;	/* changes to pass to kevent() at the next dispatch */
	int changes_size;	/* allocated capacity of 'changes', in entries */

	struct kevent *events;	/* buffer that kevent() fills with results */
	int events_size;	/* allocated capacity of 'events', in entries */
	int kq;			/* the kqueue file descriptor */
	int notify_event_added;	/* nonzero once the EVFILT_USER wakeup event is registered */
	pid_t pid;		/* pid that created 'kq'; kqop_free() only closes it from that process */
};
     88  1.1  plunky 
/* Forward declarations for the backend entry points referenced by the
 * method tables below. */
static void kqop_free(struct kqop *kqop);

static void *kq_init(struct event_base *);
static int kq_sig_add(struct event_base *, int, short, short, void *);
static int kq_sig_del(struct event_base *, int, short, short, void *);
static int kq_dispatch(struct event_base *, struct timeval *);
static void kq_dealloc(struct event_base *);
     96  1.1  plunky 
/* Backend method table for kqueue.  fd add/del go through the generic
 * changelist helpers so that changes are batched and submitted in a
 * single kevent() call from kq_dispatch. */
const struct eventop kqops = {
	"kqueue",
	kq_init,
	event_changelist_add_,
	event_changelist_del_,
	kq_dispatch,
	kq_dealloc,
	1 /* need reinit */,
    EV_FEATURE_ET|EV_FEATURE_O1|EV_FEATURE_FDS,
	EVENT_CHANGELIST_FDINFO_SIZE
};
    108  1.2   kamil 
/* Method table for signal events: signals are added/deleted immediately
 * via kq_sig_add/kq_sig_del (not through the changelist), and the
 * resulting EVFILT_SIGNAL events are picked up by kq_dispatch. */
static const struct eventop kqsigops = {
	"kqueue_signal",
	NULL,
	kq_sig_add,
	kq_sig_del,
	NULL,
	NULL,
	1 /* need reinit */,
	0,
	0
};
    120  1.1  plunky 
/* Set up the kqueue backend for 'base': create the kqueue, allocate the
 * initial change/event arrays, and probe that kevent() actually works on
 * this platform.  Returns the new kqop on success, NULL on failure (all
 * partially-acquired resources are released via kqop_free). */
static void *
kq_init(struct event_base *base)
{
	int kq = -1;
	struct kqop *kqueueop = NULL;

	if (!(kqueueop = mm_calloc(1, sizeof(struct kqop))))
		return (NULL);

/* Initialize the kernel queue */

	if ((kq = kqueue()) == -1) {
		event_warn("kqueue");
		goto err;
	}

	kqueueop->kq = kq;

	/* Remember which process created this kqueue; kqop_free() only
	 * closes the fd when still running in that process. */
	kqueueop->pid = getpid();

	/* Initialize fields */
	kqueueop->changes = mm_calloc(NEVENT, sizeof(struct kevent));
	if (kqueueop->changes == NULL)
		goto err;
	kqueueop->events = mm_calloc(NEVENT, sizeof(struct kevent));
	if (kqueueop->events == NULL)
		goto err;
	kqueueop->events_size = kqueueop->changes_size = NEVENT;

	/* Check for Mac OS X kqueue bug.  We submit a bogus EV_ADD with
	 * ident -1 and expect it back as an EV_ERROR entry. */
	memset(&kqueueop->changes[0], 0, sizeof kqueueop->changes[0]);
	kqueueop->changes[0].ident = -1;
	kqueueop->changes[0].filter = EVFILT_READ;
	kqueueop->changes[0].flags = EV_ADD;
	/*
	 * If kqueue works, then kevent will succeed, and it will
	 * stick an error in events[0].  If kqueue is broken, then
	 * kevent will fail.
	 */
	if (kevent(kq,
		kqueueop->changes, 1, kqueueop->events, NEVENT, NULL) != 1 ||
	    (int)kqueueop->events[0].ident != -1 ||
	    !(kqueueop->events[0].flags & EV_ERROR)) {
		event_warn("%s: detected broken kqueue; not using.", __func__);
		goto err;
	}

	/* Route signal handling through the kqueue-based signal backend. */
	base->evsigsel = &kqsigops;

	return (kqueueop);
err:
	if (kqueueop)
		kqop_free(kqueueop);

	return (NULL);
}
    177  1.2   kamil 
    178  1.2   kamil #define ADD_UDATA 0x30303
    179  1.2   kamil 
    180  1.2   kamil static void
    181  1.2   kamil kq_setup_kevent(struct kevent *out, evutil_socket_t fd, int filter, short change)
    182  1.2   kamil {
    183  1.2   kamil 	memset(out, 0, sizeof(struct kevent));
    184  1.2   kamil 	out->ident = fd;
    185  1.2   kamil 	out->filter = filter;
    186  1.2   kamil 
    187  1.2   kamil 	if (change & EV_CHANGE_ADD) {
    188  1.2   kamil 		out->flags = EV_ADD;
    189  1.2   kamil 		/* We set a magic number here so that we can tell 'add'
    190  1.2   kamil 		 * errors from 'del' errors. */
    191  1.2   kamil 		out->udata = INT_TO_UDATA(ADD_UDATA);
    192  1.2   kamil 		if (change & EV_ET)
    193  1.2   kamil 			out->flags |= EV_CLEAR;
    194  1.2   kamil #ifdef NOTE_EOF
    195  1.2   kamil 		/* Make it behave like select() and poll() */
    196  1.2   kamil 		if (filter == EVFILT_READ)
    197  1.2   kamil 			out->fflags = NOTE_EOF;
    198  1.2   kamil #endif
    199  1.2   kamil 	} else {
    200  1.2   kamil 		EVUTIL_ASSERT(change & EV_CHANGE_DEL);
    201  1.2   kamil 		out->flags = EV_DELETE;
    202  1.2   kamil 	}
    203  1.1  plunky }
    204  1.1  plunky 
    205  1.1  plunky static int
    206  1.2   kamil kq_build_changes_list(const struct event_changelist *changelist,
    207  1.2   kamil     struct kqop *kqop)
    208  1.1  plunky {
    209  1.2   kamil 	int i;
    210  1.2   kamil 	int n_changes = 0;
    211  1.1  plunky 
    212  1.2   kamil 	for (i = 0; i < changelist->n_changes; ++i) {
    213  1.2   kamil 		struct event_change *in_ch = &changelist->changes[i];
    214  1.2   kamil 		struct kevent *out_ch;
    215  1.2   kamil 		if (n_changes >= kqop->changes_size - 1) {
    216  1.2   kamil 			int newsize = kqop->changes_size * 2;
    217  1.2   kamil 			struct kevent *newchanges;
    218  1.2   kamil 
    219  1.2   kamil 			newchanges = mm_realloc(kqop->changes,
    220  1.2   kamil 			    newsize * sizeof(struct kevent));
    221  1.2   kamil 			if (newchanges == NULL) {
    222  1.2   kamil 				event_warn("%s: realloc", __func__);
    223  1.2   kamil 				return (-1);
    224  1.2   kamil 			}
    225  1.2   kamil 			kqop->changes = newchanges;
    226  1.2   kamil 			kqop->changes_size = newsize;
    227  1.2   kamil 		}
    228  1.2   kamil 		if (in_ch->read_change) {
    229  1.2   kamil 			out_ch = &kqop->changes[n_changes++];
    230  1.2   kamil 			kq_setup_kevent(out_ch, in_ch->fd, EVFILT_READ,
    231  1.2   kamil 			    in_ch->read_change);
    232  1.1  plunky 		}
    233  1.2   kamil 		if (in_ch->write_change) {
    234  1.2   kamil 			out_ch = &kqop->changes[n_changes++];
    235  1.2   kamil 			kq_setup_kevent(out_ch, in_ch->fd, EVFILT_WRITE,
    236  1.2   kamil 			    in_ch->write_change);
    237  1.1  plunky 		}
    238  1.1  plunky 	}
    239  1.2   kamil 	return n_changes;
    240  1.2   kamil }
    241  1.1  plunky 
    242  1.2   kamil static int
    243  1.2   kamil kq_grow_events(struct kqop *kqop, size_t new_size)
    244  1.2   kamil {
    245  1.2   kamil 	struct kevent *newresult;
    246  1.1  plunky 
    247  1.2   kamil 	newresult = mm_realloc(kqop->events,
    248  1.2   kamil 	    new_size * sizeof(struct kevent));
    249  1.1  plunky 
    250  1.2   kamil 	if (newresult) {
    251  1.2   kamil 		kqop->events = newresult;
    252  1.2   kamil 		kqop->events_size = new_size;
    253  1.2   kamil 		return 0;
    254  1.2   kamil 	} else {
    255  1.2   kamil 		return -1;
    256  1.2   kamil 	}
    257  1.1  plunky }
    258  1.1  plunky 
/* Backend dispatch: submit the accumulated changelist to the kernel and
 * wait up to 'tv' (NULL = forever) for events in a single kevent() call,
 * then activate the corresponding libevent events.
 * Returns 0 on success (including interruption by EINTR), -1 on error. */
static int
kq_dispatch(struct event_base *base, struct timeval *tv)
{
	struct kqop *kqop = base->evbase;
	struct kevent *events = kqop->events;
	struct kevent *changes;
	struct timespec ts, *ts_p = NULL;
	int i, n_changes, res;

	/* kevent() takes a timespec, not a timeval. */
	if (tv != NULL) {
		ts.tv_sec = tv->tv_sec;
		ts.tv_nsec = tv->tv_usec * 1000;
		ts_p = &ts;
	}

	/* Build "changes" from "base->changelist" */
	EVUTIL_ASSERT(kqop->changes);
	n_changes = kq_build_changes_list(&base->changelist, kqop);
	if (n_changes < 0)
		return -1;

	event_changelist_remove_all_(&base->changelist, base);

	/* steal the changes array in case some broken code tries to call
	 * dispatch twice at once. */
	changes = kqop->changes;
	kqop->changes = NULL;

	/* Make sure that 'events' is at least as long as the list of changes:
	 * otherwise errors in the changes can get reported as a -1 return
	 * value from kevent() rather than as EV_ERROR events in the events
	 * array.
	 *
	 * (We could instead handle -1 return values from kevent() by
	 * retrying with a smaller changes array or a larger events array,
	 * but this approach seems less risky for now.)
	 */
	if (kqop->events_size < n_changes) {
		int new_size = kqop->events_size;
		do {
			new_size *= 2;
		} while (new_size < n_changes);

		/* NOTE(review): the return value of kq_grow_events() is
		 * ignored here; on OOM we continue with the smaller array
		 * and may hit the -1-from-kevent behavior described above —
		 * confirm this is the intended best-effort policy. */
		kq_grow_events(kqop, new_size);
		events = kqop->events;
	}

	/* Release the base lock while blocked in the kernel so that other
	 * threads can operate on the base. */
	EVBASE_RELEASE_LOCK(base, th_base_lock);

	res = kevent(kqop->kq, changes, n_changes,
	    events, kqop->events_size, ts_p);

	EVBASE_ACQUIRE_LOCK(base, th_base_lock);

	/* Give the stolen changes array back. */
	EVUTIL_ASSERT(kqop->changes == NULL);
	kqop->changes = changes;

	if (res == -1) {
		if (errno != EINTR) {
			event_warn("kevent");
			return (-1);
		}

		return (0);
	}

	event_debug(("%s: kevent reports %d", __func__, res));

	for (i = 0; i < res; i++) {
		int which = 0;

		if (events[i].flags & EV_ERROR) {
			/* 'data' holds the errno for this failed change. */
			switch (events[i].data) {

			/* Can occur on delete if we are not currently
			 * watching any events on this fd.  That can
			 * happen when the fd was closed and another
			 * file was opened with that fd. */
			case ENOENT:
			/* Can occur for reasons not fully understood
			 * on FreeBSD. */
			case EINVAL:
				continue;
#if defined(__FreeBSD__)
			/*
			 * This currently occurs if an FD is closed
			 * before the EV_DELETE makes it out via kevent().
			 * The FreeBSD capabilities code sees the blank
			 * capability set and rejects the request to
			 * modify an event.
			 *
			 * To be strictly correct - when an FD is closed,
			 * all the registered events are also removed.
			 * Queuing EV_DELETE to a closed FD is wrong.
			 * The event(s) should just be deleted from
			 * the pending changelist.
			 */
			case ENOTCAPABLE:
				continue;
#endif

			/* Can occur on a delete if the fd is closed. */
			case EBADF:
				/* XXXX On NetBSD, we can also get EBADF if we
				 * try to add the write side of a pipe, but
				 * the read side has already been closed.
				 * Other BSDs call this situation 'EPIPE'. It
				 * would be good if we had a way to report
				 * this situation. */
				continue;
			/* These two can occur on an add if the fd was one side
			 * of a pipe, and the other side was closed. */
			case EPERM:
			case EPIPE:
				/* Report read events, if we're listening for
				 * them, so that the user can learn about any
				 * add errors.  (If the operation was a
				 * delete, then udata should be cleared.) */
				if (events[i].udata) {
					/* The operation was an add:
					 * report the error as a read. */
					which |= EV_READ;
					break;
				} else {
					/* The operation was a del:
					 * report nothing. */
					continue;
				}

			/* Other errors shouldn't occur. */
			default:
				errno = events[i].data;
				return (-1);
			}
		} else if (events[i].filter == EVFILT_READ) {
			which |= EV_READ;
		} else if (events[i].filter == EVFILT_WRITE) {
			which |= EV_WRITE;
		} else if (events[i].filter == EVFILT_SIGNAL) {
			which |= EV_SIGNAL;
#ifdef EVFILT_USER
		} else if (events[i].filter == EVFILT_USER) {
			/* Our cross-thread wakeup event fired; see
			 * event_kq_notify_base_. */
			base->is_notify_pending = 0;
#endif
		}

		if (!which)
			continue;

		if (events[i].filter == EVFILT_SIGNAL) {
			evmap_signal_active_(base, events[i].ident, 1);
		} else {
			evmap_io_active_(base, events[i].ident, which | EV_ET);
		}
	}

	if (res == kqop->events_size) {
		/* We used all the events space that we have. Maybe we should
		   make it bigger. */
		kq_grow_events(kqop, kqop->events_size * 2);
	}

	return (0);
}
    423  1.1  plunky 
    424  1.2   kamil static void
    425  1.2   kamil kqop_free(struct kqop *kqop)
    426  1.2   kamil {
    427  1.2   kamil 	if (kqop->changes)
    428  1.2   kamil 		mm_free(kqop->changes);
    429  1.2   kamil 	if (kqop->events)
    430  1.2   kamil 		mm_free(kqop->events);
    431  1.2   kamil 	if (kqop->kq >= 0 && kqop->pid == getpid())
    432  1.2   kamil 		close(kqop->kq);
    433  1.2   kamil 	memset(kqop, 0, sizeof(struct kqop));
    434  1.2   kamil 	mm_free(kqop);
    435  1.2   kamil }
    436  1.2   kamil 
    437  1.2   kamil static void
    438  1.2   kamil kq_dealloc(struct event_base *base)
    439  1.2   kamil {
    440  1.2   kamil 	struct kqop *kqop = base->evbase;
    441  1.2   kamil 	evsig_dealloc_(base);
    442  1.2   kamil 	kqop_free(kqop);
    443  1.2   kamil }
    444  1.1  plunky 
    445  1.2   kamil /* signal handling */
    446  1.1  plunky static int
    447  1.2   kamil kq_sig_add(struct event_base *base, int nsignal, short old, short events, void *p)
    448  1.1  plunky {
    449  1.2   kamil 	struct kqop *kqop = base->evbase;
    450  1.1  plunky 	struct kevent kev;
    451  1.2   kamil 	struct timespec timeout = { 0, 0 };
    452  1.2   kamil 	(void)p;
    453  1.2   kamil 
    454  1.2   kamil 	EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);
    455  1.1  plunky 
    456  1.2   kamil 	memset(&kev, 0, sizeof(kev));
    457  1.2   kamil 	kev.ident = nsignal;
    458  1.2   kamil 	kev.filter = EVFILT_SIGNAL;
    459  1.2   kamil 	kev.flags = EV_ADD;
    460  1.2   kamil 
    461  1.2   kamil 	/* Be ready for the signal if it is sent any
    462  1.2   kamil 	 * time between now and the next call to
    463  1.2   kamil 	 * kq_dispatch. */
    464  1.2   kamil 	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
    465  1.2   kamil 		return (-1);
    466  1.2   kamil 
    467  1.2   kamil         /* We can set the handler for most signals to SIG_IGN and
    468  1.2   kamil          * still have them reported to us in the queue.  However,
    469  1.2   kamil          * if the handler for SIGCHLD is SIG_IGN, the system reaps
    470  1.2   kamil          * zombie processes for us, and we don't get any notification.
    471  1.2   kamil          * This appears to be the only signal with this quirk. */
    472  1.2   kamil 	if (evsig_set_handler_(base, nsignal,
    473  1.2   kamil                                nsignal == SIGCHLD ? SIG_DFL : SIG_IGN) == -1)
    474  1.2   kamil 		return (-1);
    475  1.1  plunky 
    476  1.2   kamil 	return (0);
    477  1.2   kamil }
    478  1.1  plunky 
    479  1.2   kamil static int
    480  1.2   kamil kq_sig_del(struct event_base *base, int nsignal, short old, short events, void *p)
    481  1.2   kamil {
    482  1.2   kamil 	struct kqop *kqop = base->evbase;
    483  1.2   kamil 	struct kevent kev;
    484  1.1  plunky 
    485  1.2   kamil 	struct timespec timeout = { 0, 0 };
    486  1.2   kamil 	(void)p;
    487  1.1  plunky 
    488  1.2   kamil 	EVUTIL_ASSERT(nsignal >= 0 && nsignal < NSIG);
    489  1.1  plunky 
    490  1.2   kamil 	memset(&kev, 0, sizeof(kev));
    491  1.2   kamil 	kev.ident = nsignal;
    492  1.2   kamil 	kev.filter = EVFILT_SIGNAL;
    493  1.2   kamil 	kev.flags = EV_DELETE;
    494  1.2   kamil 
    495  1.2   kamil 	/* Because we insert signal events
    496  1.2   kamil 	 * immediately, we need to delete them
    497  1.2   kamil 	 * immediately, too */
    498  1.2   kamil 	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1)
    499  1.2   kamil 		return (-1);
    500  1.1  plunky 
    501  1.2   kamil 	if (evsig_restore_handler_(base, nsignal) == -1)
    502  1.2   kamil 		return (-1);
    503  1.1  plunky 
    504  1.1  plunky 	return (0);
    505  1.1  plunky }
    506  1.1  plunky 
    507  1.2   kamil 
    508  1.2   kamil /* OSX 10.6 and FreeBSD 8.1 add support for EVFILT_USER, which we can use
    509  1.2   kamil  * to wake up the event loop from another thread. */
    510  1.2   kamil 
    511  1.2   kamil /* Magic number we use for our filter ID. */
    512  1.2   kamil #define NOTIFY_IDENT 42
    513  1.2   kamil 
    514  1.2   kamil int
    515  1.2   kamil event_kq_add_notify_event_(struct event_base *base)
    516  1.1  plunky {
    517  1.2   kamil 	struct kqop *kqop = base->evbase;
    518  1.2   kamil #if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
    519  1.1  plunky 	struct kevent kev;
    520  1.2   kamil 	struct timespec timeout = { 0, 0 };
    521  1.2   kamil #endif
    522  1.1  plunky 
    523  1.2   kamil 	if (kqop->notify_event_added)
    524  1.2   kamil 		return 0;
    525  1.1  plunky 
    526  1.2   kamil #if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
    527  1.2   kamil 	memset(&kev, 0, sizeof(kev));
    528  1.2   kamil 	kev.ident = NOTIFY_IDENT;
    529  1.2   kamil 	kev.filter = EVFILT_USER;
    530  1.2   kamil 	kev.flags = EV_ADD | EV_CLEAR;
    531  1.1  plunky 
    532  1.2   kamil 	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1) {
    533  1.2   kamil 		event_warn("kevent: adding EVFILT_USER event");
    534  1.2   kamil 		return -1;
    535  1.2   kamil 	}
    536  1.1  plunky 
    537  1.2   kamil 	kqop->notify_event_added = 1;
    538  1.1  plunky 
    539  1.2   kamil 	return 0;
    540  1.2   kamil #else
    541  1.2   kamil 	return -1;
    542  1.2   kamil #endif
    543  1.2   kamil }
    544  1.1  plunky 
    545  1.2   kamil int
    546  1.2   kamil event_kq_notify_base_(struct event_base *base)
    547  1.2   kamil {
    548  1.2   kamil 	struct kqop *kqop = base->evbase;
    549  1.2   kamil #if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
    550  1.2   kamil 	struct kevent kev;
    551  1.2   kamil 	struct timespec timeout = { 0, 0 };
    552  1.2   kamil #endif
    553  1.2   kamil 	if (! kqop->notify_event_added)
    554  1.2   kamil 		return -1;
    555  1.1  plunky 
    556  1.2   kamil #if defined(EVFILT_USER) && defined(NOTE_TRIGGER)
    557  1.2   kamil 	memset(&kev, 0, sizeof(kev));
    558  1.2   kamil 	kev.ident = NOTIFY_IDENT;
    559  1.2   kamil 	kev.filter = EVFILT_USER;
    560  1.2   kamil 	kev.fflags = NOTE_TRIGGER;
    561  1.1  plunky 
    562  1.2   kamil 	if (kevent(kqop->kq, &kev, 1, NULL, 0, &timeout) == -1) {
    563  1.2   kamil 		event_warn("kevent: triggering EVFILT_USER event");
    564  1.2   kamil 		return -1;
    565  1.1  plunky 	}
    566  1.1  plunky 
    567  1.2   kamil 	return 0;
    568  1.2   kamil #else
    569  1.2   kamil 	return -1;
    570  1.2   kamil #endif
    571  1.1  plunky }
    572  1.1  plunky 
    573  1.2   kamil #endif /* EVENT__HAVE_KQUEUE */
    574