      1 /*	$NetBSD: kern_event.c,v 1.93 2017/07/03 00:53:33 riastradh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*-
     33  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon (at) FreeBSD.org>
     34  * All rights reserved.
     35  *
     36  * Redistribution and use in source and binary forms, with or without
     37  * modification, are permitted provided that the following conditions
     38  * are met:
     39  * 1. Redistributions of source code must retain the above copyright
     40  *    notice, this list of conditions and the following disclaimer.
     41  * 2. Redistributions in binary form must reproduce the above copyright
     42  *    notice, this list of conditions and the following disclaimer in the
     43  *    documentation and/or other materials provided with the distribution.
     44  *
     45  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     46  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     47  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     48  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     49  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     50  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     51  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     52  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     53  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     54  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     55  * SUCH DAMAGE.
     56  *
     57  * FreeBSD: src/sys/kern/kern_event.c,v 1.27 2001/07/05 17:10:44 rwatson Exp
     58  */
     59 
     60 #include <sys/cdefs.h>
     61 __KERNEL_RCSID(0, "$NetBSD: kern_event.c,v 1.93 2017/07/03 00:53:33 riastradh Exp $");
     62 
     63 #include <sys/param.h>
     64 #include <sys/systm.h>
     65 #include <sys/kernel.h>
     66 #include <sys/wait.h>
     67 #include <sys/proc.h>
     68 #include <sys/file.h>
     69 #include <sys/select.h>
     70 #include <sys/queue.h>
     71 #include <sys/event.h>
     72 #include <sys/eventvar.h>
     73 #include <sys/poll.h>
     74 #include <sys/kmem.h>
     75 #include <sys/stat.h>
     76 #include <sys/filedesc.h>
     77 #include <sys/syscallargs.h>
     78 #include <sys/kauth.h>
     79 #include <sys/conf.h>
     80 #include <sys/atomic.h>
     81 
     82 static int	kqueue_scan(file_t *, size_t, struct kevent *,
     83 			    const struct timespec *, register_t *,
     84 			    const struct kevent_ops *, struct kevent *,
     85 			    size_t);
     86 static int	kqueue_ioctl(file_t *, u_long, void *);
     87 static int	kqueue_fcntl(file_t *, u_int, void *);
     88 static int	kqueue_poll(file_t *, int);
     89 static int	kqueue_kqfilter(file_t *, struct knote *);
     90 static int	kqueue_stat(file_t *, struct stat *);
     91 static int	kqueue_close(file_t *);
     92 static int	kqueue_register(struct kqueue *, struct kevent *);
     93 static void	kqueue_doclose(struct kqueue *, struct klist *, int);
     94 
     95 static void	knote_detach(struct knote *, filedesc_t *fdp, bool);
     96 static void	knote_enqueue(struct knote *);
     97 static void	knote_activate(struct knote *);
     98 
     99 static void	filt_kqdetach(struct knote *);
    100 static int	filt_kqueue(struct knote *, long hint);
    101 static int	filt_procattach(struct knote *);
    102 static void	filt_procdetach(struct knote *);
    103 static int	filt_proc(struct knote *, long hint);
    104 static int	filt_fileattach(struct knote *);
    105 static void	filt_timerexpire(void *x);
    106 static int	filt_timerattach(struct knote *);
    107 static void	filt_timerdetach(struct knote *);
    108 static int	filt_timer(struct knote *, long hint);
    109 
    110 static const struct fileops kqueueops = {
    111 	.fo_read = (void *)enxio,
    112 	.fo_write = (void *)enxio,
    113 	.fo_ioctl = kqueue_ioctl,
    114 	.fo_fcntl = kqueue_fcntl,
    115 	.fo_poll = kqueue_poll,
    116 	.fo_stat = kqueue_stat,
    117 	.fo_close = kqueue_close,
    118 	.fo_kqfilter = kqueue_kqfilter,
    119 	.fo_restart = fnullop_restart,
    120 };
    121 
    122 static const struct filterops kqread_filtops =
    123 	{ 1, NULL, filt_kqdetach, filt_kqueue };
    124 static const struct filterops proc_filtops =
    125 	{ 0, filt_procattach, filt_procdetach, filt_proc };
    126 static const struct filterops file_filtops =
    127 	{ 1, filt_fileattach, NULL, NULL };
    128 static const struct filterops timer_filtops =
    129 	{ 0, filt_timerattach, filt_timerdetach, filt_timer };
    130 
    131 static u_int	kq_ncallouts = 0;
    132 static int	kq_calloutmax = (4 * 1024);
    133 
    134 #define	KN_HASHSIZE		64		/* XXX should be tunable */
    135 #define	KN_HASH(val, mask)	(((val) ^ ((val) >> 8)) & (mask))
    136 
    137 extern const struct filterops sig_filtops;
    138 
    139 /*
    140  * Table for all system-defined filters.
    141  * These should be listed in the numeric order of the EVFILT_* defines.
    142  * If filtops is NULL, the filter isn't implemented in NetBSD.
    143  * End of list is when name is NULL.
    144  *
    145  * Note that 'refcnt' is meaningless for built-in filters.
    146  */
    147 struct kfilter {
    148 	const char	*name;		/* name of filter */
    149 	uint32_t	filter;		/* id of filter */
    150 	unsigned	refcnt;		/* reference count */
    151 	const struct filterops *filtops;/* operations for filter */
    152 	size_t		namelen;	/* length of name string */
    153 };
    154 
    155 /* System defined filters */
    156 static struct kfilter sys_kfilters[] = {
    157 	{ "EVFILT_READ",	EVFILT_READ,	0, &file_filtops, 0 },
    158 	{ "EVFILT_WRITE",	EVFILT_WRITE,	0, &file_filtops, 0 },
    159 	{ "EVFILT_AIO",		EVFILT_AIO,	0, NULL, 0 },
    160 	{ "EVFILT_VNODE",	EVFILT_VNODE,	0, &file_filtops, 0 },
    161 	{ "EVFILT_PROC",	EVFILT_PROC,	0, &proc_filtops, 0 },
    162 	{ "EVFILT_SIGNAL",	EVFILT_SIGNAL,	0, &sig_filtops, 0 },
    163 	{ "EVFILT_TIMER",	EVFILT_TIMER,	0, &timer_filtops, 0 },
    164 	{ NULL,			0,		0, NULL, 0 },
    165 };
    166 
    167 /* User defined kfilters */
    168 static struct kfilter	*user_kfilters;		/* array */
    169 static int		user_kfilterc;		/* current offset */
    170 static int		user_kfiltermaxc;	/* max size so far */
    171 static size_t		user_kfiltersz;		/* size of allocated memory */
    172 
    173 /* Locks */
    174 static krwlock_t	kqueue_filter_lock;	/* lock on filter lists */
    175 static kmutex_t		kqueue_misc_lock;	/* miscellaneous */
    176 
    177 static kauth_listener_t	kqueue_listener;
    178 
    179 static int
    180 kqueue_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    181     void *arg0, void *arg1, void *arg2, void *arg3)
    182 {
    183 	struct proc *p;
    184 	int result;
    185 
    186 	result = KAUTH_RESULT_DEFER;
    187 	p = arg0;
    188 
    189 	if (action != KAUTH_PROCESS_KEVENT_FILTER)
    190 		return result;
    191 
    192 	if ((kauth_cred_getuid(p->p_cred) != kauth_cred_getuid(cred) ||
    193 	    ISSET(p->p_flag, PK_SUGID)))
    194 		return result;
    195 
    196 	result = KAUTH_RESULT_ALLOW;
    197 
    198 	return result;
    199 }
    200 
    201 /*
    202  * Initialize the kqueue subsystem.
    203  */
    204 void
    205 kqueue_init(void)
    206 {
    207 
    208 	rw_init(&kqueue_filter_lock);
    209 	mutex_init(&kqueue_misc_lock, MUTEX_DEFAULT, IPL_NONE);
    210 
    211 	kqueue_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
    212 	    kqueue_listener_cb, NULL);
    213 }
    214 
    215 /*
    216  * Find kfilter entry by name, or NULL if not found.
    217  */
    218 static struct kfilter *
    219 kfilter_byname_sys(const char *name)
    220 {
    221 	int i;
    222 
    223 	KASSERT(rw_lock_held(&kqueue_filter_lock));
    224 
    225 	for (i = 0; sys_kfilters[i].name != NULL; i++) {
    226 		if (strcmp(name, sys_kfilters[i].name) == 0)
    227 			return &sys_kfilters[i];
    228 	}
    229 	return NULL;
    230 }
    231 
    232 static struct kfilter *
    233 kfilter_byname_user(const char *name)
    234 {
    235 	int i;
    236 
    237 	KASSERT(rw_lock_held(&kqueue_filter_lock));
    238 
    239 	/* user filter slots have a NULL name if previously deregistered */
    240 	for (i = 0; i < user_kfilterc ; i++) {
    241 		if (user_kfilters[i].name != NULL &&
    242 		    strcmp(name, user_kfilters[i].name) == 0)
    243 			return &user_kfilters[i];
    244 	}
    245 	return NULL;
    246 }
    247 
    248 static struct kfilter *
    249 kfilter_byname(const char *name)
    250 {
    251 	struct kfilter *kfilter;
    252 
    253 	KASSERT(rw_lock_held(&kqueue_filter_lock));
    254 
    255 	if ((kfilter = kfilter_byname_sys(name)) != NULL)
    256 		return kfilter;
    257 
    258 	return kfilter_byname_user(name);
    259 }
    260 
    261 /*
    262  * Find kfilter entry by filter id, or NULL if not found.
    263  * Assumes entries are indexed in filter id order, for speed.
    264  */
    265 static struct kfilter *
    266 kfilter_byfilter(uint32_t filter)
    267 {
    268 	struct kfilter *kfilter;
    269 
    270 	KASSERT(rw_lock_held(&kqueue_filter_lock));
    271 
    272 	if (filter < EVFILT_SYSCOUNT)	/* it's a system filter */
    273 		kfilter = &sys_kfilters[filter];
    274 	else if (user_kfilters != NULL &&
    275 	    filter < EVFILT_SYSCOUNT + user_kfilterc)
    276 					/* it's a user filter */
    277 		kfilter = &user_kfilters[filter - EVFILT_SYSCOUNT];
    278 	else
    279 		return (NULL);		/* out of range */
    280 	KASSERT(kfilter->filter == filter);	/* sanity check! */
    281 	return (kfilter);
    282 }
    283 
    284 /*
    285  * Register a new kfilter. Stores the entry in user_kfilters.
    286  * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
    287  * If retfilter != NULL, the new filterid is returned in it.
    288  */
    289 int
    290 kfilter_register(const char *name, const struct filterops *filtops,
    291 		 int *retfilter)
    292 {
    293 	struct kfilter *kfilter;
    294 	size_t len;
    295 	int i;
    296 
    297 	if (name == NULL || name[0] == '\0' || filtops == NULL)
    298 		return (EINVAL);	/* invalid args */
    299 
    300 	rw_enter(&kqueue_filter_lock, RW_WRITER);
    301 	if (kfilter_byname(name) != NULL) {
    302 		rw_exit(&kqueue_filter_lock);
    303 		return (EEXIST);	/* already exists */
    304 	}
    305 	if (user_kfilterc > 0xffffffff - EVFILT_SYSCOUNT) {
    306 		rw_exit(&kqueue_filter_lock);
    307 		return (EINVAL);	/* too many */
    308 	}
    309 
    310 	for (i = 0; i < user_kfilterc; i++) {
    311 		kfilter = &user_kfilters[i];
    312 		if (kfilter->name == NULL) {
    313 			/* Previously deregistered slot.  Reuse. */
    314 			goto reuse;
    315 		}
    316 	}
    317 
    318 	/* check if we need to grow user_kfilters */
    319 	if (user_kfilterc + 1 > user_kfiltermaxc) {
    320 		/* Grow in KFILTER_EXTENT chunks. */
    321 		user_kfiltermaxc += KFILTER_EXTENT;
    322 		len = user_kfiltermaxc * sizeof(*kfilter);
    323 		kfilter = kmem_alloc(len, KM_SLEEP);
    324 		memset((char *)kfilter + user_kfiltersz, 0, len - user_kfiltersz);
    325 		if (user_kfilters != NULL) {
    326 			memcpy(kfilter, user_kfilters, user_kfiltersz);
    327 			kmem_free(user_kfilters, user_kfiltersz);
    328 		}
    329 		user_kfiltersz = len;
    330 		user_kfilters = kfilter;
    331 	}
    332 	/* Adding new slot */
    333 	kfilter = &user_kfilters[user_kfilterc++];
    334 reuse:
    335 	kfilter->namelen = strlen(name) + 1;
    336 	kfilter->name = kmem_alloc(kfilter->namelen, KM_SLEEP);
    337 	memcpy(__UNCONST(kfilter->name), name, kfilter->namelen);
    338 
    339 	kfilter->filter = (kfilter - user_kfilters) + EVFILT_SYSCOUNT;
    340 
    341 	kfilter->filtops = kmem_alloc(sizeof(*filtops), KM_SLEEP);
    342 	memcpy(__UNCONST(kfilter->filtops), filtops, sizeof(*filtops));
    343 
    344 	if (retfilter != NULL)
    345 		*retfilter = kfilter->filter;
    346 	rw_exit(&kqueue_filter_lock);
    347 
    348 	return (0);
    349 }
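
/*
 * Example (an illustrative sketch, not part of this file): a kernel
 * component could register and later unregister a private filter as
 * below.  The filter name and the filt_ex*() methods are hypothetical;
 * only kfilter_register() and kfilter_unregister() are defined here.
 *
 *	static const struct filterops example_filtops =
 *		{ 0, filt_exattach, filt_exdetach, filt_exevent };
 *	int filter, error;
 *
 *	error = kfilter_register("EVFILT_EXAMPLE", &example_filtops,
 *	    &filter);
 *	if (error == 0) {
 *		(userland can now pass the returned id as kev.filter)
 *		error = kfilter_unregister("EVFILT_EXAMPLE");
 *	}
 */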
    350 
    351 /*
    352  * Unregister a kfilter previously registered with kfilter_register.
    353  * This retains the filter id, but clears the name and frees filtops (filter
    354  * operations), so that the number isn't reused during a boot.
    355  * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
    356  */
    357 int
    358 kfilter_unregister(const char *name)
    359 {
    360 	struct kfilter *kfilter;
    361 
    362 	if (name == NULL || name[0] == '\0')
    363 		return (EINVAL);	/* invalid name */
    364 
    365 	rw_enter(&kqueue_filter_lock, RW_WRITER);
    366 	if (kfilter_byname_sys(name) != NULL) {
    367 		rw_exit(&kqueue_filter_lock);
    368 		return (EINVAL);	/* can't detach system filters */
    369 	}
    370 
    371 	kfilter = kfilter_byname_user(name);
    372 	if (kfilter == NULL) {
    373 		rw_exit(&kqueue_filter_lock);
    374 		return (ENOENT);
    375 	}
    376 	if (kfilter->refcnt != 0) {
    377 		rw_exit(&kqueue_filter_lock);
    378 		return (EBUSY);
    379 	}
    380 
    381 	/* Cast away const (but we know it's safe). */
    382 	kmem_free(__UNCONST(kfilter->name), kfilter->namelen);
    383 	kfilter->name = NULL;	/* mark as `not implemented' */
    384 
    385 	if (kfilter->filtops != NULL) {
    386 		/* Cast away const (but we know it's safe). */
    387 		kmem_free(__UNCONST(kfilter->filtops),
    388 		    sizeof(*kfilter->filtops));
    389 		kfilter->filtops = NULL; /* mark as `not implemented' */
    390 	}
    391 	rw_exit(&kqueue_filter_lock);
    392 
    393 	return (0);
    394 }
    395 
    396 
    397 /*
    398  * Filter attach method for EVFILT_READ and EVFILT_WRITE on normal file
    399  * descriptors. Calls fileops kqfilter method for given file descriptor.
    400  */
    401 static int
    402 filt_fileattach(struct knote *kn)
    403 {
    404 	file_t *fp;
    405 
    406 	fp = kn->kn_obj;
    407 
    408 	return (*fp->f_ops->fo_kqfilter)(fp, kn);
    409 }
    410 
    411 /*
    412  * Filter detach method for EVFILT_READ on kqueue descriptor.
    413  */
    414 static void
    415 filt_kqdetach(struct knote *kn)
    416 {
    417 	struct kqueue *kq;
    418 
    419 	kq = ((file_t *)kn->kn_obj)->f_kqueue;
    420 
    421 	mutex_spin_enter(&kq->kq_lock);
    422 	SLIST_REMOVE(&kq->kq_sel.sel_klist, kn, knote, kn_selnext);
    423 	mutex_spin_exit(&kq->kq_lock);
    424 }
    425 
    426 /*
    427  * Filter event method for EVFILT_READ on kqueue descriptor.
    428  */
    429 /*ARGSUSED*/
    430 static int
    431 filt_kqueue(struct knote *kn, long hint)
    432 {
    433 	struct kqueue *kq;
    434 	int rv;
    435 
    436 	kq = ((file_t *)kn->kn_obj)->f_kqueue;
    437 
    438 	if (hint != NOTE_SUBMIT)
    439 		mutex_spin_enter(&kq->kq_lock);
    440 	kn->kn_data = kq->kq_count;
    441 	rv = (kn->kn_data > 0);
    442 	if (hint != NOTE_SUBMIT)
    443 		mutex_spin_exit(&kq->kq_lock);
    444 
    445 	return rv;
    446 }
    447 
    448 /*
    449  * Filter attach method for EVFILT_PROC.
    450  */
    451 static int
    452 filt_procattach(struct knote *kn)
    453 {
    454 	struct proc *p;
    455 	struct lwp *curl;
    456 
    457 	curl = curlwp;
    458 
    459 	mutex_enter(proc_lock);
    460 	if (kn->kn_flags & EV_FLAG1) {
    461 		/*
    462 		 * NOTE_TRACK attaches to the child process too early
    463 		 * for proc_find, so do a raw look up and check the state
    464 		 * explicitly.
    465 		 */
    466 		p = proc_find_raw(kn->kn_id);
    467 		if (p != NULL && p->p_stat != SIDL)
    468 			p = NULL;
    469 	} else {
    470 		p = proc_find(kn->kn_id);
    471 	}
    472 
    473 	if (p == NULL) {
    474 		mutex_exit(proc_lock);
    475 		return ESRCH;
    476 	}
    477 
    478 	/*
    479 	 * Fail if it's not owned by you, or the last exec gave us
    480 	 * setuid/setgid privs (unless you're root).
    481 	 */
    482 	mutex_enter(p->p_lock);
    483 	mutex_exit(proc_lock);
    484 	if (kauth_authorize_process(curl->l_cred, KAUTH_PROCESS_KEVENT_FILTER,
    485 	    p, NULL, NULL, NULL) != 0) {
    486 	    	mutex_exit(p->p_lock);
    487 		return EACCES;
    488 	}
    489 
    490 	kn->kn_obj = p;
    491 	kn->kn_flags |= EV_CLEAR;	/* automatically set */
    492 
    493 	/*
    494 	 * Internal flag indicating the registration was done by the kernel.
    495 	 */
    496 	if (kn->kn_flags & EV_FLAG1) {
    497 		kn->kn_data = kn->kn_sdata;	/* ppid */
    498 		kn->kn_fflags = NOTE_CHILD;
    499 		kn->kn_flags &= ~EV_FLAG1;
    500 	}
    501 	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
    502     	mutex_exit(p->p_lock);
    503 
    504 	return 0;
    505 }
    506 
    507 /*
    508  * Filter detach method for EVFILT_PROC.
    509  *
    510  * The knote may be attached to a different process, which may exit,
    511  * leaving nothing for the knote to be attached to.  So when the process
    512  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
    513  * it will be deleted when read out.  However, as part of the knote deletion,
    514  * this routine is called, so a check is needed to avoid actually performing
    515  * a detach, because the original process might not exist any more.
    516  */
    517 static void
    518 filt_procdetach(struct knote *kn)
    519 {
    520 	struct proc *p;
    521 
    522 	if (kn->kn_status & KN_DETACHED)
    523 		return;
    524 
    525 	p = kn->kn_obj;
    526 
    527 	mutex_enter(p->p_lock);
    528 	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
    529 	mutex_exit(p->p_lock);
    530 }
    531 
    532 /*
    533  * Filter event method for EVFILT_PROC.
    534  */
    535 static int
    536 filt_proc(struct knote *kn, long hint)
    537 {
    538 	u_int event, fflag;
    539 	struct kevent kev;
    540 	struct kqueue *kq;
    541 	int error;
    542 
    543 	event = (u_int)hint & NOTE_PCTRLMASK;
    544 	kq = kn->kn_kq;
    545 	fflag = 0;
    546 
    547 	/* If the user is interested in this event, record it. */
    548 	if (kn->kn_sfflags & event)
    549 		fflag |= event;
    550 
    551 	if (event == NOTE_EXIT) {
    552 		struct proc *p = kn->kn_obj;
    553 
    554 		if (p != NULL)
    555 			kn->kn_data = P_WAITSTATUS(p);
    556 		/*
    557 		 * Process is gone, so flag the event as finished.
    558 		 *
    559 		 * Detach the knote from watched process and mark
    560 		 * it as such. We can't leave this to kqueue_scan(),
    561 		 * since the process might not exist by then. And we
    562 		 * have to do this now, since psignal KNOTE() is called
    563 		 * also for zombies and we might end up reading freed
    564 		 * memory if the kevent would already be picked up
    565 		 * and knote g/c'ed.
    566 		 */
    567 		filt_procdetach(kn);
    568 
    569 		mutex_spin_enter(&kq->kq_lock);
    570 		kn->kn_status |= KN_DETACHED;
    571 		/* Mark as ONESHOT, so that the knote is g/c'ed when read */
    572 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
    573 		kn->kn_fflags |= fflag;
    574 		mutex_spin_exit(&kq->kq_lock);
    575 
    576 		return 1;
    577 	}
    578 
    579 	mutex_spin_enter(&kq->kq_lock);
    580 	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
    581 		/*
    582 		 * Process forked, and user wants to track the new process,
    583 		 * so attach a new knote to it, and immediately report an
    584 		 * event with the parent's pid.  Register knote with new
    585 		 * process.
    586 		 */
    587 		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
    588 		kev.filter = kn->kn_filter;
    589 		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
    590 		kev.fflags = kn->kn_sfflags;
    591 		kev.data = kn->kn_id;			/* parent */
    592 		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
    593 		mutex_spin_exit(&kq->kq_lock);
    594 		error = kqueue_register(kq, &kev);
    595 		mutex_spin_enter(&kq->kq_lock);
    596 		if (error != 0)
    597 			kn->kn_fflags |= NOTE_TRACKERR;
    598 	}
    599 	kn->kn_fflags |= fflag;
    600 	fflag = kn->kn_fflags;
    601 	mutex_spin_exit(&kq->kq_lock);
    602 
    603 	return fflag != 0;
    604 }
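
/*
 * Illustrative userland usage of EVFILT_PROC (a sketch, not part of
 * this file): watch process "pid" for exit and fork, with NOTE_TRACK
 * asking the kernel to attach a knote to each forked child as
 * implemented above.  "kq" is assumed to be a kqueue(2) descriptor.
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ENABLE,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, 0);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(EXIT_FAILURE, "kevent");
 */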
    605 
    606 static void
    607 filt_timerexpire(void *knx)
    608 {
    609 	struct knote *kn = knx;
    610 	int tticks;
    611 
    612 	mutex_enter(&kqueue_misc_lock);
    613 	kn->kn_data++;
    614 	knote_activate(kn);
    615 	if ((kn->kn_flags & EV_ONESHOT) == 0) {
    616 		tticks = mstohz(kn->kn_sdata);
    617 		if (tticks <= 0)
    618 			tticks = 1;
    619 		callout_schedule((callout_t *)kn->kn_hook, tticks);
    620 	}
    621 	mutex_exit(&kqueue_misc_lock);
    622 }
    623 
    624 /*
    625  * data contains amount of time to sleep, in milliseconds
    626  */
    627 static int
    628 filt_timerattach(struct knote *kn)
    629 {
    630 	callout_t *calloutp;
    631 	struct kqueue *kq;
    632 	int tticks;
    633 
    634 	tticks = mstohz(kn->kn_sdata);
    635 
    636 	/* if the supplied value is under our resolution, use 1 tick */
    637 	if (tticks == 0) {
    638 		if (kn->kn_sdata == 0)
    639 			return EINVAL;
    640 		tticks = 1;
    641 	}
    642 
    643 	if (atomic_inc_uint_nv(&kq_ncallouts) >= kq_calloutmax ||
    644 	    (calloutp = kmem_alloc(sizeof(*calloutp), KM_NOSLEEP)) == NULL) {
    645 		atomic_dec_uint(&kq_ncallouts);
    646 		return ENOMEM;
    647 	}
    648 	callout_init(calloutp, CALLOUT_MPSAFE);
    649 
    650 	kq = kn->kn_kq;
    651 	mutex_spin_enter(&kq->kq_lock);
    652 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
    653 	kn->kn_hook = calloutp;
    654 	mutex_spin_exit(&kq->kq_lock);
    655 
    656 	callout_reset(calloutp, tticks, filt_timerexpire, kn);
    657 
    658 	return (0);
    659 }
    660 
    661 static void
    662 filt_timerdetach(struct knote *kn)
    663 {
    664 	callout_t *calloutp;
    665 
    666 	calloutp = (callout_t *)kn->kn_hook;
    667 	callout_halt(calloutp, NULL);
    668 	callout_destroy(calloutp);
    669 	kmem_free(calloutp, sizeof(*calloutp));
    670 	atomic_dec_uint(&kq_ncallouts);
    671 }
    672 
    673 static int
    674 filt_timer(struct knote *kn, long hint)
    675 {
    676 	int rv;
    677 
    678 	mutex_enter(&kqueue_misc_lock);
    679 	rv = (kn->kn_data != 0);
    680 	mutex_exit(&kqueue_misc_lock);
    681 
    682 	return rv;
    683 }
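
/*
 * Illustrative userland usage of EVFILT_TIMER (a sketch, not part of
 * this file): arm a periodic timer.  As noted above, kev.data is the
 * period in milliseconds; the identifier (1 here) is caller-chosen.
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD | EV_ENABLE, 0, 500, 0);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(EXIT_FAILURE, "kevent");
 */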
    684 
    685 /*
    686  * filt_seltrue:
    687  *
    688  *	This filter "event" routine simulates seltrue().
    689  */
    690 int
    691 filt_seltrue(struct knote *kn, long hint)
    692 {
    693 
    694 	/*
    695 	 * We don't know how much data can be read/written,
    696 	 * but we know that it *can* be.  This is about as
    697 	 * good as select/poll does as well.
    698 	 */
    699 	kn->kn_data = 0;
    700 	return (1);
    701 }
    702 
    703 /*
    704  * This provides a full kqfilter entry for device switch tables, with
    705  * the same effect as a filter using filt_seltrue() as its event method.
    706  */
    707 static void
    708 filt_seltruedetach(struct knote *kn)
    709 {
    710 	/* Nothing to do */
    711 }
    712 
    713 const struct filterops seltrue_filtops =
    714 	{ 1, NULL, filt_seltruedetach, filt_seltrue };
    715 
    716 int
    717 seltrue_kqfilter(dev_t dev, struct knote *kn)
    718 {
    719 	switch (kn->kn_filter) {
    720 	case EVFILT_READ:
    721 	case EVFILT_WRITE:
    722 		kn->kn_fop = &seltrue_filtops;
    723 		break;
    724 	default:
    725 		return (EINVAL);
    726 	}
    727 
    728 	/* Nothing more to do */
    729 	return (0);
    730 }
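
/*
 * A driver whose descriptors are always ready can point its device
 * switch entry straight at seltrue_kqfilter().  A minimal sketch for a
 * hypothetical driver "mydev" (elided members shown as "..."):
 *
 *	const struct cdevsw mydev_cdevsw = {
 *		...
 *		.d_kqfilter = seltrue_kqfilter,
 *		...
 *	};
 */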
    731 
    732 /*
    733  * Common code for the kqueue(2) and kqueue1(2) system calls.
    734  */
    735 static int
    736 kqueue1(struct lwp *l, int flags, register_t *retval)
    737 {
    738 	struct kqueue *kq;
    739 	file_t *fp;
    740 	int fd, error;
    741 
    742 	if ((error = fd_allocfile(&fp, &fd)) != 0)
    743 		return error;
    744 	fp->f_flag = FREAD | FWRITE | (flags & (FNONBLOCK|FNOSIGPIPE));
    745 	fp->f_type = DTYPE_KQUEUE;
    746 	fp->f_ops = &kqueueops;
    747 	kq = kmem_zalloc(sizeof(*kq), KM_SLEEP);
    748 	mutex_init(&kq->kq_lock, MUTEX_DEFAULT, IPL_SCHED);
    749 	cv_init(&kq->kq_cv, "kqueue");
    750 	selinit(&kq->kq_sel);
    751 	TAILQ_INIT(&kq->kq_head);
    752 	fp->f_kqueue = kq;
    753 	*retval = fd;
    754 	kq->kq_fdp = curlwp->l_fd;
    755 	fd_set_exclose(l, fd, (flags & O_CLOEXEC) != 0);
    756 	fd_affix(curproc, fp, fd);
    757 	return error;
    758 }
    759 
    760 /*
    761  * kqueue(2) system call.
    762  */
    763 int
    764 sys_kqueue(struct lwp *l, const void *v, register_t *retval)
    765 {
    766 	return kqueue1(l, 0, retval);
    767 }
    768 
    769 int
    770 sys_kqueue1(struct lwp *l, const struct sys_kqueue1_args *uap,
    771     register_t *retval)
    772 {
    773 	/* {
    774 		syscallarg(int) flags;
    775 	} */
    776 	return kqueue1(l, SCARG(uap, flags), retval);
    777 }
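
/*
 * Illustrative userland usage (not part of this file): kqueue1(2) is
 * the flag-taking variant, e.g.
 *
 *	int kq = kqueue1(O_CLOEXEC | O_NONBLOCK);
 *
 * which maps onto the O_CLOEXEC and FNONBLOCK handling in kqueue1()
 * above.
 */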
    778 
    779 /*
    780  * kevent(2) system call.
    781  */
    782 int
    783 kevent_fetch_changes(void *ctx, const struct kevent *changelist,
    784     struct kevent *changes, size_t index, int n)
    785 {
    786 
    787 	return copyin(changelist + index, changes, n * sizeof(*changes));
    788 }
    789 
    790 int
    791 kevent_put_events(void *ctx, struct kevent *events,
    792     struct kevent *eventlist, size_t index, int n)
    793 {
    794 
    795 	return copyout(events, eventlist + index, n * sizeof(*events));
    796 }
    797 
    798 static const struct kevent_ops kevent_native_ops = {
    799 	.keo_private = NULL,
    800 	.keo_fetch_timeout = copyin,
    801 	.keo_fetch_changes = kevent_fetch_changes,
    802 	.keo_put_events = kevent_put_events,
    803 };
    804 
    805 int
    806 sys___kevent50(struct lwp *l, const struct sys___kevent50_args *uap,
    807     register_t *retval)
    808 {
    809 	/* {
    810 		syscallarg(int) fd;
    811 		syscallarg(const struct kevent *) changelist;
    812 		syscallarg(size_t) nchanges;
    813 		syscallarg(struct kevent *) eventlist;
    814 		syscallarg(size_t) nevents;
    815 		syscallarg(const struct timespec *) timeout;
    816 	} */
    817 
    818 	return kevent1(retval, SCARG(uap, fd), SCARG(uap, changelist),
    819 	    SCARG(uap, nchanges), SCARG(uap, eventlist), SCARG(uap, nevents),
    820 	    SCARG(uap, timeout), &kevent_native_ops);
    821 }
    822 
    823 int
    824 kevent1(register_t *retval, int fd,
    825 	const struct kevent *changelist, size_t nchanges,
    826 	struct kevent *eventlist, size_t nevents,
    827 	const struct timespec *timeout,
    828 	const struct kevent_ops *keops)
    829 {
    830 	struct kevent *kevp;
    831 	struct kqueue *kq;
    832 	struct timespec	ts;
    833 	size_t i, n, ichange;
    834 	int nerrors, error;
    835 	struct kevent kevbuf[KQ_NEVENTS];	/* approx 300 bytes on 64-bit */
    836 	file_t *fp;
    837 
    838 	/* check that we're dealing with a kq */
    839 	fp = fd_getfile(fd);
    840 	if (fp == NULL)
    841 		return (EBADF);
    842 
    843 	if (fp->f_type != DTYPE_KQUEUE) {
    844 		fd_putfile(fd);
    845 		return (EBADF);
    846 	}
    847 
    848 	if (timeout != NULL) {
    849 		error = (*keops->keo_fetch_timeout)(timeout, &ts, sizeof(ts));
    850 		if (error)
    851 			goto done;
    852 		timeout = &ts;
    853 	}
    854 
    855 	kq = fp->f_kqueue;
    856 	nerrors = 0;
    857 	ichange = 0;
    858 
    859 	/* traverse list of events to register */
    860 	while (nchanges > 0) {
    861 		n = MIN(nchanges, __arraycount(kevbuf));
    862 		error = (*keops->keo_fetch_changes)(keops->keo_private,
    863 		    changelist, kevbuf, ichange, n);
    864 		if (error)
    865 			goto done;
    866 		for (i = 0; i < n; i++) {
    867 			kevp = &kevbuf[i];
    868 			kevp->flags &= ~EV_SYSFLAGS;
    869 			/* register each knote */
    870 			error = kqueue_register(kq, kevp);
    871 			if (!error && !(kevp->flags & EV_RECEIPT))
    872 				continue;
    873 			if (nevents == 0)
    874 				goto done;
    875 			kevp->flags = EV_ERROR;
    876 			kevp->data = error;
    877 			error = (*keops->keo_put_events)
    878 				(keops->keo_private, kevp,
    879 				 eventlist, nerrors, 1);
    880 			if (error)
    881 				goto done;
    882 			nevents--;
    883 			nerrors++;
    884 		}
    885 		nchanges -= n;	/* update the loop counters */
    886 		ichange += n;
    887 	}
    888 	if (nerrors) {
    889 		*retval = nerrors;
    890 		error = 0;
    891 		goto done;
    892 	}
    893 
    894 	/* actually scan through the events */
    895 	error = kqueue_scan(fp, nevents, eventlist, timeout, retval, keops,
    896 	    kevbuf, __arraycount(kevbuf));
    897  done:
    898 	fd_putfile(fd);
    899 	return (error);
    900 }
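
/*
 * Illustrative userland usage of the interfaces above (a sketch, not
 * part of this file): create a kqueue, register interest in descriptor
 * "fd" becoming readable, and block for a single event.
 *
 *	struct kevent kev, ev;
 *	int kq, n;
 *
 *	if ((kq = kqueue()) == -1)
 *		err(EXIT_FAILURE, "kqueue");
 *	EV_SET(&kev, fd, EVFILT_READ, EV_ADD, 0, 0, 0);
 *	if ((n = kevent(kq, &kev, 1, &ev, 1, NULL)) == -1)
 *		err(EXIT_FAILURE, "kevent");
 *	if (n > 0 && ev.filter == EVFILT_READ)
 *		(ev.data bytes are now ready for reading on fd)
 */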
    901 
    902 /*
    903  * Register the given kevent kev with the kqueue.
    904  */
    905 static int
    906 kqueue_register(struct kqueue *kq, struct kevent *kev)
    907 {
    908 	struct kfilter *kfilter;
    909 	filedesc_t *fdp;
    910 	file_t *fp;
    911 	fdfile_t *ff;
    912 	struct knote *kn, *newkn;
    913 	struct klist *list;
    914 	int error, fd, rv;
    915 
    916 	fdp = kq->kq_fdp;
    917 	fp = NULL;
    918 	kn = NULL;
    919 	error = 0;
    920 	fd = 0;
    921 
    922 	newkn = kmem_zalloc(sizeof(*newkn), KM_SLEEP);
    923 
    924 	rw_enter(&kqueue_filter_lock, RW_READER);
    925 	kfilter = kfilter_byfilter(kev->filter);
    926 	if (kfilter == NULL || kfilter->filtops == NULL) {
    927 		/* filter not found or not implemented */
    928 		rw_exit(&kqueue_filter_lock);
    929 		kmem_free(newkn, sizeof(*newkn));
    930 		return (EINVAL);
    931 	}
    932 
    933 	/* search if knote already exists */
    934 	if (kfilter->filtops->f_isfd) {
    935 		/* monitoring a file descriptor */
    936 		/* validate descriptor */
    937 		if (kev->ident > INT_MAX
    938 		    || (fp = fd_getfile(fd = kev->ident)) == NULL) {
    939 			rw_exit(&kqueue_filter_lock);
    940 			kmem_free(newkn, sizeof(*newkn));
    941 			return EBADF;
    942 		}
    943 		mutex_enter(&fdp->fd_lock);
    944 		ff = fdp->fd_dt->dt_ff[fd];
    945 		if (fd <= fdp->fd_lastkqfile) {
    946 			SLIST_FOREACH(kn, &ff->ff_knlist, kn_link) {
    947 				if (kq == kn->kn_kq &&
    948 				    kev->filter == kn->kn_filter)
    949 					break;
    950 			}
    951 		}
    952 	} else {
    953 		/*
    954 		 * not monitoring a file descriptor, so
    955 		 * lookup knotes in internal hash table
    956 		 */
    957 		mutex_enter(&fdp->fd_lock);
    958 		if (fdp->fd_knhashmask != 0) {
    959 			list = &fdp->fd_knhash[
    960 			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
    961 			SLIST_FOREACH(kn, list, kn_link) {
    962 				if (kev->ident == kn->kn_id &&
    963 				    kq == kn->kn_kq &&
    964 				    kev->filter == kn->kn_filter)
    965 					break;
    966 			}
    967 		}
    968 	}
    969 
    970 	/*
    971 	 * kn now contains the matching knote, or NULL if no match
    972 	 */
    973 	if (kev->flags & EV_ADD) {
    974 		if (kn == NULL) {
    975 			/* create new knote */
    976 			kn = newkn;
    977 			newkn = NULL;
    978 			kn->kn_obj = fp;
    979 			kn->kn_id = kev->ident;
    980 			kn->kn_kq = kq;
    981 			kn->kn_fop = kfilter->filtops;
    982 			kn->kn_kfilter = kfilter;
    983 			kn->kn_sfflags = kev->fflags;
    984 			kn->kn_sdata = kev->data;
    985 			kev->fflags = 0;
    986 			kev->data = 0;
    987 			kn->kn_kevent = *kev;
    988 
    989 			KASSERT(kn->kn_fop != NULL);
    990 			/*
    991 			 * apply reference count to knote structure, and
    992 			 * do not release it at the end of this routine.
    993 			 */
    994 			fp = NULL;
    995 
    996 			if (!kn->kn_fop->f_isfd) {
    997 				/*
    998 				 * If knote is not on an fd, store on
    999 				 * internal hash table.
   1000 				 */
   1001 				if (fdp->fd_knhashmask == 0) {
   1002 					/* XXXAD can block with fd_lock held */
   1003 					fdp->fd_knhash = hashinit(KN_HASHSIZE,
   1004 					    HASH_LIST, true,
   1005 					    &fdp->fd_knhashmask);
   1006 				}
   1007 				list = &fdp->fd_knhash[KN_HASH(kn->kn_id,
   1008 				    fdp->fd_knhashmask)];
   1009 			} else {
   1010 				/* Otherwise, knote is on an fd. */
   1011 				list = (struct klist *)
   1012 				    &fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist;
   1013 				if ((int)kn->kn_id > fdp->fd_lastkqfile)
   1014 					fdp->fd_lastkqfile = kn->kn_id;
   1015 			}
   1016 			SLIST_INSERT_HEAD(list, kn, kn_link);
   1017 
   1018 			KERNEL_LOCK(1, NULL);		/* XXXSMP */
   1019 			error = (*kfilter->filtops->f_attach)(kn);
   1020 			KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
   1021 			if (error != 0) {
   1022 #ifdef DIAGNOSTIC
   1023 				printf("%s: event type %d not supported for "
   1024 				    "file type %d (error %d)\n", __func__,
   1025 				    kn->kn_filter, kn->kn_obj ?
   1026 				    ((file_t *)kn->kn_obj)->f_type : -1, error);
   1027 #endif
   1028 				/* knote_detach() drops fdp->fd_lock */
   1029 				knote_detach(kn, fdp, false);
   1030 				goto done;
   1031 			}
   1032 			atomic_inc_uint(&kfilter->refcnt);
   1033 		} else {
   1034 			/*
   1035 			 * The user may change some filter values after the
   1036 			 * initial EV_ADD, but doing so will not reset any
   1037 			 * filters that have already been triggered.
   1038 			 */
   1039 			kn->kn_sfflags = kev->fflags;
   1040 			kn->kn_sdata = kev->data;
   1041 			kn->kn_kevent.udata = kev->udata;
   1042 		}
   1043 		/*
   1044 		 * We can get here if we are trying to attach
   1045 		 * an event to a file descriptor that does not
   1046 		 * support events, and the attach routine is
   1047 		 * broken and does not return an error.
   1048 		 */
   1049 		KASSERT(kn->kn_fop != NULL);
   1050 		KASSERT(kn->kn_fop->f_event != NULL);
   1051 		KERNEL_LOCK(1, NULL);			/* XXXSMP */
   1052 		rv = (*kn->kn_fop->f_event)(kn, 0);
   1053 		KERNEL_UNLOCK_ONE(NULL);		/* XXXSMP */
   1054 		if (rv)
   1055 			knote_activate(kn);
   1056 	} else {
   1057 		if (kn == NULL) {
   1058 			error = ENOENT;
   1059 		 	mutex_exit(&fdp->fd_lock);
   1060 			goto done;
   1061 		}
   1062 		if (kev->flags & EV_DELETE) {
   1063 			/* knote_detach() drops fdp->fd_lock */
   1064 			knote_detach(kn, fdp, true);
   1065 			goto done;
   1066 		}
   1067 	}
   1068 
   1069 	/* disable knote */
   1070 	if ((kev->flags & EV_DISABLE)) {
   1071 		mutex_spin_enter(&kq->kq_lock);
   1072 		if ((kn->kn_status & KN_DISABLED) == 0)
   1073 			kn->kn_status |= KN_DISABLED;
   1074 		mutex_spin_exit(&kq->kq_lock);
   1075 	}
   1076 
   1077 	/* enable knote */
   1078 	if ((kev->flags & EV_ENABLE)) {
   1079 		knote_enqueue(kn);
   1080 	}
   1081 	mutex_exit(&fdp->fd_lock);
   1082  done:
   1083 	rw_exit(&kqueue_filter_lock);
   1084 	if (newkn != NULL)
   1085 		kmem_free(newkn, sizeof(*newkn));
   1086 	if (fp != NULL)
   1087 		fd_putfile(fd);
   1088 	return (error);
   1089 }
   1090 
   1091 #if defined(DEBUG)
   1092 static void
   1093 kq_check(struct kqueue *kq)
   1094 {
   1095 	const struct knote *kn;
   1096 	int count;
   1097 	int nmarker;
   1098 
   1099 	KASSERT(mutex_owned(&kq->kq_lock));
   1100 	KASSERT(kq->kq_count >= 0);
   1101 
   1102 	count = 0;
   1103 	nmarker = 0;
   1104 	TAILQ_FOREACH(kn, &kq->kq_head, kn_tqe) {
   1105 		if ((kn->kn_status & (KN_MARKER | KN_QUEUED)) == 0) {
   1106 			panic("%s: kq=%p kn=%p inconsist 1", __func__, kq, kn);
   1107 		}
   1108 		if ((kn->kn_status & KN_MARKER) == 0) {
   1109 			if (kn->kn_kq != kq) {
   1110 				panic("%s: kq=%p kn=%p inconsist 2",
   1111 				    __func__, kq, kn);
   1112 			}
   1113 			if ((kn->kn_status & KN_ACTIVE) == 0) {
   1114 				panic("%s: kq=%p kn=%p: not active",
   1115 				    __func__, kq, kn);
   1116 			}
   1117 			count++;
   1118 			if (count > kq->kq_count) {
   1119 				goto bad;
   1120 			}
   1121 		} else {
   1122 			nmarker++;
   1123 #if 0
   1124 			if (nmarker > 10000) {
   1125 				panic("%s: kq=%p too many markers: %d != %d, "
   1126 				    "nmarker=%d",
   1127 				    __func__, kq, kq->kq_count, count, nmarker);
   1128 			}
   1129 #endif
   1130 		}
   1131 	}
   1132 	if (kq->kq_count != count) {
   1133 bad:
   1134 		panic("%s: kq=%p inconsist 3: %d != %d, nmarker=%d",
   1135 		    __func__, kq, kq->kq_count, count, nmarker);
   1136 	}
   1137 }
   1138 #else /* defined(DEBUG) */
   1139 #define	kq_check(a)	/* nothing */
   1140 #endif /* defined(DEBUG) */
   1141 
   1142 /*
   1143  * Scan through the list of events on fp (for a maximum of maxevents),
   1144  * returning the results into ulistp.  The timeout is determined by tsp;
   1145  * if NULL, wait indefinitely; if 0-valued, perform a poll; otherwise wait
   1146  * as appropriate.
   1147  */
   1148 static int
   1149 kqueue_scan(file_t *fp, size_t maxevents, struct kevent *ulistp,
   1150 	    const struct timespec *tsp, register_t *retval,
   1151 	    const struct kevent_ops *keops, struct kevent *kevbuf,
   1152 	    size_t kevcnt)
   1153 {
   1154 	struct kqueue	*kq;
   1155 	struct kevent	*kevp;
   1156 	struct timespec	ats, sleepts;
   1157 	struct knote	*kn, *marker, morker;
   1158 	size_t		count, nkev, nevents;
   1159 	int		timeout, error, rv;
   1160 	filedesc_t	*fdp;
   1161 
   1162 	fdp = curlwp->l_fd;
   1163 	kq = fp->f_kqueue;
   1164 	count = maxevents;
   1165 	nkev = nevents = error = 0;
   1166 	if (count == 0) {
   1167 		*retval = 0;
   1168 		return 0;
   1169 	}
   1170 
   1171 	if (tsp) {				/* timeout supplied */
   1172 		ats = *tsp;
   1173 		if (inittimeleft(&ats, &sleepts) == -1) {
   1174 			*retval = maxevents;
   1175 			return EINVAL;
   1176 		}
   1177 		timeout = tstohz(&ats);
   1178 		if (timeout <= 0)
   1179 			timeout = -1;           /* do poll */
   1180 	} else {
   1181 		/* no timeout, wait forever */
   1182 		timeout = 0;
   1183 	}
   1184 
   1185 	memset(&morker, 0, sizeof(morker));
   1186 	marker = &morker;
   1187 	marker->kn_status = KN_MARKER;
   1188 	mutex_spin_enter(&kq->kq_lock);
   1189  retry:
   1190 	kevp = kevbuf;
   1191 	if (kq->kq_count == 0) {
   1192 		if (timeout >= 0) {
   1193 			error = cv_timedwait_sig(&kq->kq_cv,
   1194 			    &kq->kq_lock, timeout);
   1195 			if (error == 0) {
   1196 				 if (tsp == NULL || (timeout =
   1197 				     gettimeleft(&ats, &sleepts)) > 0)
   1198 					goto retry;
   1199 			} else {
   1200 				/* don't restart after signals... */
   1201 				if (error == ERESTART)
   1202 					error = EINTR;
   1203 				if (error == EWOULDBLOCK)
   1204 					error = 0;
   1205 			}
   1206 		}
   1207 		mutex_spin_exit(&kq->kq_lock);
   1208 	} else {
   1209 		/* mark end of knote list */
   1210 		TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
   1211 
   1212 		/*
   1213 		 * Acquire the fdp->fd_lock interlock to avoid races with
   1214 		 * file creation/destruction from other threads.
   1215 		 */
   1216 		mutex_spin_exit(&kq->kq_lock);
   1217 		mutex_enter(&fdp->fd_lock);
   1218 		mutex_spin_enter(&kq->kq_lock);
   1219 
   1220 		while (count != 0) {
   1221 			kn = TAILQ_FIRST(&kq->kq_head);	/* get next knote */
   1222 			while ((kn->kn_status & KN_MARKER) != 0) {
   1223 				if (kn == marker) {
   1224 					/* it's our marker, stop */
   1225 					TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
   1226 					if (count < maxevents || (tsp != NULL &&
   1227 					    (timeout = gettimeleft(&ats,
   1228 					    &sleepts)) <= 0))
   1229 						goto done;
   1230 					mutex_exit(&fdp->fd_lock);
   1231 					goto retry;
   1232 				}
   1233 				/* someone else's marker. */
   1234 				kn = TAILQ_NEXT(kn, kn_tqe);
   1235 			}
   1236 			kq_check(kq);
   1237 			kq->kq_count--;
   1238 			TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
   1239 			kn->kn_status &= ~KN_QUEUED;
   1240 			kn->kn_status |= KN_BUSY;
   1241 			kq_check(kq);
   1242 			if (kn->kn_status & KN_DISABLED) {
   1243 				kn->kn_status &= ~KN_BUSY;
   1244 				/* don't want disabled events */
   1245 				continue;
   1246 			}
   1247 			if ((kn->kn_flags & EV_ONESHOT) == 0) {
   1248 				mutex_spin_exit(&kq->kq_lock);
   1249 				KASSERT(kn->kn_fop != NULL);
   1250 				KASSERT(kn->kn_fop->f_event != NULL);
   1251 				KERNEL_LOCK(1, NULL);		/* XXXSMP */
   1252 				KASSERT(mutex_owned(&fdp->fd_lock));
   1253 				rv = (*kn->kn_fop->f_event)(kn, 0);
   1254 				KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
   1255 				mutex_spin_enter(&kq->kq_lock);
   1256 				/* Re-poll if note was re-enqueued. */
   1257 				if ((kn->kn_status & KN_QUEUED) != 0) {
   1258 					kn->kn_status &= ~KN_BUSY;
   1259 					continue;
   1260 				}
   1261 				if (rv == 0) {
   1262 					/*
   1263 					 * non-ONESHOT event that hasn't
   1264 					 * triggered again, so de-queue.
   1265 					 */
   1266 					kn->kn_status &= ~(KN_ACTIVE|KN_BUSY);
   1267 					continue;
   1268 				}
   1269 			}
   1270 			/* XXXAD should be got from f_event if !oneshot. */
   1271 			*kevp++ = kn->kn_kevent;
   1272 			nkev++;
   1273 			if (kn->kn_flags & EV_ONESHOT) {
   1274 				/* delete ONESHOT events after retrieval */
   1275 				kn->kn_status &= ~KN_BUSY;
   1276 				mutex_spin_exit(&kq->kq_lock);
   1277 				knote_detach(kn, fdp, true);
   1278 				mutex_enter(&fdp->fd_lock);
   1279 				mutex_spin_enter(&kq->kq_lock);
   1280 			} else if (kn->kn_flags & EV_CLEAR) {
   1281 				/* clear state after retrieval */
   1282 				kn->kn_data = 0;
   1283 				kn->kn_fflags = 0;
   1284 				kn->kn_status &= ~(KN_QUEUED|KN_ACTIVE|KN_BUSY);
   1285 			} else if (kn->kn_flags & EV_DISPATCH) {
   1286 				kn->kn_status |= KN_DISABLED;
   1287 				kn->kn_status &= ~(KN_QUEUED|KN_ACTIVE|KN_BUSY);
   1288 			} else {
   1289 				/* add event back on list */
   1290 				kq_check(kq);
   1291 				kn->kn_status |= KN_QUEUED;
   1292 				kn->kn_status &= ~KN_BUSY;
   1293 				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
   1294 				kq->kq_count++;
   1295 				kq_check(kq);
   1296 			}
   1297 			if (nkev == kevcnt) {
   1298 				/* do copyouts in kevcnt chunks */
   1299 				mutex_spin_exit(&kq->kq_lock);
   1300 				mutex_exit(&fdp->fd_lock);
   1301 				error = (*keops->keo_put_events)
   1302 				    (keops->keo_private,
   1303 				    kevbuf, ulistp, nevents, nkev);
   1304 				mutex_enter(&fdp->fd_lock);
   1305 				mutex_spin_enter(&kq->kq_lock);
   1306 				nevents += nkev;
   1307 				nkev = 0;
   1308 				kevp = kevbuf;
   1309 			}
   1310 			count--;
   1311 			if (error != 0 || count == 0) {
   1312 				/* remove marker */
   1313 				TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
   1314 				break;
   1315 			}
   1316 		}
   1317  done:
   1318 		mutex_spin_exit(&kq->kq_lock);
   1319 		mutex_exit(&fdp->fd_lock);
   1320 	}
   1321 	if (nkev != 0) {
   1322 		/* copyout remaining events */
   1323 		error = (*keops->keo_put_events)(keops->keo_private,
   1324 		    kevbuf, ulistp, nevents, nkev);
   1325 	}
   1326 	*retval = maxevents - count;
   1327 
   1328 	return error;
   1329 }
   1330 
   1331 /*
   1332  * fileops ioctl method for a kqueue descriptor.
   1333  *
   1334  * Two ioctls are currently supported. They both use struct kfilter_mapping:
   1335  *	KFILTER_BYNAME		find name for filter, and return result in
   1336  *				name, which is of size len.
   1337  *	KFILTER_BYFILTER	find filter for name. len is ignored.
   1338  */
   1339 /*ARGSUSED*/
   1340 static int
   1341 kqueue_ioctl(file_t *fp, u_long com, void *data)
   1342 {
   1343 	struct kfilter_mapping	*km;
   1344 	const struct kfilter	*kfilter;
   1345 	char			*name;
   1346 	int			error;
   1347 
   1348 	km = data;
   1349 	error = 0;
   1350 	name = kmem_alloc(KFILTER_MAXNAME, KM_SLEEP);
   1351 
   1352 	switch (com) {
   1353 	case KFILTER_BYFILTER:	/* convert filter -> name */
   1354 		rw_enter(&kqueue_filter_lock, RW_READER);
   1355 		kfilter = kfilter_byfilter(km->filter);
   1356 		if (kfilter != NULL) {
   1357 			strlcpy(name, kfilter->name, KFILTER_MAXNAME);
   1358 			rw_exit(&kqueue_filter_lock);
   1359 			error = copyoutstr(name, km->name, km->len, NULL);
   1360 		} else {
   1361 			rw_exit(&kqueue_filter_lock);
   1362 			error = ENOENT;
   1363 		}
   1364 		break;
   1365 
   1366 	case KFILTER_BYNAME:	/* convert name -> filter */
   1367 		error = copyinstr(km->name, name, KFILTER_MAXNAME, NULL);
   1368 		if (error) {
   1369 			break;
   1370 		}
   1371 		rw_enter(&kqueue_filter_lock, RW_READER);
   1372 		kfilter = kfilter_byname(name);
   1373 		if (kfilter != NULL)
   1374 			km->filter = kfilter->filter;
   1375 		else
   1376 			error = ENOENT;
   1377 		rw_exit(&kqueue_filter_lock);
   1378 		break;
   1379 
   1380 	default:
   1381 		error = ENOTTY;
   1382 		break;
   1383 
   1384 	}
   1385 	kmem_free(name, KFILTER_MAXNAME);
   1386 	return (error);
   1387 }
   1388 
   1389 /*
   1390  * fileops fcntl method for a kqueue descriptor.
   1391  */
   1392 static int
   1393 kqueue_fcntl(file_t *fp, u_int com, void *data)
   1394 {
   1395 
   1396 	return (ENOTTY);
   1397 }
   1398 
   1399 /*
   1400  * fileops poll method for a kqueue descriptor.
   1401  * Determine if kqueue has events pending.
   1402  */
   1403 static int
   1404 kqueue_poll(file_t *fp, int events)
   1405 {
   1406 	struct kqueue	*kq;
   1407 	int		revents;
   1408 
   1409 	kq = fp->f_kqueue;
   1410 
   1411 	revents = 0;
   1412 	if (events & (POLLIN | POLLRDNORM)) {
   1413 		mutex_spin_enter(&kq->kq_lock);
   1414 		if (kq->kq_count != 0) {
   1415 			revents |= events & (POLLIN | POLLRDNORM);
   1416 		} else {
   1417 			selrecord(curlwp, &kq->kq_sel);
   1418 		}
   1419 		kq_check(kq);
   1420 		mutex_spin_exit(&kq->kq_lock);
   1421 	}
   1422 
   1423 	return revents;
   1424 }
   1425 
   1426 /*
   1427  * fileops stat method for a kqueue descriptor.
   1428  * Returns dummy info, with st_size being number of events pending.
   1429  */
   1430 static int
   1431 kqueue_stat(file_t *fp, struct stat *st)
   1432 {
   1433 	struct kqueue *kq;
   1434 
   1435 	kq = fp->f_kqueue;
   1436 
   1437 	memset(st, 0, sizeof(*st));
   1438 	st->st_size = kq->kq_count;
   1439 	st->st_blksize = sizeof(struct kevent);
   1440 	st->st_mode = S_IFIFO;
   1441 
   1442 	return 0;
   1443 }
   1444 
   1445 static void
   1446 kqueue_doclose(struct kqueue *kq, struct klist *list, int fd)
   1447 {
   1448 	struct knote *kn;
   1449 	filedesc_t *fdp;
   1450 
   1451 	fdp = kq->kq_fdp;
   1452 
   1453 	KASSERT(mutex_owned(&fdp->fd_lock));
   1454 
   1455 	for (kn = SLIST_FIRST(list); kn != NULL;) {
   1456 		if (kq != kn->kn_kq) {
   1457 			kn = SLIST_NEXT(kn, kn_link);
   1458 			continue;
   1459 		}
   1460 		knote_detach(kn, fdp, true);
   1461 		mutex_enter(&fdp->fd_lock);
   1462 		kn = SLIST_FIRST(list);
   1463 	}
   1464 }
   1465 
   1466 
   1467 /*
   1468  * fileops close method for a kqueue descriptor.
   1469  */
   1470 static int
   1471 kqueue_close(file_t *fp)
   1472 {
   1473 	struct kqueue *kq;
   1474 	filedesc_t *fdp;
   1475 	fdfile_t *ff;
   1476 	int i;
   1477 
   1478 	kq = fp->f_kqueue;
   1479 	fp->f_kqueue = NULL;
   1480 	fp->f_type = 0;
   1481 	fdp = curlwp->l_fd;
   1482 
   1483 	mutex_enter(&fdp->fd_lock);
   1484 	for (i = 0; i <= fdp->fd_lastkqfile; i++) {
   1485 		if ((ff = fdp->fd_dt->dt_ff[i]) == NULL)
   1486 			continue;
   1487 		kqueue_doclose(kq, (struct klist *)&ff->ff_knlist, i);
   1488 	}
   1489 	if (fdp->fd_knhashmask != 0) {
   1490 		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
   1491 			kqueue_doclose(kq, &fdp->fd_knhash[i], -1);
   1492 		}
   1493 	}
   1494 	mutex_exit(&fdp->fd_lock);
   1495 
   1496 	KASSERT(kq->kq_count == 0);
   1497 	mutex_destroy(&kq->kq_lock);
   1498 	cv_destroy(&kq->kq_cv);
   1499 	seldestroy(&kq->kq_sel);
   1500 	kmem_free(kq, sizeof(*kq));
   1501 
   1502 	return (0);
   1503 }
   1504 
   1505 /*
   1506  * struct fileops kqfilter method for a kqueue descriptor.
   1507  * Event triggered when monitored kqueue changes.
   1508  */
   1509 static int
   1510 kqueue_kqfilter(file_t *fp, struct knote *kn)
   1511 {
   1512 	struct kqueue *kq;
   1513 
   1514 	kq = ((file_t *)kn->kn_obj)->f_kqueue;
   1515 
   1516 	KASSERT(fp == kn->kn_obj);
   1517 
   1518 	if (kn->kn_filter != EVFILT_READ)
   1519 		return 1;
   1520 
   1521 	kn->kn_fop = &kqread_filtops;
   1522 	mutex_enter(&kq->kq_lock);
   1523 	SLIST_INSERT_HEAD(&kq->kq_sel.sel_klist, kn, kn_selnext);
   1524 	mutex_exit(&kq->kq_lock);
   1525 
   1526 	return 0;
   1527 }
   1528 
   1529 
   1530 /*
   1531  * Walk down a list of knotes, activating them if their event has
   1532  * triggered.  The caller's object lock (e.g. device driver lock)
   1533  * must be held.
   1534  */
   1535 void
   1536 knote(struct klist *list, long hint)
   1537 {
   1538 	struct knote *kn, *tmpkn;
   1539 
   1540 	SLIST_FOREACH_SAFE(kn, list, kn_selnext, tmpkn) {
   1541 		KASSERT(kn->kn_fop != NULL);
   1542 		KASSERT(kn->kn_fop->f_event != NULL);
   1543 		if ((*kn->kn_fop->f_event)(kn, hint))
   1544 			knote_activate(kn);
   1545 	}
   1546 }
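
/*
 * A minimal driver-side sketch (softc and selinfo names hypothetical):
 * most NetBSD drivers do not call knote() directly but go through
 * selnotify(), which wakes select()/poll() waiters and also invokes
 * the knote() walk above on the selinfo's sel_klist.
 *
 *	mutex_enter(&sc->sc_lock);
 *	sc->sc_ready = true;
 *	selnotify(&sc->sc_rsel, POLLIN, 0);
 *	mutex_exit(&sc->sc_lock);
 */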
   1547 
   1548 /*
   1549  * Remove all knotes referencing a specified fd
   1550  */
   1551 void
   1552 knote_fdclose(int fd)
   1553 {
   1554 	struct klist *list;
   1555 	struct knote *kn;
   1556 	filedesc_t *fdp;
   1557 
   1558 	fdp = curlwp->l_fd;
   1559 	list = (struct klist *)&fdp->fd_dt->dt_ff[fd]->ff_knlist;
   1560 	mutex_enter(&fdp->fd_lock);
   1561 	while ((kn = SLIST_FIRST(list)) != NULL) {
   1562 		knote_detach(kn, fdp, true);
   1563 		mutex_enter(&fdp->fd_lock);
   1564 	}
   1565 	mutex_exit(&fdp->fd_lock);
   1566 }
   1567 
   1568 /*
   1569  * Drop knote.  Called with fdp->fd_lock held, and will drop it before
   1570  * returning.
   1571  */
   1572 static void
   1573 knote_detach(struct knote *kn, filedesc_t *fdp, bool dofop)
   1574 {
   1575 	struct klist *list;
   1576 	struct kqueue *kq;
   1577 
   1578 	kq = kn->kn_kq;
   1579 
   1580 	KASSERT((kn->kn_status & KN_MARKER) == 0);
   1581 	KASSERT(mutex_owned(&fdp->fd_lock));
   1582 
   1583 	KASSERT(kn->kn_fop != NULL);
   1584 	/* Remove from monitored object. */
   1585 	if (dofop) {
   1586 		KASSERT(kn->kn_fop->f_detach != NULL);
   1587 		KERNEL_LOCK(1, NULL);		/* XXXSMP */
   1588 		(*kn->kn_fop->f_detach)(kn);
   1589 		KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
   1590 	}
   1591 
   1592 	/* Remove from descriptor table. */
   1593 	if (kn->kn_fop->f_isfd)
   1594 		list = (struct klist *)&fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist;
   1595 	else
   1596 		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
   1597 
   1598 	SLIST_REMOVE(list, kn, knote, kn_link);
   1599 
   1600 	/* Remove from kqueue. */
   1601 again:
   1602 	mutex_spin_enter(&kq->kq_lock);
   1603 	if ((kn->kn_status & KN_QUEUED) != 0) {
   1604 		kq_check(kq);
   1605 		kq->kq_count--;
   1606 		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
   1607 		kn->kn_status &= ~KN_QUEUED;
   1608 		kq_check(kq);
   1609 	} else if (kn->kn_status & KN_BUSY) {
   1610 		mutex_spin_exit(&kq->kq_lock);
   1611 		goto again;
   1612 	}
   1613 	mutex_spin_exit(&kq->kq_lock);
   1614 
   1615 	mutex_exit(&fdp->fd_lock);
   1616 	if (kn->kn_fop->f_isfd)
   1617 		fd_putfile(kn->kn_id);
   1618 	atomic_dec_uint(&kn->kn_kfilter->refcnt);
   1619 	kmem_free(kn, sizeof(*kn));
   1620 }
   1621 
   1622 /*
   1623  * Queue new event for knote.
   1624  */
   1625 static void
   1626 knote_enqueue(struct knote *kn)
   1627 {
   1628 	struct kqueue *kq;
   1629 
   1630 	KASSERT((kn->kn_status & KN_MARKER) == 0);
   1631 
   1632 	kq = kn->kn_kq;
   1633 
   1634 	mutex_spin_enter(&kq->kq_lock);
   1635 	if ((kn->kn_status & KN_DISABLED) != 0) {
   1636 		kn->kn_status &= ~KN_DISABLED;
   1637 	}
   1638 	if ((kn->kn_status & (KN_ACTIVE | KN_QUEUED)) == KN_ACTIVE) {
   1639 		kq_check(kq);
   1640 		kn->kn_status |= KN_QUEUED;
   1641 		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
   1642 		kq->kq_count++;
   1643 		kq_check(kq);
   1644 		cv_broadcast(&kq->kq_cv);
   1645 		selnotify(&kq->kq_sel, 0, NOTE_SUBMIT);
   1646 	}
   1647 	mutex_spin_exit(&kq->kq_lock);
   1648 }
   1649 /*
   1650  * Activate a knote and queue it on its kqueue, unless disabled or
   1651  * already queued.
   1651  */
   1652 static void
   1653 knote_activate(struct knote *kn)
   1654 {
   1655 	struct kqueue *kq;
   1656 
   1657 	KASSERT((kn->kn_status & KN_MARKER) == 0);
   1658 
   1659 	kq = kn->kn_kq;
   1660 
   1661 	mutex_spin_enter(&kq->kq_lock);
   1662 	kn->kn_status |= KN_ACTIVE;
   1663 	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) {
   1664 		kq_check(kq);
   1665 		kn->kn_status |= KN_QUEUED;
   1666 		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
   1667 		kq->kq_count++;
   1668 		kq_check(kq);
   1669 		cv_broadcast(&kq->kq_cv);
   1670 		selnotify(&kq->kq_sel, 0, NOTE_SUBMIT);
   1671 	}
   1672 	mutex_spin_exit(&kq->kq_lock);
   1673 }
   1674