      1 /*	$NetBSD: kern_event.c,v 1.80.2.3 2018/11/21 12:12:15 martin Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*-
     33  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon (at) FreeBSD.org>
     34  * All rights reserved.
     35  *
     36  * Redistribution and use in source and binary forms, with or without
     37  * modification, are permitted provided that the following conditions
     38  * are met:
     39  * 1. Redistributions of source code must retain the above copyright
     40  *    notice, this list of conditions and the following disclaimer.
     41  * 2. Redistributions in binary form must reproduce the above copyright
     42  *    notice, this list of conditions and the following disclaimer in the
     43  *    documentation and/or other materials provided with the distribution.
     44  *
     45  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     46  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     47  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     48  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     49  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     50  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     51  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     52  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     53  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     54  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     55  * SUCH DAMAGE.
     56  *
     57  * FreeBSD: src/sys/kern/kern_event.c,v 1.27 2001/07/05 17:10:44 rwatson Exp
     58  */
     59 
     60 #include <sys/cdefs.h>
     61 __KERNEL_RCSID(0, "$NetBSD: kern_event.c,v 1.80.2.3 2018/11/21 12:12:15 martin Exp $");
     62 
     63 #include <sys/param.h>
     64 #include <sys/systm.h>
     65 #include <sys/kernel.h>
     66 #include <sys/wait.h>
     67 #include <sys/proc.h>
     68 #include <sys/file.h>
     69 #include <sys/select.h>
     70 #include <sys/queue.h>
     71 #include <sys/event.h>
     72 #include <sys/eventvar.h>
     73 #include <sys/poll.h>
     74 #include <sys/kmem.h>
     75 #include <sys/stat.h>
     76 #include <sys/filedesc.h>
     77 #include <sys/syscallargs.h>
     78 #include <sys/kauth.h>
     79 #include <sys/conf.h>
     80 #include <sys/atomic.h>
     81 
     82 static int	kqueue_scan(file_t *, size_t, struct kevent *,
     83 			    const struct timespec *, register_t *,
     84 			    const struct kevent_ops *, struct kevent *,
     85 			    size_t);
     86 static int	kqueue_ioctl(file_t *, u_long, void *);
     87 static int	kqueue_fcntl(file_t *, u_int, void *);
     88 static int	kqueue_poll(file_t *, int);
     89 static int	kqueue_kqfilter(file_t *, struct knote *);
     90 static int	kqueue_stat(file_t *, struct stat *);
     91 static int	kqueue_close(file_t *);
     92 static int	kqueue_register(struct kqueue *, struct kevent *);
     93 static void	kqueue_doclose(struct kqueue *, struct klist *, int);
     94 
     95 static void	knote_detach(struct knote *, filedesc_t *fdp, bool);
     96 static void	knote_enqueue(struct knote *);
     97 static void	knote_activate(struct knote *);
     98 
     99 static void	filt_kqdetach(struct knote *);
    100 static int	filt_kqueue(struct knote *, long hint);
    101 static int	filt_procattach(struct knote *);
    102 static void	filt_procdetach(struct knote *);
    103 static int	filt_proc(struct knote *, long hint);
    104 static int	filt_fileattach(struct knote *);
    105 static void	filt_timerexpire(void *x);
    106 static int	filt_timerattach(struct knote *);
    107 static void	filt_timerdetach(struct knote *);
    108 static int	filt_timer(struct knote *, long hint);
    109 
    110 static const struct fileops kqueueops = {
    111 	.fo_read = (void *)enxio,
    112 	.fo_write = (void *)enxio,
    113 	.fo_ioctl = kqueue_ioctl,
    114 	.fo_fcntl = kqueue_fcntl,
    115 	.fo_poll = kqueue_poll,
    116 	.fo_stat = kqueue_stat,
    117 	.fo_close = kqueue_close,
    118 	.fo_kqfilter = kqueue_kqfilter,
    119 	.fo_restart = fnullop_restart,
    120 };
    121 
    122 static const struct filterops kqread_filtops =
    123 	{ 1, NULL, filt_kqdetach, filt_kqueue };
    124 static const struct filterops proc_filtops =
    125 	{ 0, filt_procattach, filt_procdetach, filt_proc };
    126 static const struct filterops file_filtops =
    127 	{ 1, filt_fileattach, NULL, NULL };
    128 static const struct filterops timer_filtops =
    129 	{ 0, filt_timerattach, filt_timerdetach, filt_timer };
    130 
    131 static u_int	kq_ncallouts = 0;
    132 static int	kq_calloutmax = (4 * 1024);
    133 
    134 #define	KN_HASHSIZE		64		/* XXX should be tunable */
    135 #define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
    136 
    137 extern const struct filterops sig_filtops;
    138 
    139 /*
     140  * Table for all system-defined filters.
    141  * These should be listed in the numeric order of the EVFILT_* defines.
    142  * If filtops is NULL, the filter isn't implemented in NetBSD.
    143  * End of list is when name is NULL.
    144  *
    145  * Note that 'refcnt' is meaningless for built-in filters.
    146  */
    147 struct kfilter {
    148 	const char	*name;		/* name of filter */
    149 	uint32_t	filter;		/* id of filter */
    150 	unsigned	refcnt;		/* reference count */
    151 	const struct filterops *filtops;/* operations for filter */
    152 	size_t		namelen;	/* length of name string */
    153 };
    154 
    155 /* System defined filters */
    156 static struct kfilter sys_kfilters[] = {
    157 	{ "EVFILT_READ",	EVFILT_READ,	0, &file_filtops, 0 },
     158 	{ "EVFILT_WRITE",	EVFILT_WRITE,	0, &file_filtops, 0 },
    159 	{ "EVFILT_AIO",		EVFILT_AIO,	0, NULL, 0 },
    160 	{ "EVFILT_VNODE",	EVFILT_VNODE,	0, &file_filtops, 0 },
    161 	{ "EVFILT_PROC",	EVFILT_PROC,	0, &proc_filtops, 0 },
    162 	{ "EVFILT_SIGNAL",	EVFILT_SIGNAL,	0, &sig_filtops, 0 },
    163 	{ "EVFILT_TIMER",	EVFILT_TIMER,	0, &timer_filtops, 0 },
    164 	{ NULL,			0,		0, NULL, 0 },
    165 };
    166 
    167 /* User defined kfilters */
    168 static struct kfilter	*user_kfilters;		/* array */
    169 static int		user_kfilterc;		/* current offset */
    170 static int		user_kfiltermaxc;	/* max size so far */
    171 static size_t		user_kfiltersz;		/* size of allocated memory */
    172 
    173 /* Locks */
    174 static krwlock_t	kqueue_filter_lock;	/* lock on filter lists */
    175 static kmutex_t		kqueue_misc_lock;	/* miscellaneous */
    176 
    177 static kauth_listener_t	kqueue_listener;
    178 
    179 static int
    180 kqueue_listener_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
    181     void *arg0, void *arg1, void *arg2, void *arg3)
    182 {
    183 	struct proc *p;
    184 	int result;
    185 
    186 	result = KAUTH_RESULT_DEFER;
    187 	p = arg0;
    188 
    189 	if (action != KAUTH_PROCESS_KEVENT_FILTER)
    190 		return result;
    191 
     192 	if (kauth_cred_getuid(p->p_cred) != kauth_cred_getuid(cred) ||
     193 	    ISSET(p->p_flag, PK_SUGID))
    194 		return result;
    195 
    196 	result = KAUTH_RESULT_ALLOW;
    197 
    198 	return result;
    199 }
    200 
    201 /*
    202  * Initialize the kqueue subsystem.
    203  */
    204 void
    205 kqueue_init(void)
    206 {
    207 
    208 	rw_init(&kqueue_filter_lock);
    209 	mutex_init(&kqueue_misc_lock, MUTEX_DEFAULT, IPL_NONE);
    210 
    211 	kqueue_listener = kauth_listen_scope(KAUTH_SCOPE_PROCESS,
    212 	    kqueue_listener_cb, NULL);
    213 }
    214 
    215 /*
    216  * Find kfilter entry by name, or NULL if not found.
    217  */
    218 static struct kfilter *
    219 kfilter_byname_sys(const char *name)
    220 {
    221 	int i;
    222 
    223 	KASSERT(rw_lock_held(&kqueue_filter_lock));
    224 
    225 	for (i = 0; sys_kfilters[i].name != NULL; i++) {
    226 		if (strcmp(name, sys_kfilters[i].name) == 0)
    227 			return &sys_kfilters[i];
    228 	}
    229 	return NULL;
    230 }
    231 
    232 static struct kfilter *
    233 kfilter_byname_user(const char *name)
    234 {
    235 	int i;
    236 
    237 	KASSERT(rw_lock_held(&kqueue_filter_lock));
    238 
    239 	/* user filter slots have a NULL name if previously deregistered */
    240 	for (i = 0; i < user_kfilterc ; i++) {
    241 		if (user_kfilters[i].name != NULL &&
    242 		    strcmp(name, user_kfilters[i].name) == 0)
    243 			return &user_kfilters[i];
    244 	}
    245 	return NULL;
    246 }
    247 
    248 static struct kfilter *
    249 kfilter_byname(const char *name)
    250 {
    251 	struct kfilter *kfilter;
    252 
    253 	KASSERT(rw_lock_held(&kqueue_filter_lock));
    254 
    255 	if ((kfilter = kfilter_byname_sys(name)) != NULL)
    256 		return kfilter;
    257 
    258 	return kfilter_byname_user(name);
    259 }
    260 
    261 /*
    262  * Find kfilter entry by filter id, or NULL if not found.
    263  * Assumes entries are indexed in filter id order, for speed.
    264  */
    265 static struct kfilter *
    266 kfilter_byfilter(uint32_t filter)
    267 {
    268 	struct kfilter *kfilter;
    269 
    270 	KASSERT(rw_lock_held(&kqueue_filter_lock));
    271 
    272 	if (filter < EVFILT_SYSCOUNT)	/* it's a system filter */
    273 		kfilter = &sys_kfilters[filter];
    274 	else if (user_kfilters != NULL &&
    275 	    filter < EVFILT_SYSCOUNT + user_kfilterc)
    276 					/* it's a user filter */
    277 		kfilter = &user_kfilters[filter - EVFILT_SYSCOUNT];
    278 	else
    279 		return (NULL);		/* out of range */
    280 	KASSERT(kfilter->filter == filter);	/* sanity check! */
    281 	return (kfilter);
    282 }
    283 
    284 /*
    285  * Register a new kfilter. Stores the entry in user_kfilters.
    286  * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
    287  * If retfilter != NULL, the new filterid is returned in it.
    288  */
    289 int
    290 kfilter_register(const char *name, const struct filterops *filtops,
    291 		 int *retfilter)
    292 {
    293 	struct kfilter *kfilter;
    294 	size_t len;
    295 	int i;
    296 
    297 	if (name == NULL || name[0] == '\0' || filtops == NULL)
    298 		return (EINVAL);	/* invalid args */
    299 
    300 	rw_enter(&kqueue_filter_lock, RW_WRITER);
    301 	if (kfilter_byname(name) != NULL) {
    302 		rw_exit(&kqueue_filter_lock);
    303 		return (EEXIST);	/* already exists */
    304 	}
    305 	if (user_kfilterc > 0xffffffff - EVFILT_SYSCOUNT) {
    306 		rw_exit(&kqueue_filter_lock);
    307 		return (EINVAL);	/* too many */
    308 	}
    309 
    310 	for (i = 0; i < user_kfilterc; i++) {
    311 		kfilter = &user_kfilters[i];
    312 		if (kfilter->name == NULL) {
    313 			/* Previously deregistered slot.  Reuse. */
    314 			goto reuse;
    315 		}
    316 	}
    317 
     318 	/* check if we need to grow user_kfilters */
    319 	if (user_kfilterc + 1 > user_kfiltermaxc) {
    320 		/* Grow in KFILTER_EXTENT chunks. */
    321 		user_kfiltermaxc += KFILTER_EXTENT;
    322 		len = user_kfiltermaxc * sizeof(*kfilter);
    323 		kfilter = kmem_alloc(len, KM_SLEEP);
    324 		memset((char *)kfilter + user_kfiltersz, 0, len - user_kfiltersz);
    325 		if (user_kfilters != NULL) {
    326 			memcpy(kfilter, user_kfilters, user_kfiltersz);
    327 			kmem_free(user_kfilters, user_kfiltersz);
    328 		}
    329 		user_kfiltersz = len;
    330 		user_kfilters = kfilter;
    331 	}
    332 	/* Adding new slot */
    333 	kfilter = &user_kfilters[user_kfilterc++];
    334 reuse:
    335 	kfilter->namelen = strlen(name) + 1;
    336 	kfilter->name = kmem_alloc(kfilter->namelen, KM_SLEEP);
    337 	memcpy(__UNCONST(kfilter->name), name, kfilter->namelen);
    338 
    339 	kfilter->filter = (kfilter - user_kfilters) + EVFILT_SYSCOUNT;
    340 
    341 	kfilter->filtops = kmem_alloc(sizeof(*filtops), KM_SLEEP);
    342 	memcpy(__UNCONST(kfilter->filtops), filtops, sizeof(*filtops));
    343 
    344 	if (retfilter != NULL)
    345 		*retfilter = kfilter->filter;
    346 	rw_exit(&kqueue_filter_lock);
    347 
    348 	return (0);
    349 }
    350 
    351 /*
    352  * Unregister a kfilter previously registered with kfilter_register.
    353  * This retains the filter id, but clears the name and frees filtops (filter
    354  * operations), so that the number isn't reused during a boot.
    355  * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
    356  */
    357 int
    358 kfilter_unregister(const char *name)
    359 {
    360 	struct kfilter *kfilter;
    361 
    362 	if (name == NULL || name[0] == '\0')
    363 		return (EINVAL);	/* invalid name */
    364 
    365 	rw_enter(&kqueue_filter_lock, RW_WRITER);
    366 	if (kfilter_byname_sys(name) != NULL) {
    367 		rw_exit(&kqueue_filter_lock);
    368 		return (EINVAL);	/* can't detach system filters */
    369 	}
    370 
    371 	kfilter = kfilter_byname_user(name);
    372 	if (kfilter == NULL) {
    373 		rw_exit(&kqueue_filter_lock);
    374 		return (ENOENT);
    375 	}
    376 	if (kfilter->refcnt != 0) {
    377 		rw_exit(&kqueue_filter_lock);
    378 		return (EBUSY);
    379 	}
    380 
     381 	/* Cast away const (but we know it's safe). */
    382 	kmem_free(__UNCONST(kfilter->name), kfilter->namelen);
    383 	kfilter->name = NULL;	/* mark as `not implemented' */
    384 
    385 	if (kfilter->filtops != NULL) {
     386 		/* Cast away const (but we know it's safe). */
    387 		kmem_free(__UNCONST(kfilter->filtops),
    388 		    sizeof(*kfilter->filtops));
    389 		kfilter->filtops = NULL; /* mark as `not implemented' */
    390 	}
    391 	rw_exit(&kqueue_filter_lock);
    392 
    393 	return (0);
    394 }
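
/*
 * Illustrative sketch (not part of the original source): how a kernel
 * module might use kfilter_register()/kfilter_unregister() above to add
 * a loadable, user-defined filter.  The name "EVFILT_EXAMPLE" and the
 * filt_example* methods are hypothetical; only the filterops layout
 * (f_isfd, f_attach, f_detach, f_event) comes from this file.
 *
 *	static int
 *	filt_exampleattach(struct knote *kn)
 *	{
 *		... hook the knote onto the monitored object ...
 *		return 0;
 *	}
 *
 *	static void
 *	filt_exampledetach(struct knote *kn)
 *	{
 *		... unhook the knote ...
 *	}
 *
 *	static int
 *	filt_example(struct knote *kn, long hint)
 *	{
 *		return kn->kn_data != 0;
 *	}
 *
 *	static const struct filterops example_filtops =
 *		{ 0, filt_exampleattach, filt_exampledetach, filt_example };
 *
 *	int filter_id, error;
 *
 *	error = kfilter_register("EVFILT_EXAMPLE", &example_filtops,
 *	    &filter_id);
 *	...
 *	error = kfilter_unregister("EVFILT_EXAMPLE");
 */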
    395 
    396 
    397 /*
    398  * Filter attach method for EVFILT_READ and EVFILT_WRITE on normal file
    399  * descriptors. Calls fileops kqfilter method for given file descriptor.
     400  * descriptors. Calls the fileops kqfilter method for the given file descriptor.
    401 static int
    402 filt_fileattach(struct knote *kn)
    403 {
    404 	file_t *fp;
    405 
    406 	fp = kn->kn_obj;
    407 
    408 	return (*fp->f_ops->fo_kqfilter)(fp, kn);
    409 }
    410 
    411 /*
    412  * Filter detach method for EVFILT_READ on kqueue descriptor.
    413  */
    414 static void
    415 filt_kqdetach(struct knote *kn)
    416 {
    417 	struct kqueue *kq;
    418 
    419 	kq = ((file_t *)kn->kn_obj)->f_data;
    420 
    421 	mutex_spin_enter(&kq->kq_lock);
    422 	SLIST_REMOVE(&kq->kq_sel.sel_klist, kn, knote, kn_selnext);
    423 	mutex_spin_exit(&kq->kq_lock);
    424 }
    425 
    426 /*
    427  * Filter event method for EVFILT_READ on kqueue descriptor.
    428  */
    429 /*ARGSUSED*/
    430 static int
    431 filt_kqueue(struct knote *kn, long hint)
    432 {
    433 	struct kqueue *kq;
    434 	int rv;
    435 
    436 	kq = ((file_t *)kn->kn_obj)->f_data;
    437 
    438 	if (hint != NOTE_SUBMIT)
    439 		mutex_spin_enter(&kq->kq_lock);
    440 	kn->kn_data = kq->kq_count;
    441 	rv = (kn->kn_data > 0);
    442 	if (hint != NOTE_SUBMIT)
    443 		mutex_spin_exit(&kq->kq_lock);
    444 
    445 	return rv;
    446 }
    447 
    448 /*
    449  * Filter attach method for EVFILT_PROC.
    450  */
    451 static int
    452 filt_procattach(struct knote *kn)
    453 {
    454 	struct proc *p;
    455 	struct lwp *curl;
    456 
    457 	curl = curlwp;
    458 
    459 	mutex_enter(proc_lock);
    460 	if (kn->kn_flags & EV_FLAG1) {
    461 		/*
    462 		 * NOTE_TRACK attaches to the child process too early
    463 		 * for proc_find, so do a raw look up and check the state
    464 		 * explicitly.
    465 		 */
    466 		p = proc_find_raw(kn->kn_id);
    467 		if (p != NULL && p->p_stat != SIDL)
    468 			p = NULL;
    469 	} else {
    470 		p = proc_find(kn->kn_id);
    471 	}
    472 
    473 	if (p == NULL) {
    474 		mutex_exit(proc_lock);
    475 		return ESRCH;
    476 	}
    477 
    478 	/*
    479 	 * Fail if it's not owned by you, or the last exec gave us
    480 	 * setuid/setgid privs (unless you're root).
    481 	 */
    482 	mutex_enter(p->p_lock);
    483 	mutex_exit(proc_lock);
    484 	if (kauth_authorize_process(curl->l_cred, KAUTH_PROCESS_KEVENT_FILTER,
    485 	    p, NULL, NULL, NULL) != 0) {
     486 		mutex_exit(p->p_lock);
    487 		return EACCES;
    488 	}
    489 
    490 	kn->kn_obj = p;
    491 	kn->kn_flags |= EV_CLEAR;	/* automatically set */
    492 
    493 	/*
    494 	 * internal flag indicating registration done by kernel
    495 	 */
    496 	if (kn->kn_flags & EV_FLAG1) {
    497 		kn->kn_data = kn->kn_sdata;	/* ppid */
    498 		kn->kn_fflags = NOTE_CHILD;
    499 		kn->kn_flags &= ~EV_FLAG1;
    500 	}
    501 	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
     502 	mutex_exit(p->p_lock);
    503 
    504 	return 0;
    505 }
    506 
    507 /*
    508  * Filter detach method for EVFILT_PROC.
    509  *
    510  * The knote may be attached to a different process, which may exit,
    511  * leaving nothing for the knote to be attached to.  So when the process
    512  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
    513  * it will be deleted when read out.  However, as part of the knote deletion,
    514  * this routine is called, so a check is needed to avoid actually performing
    515  * a detach, because the original process might not exist any more.
    516  */
    517 static void
    518 filt_procdetach(struct knote *kn)
    519 {
    520 	struct proc *p;
    521 
    522 	if (kn->kn_status & KN_DETACHED)
    523 		return;
    524 
    525 	p = kn->kn_obj;
    526 
    527 	mutex_enter(p->p_lock);
    528 	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
    529 	mutex_exit(p->p_lock);
    530 }
    531 
    532 /*
    533  * Filter event method for EVFILT_PROC.
    534  */
    535 static int
    536 filt_proc(struct knote *kn, long hint)
    537 {
    538 	u_int event, fflag;
    539 	struct kevent kev;
    540 	struct kqueue *kq;
    541 	int error;
    542 
    543 	event = (u_int)hint & NOTE_PCTRLMASK;
    544 	kq = kn->kn_kq;
    545 	fflag = 0;
    546 
    547 	/* If the user is interested in this event, record it. */
    548 	if (kn->kn_sfflags & event)
    549 		fflag |= event;
    550 
    551 	if (event == NOTE_EXIT) {
    552 		struct proc *p = kn->kn_obj;
    553 
    554 		if (p != NULL)
    555 			kn->kn_data = p->p_xstat;
    556 		/*
    557 		 * Process is gone, so flag the event as finished.
    558 		 *
    559 		 * Detach the knote from watched process and mark
    560 		 * it as such. We can't leave this to kqueue_scan(),
    561 		 * since the process might not exist by then. And we
    562 		 * have to do this now, since psignal KNOTE() is called
    563 		 * also for zombies and we might end up reading freed
    564 		 * memory if the kevent would already be picked up
    565 		 * and knote g/c'ed.
    566 		 */
    567 		filt_procdetach(kn);
    568 
    569 		mutex_spin_enter(&kq->kq_lock);
    570 		kn->kn_status |= KN_DETACHED;
     571 		/* Mark as ONESHOT, so that the knote is g/c'ed when read */
    572 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
    573 		kn->kn_fflags |= fflag;
    574 		mutex_spin_exit(&kq->kq_lock);
    575 
    576 		return 1;
    577 	}
    578 
    579 	mutex_spin_enter(&kq->kq_lock);
    580 	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
    581 		/*
    582 		 * Process forked, and user wants to track the new process,
    583 		 * so attach a new knote to it, and immediately report an
    584 		 * event with the parent's pid.  Register knote with new
    585 		 * process.
    586 		 */
    587 		memset(&kev, 0, sizeof(kev));
    588 		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
    589 		kev.filter = kn->kn_filter;
    590 		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
    591 		kev.fflags = kn->kn_sfflags;
    592 		kev.data = kn->kn_id;			/* parent */
    593 		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
    594 		mutex_spin_exit(&kq->kq_lock);
    595 		error = kqueue_register(kq, &kev);
    596 		mutex_spin_enter(&kq->kq_lock);
    597 		if (error != 0)
    598 			kn->kn_fflags |= NOTE_TRACKERR;
    599 	}
    600 	kn->kn_fflags |= fflag;
    601 	fflag = kn->kn_fflags;
    602 	mutex_spin_exit(&kq->kq_lock);
    603 
    604 	return fflag != 0;
    605 }
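
/*
 * Illustrative userland sketch (not part of the original source):
 * watching a process with the EVFILT_PROC filter implemented above,
 * including NOTE_TRACK so that fork()s are followed as filt_proc()
 * does.  fork_child() is a hypothetical helper; the NOTE_* flags and
 * EV_SET() are the standard <sys/event.h> interface.
 *
 *	struct kevent kev;
 *	pid_t pid = fork_child();
 *
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 *
 * A returned event with NOTE_EXIT set in fflags carries the exit status
 * in data; a NOTE_CHILD event on the new process reports the parent pid.
 */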
    606 
    607 static void
    608 filt_timerexpire(void *knx)
    609 {
    610 	struct knote *kn = knx;
    611 	int tticks;
    612 
    613 	mutex_enter(&kqueue_misc_lock);
    614 	kn->kn_data++;
    615 	knote_activate(kn);
    616 	if ((kn->kn_flags & EV_ONESHOT) == 0) {
    617 		tticks = mstohz(kn->kn_sdata);
    618 		if (tticks <= 0)
    619 			tticks = 1;
    620 		callout_schedule((callout_t *)kn->kn_hook, tticks);
    621 	}
    622 	mutex_exit(&kqueue_misc_lock);
    623 }
    624 
    625 /*
    626  * data contains amount of time to sleep, in milliseconds
    627  */
    628 static int
    629 filt_timerattach(struct knote *kn)
    630 {
    631 	callout_t *calloutp;
    632 	struct kqueue *kq;
    633 	int tticks;
    634 
    635 	tticks = mstohz(kn->kn_sdata);
    636 
    637 	/* if the supplied value is under our resolution, use 1 tick */
    638 	if (tticks == 0) {
    639 		if (kn->kn_sdata == 0)
    640 			return EINVAL;
    641 		tticks = 1;
    642 	}
    643 
    644 	if (atomic_inc_uint_nv(&kq_ncallouts) >= kq_calloutmax ||
    645 	    (calloutp = kmem_alloc(sizeof(*calloutp), KM_NOSLEEP)) == NULL) {
    646 		atomic_dec_uint(&kq_ncallouts);
    647 		return ENOMEM;
    648 	}
    649 	callout_init(calloutp, CALLOUT_MPSAFE);
    650 
    651 	kq = kn->kn_kq;
    652 	mutex_spin_enter(&kq->kq_lock);
    653 	kn->kn_flags |= EV_CLEAR;		/* automatically set */
    654 	kn->kn_hook = calloutp;
    655 	mutex_spin_exit(&kq->kq_lock);
    656 
    657 	callout_reset(calloutp, tticks, filt_timerexpire, kn);
    658 
    659 	return (0);
    660 }
    661 
    662 static void
    663 filt_timerdetach(struct knote *kn)
    664 {
    665 	callout_t *calloutp;
    666 
    667 	calloutp = (callout_t *)kn->kn_hook;
    668 	callout_halt(calloutp, NULL);
    669 	callout_destroy(calloutp);
    670 	kmem_free(calloutp, sizeof(*calloutp));
    671 	atomic_dec_uint(&kq_ncallouts);
    672 }
    673 
    674 static int
    675 filt_timer(struct knote *kn, long hint)
    676 {
    677 	int rv;
    678 
    679 	mutex_enter(&kqueue_misc_lock);
    680 	rv = (kn->kn_data != 0);
    681 	mutex_exit(&kqueue_misc_lock);
    682 
    683 	return rv;
    684 }
    685 
    686 /*
    687  * filt_seltrue:
    688  *
    689  *	This filter "event" routine simulates seltrue().
    690  */
    691 int
    692 filt_seltrue(struct knote *kn, long hint)
    693 {
    694 
    695 	/*
    696 	 * We don't know how much data can be read/written,
    697 	 * but we know that it *can* be.  This is about as
    698 	 * good as select/poll does as well.
    699 	 */
    700 	kn->kn_data = 0;
    701 	return (1);
    702 }
    703 
    704 /*
     705  * This provides a full kqfilter entry for device switch tables, with
     706  * the same effect as a filter that uses filt_seltrue() as its event method.
    707  */
    708 static void
    709 filt_seltruedetach(struct knote *kn)
    710 {
    711 	/* Nothing to do */
    712 }
    713 
    714 const struct filterops seltrue_filtops =
    715 	{ 1, NULL, filt_seltruedetach, filt_seltrue };
    716 
    717 int
    718 seltrue_kqfilter(dev_t dev, struct knote *kn)
    719 {
    720 	switch (kn->kn_filter) {
    721 	case EVFILT_READ:
    722 	case EVFILT_WRITE:
    723 		kn->kn_fop = &seltrue_filtops;
    724 		break;
    725 	default:
    726 		return (EINVAL);
    727 	}
    728 
    729 	/* Nothing more to do */
    730 	return (0);
    731 }
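
/*
 * Illustrative sketch (not part of the original source): a character
 * device that is always ready for I/O can point the d_kqfilter member
 * of its cdevsw at seltrue_kqfilter() above.  The example_cdevsw name
 * and the example* methods are hypothetical placeholders.
 *
 *	const struct cdevsw example_cdevsw = {
 *		.d_open = exampleopen,
 *		.d_close = exampleclose,
 *		.d_read = exampleread,
 *		.d_write = examplewrite,
 *		.d_ioctl = exampleioctl,
 *		.d_poll = examplepoll,
 *		.d_kqfilter = seltrue_kqfilter,
 *		.d_flag = D_OTHER
 *	};
 */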
    732 
    733 /*
    734  * kqueue(2) system call.
    735  */
    736 static int
    737 kqueue1(struct lwp *l, int flags, register_t *retval)
    738 {
    739 	struct kqueue *kq;
    740 	file_t *fp;
    741 	int fd, error;
    742 
    743 	if ((error = fd_allocfile(&fp, &fd)) != 0)
    744 		return error;
    745 	fp->f_flag = FREAD | FWRITE | (flags & (FNONBLOCK|FNOSIGPIPE));
    746 	fp->f_type = DTYPE_KQUEUE;
    747 	fp->f_ops = &kqueueops;
    748 	kq = kmem_zalloc(sizeof(*kq), KM_SLEEP);
    749 	mutex_init(&kq->kq_lock, MUTEX_DEFAULT, IPL_SCHED);
    750 	cv_init(&kq->kq_cv, "kqueue");
    751 	selinit(&kq->kq_sel);
    752 	TAILQ_INIT(&kq->kq_head);
    753 	fp->f_data = kq;
    754 	*retval = fd;
    755 	kq->kq_fdp = curlwp->l_fd;
    756 	fd_set_exclose(l, fd, (flags & O_CLOEXEC) != 0);
    757 	fd_affix(curproc, fp, fd);
    758 	return error;
    759 }
    760 
    761 /*
    762  * kqueue(2) system call.
    763  */
    764 int
    765 sys_kqueue(struct lwp *l, const void *v, register_t *retval)
    766 {
    767 	return kqueue1(l, 0, retval);
    768 }
    769 
    770 int
    771 sys_kqueue1(struct lwp *l, const struct sys_kqueue1_args *uap,
    772     register_t *retval)
    773 {
    774 	/* {
    775 		syscallarg(int) flags;
    776 	} */
    777 	return kqueue1(l, SCARG(uap, flags), retval);
    778 }
    779 
    780 /*
    781  * kevent(2) system call.
    782  */
    783 int
    784 kevent_fetch_changes(void *ctx, const struct kevent *changelist,
    785     struct kevent *changes, size_t index, int n)
    786 {
    787 
    788 	return copyin(changelist + index, changes, n * sizeof(*changes));
    789 }
    790 
    791 int
    792 kevent_put_events(void *ctx, struct kevent *events,
    793     struct kevent *eventlist, size_t index, int n)
    794 {
    795 
    796 	return copyout(events, eventlist + index, n * sizeof(*events));
    797 }
    798 
    799 static const struct kevent_ops kevent_native_ops = {
    800 	.keo_private = NULL,
    801 	.keo_fetch_timeout = copyin,
    802 	.keo_fetch_changes = kevent_fetch_changes,
    803 	.keo_put_events = kevent_put_events,
    804 };
    805 
    806 int
    807 sys___kevent50(struct lwp *l, const struct sys___kevent50_args *uap,
    808     register_t *retval)
    809 {
    810 	/* {
    811 		syscallarg(int) fd;
    812 		syscallarg(const struct kevent *) changelist;
    813 		syscallarg(size_t) nchanges;
    814 		syscallarg(struct kevent *) eventlist;
    815 		syscallarg(size_t) nevents;
    816 		syscallarg(const struct timespec *) timeout;
    817 	} */
    818 
    819 	return kevent1(retval, SCARG(uap, fd), SCARG(uap, changelist),
    820 	    SCARG(uap, nchanges), SCARG(uap, eventlist), SCARG(uap, nevents),
    821 	    SCARG(uap, timeout), &kevent_native_ops);
    822 }
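
/*
 * Illustrative userland sketch (not part of the original source) of the
 * kqueue(2)/kevent(2) interface implemented above: create a queue, add
 * an EVFILT_READ filter for stdin, then block until it becomes readable.
 * Passing a zero-valued timespec instead of NULL would poll without
 * blocking, per the kqueue_scan() timeout handling below.
 *
 *	#include <sys/event.h>
 *	#include <err.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	struct kevent chg, ev;
 *	int kq, n;
 *
 *	if ((kq = kqueue()) == -1)
 *		err(1, "kqueue");
 *	EV_SET(&chg, STDIN_FILENO, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	n = kevent(kq, &chg, 1, &ev, 1, NULL);
 *	if (n == -1)
 *		err(1, "kevent");
 *	if (n > 0)
 *		printf("%lld bytes readable\n", (long long)ev.data);
 */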
    823 
    824 int
    825 kevent1(register_t *retval, int fd,
    826 	const struct kevent *changelist, size_t nchanges,
    827 	struct kevent *eventlist, size_t nevents,
    828 	const struct timespec *timeout,
    829 	const struct kevent_ops *keops)
    830 {
    831 	struct kevent *kevp;
    832 	struct kqueue *kq;
    833 	struct timespec	ts;
    834 	size_t i, n, ichange;
    835 	int nerrors, error;
    836 	struct kevent kevbuf[KQ_NEVENTS];	/* approx 300 bytes on 64-bit */
    837 	file_t *fp;
    838 
    839 	/* check that we're dealing with a kq */
    840 	fp = fd_getfile(fd);
    841 	if (fp == NULL)
    842 		return (EBADF);
    843 
    844 	if (fp->f_type != DTYPE_KQUEUE) {
    845 		fd_putfile(fd);
    846 		return (EBADF);
    847 	}
    848 
    849 	if (timeout != NULL) {
    850 		error = (*keops->keo_fetch_timeout)(timeout, &ts, sizeof(ts));
    851 		if (error)
    852 			goto done;
    853 		timeout = &ts;
    854 	}
    855 
    856 	kq = fp->f_data;
    857 	nerrors = 0;
    858 	ichange = 0;
    859 
    860 	/* traverse list of events to register */
    861 	while (nchanges > 0) {
    862 		n = MIN(nchanges, __arraycount(kevbuf));
    863 		error = (*keops->keo_fetch_changes)(keops->keo_private,
    864 		    changelist, kevbuf, ichange, n);
    865 		if (error)
    866 			goto done;
    867 		for (i = 0; i < n; i++) {
    868 			kevp = &kevbuf[i];
    869 			kevp->flags &= ~EV_SYSFLAGS;
    870 			/* register each knote */
    871 			error = kqueue_register(kq, kevp);
    872 			if (!error && !(kevp->flags & EV_RECEIPT))
    873 				continue;
    874 			if (nevents == 0)
    875 				goto done;
    876 			kevp->flags = EV_ERROR;
    877 			kevp->data = error;
    878 			error = (*keops->keo_put_events)
    879 				(keops->keo_private, kevp,
    880 				 eventlist, nerrors, 1);
    881 			if (error)
    882 				goto done;
    883 			nevents--;
    884 			nerrors++;
    885 		}
    886 		nchanges -= n;	/* update the results */
    887 		ichange += n;
    888 	}
    889 	if (nerrors) {
    890 		*retval = nerrors;
    891 		error = 0;
    892 		goto done;
    893 	}
    894 
    895 	/* actually scan through the events */
    896 	error = kqueue_scan(fp, nevents, eventlist, timeout, retval, keops,
    897 	    kevbuf, __arraycount(kevbuf));
    898  done:
    899 	fd_putfile(fd);
    900 	return (error);
    901 }
    902 
    903 /*
    904  * Register a given kevent kev onto the kqueue
    905  */
    906 static int
    907 kqueue_register(struct kqueue *kq, struct kevent *kev)
    908 {
    909 	struct kfilter *kfilter;
    910 	filedesc_t *fdp;
    911 	file_t *fp;
    912 	fdfile_t *ff;
    913 	struct knote *kn, *newkn;
    914 	struct klist *list;
    915 	int error, fd, rv;
    916 
    917 	fdp = kq->kq_fdp;
    918 	fp = NULL;
    919 	kn = NULL;
    920 	error = 0;
    921 	fd = 0;
    922 
    923 	newkn = kmem_zalloc(sizeof(*newkn), KM_SLEEP);
    924 
    925 	rw_enter(&kqueue_filter_lock, RW_READER);
    926 	kfilter = kfilter_byfilter(kev->filter);
    927 	if (kfilter == NULL || kfilter->filtops == NULL) {
     928 		/* filter not found or not implemented */
    929 		rw_exit(&kqueue_filter_lock);
    930 		kmem_free(newkn, sizeof(*newkn));
    931 		return (EINVAL);
    932 	}
    933 
    934 	/* search if knote already exists */
    935 	if (kfilter->filtops->f_isfd) {
    936 		/* monitoring a file descriptor */
    937 		/* validate descriptor */
    938 		if (kev->ident > INT_MAX
    939 		    || (fp = fd_getfile(fd = kev->ident)) == NULL) {
    940 			rw_exit(&kqueue_filter_lock);
    941 			kmem_free(newkn, sizeof(*newkn));
    942 			return EBADF;
    943 		}
    944 		mutex_enter(&fdp->fd_lock);
    945 		ff = fdp->fd_dt->dt_ff[fd];
    946 		if (fd <= fdp->fd_lastkqfile) {
    947 			SLIST_FOREACH(kn, &ff->ff_knlist, kn_link) {
    948 				if (kq == kn->kn_kq &&
    949 				    kev->filter == kn->kn_filter)
    950 					break;
    951 			}
    952 		}
    953 	} else {
    954 		/*
    955 		 * not monitoring a file descriptor, so
    956 		 * lookup knotes in internal hash table
    957 		 */
    958 		mutex_enter(&fdp->fd_lock);
    959 		if (fdp->fd_knhashmask != 0) {
    960 			list = &fdp->fd_knhash[
    961 			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
    962 			SLIST_FOREACH(kn, list, kn_link) {
    963 				if (kev->ident == kn->kn_id &&
    964 				    kq == kn->kn_kq &&
    965 				    kev->filter == kn->kn_filter)
    966 					break;
    967 			}
    968 		}
    969 	}
    970 
    971 	/*
    972 	 * kn now contains the matching knote, or NULL if no match
    973 	 */
    974 	if (kev->flags & EV_ADD) {
    975 		if (kn == NULL) {
    976 			/* create new knote */
    977 			kn = newkn;
    978 			newkn = NULL;
    979 			kn->kn_obj = fp;
    980 			kn->kn_id = kev->ident;
    981 			kn->kn_kq = kq;
    982 			kn->kn_fop = kfilter->filtops;
    983 			kn->kn_kfilter = kfilter;
    984 			kn->kn_sfflags = kev->fflags;
    985 			kn->kn_sdata = kev->data;
    986 			kev->fflags = 0;
    987 			kev->data = 0;
    988 			kn->kn_kevent = *kev;
    989 
    990 			KASSERT(kn->kn_fop != NULL);
    991 			/*
    992 			 * apply reference count to knote structure, and
    993 			 * do not release it at the end of this routine.
    994 			 */
    995 			fp = NULL;
    996 
    997 			if (!kn->kn_fop->f_isfd) {
    998 				/*
    999 				 * If knote is not on an fd, store on
   1000 				 * internal hash table.
   1001 				 */
   1002 				if (fdp->fd_knhashmask == 0) {
   1003 					/* XXXAD can block with fd_lock held */
   1004 					fdp->fd_knhash = hashinit(KN_HASHSIZE,
   1005 					    HASH_LIST, true,
   1006 					    &fdp->fd_knhashmask);
   1007 				}
   1008 				list = &fdp->fd_knhash[KN_HASH(kn->kn_id,
   1009 				    fdp->fd_knhashmask)];
   1010 			} else {
   1011 				/* Otherwise, knote is on an fd. */
   1012 				list = (struct klist *)
   1013 				    &fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist;
   1014 				if ((int)kn->kn_id > fdp->fd_lastkqfile)
   1015 					fdp->fd_lastkqfile = kn->kn_id;
   1016 			}
   1017 			SLIST_INSERT_HEAD(list, kn, kn_link);
   1018 
   1019 			KERNEL_LOCK(1, NULL);		/* XXXSMP */
   1020 			error = (*kfilter->filtops->f_attach)(kn);
   1021 			KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
   1022 			if (error != 0) {
   1023 #ifdef DIAGNOSTIC
   1024 
   1025 				printf("%s: event type %d not supported for "
   1026 				    "file type %d (error %d)\n", __func__,
   1027 				    kn->kn_filter, kn->kn_obj ?
   1028 				    ((file_t *)kn->kn_obj)->f_type : -1, error);
   1029 #endif
   1030 				/* knote_detach() drops fdp->fd_lock */
   1031 				knote_detach(kn, fdp, false);
   1032 				goto done;
   1033 			}
   1034 			atomic_inc_uint(&kfilter->refcnt);
   1035 		} else {
   1036 			/*
   1037 			 * The user may change some filter values after the
   1038 			 * initial EV_ADD, but doing so will not reset any
    1039 			 * filters which have already been triggered.
   1040 			 */
   1041 			kn->kn_sfflags = kev->fflags;
   1042 			kn->kn_sdata = kev->data;
   1043 			kn->kn_kevent.udata = kev->udata;
   1044 		}
   1045 		/*
   1046 		 * We can get here if we are trying to attach
   1047 		 * an event to a file descriptor that does not
   1048 		 * support events, and the attach routine is
   1049 		 * broken and does not return an error.
   1050 		 */
   1051 		KASSERT(kn->kn_fop != NULL);
   1052 		KASSERT(kn->kn_fop->f_event != NULL);
   1053 		KERNEL_LOCK(1, NULL);			/* XXXSMP */
   1054 		rv = (*kn->kn_fop->f_event)(kn, 0);
   1055 		KERNEL_UNLOCK_ONE(NULL);		/* XXXSMP */
   1056 		if (rv)
   1057 			knote_activate(kn);
   1058 	} else {
   1059 		if (kn == NULL) {
   1060 			error = ENOENT;
    1061 			mutex_exit(&fdp->fd_lock);
   1062 			goto done;
   1063 		}
   1064 		if (kev->flags & EV_DELETE) {
   1065 			/* knote_detach() drops fdp->fd_lock */
   1066 			knote_detach(kn, fdp, true);
   1067 			goto done;
   1068 		}
   1069 	}
   1070 
   1071 	/* disable knote */
   1072 	if ((kev->flags & EV_DISABLE)) {
   1073 		mutex_spin_enter(&kq->kq_lock);
   1074 		if ((kn->kn_status & KN_DISABLED) == 0)
   1075 			kn->kn_status |= KN_DISABLED;
   1076 		mutex_spin_exit(&kq->kq_lock);
   1077 	}
   1078 
   1079 	/* enable knote */
   1080 	if ((kev->flags & EV_ENABLE)) {
   1081 		knote_enqueue(kn);
   1082 	}
   1083 	mutex_exit(&fdp->fd_lock);
   1084  done:
   1085 	rw_exit(&kqueue_filter_lock);
   1086 	if (newkn != NULL)
   1087 		kmem_free(newkn, sizeof(*newkn));
   1088 	if (fp != NULL)
   1089 		fd_putfile(fd);
   1090 	return (error);
   1091 }
   1092 
   1093 #if defined(DEBUG)
   1094 static void
   1095 kq_check(struct kqueue *kq)
   1096 {
   1097 	const struct knote *kn;
   1098 	int count;
   1099 	int nmarker;
   1100 
   1101 	KASSERT(mutex_owned(&kq->kq_lock));
   1102 	KASSERT(kq->kq_count >= 0);
   1103 
   1104 	count = 0;
   1105 	nmarker = 0;
   1106 	TAILQ_FOREACH(kn, &kq->kq_head, kn_tqe) {
   1107 		if ((kn->kn_status & (KN_MARKER | KN_QUEUED)) == 0) {
   1108 			panic("%s: kq=%p kn=%p inconsist 1", __func__, kq, kn);
   1109 		}
   1110 		if ((kn->kn_status & KN_MARKER) == 0) {
   1111 			if (kn->kn_kq != kq) {
   1112 				panic("%s: kq=%p kn=%p inconsist 2",
   1113 				    __func__, kq, kn);
   1114 			}
   1115 			if ((kn->kn_status & KN_ACTIVE) == 0) {
   1116 				panic("%s: kq=%p kn=%p: not active",
   1117 				    __func__, kq, kn);
   1118 			}
   1119 			count++;
   1120 			if (count > kq->kq_count) {
   1121 				goto bad;
   1122 			}
   1123 		} else {
   1124 			nmarker++;
   1125 #if 0
   1126 			if (nmarker > 10000) {
   1127 				panic("%s: kq=%p too many markers: %d != %d, "
   1128 				    "nmarker=%d",
   1129 				    __func__, kq, kq->kq_count, count, nmarker);
   1130 			}
   1131 #endif
   1132 		}
   1133 	}
   1134 	if (kq->kq_count != count) {
   1135 bad:
   1136 		panic("%s: kq=%p inconsist 3: %d != %d, nmarker=%d",
   1137 		    __func__, kq, kq->kq_count, count, nmarker);
   1138 	}
   1139 }
   1140 #else /* defined(DEBUG) */
   1141 #define	kq_check(a)	/* nothing */
   1142 #endif /* defined(DEBUG) */
   1143 
   1144 /*
   1145  * Scan through the list of events on fp (for a maximum of maxevents),
    1146  * returning the results to ulistp. The timeout is determined by tsp: if
    1147  * NULL, wait indefinitely; if zero-valued, perform a poll; otherwise wait
    1148  * as appropriate.
   1149  */
   1150 static int
   1151 kqueue_scan(file_t *fp, size_t maxevents, struct kevent *ulistp,
   1152 	    const struct timespec *tsp, register_t *retval,
   1153 	    const struct kevent_ops *keops, struct kevent *kevbuf,
   1154 	    size_t kevcnt)
   1155 {
   1156 	struct kqueue	*kq;
   1157 	struct kevent	*kevp;
   1158 	struct timespec	ats, sleepts;
   1159 	struct knote	*kn, *marker, morker;
   1160 	size_t		count, nkev, nevents;
   1161 	int		timeout, error, rv;
   1162 	filedesc_t	*fdp;
   1163 
   1164 	fdp = curlwp->l_fd;
   1165 	kq = fp->f_data;
   1166 	count = maxevents;
   1167 	nkev = nevents = error = 0;
   1168 	if (count == 0) {
   1169 		*retval = 0;
   1170 		return 0;
   1171 	}
   1172 
   1173 	if (tsp) {				/* timeout supplied */
   1174 		ats = *tsp;
   1175 		if (inittimeleft(&ats, &sleepts) == -1) {
   1176 			*retval = maxevents;
   1177 			return EINVAL;
   1178 		}
   1179 		timeout = tstohz(&ats);
   1180 		if (timeout <= 0)
   1181 			timeout = -1;           /* do poll */
   1182 	} else {
   1183 		/* no timeout, wait forever */
   1184 		timeout = 0;
   1185 	}
   1186 
   1187 	memset(&morker, 0, sizeof(morker));
   1188 	marker = &morker;
   1189 	marker->kn_status = KN_MARKER;
   1190 	mutex_spin_enter(&kq->kq_lock);
   1191  retry:
   1192 	kevp = kevbuf;
   1193 	if (kq->kq_count == 0) {
   1194 		if (timeout >= 0) {
   1195 			error = cv_timedwait_sig(&kq->kq_cv,
   1196 			    &kq->kq_lock, timeout);
   1197 			if (error == 0) {
   1198 				 if (tsp == NULL || (timeout =
   1199 				     gettimeleft(&ats, &sleepts)) > 0)
   1200 					goto retry;
   1201 			} else {
   1202 				/* don't restart after signals... */
   1203 				if (error == ERESTART)
   1204 					error = EINTR;
   1205 				if (error == EWOULDBLOCK)
   1206 					error = 0;
   1207 			}
   1208 		}
   1209 		mutex_spin_exit(&kq->kq_lock);
   1210 	} else {
   1211 		/* mark end of knote list */
   1212 		TAILQ_INSERT_TAIL(&kq->kq_head, marker, kn_tqe);
   1213 
   1214 		/*
   1215 		 * Acquire the fdp->fd_lock interlock to avoid races with
   1216 		 * file creation/destruction from other threads.
   1217 		 */
   1218 		mutex_spin_exit(&kq->kq_lock);
   1219 		mutex_enter(&fdp->fd_lock);
   1220 		mutex_spin_enter(&kq->kq_lock);
   1221 
   1222 		while (count != 0) {
   1223 			kn = TAILQ_FIRST(&kq->kq_head);	/* get next knote */
   1224 			while ((kn->kn_status & KN_MARKER) != 0) {
   1225 				if (kn == marker) {
   1226 					/* it's our marker, stop */
   1227 					TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
   1228 					if (count < maxevents || (tsp != NULL &&
   1229 					    (timeout = gettimeleft(&ats,
   1230 					    &sleepts)) <= 0))
   1231 						goto done;
   1232 					mutex_exit(&fdp->fd_lock);
   1233 					goto retry;
   1234 				}
   1235 				/* someone else's marker. */
   1236 				kn = TAILQ_NEXT(kn, kn_tqe);
   1237 			}
   1238 			kq_check(kq);
   1239 			kq->kq_count--;
   1240 			TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
   1241 			kn->kn_status &= ~KN_QUEUED;
   1242 			kn->kn_status |= KN_BUSY;
   1243 			kq_check(kq);
   1244 			if (kn->kn_status & KN_DISABLED) {
   1245 				kn->kn_status &= ~KN_BUSY;
   1246 				/* don't want disabled events */
   1247 				continue;
   1248 			}
   1249 			if ((kn->kn_flags & EV_ONESHOT) == 0) {
   1250 				mutex_spin_exit(&kq->kq_lock);
   1251 				KASSERT(kn->kn_fop != NULL);
   1252 				KASSERT(kn->kn_fop->f_event != NULL);
   1253 				KERNEL_LOCK(1, NULL);		/* XXXSMP */
   1254 				KASSERT(mutex_owned(&fdp->fd_lock));
   1255 				rv = (*kn->kn_fop->f_event)(kn, 0);
   1256 				KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
   1257 				mutex_spin_enter(&kq->kq_lock);
   1258 				/* Re-poll if note was re-enqueued. */
   1259 				if ((kn->kn_status & KN_QUEUED) != 0) {
   1260 					kn->kn_status &= ~KN_BUSY;
   1261 					continue;
   1262 				}
   1263 				if (rv == 0) {
   1264 					/*
   1265 					 * non-ONESHOT event that hasn't
   1266 					 * triggered again, so de-queue.
   1267 					 */
   1268 					kn->kn_status &= ~(KN_ACTIVE|KN_BUSY);
   1269 					continue;
   1270 				}
   1271 			}
   1272 			/* XXXAD should be got from f_event if !oneshot. */
   1273 			*kevp++ = kn->kn_kevent;
   1274 			nkev++;
   1275 			if (kn->kn_flags & EV_ONESHOT) {
   1276 				/* delete ONESHOT events after retrieval */
   1277 				kn->kn_status &= ~KN_BUSY;
   1278 				mutex_spin_exit(&kq->kq_lock);
   1279 				knote_detach(kn, fdp, true);
   1280 				mutex_enter(&fdp->fd_lock);
   1281 				mutex_spin_enter(&kq->kq_lock);
   1282 			} else if (kn->kn_flags & EV_CLEAR) {
   1283 				/* clear state after retrieval */
   1284 				kn->kn_data = 0;
   1285 				kn->kn_fflags = 0;
   1286 				kn->kn_status &= ~(KN_QUEUED|KN_ACTIVE|KN_BUSY);
   1287 			} else if (kn->kn_flags & EV_DISPATCH) {
   1288 				kn->kn_status |= KN_DISABLED;
   1289 				kn->kn_status &= ~(KN_QUEUED|KN_ACTIVE|KN_BUSY);
   1290 			} else {
   1291 				/* add event back on list */
   1292 				kq_check(kq);
   1293 				kn->kn_status |= KN_QUEUED;
   1294 				kn->kn_status &= ~KN_BUSY;
   1295 				TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
   1296 				kq->kq_count++;
   1297 				kq_check(kq);
   1298 			}
   1299 			if (nkev == kevcnt) {
   1300 				/* do copyouts in kevcnt chunks */
   1301 				mutex_spin_exit(&kq->kq_lock);
   1302 				mutex_exit(&fdp->fd_lock);
   1303 				error = (*keops->keo_put_events)
   1304 				    (keops->keo_private,
   1305 				    kevbuf, ulistp, nevents, nkev);
   1306 				mutex_enter(&fdp->fd_lock);
   1307 				mutex_spin_enter(&kq->kq_lock);
   1308 				nevents += nkev;
   1309 				nkev = 0;
   1310 				kevp = kevbuf;
   1311 			}
   1312 			count--;
   1313 			if (error != 0 || count == 0) {
   1314 				/* remove marker */
   1315 				TAILQ_REMOVE(&kq->kq_head, marker, kn_tqe);
   1316 				break;
   1317 			}
   1318 		}
   1319  done:
   1320 		mutex_spin_exit(&kq->kq_lock);
   1321 		mutex_exit(&fdp->fd_lock);
   1322 	}
   1323 	if (nkev != 0) {
   1324 		/* copyout remaining events */
   1325 		error = (*keops->keo_put_events)(keops->keo_private,
   1326 		    kevbuf, ulistp, nevents, nkev);
   1327 	}
   1328 	*retval = maxevents - count;
   1329 
   1330 	return error;
   1331 }
   1332 
   1333 /*
   1334  * fileops ioctl method for a kqueue descriptor.
   1335  *
   1336  * Two ioctls are currently supported. They both use struct kfilter_mapping:
    1337  *	KFILTER_BYFILTER	find name for filter, and return result in
    1338  *				name, which is of size len.
    1339  *	KFILTER_BYNAME		find filter for name. len is ignored.
   1340  */
   1341 /*ARGSUSED*/
   1342 static int
   1343 kqueue_ioctl(file_t *fp, u_long com, void *data)
   1344 {
   1345 	struct kfilter_mapping	*km;
   1346 	const struct kfilter	*kfilter;
   1347 	char			*name;
   1348 	int			error;
   1349 
   1350 	km = data;
   1351 	error = 0;
   1352 	name = kmem_alloc(KFILTER_MAXNAME, KM_SLEEP);
   1353 
   1354 	switch (com) {
   1355 	case KFILTER_BYFILTER:	/* convert filter -> name */
   1356 		rw_enter(&kqueue_filter_lock, RW_READER);
   1357 		kfilter = kfilter_byfilter(km->filter);
   1358 		if (kfilter != NULL) {
   1359 			strlcpy(name, kfilter->name, KFILTER_MAXNAME);
   1360 			rw_exit(&kqueue_filter_lock);
   1361 			error = copyoutstr(name, km->name, km->len, NULL);
   1362 		} else {
   1363 			rw_exit(&kqueue_filter_lock);
   1364 			error = ENOENT;
   1365 		}
   1366 		break;
   1367 
   1368 	case KFILTER_BYNAME:	/* convert name -> filter */
   1369 		error = copyinstr(km->name, name, KFILTER_MAXNAME, NULL);
   1370 		if (error) {
   1371 			break;
   1372 		}
   1373 		rw_enter(&kqueue_filter_lock, RW_READER);
   1374 		kfilter = kfilter_byname(name);
   1375 		if (kfilter != NULL)
   1376 			km->filter = kfilter->filter;
   1377 		else
   1378 			error = ENOENT;
   1379 		rw_exit(&kqueue_filter_lock);
   1380 		break;
   1381 
   1382 	default:
   1383 		error = ENOTTY;
   1384 		break;
   1385 
   1386 	}
   1387 	kmem_free(name, KFILTER_MAXNAME);
   1388 	return (error);
   1389 }
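
/*
 * Illustrative userland sketch (not part of the original source) of the
 * KFILTER_BYFILTER ioctl handled above, assuming the struct
 * kfilter_mapping layout from <sys/event.h> (name, len, filter): map a
 * filter id back to its registered name via an open kqueue descriptor.
 *
 *	struct kfilter_mapping km;
 *	char name[KFILTER_MAXNAME];
 *
 *	km.name = name;
 *	km.len = sizeof(name);
 *	km.filter = EVFILT_READ;
 *	if (ioctl(kq, KFILTER_BYFILTER, &km) == -1)
 *		err(1, "KFILTER_BYFILTER");
 *	printf("filter %u is %s\n", km.filter, name);
 */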
   1390 
   1391 /*
   1392  * fileops fcntl method for a kqueue descriptor.
   1393  */
   1394 static int
   1395 kqueue_fcntl(file_t *fp, u_int com, void *data)
   1396 {
   1397 
   1398 	return (ENOTTY);
   1399 }
   1400 
   1401 /*
   1402  * fileops poll method for a kqueue descriptor.
   1403  * Determine if kqueue has events pending.
   1404  */
   1405 static int
   1406 kqueue_poll(file_t *fp, int events)
   1407 {
   1408 	struct kqueue	*kq;
   1409 	int		revents;
   1410 
   1411 	kq = fp->f_data;
   1412 
   1413 	revents = 0;
   1414 	if (events & (POLLIN | POLLRDNORM)) {
   1415 		mutex_spin_enter(&kq->kq_lock);
   1416 		if (kq->kq_count != 0) {
   1417 			revents |= events & (POLLIN | POLLRDNORM);
   1418 		} else {
   1419 			selrecord(curlwp, &kq->kq_sel);
   1420 		}
   1421 		kq_check(kq);
   1422 		mutex_spin_exit(&kq->kq_lock);
   1423 	}
   1424 
   1425 	return revents;
   1426 }
   1427 
   1428 /*
   1429  * fileops stat method for a kqueue descriptor.
    1430  * Returns dummy info, with st_size being the number of events pending.
   1431  */
   1432 static int
   1433 kqueue_stat(file_t *fp, struct stat *st)
   1434 {
   1435 	struct kqueue *kq;
   1436 
   1437 	kq = fp->f_data;
   1438 
   1439 	memset(st, 0, sizeof(*st));
   1440 	st->st_size = kq->kq_count;
   1441 	st->st_blksize = sizeof(struct kevent);
   1442 	st->st_mode = S_IFIFO;
   1443 
   1444 	return 0;
   1445 }
   1446 
   1447 static void
   1448 kqueue_doclose(struct kqueue *kq, struct klist *list, int fd)
   1449 {
   1450 	struct knote *kn;
   1451 	filedesc_t *fdp;
   1452 
   1453 	fdp = kq->kq_fdp;
   1454 
   1455 	KASSERT(mutex_owned(&fdp->fd_lock));
   1456 
   1457 	for (kn = SLIST_FIRST(list); kn != NULL;) {
   1458 		if (kq != kn->kn_kq) {
   1459 			kn = SLIST_NEXT(kn, kn_link);
   1460 			continue;
   1461 		}
   1462 		knote_detach(kn, fdp, true);
   1463 		mutex_enter(&fdp->fd_lock);
   1464 		kn = SLIST_FIRST(list);
   1465 	}
   1466 }
   1467 
   1468 
   1469 /*
   1470  * fileops close method for a kqueue descriptor.
   1471  */
   1472 static int
   1473 kqueue_close(file_t *fp)
   1474 {
   1475 	struct kqueue *kq;
   1476 	filedesc_t *fdp;
   1477 	fdfile_t *ff;
   1478 	int i;
   1479 
   1480 	kq = fp->f_data;
   1481 	fp->f_data = NULL;
   1482 	fp->f_type = 0;
   1483 	fdp = curlwp->l_fd;
   1484 
   1485 	mutex_enter(&fdp->fd_lock);
   1486 	for (i = 0; i <= fdp->fd_lastkqfile; i++) {
   1487 		if ((ff = fdp->fd_dt->dt_ff[i]) == NULL)
   1488 			continue;
   1489 		kqueue_doclose(kq, (struct klist *)&ff->ff_knlist, i);
   1490 	}
   1491 	if (fdp->fd_knhashmask != 0) {
   1492 		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
   1493 			kqueue_doclose(kq, &fdp->fd_knhash[i], -1);
   1494 		}
   1495 	}
   1496 	mutex_exit(&fdp->fd_lock);
   1497 
   1498 	KASSERT(kq->kq_count == 0);
   1499 	mutex_destroy(&kq->kq_lock);
   1500 	cv_destroy(&kq->kq_cv);
   1501 	seldestroy(&kq->kq_sel);
   1502 	kmem_free(kq, sizeof(*kq));
   1503 
   1504 	return (0);
   1505 }
   1506 
   1507 /*
   1508  * struct fileops kqfilter method for a kqueue descriptor.
    1509  * The event triggers when the monitored kqueue has pending events.
   1510  */
   1511 static int
   1512 kqueue_kqfilter(file_t *fp, struct knote *kn)
   1513 {
   1514 	struct kqueue *kq;
   1515 
   1516 	kq = ((file_t *)kn->kn_obj)->f_data;
   1517 
   1518 	KASSERT(fp == kn->kn_obj);
   1519 
   1520 	if (kn->kn_filter != EVFILT_READ)
   1521 		return 1;
   1522 
   1523 	kn->kn_fop = &kqread_filtops;
   1524 	mutex_enter(&kq->kq_lock);
   1525 	SLIST_INSERT_HEAD(&kq->kq_sel.sel_klist, kn, kn_selnext);
   1526 	mutex_exit(&kq->kq_lock);
   1527 
   1528 	return 0;
   1529 }
   1530 
   1531 
   1532 /*
   1533  * Walk down a list of knotes, activating them if their event has
   1534  * triggered.  The caller's object lock (e.g. device driver lock)
   1535  * must be held.
   1536  */
   1537 void
   1538 knote(struct klist *list, long hint)
   1539 {
   1540 	struct knote *kn, *tmpkn;
   1541 
   1542 	SLIST_FOREACH_SAFE(kn, list, kn_selnext, tmpkn) {
   1543 		KASSERT(kn->kn_fop != NULL);
   1544 		KASSERT(kn->kn_fop->f_event != NULL);
   1545 		if ((*kn->kn_fop->f_event)(kn, hint))
   1546 			knote_activate(kn);
   1547 	}
   1548 }
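
/*
 * Illustrative sketch (not part of the original source): a driver's data
 * path ends up in knote() above when new data arrives.  The
 * example_softc and sc_rsel names are hypothetical; in practice drivers
 * usually call selnotify(), which walks the klist embedded in a struct
 * selinfo while the caller holds its object lock, as required here.
 *
 *	struct example_softc *sc = device_private(dev);
 *
 *	mutex_enter(&sc->sc_lock);
 *	... append incoming data to the softc buffer ...
 *	selnotify(&sc->sc_rsel, POLLIN | POLLRDNORM, 0);
 *	mutex_exit(&sc->sc_lock);
 */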
   1549 
   1550 /*
   1551  * Remove all knotes referencing a specified fd
   1552  */
   1553 void
   1554 knote_fdclose(int fd)
   1555 {
   1556 	struct klist *list;
   1557 	struct knote *kn;
   1558 	filedesc_t *fdp;
   1559 
   1560 	fdp = curlwp->l_fd;
   1561 	list = (struct klist *)&fdp->fd_dt->dt_ff[fd]->ff_knlist;
   1562 	mutex_enter(&fdp->fd_lock);
   1563 	while ((kn = SLIST_FIRST(list)) != NULL) {
   1564 		knote_detach(kn, fdp, true);
   1565 		mutex_enter(&fdp->fd_lock);
   1566 	}
   1567 	mutex_exit(&fdp->fd_lock);
   1568 }
   1569 
   1570 /*
    1571  * Drop knote.  Called with fdp->fd_lock held, which will be dropped
    1572  * before returning.
   1573  */
   1574 static void
   1575 knote_detach(struct knote *kn, filedesc_t *fdp, bool dofop)
   1576 {
   1577 	struct klist *list;
   1578 	struct kqueue *kq;
   1579 
   1580 	kq = kn->kn_kq;
   1581 
   1582 	KASSERT((kn->kn_status & KN_MARKER) == 0);
   1583 	KASSERT(mutex_owned(&fdp->fd_lock));
   1584 
   1585 	KASSERT(kn->kn_fop != NULL);
   1586 	/* Remove from monitored object. */
   1587 	if (dofop) {
   1588 		KASSERT(kn->kn_fop->f_detach != NULL);
   1589 		KERNEL_LOCK(1, NULL);		/* XXXSMP */
   1590 		(*kn->kn_fop->f_detach)(kn);
   1591 		KERNEL_UNLOCK_ONE(NULL);	/* XXXSMP */
   1592 	}
   1593 
   1594 	/* Remove from descriptor table. */
   1595 	if (kn->kn_fop->f_isfd)
   1596 		list = (struct klist *)&fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist;
   1597 	else
   1598 		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
   1599 
   1600 	SLIST_REMOVE(list, kn, knote, kn_link);
   1601 
   1602 	/* Remove from kqueue. */
   1603 again:
   1604 	mutex_spin_enter(&kq->kq_lock);
   1605 	if ((kn->kn_status & KN_QUEUED) != 0) {
   1606 		kq_check(kq);
   1607 		kq->kq_count--;
   1608 		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
   1609 		kn->kn_status &= ~KN_QUEUED;
   1610 		kq_check(kq);
   1611 	} else if (kn->kn_status & KN_BUSY) {
   1612 		mutex_spin_exit(&kq->kq_lock);
   1613 		goto again;
   1614 	}
   1615 	mutex_spin_exit(&kq->kq_lock);
   1616 
   1617 	mutex_exit(&fdp->fd_lock);
   1618 	if (kn->kn_fop->f_isfd)
   1619 		fd_putfile(kn->kn_id);
   1620 	atomic_dec_uint(&kn->kn_kfilter->refcnt);
   1621 	kmem_free(kn, sizeof(*kn));
   1622 }
   1623 
   1624 /*
   1625  * Queue new event for knote.
   1626  */
   1627 static void
   1628 knote_enqueue(struct knote *kn)
   1629 {
   1630 	struct kqueue *kq;
   1631 
   1632 	KASSERT((kn->kn_status & KN_MARKER) == 0);
   1633 
   1634 	kq = kn->kn_kq;
   1635 
   1636 	mutex_spin_enter(&kq->kq_lock);
   1637 	if ((kn->kn_status & KN_DISABLED) != 0) {
   1638 		kn->kn_status &= ~KN_DISABLED;
   1639 	}
   1640 	if ((kn->kn_status & (KN_ACTIVE | KN_QUEUED)) == KN_ACTIVE) {
   1641 		kq_check(kq);
   1642 		kn->kn_status |= KN_QUEUED;
   1643 		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
   1644 		kq->kq_count++;
   1645 		kq_check(kq);
   1646 		cv_broadcast(&kq->kq_cv);
   1647 		selnotify(&kq->kq_sel, 0, NOTE_SUBMIT);
   1648 	}
   1649 	mutex_spin_exit(&kq->kq_lock);
   1650 }
   1651 /*
    1652  * Activate a knote: mark it active and queue it, unless disabled or queued.
   1653  */
   1654 static void
   1655 knote_activate(struct knote *kn)
   1656 {
   1657 	struct kqueue *kq;
   1658 
   1659 	KASSERT((kn->kn_status & KN_MARKER) == 0);
   1660 
   1661 	kq = kn->kn_kq;
   1662 
   1663 	mutex_spin_enter(&kq->kq_lock);
   1664 	kn->kn_status |= KN_ACTIVE;
   1665 	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) {
   1666 		kq_check(kq);
   1667 		kn->kn_status |= KN_QUEUED;
   1668 		TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
   1669 		kq->kq_count++;
   1670 		kq_check(kq);
   1671 		cv_broadcast(&kq->kq_cv);
   1672 		selnotify(&kq->kq_sel, 0, NOTE_SUBMIT);
   1673 	}
   1674 	mutex_spin_exit(&kq->kq_lock);
   1675 }
   1676