      1 /*	$NetBSD: kern_event.c,v 1.1.1.1.2.15 2002/09/18 20:48:55 jdolecek Exp $	*/
      2 /*-
      3  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon (at) FreeBSD.org>
      4  * All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     25  * SUCH DAMAGE.
     26  *
     27  * $FreeBSD: src/sys/kern/kern_event.c,v 1.27 2001/07/05 17:10:44 rwatson Exp $
     28  */
     29 
     30 #include <sys/param.h>
     31 #include <sys/systm.h>
     32 #include <sys/kernel.h>
     33 #include <sys/proc.h>
     34 #include <sys/malloc.h>
     35 #include <sys/unistd.h>
     36 #include <sys/file.h>
     37 #include <sys/fcntl.h>
     38 #include <sys/select.h>
     39 #include <sys/queue.h>
     40 #include <sys/event.h>
     41 #include <sys/eventvar.h>
     42 #include <sys/poll.h>
     43 #include <sys/pool.h>
     44 #include <sys/protosw.h>
     45 #include <sys/socket.h>
     46 #include <sys/socketvar.h>
     47 #include <sys/stat.h>
     48 #include <sys/uio.h>
     49 #include <sys/mount.h>
     50 #include <sys/filedesc.h>
     51 #include <sys/syscallargs.h>
     52 
     53 static int	kqueue_scan(struct file *fp, size_t maxevents,
     54 		    struct kevent *ulistp, const struct timespec *timeout,
     55 		    struct proc *p, register_t *retval);
     56 static void	kqueue_wakeup(struct kqueue *kq);
     57 
     58 static int	kqueue_read(struct file *fp, off_t *offset, struct uio *uio,
     59 		    struct ucred *cred, int flags);
     60 static int	kqueue_write(struct file *fp, off_t *offset, struct uio *uio,
     61 		    struct ucred *cred, int flags);
     62 static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
     63 		    struct proc *p);
     64 static int	kqueue_fcntl(struct file *fp, u_int com, caddr_t data,
     65 		    struct proc *p);
     66 static int	kqueue_poll(struct file *fp, int events, struct proc *p);
     67 static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
     68 static int	kqueue_stat(struct file *fp, struct stat *sp, struct proc *p);
     69 static int	kqueue_close(struct file *fp, struct proc *p);
     70 
     71 static struct fileops kqueueops = {
     72 	kqueue_read, kqueue_write, kqueue_ioctl, kqueue_fcntl, kqueue_poll,
     73 	kqueue_stat, kqueue_close, kqueue_kqfilter
     74 };
     75 
     76 static void	knote_attach(struct knote *kn, struct filedesc *fdp);
     77 static void	knote_drop(struct knote *kn, struct proc *p);
     78 static void	knote_enqueue(struct knote *kn);
     79 static void	knote_dequeue(struct knote *kn);
     80 
     81 static void	filt_kqdetach(struct knote *kn);
     82 static int	filt_kqueue(struct knote *kn, long hint);
     83 static int	filt_procattach(struct knote *kn);
     84 static void	filt_procdetach(struct knote *kn);
     85 static int	filt_proc(struct knote *kn, long hint);
     86 static int	filt_fileattach(struct knote *kn);
     87 
     88 static const struct filterops kqread_filtops =
     89 	{ 1, NULL, filt_kqdetach, filt_kqueue };
     90 static const struct filterops proc_filtops =
     91 	{ 0, filt_procattach, filt_procdetach, filt_proc };
     92 static const struct filterops file_filtops =
     93 	{ 1, filt_fileattach, NULL, NULL };
     94 
     95 struct pool	kqueue_pool;
     96 struct pool	knote_pool;
     97 
     98 #define	KNOTE_ACTIVATE(kn)						\
     99 do {									\
    100 	kn->kn_status |= KN_ACTIVE;					\
    101 	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
    102 		knote_enqueue(kn);					\
    103 } while(0)
    104 
    105 #define	KN_HASHSIZE		64		/* XXX should be tunable */
    106 #define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
    107 
    108 extern const struct filterops sig_filtops;
    109 
    110 /*
     111  * Table for all system-defined filters.
    112  * These should be listed in the numeric order of the EVFILT_* defines.
    113  * If filtops is NULL, the filter isn't implemented in NetBSD.
    114  * End of list is when name is NULL.
    115  */
    116 struct kfilter {
    117 	const char	 *name;		/* name of filter */
    118 	uint32_t	  filter;	/* id of filter */
    119 	const struct filterops *filtops;/* operations for filter */
    120 };
    121 
    122 		/* System defined filters */
    123 static const struct kfilter sys_kfilters[] = {
    124 	{ "EVFILT_READ",	EVFILT_READ,	&file_filtops },
    125 	{ "EVFILT_WRITE",	EVFILT_WRITE,	&file_filtops },
    126 	{ "EVFILT_AIO",		EVFILT_AIO,	NULL },
    127 	{ "EVFILT_VNODE",	EVFILT_VNODE,	&file_filtops },
    128 	{ "EVFILT_PROC",	EVFILT_PROC,	&proc_filtops },
    129 	{ "EVFILT_SIGNAL",	EVFILT_SIGNAL,	&sig_filtops },
    130 	{ NULL,			0,		NULL },	/* end of list */
    131 };
    132 
    133 		/* User defined kfilters */
    134 static struct kfilter	*user_kfilters;		/* array */
    135 static int		user_kfilterc;		/* current offset */
    136 static int		user_kfiltermaxc;	/* max size so far */
    137 
    138 /*
    139  * kqueue_init:
    140  *
    141  *	Initialize the kqueue/knote facility.
    142  */
    143 void
    144 kqueue_init(void)
    145 {
    146 
    147 	pool_init(&kqueue_pool, sizeof(struct kqueue), 0, 0, 0, "kqueuepl",
    148 	    NULL);
    149 	pool_init(&knote_pool, sizeof(struct knote), 0, 0, 0, "knotepl",
    150 	    NULL);
    151 }
    152 
    153 /*
    154  * Find kfilter entry by name, or NULL if not found.
    155  */
    156 static const struct kfilter *
    157 kfilter_byname_sys(const char *name)
    158 {
    159 	int i;
    160 
    161 	for (i = 0; sys_kfilters[i].name != NULL; i++) {
    162 		if (strcmp(name, sys_kfilters[i].name) == 0)
    163 			return (&sys_kfilters[i]);
    164 	}
    165 	return (NULL);
    166 }
    167 
    168 static struct kfilter *
    169 kfilter_byname_user(const char *name)
    170 {
    171 	int i;
    172 
    173 	/* user_kfilters[] could be NULL if no filters were registered */
    174 	if (!user_kfilters)
    175 		return (NULL);
    176 
    177 	for (i = 0; user_kfilters[i].name != NULL; i++) {
     178 		if (user_kfilters[i].name[0] != '\0' &&
    179 		    strcmp(name, user_kfilters[i].name) == 0)
    180 			return (&user_kfilters[i]);
    181 	}
    182 	return (NULL);
    183 }
    184 
    185 static const struct kfilter *
    186 kfilter_byname(const char *name)
    187 {
    188 	const struct kfilter *kfilter;
    189 
    190 	if ((kfilter = kfilter_byname_sys(name)) != NULL)
    191 		return (kfilter);
    192 
    193 	return (kfilter_byname_user(name));
    194 }
    195 
    196 /*
    197  * Find kfilter entry by filter id, or NULL if not found.
    198  * Assumes entries are indexed in filter id order, for speed.
    199  */
    200 static const struct kfilter *
    201 kfilter_byfilter(uint32_t filter)
    202 {
    203 	const struct kfilter *kfilter;
    204 
    205 	if (filter < EVFILT_SYSCOUNT)	/* it's a system filter */
    206 		kfilter = &sys_kfilters[filter];
    207 	else if (user_kfilters != NULL &&
    208 	    filter < EVFILT_SYSCOUNT + user_kfilterc)
    209 					/* it's a user filter */
    210 		kfilter = &user_kfilters[filter - EVFILT_SYSCOUNT];
    211 	else
    212 		return (NULL);		/* out of range */
    213 	KASSERT(kfilter->filter == filter);	/* sanity check! */
    214 	return (kfilter);
    215 }
    216 
    217 /*
    218  * Register a new kfilter. Stores the entry in user_kfilters.
    219  * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
    220  * If retfilter != NULL, the new filterid is returned in it.
    221  */
    222 int
    223 kfilter_register(const char *name, const struct filterops *filtops,
    224     int *retfilter)
    225 {
    226 	struct kfilter *kfilter;
    227 	void *space;
    228 	int len;
    229 
    230 	if (name == NULL || name[0] == '\0' || filtops == NULL)
    231 		return (EINVAL);	/* invalid args */
    232 	if (kfilter_byname(name) != NULL)
    233 		return (EEXIST);	/* already exists */
    234 	if (user_kfilterc > 0xffffffff - EVFILT_SYSCOUNT)
    235 		return (EINVAL);	/* too many */
    236 
    237 	/* check if need to grow user_kfilters */
    238 	if (user_kfilterc + 1 > user_kfiltermaxc) {
    239 		/*
    240 		 * Grow in KFILTER_EXTENT chunks. Use malloc(9), because we
    241 		 * want to traverse user_kfilters as an array.
    242 		 */
    243 		user_kfiltermaxc += KFILTER_EXTENT;
     244 		kfilter = malloc(user_kfiltermaxc * sizeof(struct kfilter),
    245 		    M_KEVENT, M_WAITOK);
    246 
    247 		/* copy existing user_kfilters */
    248 		if (user_kfilters != NULL)
    249 			memcpy((caddr_t)kfilter, (caddr_t)user_kfilters,
     250 			    user_kfilterc * sizeof(struct kfilter));
    251 					/* zero new sections */
    252 		memset((caddr_t)kfilter +
     253 		    user_kfilterc * sizeof(struct kfilter), 0,
    254 		    (user_kfiltermaxc - user_kfilterc) *
     255 		    sizeof(struct kfilter));
    256 					/* switch to new kfilter */
    257 		if (user_kfilters != NULL)
    258 			free(user_kfilters, M_KEVENT);
    259 		user_kfilters = kfilter;
    260 	}
    261 	len = strlen(name) + 1;		/* copy name */
    262 	space = malloc(len, M_KEVENT, M_WAITOK);
    263 	memcpy(space, name, len);
    264 	user_kfilters[user_kfilterc].name = space;
    265 
    266 	user_kfilters[user_kfilterc].filter = user_kfilterc + EVFILT_SYSCOUNT;
    267 
    268 	len = sizeof(struct filterops);	/* copy filtops */
    269 	space = malloc(len, M_KEVENT, M_WAITOK);
    270 	memcpy(space, filtops, len);
    271 	user_kfilters[user_kfilterc].filtops = space;
    272 
    273 	if (retfilter != NULL)
    274 		*retfilter = user_kfilters[user_kfilterc].filter;
    275 	user_kfilterc++;		/* finally, increment count */
    276 	return (0);
    277 }
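
/*
 * Example (illustrative sketch only, hypothetical names): a kernel
 * subsystem wanting its own user-registered filter could supply a
 * struct filterops and record the assigned filter id, roughly as:
 *
 *	static const struct filterops mydev_filtops =
 *		{ 0, filt_mydevattach, filt_mydevdetach, filt_mydevevent };
 *	int mydev_filter;
 *
 *	if (kfilter_register("EVFILT_MYDEV", &mydev_filtops,
 *	    &mydev_filter) != 0)
 *		printf("mydev: unable to register kfilter\n");
 */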
    278 
    279 /*
    280  * Unregister a kfilter previously registered with kfilter_register.
    281  * This retains the filter id, but clears the name and frees filtops (filter
    282  * operations), so that the number isn't reused during a boot.
    283  * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
    284  */
    285 int
    286 kfilter_unregister(const char *name)
    287 {
    288 	struct kfilter *kfilter;
    289 
    290 	if (name == NULL || name[0] == '\0')
    291 		return (EINVAL);	/* invalid name */
    292 
    293 	if (kfilter_byname_sys(name) != NULL)
    294 		return (EINVAL);	/* can't detach system filters */
    295 
    296 	kfilter = kfilter_byname_user(name);
    297 	if (kfilter == NULL)		/* not found */
    298 		return (ENOENT);
    299 
    300 	if (kfilter->name[0] != '\0') {
     301 		/* XXX Cast away const (but we know it's safe). */
    302 		free((void *) kfilter->name, M_KEVENT);
    303 		kfilter->name = "";	/* mark as `not implemented' */
    304 	}
    305 	if (kfilter->filtops != NULL) {
     306 		/* XXX Cast away const (but we know it's safe). */
    307 		free((void *) kfilter->filtops, M_KEVENT);
    308 		kfilter->filtops = NULL; /* mark as `not implemented' */
    309 	}
    310 	return (0);
    311 }
    312 
    313 
    314 /*
    315  * Filter attach method for EVFILT_READ and EVFILT_WRITE on normal file
    316  * descriptors. Calls struct fileops kqfilter method for given file descriptor.
    317  */
    318 static int
    319 filt_fileattach(struct knote *kn)
    320 {
    321 	struct file *fp;
    322 
    323 	fp = kn->kn_fp;
    324 	return ((*fp->f_ops->fo_kqfilter)(fp, kn));
    325 }
    326 
    327 /*
    328  * Filter detach method for EVFILT_READ on kqueue descriptor.
    329  */
    330 static void
    331 filt_kqdetach(struct knote *kn)
    332 {
    333 	struct kqueue *kq;
    334 
    335 	kq = (struct kqueue *)kn->kn_fp->f_data;
    336 	SLIST_REMOVE(&kq->kq_sel.si_klist, kn, knote, kn_selnext);
    337 }
    338 
    339 /*
    340  * Filter event method for EVFILT_READ on kqueue descriptor.
    341  */
    342 /*ARGSUSED*/
    343 static int
    344 filt_kqueue(struct knote *kn, long hint)
    345 {
    346 	struct kqueue *kq;
    347 
    348 	kq = (struct kqueue *)kn->kn_fp->f_data;
    349 	kn->kn_data = kq->kq_count;
    350 	return (kn->kn_data > 0);
    351 }
    352 
    353 /*
    354  * Filter attach method for EVFILT_PROC.
    355  */
    356 static int
    357 filt_procattach(struct knote *kn)
    358 {
    359 	struct proc *p;
    360 
    361 	p = pfind(kn->kn_id);
    362 	if (p == NULL)
    363 		return (ESRCH);
    364 
    365 	kn->kn_ptr.p_proc = p;
    366 	kn->kn_flags |= EV_CLEAR;	/* automatically set */
    367 
    368 	/*
    369 	 * internal flag indicating registration done by kernel
    370 	 */
    371 	if (kn->kn_flags & EV_FLAG1) {
    372 		kn->kn_data = kn->kn_sdata;	/* ppid */
    373 		kn->kn_fflags = NOTE_CHILD;
    374 		kn->kn_flags &= ~EV_FLAG1;
    375 	}
    376 
    377 	/* XXXSMP lock the process? */
    378 	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
    379 
    380 	return (0);
    381 }
    382 
    383 /*
    384  * Filter detach method for EVFILT_PROC.
    385  *
    386  * The knote may be attached to a different process, which may exit,
    387  * leaving nothing for the knote to be attached to.  So when the process
    388  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
    389  * it will be deleted when read out.  However, as part of the knote deletion,
    390  * this routine is called, so a check is needed to avoid actually performing
    391  * a detach, because the original process might not exist any more.
    392  */
    393 static void
    394 filt_procdetach(struct knote *kn)
    395 {
    396 	struct proc *p;
    397 
    398 	if (kn->kn_status & KN_DETACHED)
    399 		return;
    400 
    401 	p = kn->kn_ptr.p_proc;
    402 	KASSERT(p->p_stat == SDEAD || pfind(kn->kn_id) == p);
    403 
    404 	/* XXXSMP lock the process? */
    405 	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
    406 }
    407 
    408 /*
    409  * Filter event method for EVFILT_PROC.
    410  */
    411 static int
    412 filt_proc(struct knote *kn, long hint)
    413 {
    414 	u_int event;
    415 
    416 	/*
    417 	 * mask off extra data
    418 	 */
    419 	event = (u_int)hint & NOTE_PCTRLMASK;
    420 
    421 	/*
    422 	 * if the user is interested in this event, record it.
    423 	 */
    424 	if (kn->kn_sfflags & event)
    425 		kn->kn_fflags |= event;
    426 
    427 	/*
    428 	 * process is gone, so flag the event as finished.
    429 	 */
    430 	if (event == NOTE_EXIT) {
    431 		/*
     432 		 * Detach the knote from the watched process and mark
     433 		 * it as detached. We can't leave this to kqueue_scan(),
     434 		 * since the process might not exist by then. And we
     435 		 * have to do this now, since the KNOTE() in psignal() is
     436 		 * also called for zombies, and we might end up reading
     437 		 * freed memory if the kevent had already been picked up
     438 		 * and the knote g/c'ed.
    439 		 */
    440 		kn->kn_fop->f_detach(kn);
    441 		kn->kn_status |= KN_DETACHED;
    442 
     443 		/* Mark as ONESHOT, so that the knote is g/c'ed when read */
    444 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
    445 		return (1);
    446 	}
    447 
    448 	/*
    449 	 * process forked, and user wants to track the new process,
    450 	 * so attach a new knote to it, and immediately report an
    451 	 * event with the parent's pid.
    452 	 */
    453 	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
    454 		struct kevent kev;
    455 		int error;
    456 
    457 		/*
    458 		 * register knote with new process.
    459 		 */
    460 		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
    461 		kev.filter = kn->kn_filter;
    462 		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
    463 		kev.fflags = kn->kn_sfflags;
    464 		kev.data = kn->kn_id;			/* parent */
    465 		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
    466 		error = kqueue_register(kn->kn_kq, &kev, NULL);
    467 		if (error)
    468 			kn->kn_fflags |= NOTE_TRACKERR;
    469 	}
    470 
    471 	return (kn->kn_fflags != 0);
    472 }
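
/*
 * Illustrative userland sketch (not part of this file; `kq' and `pid'
 * are hypothetical): to watch a process and automatically track its
 * children as described above, a caller might register:
 *
 *	struct kevent ev;
 *
 *	EV_SET(&ev, pid, EVFILT_PROC, EV_ADD | EV_ENABLE,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 *
 * Children picked up via NOTE_TRACK report a NOTE_CHILD event with the
 * parent's pid in the data field, as set up in filt_procattach() above.
 */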
    473 
    474 /*
    475  * filt_seltrue:
    476  *
    477  *	This filter "event" routine simulates seltrue().
    478  */
    479 int
    480 filt_seltrue(struct knote *kn, long hint)
    481 {
    482 
    483 	/*
    484 	 * We don't know how much data can be read/written,
    485 	 * but we know that it *can* be.  This is about as
     486 	 * good as select/poll does.
    487 	 */
    488 	kn->kn_data = 0;
    489 	return (1);
    490 }
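
/*
 * Illustrative sketch (hypothetical names): a driver whose descriptor
 * is always ready for writing could point its write filter's event
 * routine directly at filt_seltrue():
 *
 *	static const struct filterops mydevwrite_filtops =
 *		{ 1, NULL, filt_mydevdetach, filt_seltrue };
 */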
    491 
    492 /*
    493  * kqueue(2) system call.
    494  */
    495 int
    496 sys_kqueue(struct proc *p, void *v, register_t *retval)
    497 {
    498 	struct filedesc	*fdp;
    499 	struct kqueue	*kq;
    500 	struct file	*fp;
    501 	int		fd, error;
    502 
    503 	fdp = p->p_fd;
    504 	error = falloc(p, &fp, &fd);	/* setup a new file descriptor */
    505 	if (error)
    506 		return (error);
    507 	fp->f_flag = FREAD | FWRITE;
    508 	fp->f_type = DTYPE_KQUEUE;
    509 	fp->f_ops = &kqueueops;
    510 	kq = pool_get(&kqueue_pool, PR_WAITOK);
    511 	memset((char *)kq, 0, sizeof(struct kqueue));
    512 	TAILQ_INIT(&kq->kq_head);
    513 	fp->f_data = (caddr_t)kq;	/* store the kqueue with the fp */
    514 	*retval = fd;
    515 	if (fdp->fd_knlistsize < 0)
    516 		fdp->fd_knlistsize = 0;	/* this process has a kq */
    517 	kq->kq_fdp = fdp;
    518 	FILE_SET_MATURE(fp);
    519 	FILE_UNUSE(fp, p);		/* falloc() does FILE_USE() */
    520 	return (error);
    521 }
    522 
    523 /*
    524  * kevent(2) system call.
    525  */
    526 int
    527 sys_kevent(struct proc *p, void *v, register_t *retval)
    528 {
    529 	struct sys_kevent_args /* {
    530 		syscallarg(int) fd;
    531 		syscallarg(const struct kevent *) changelist;
    532 		syscallarg(size_t) nchanges;
    533 		syscallarg(struct kevent *) eventlist;
    534 		syscallarg(size_t) nevents;
    535 		syscallarg(const struct timespec *) timeout;
    536 	} */ *uap = v;
    537 	struct kevent	*kevp;
    538 	struct kqueue	*kq;
    539 	struct file	*fp;
    540 	struct timespec	ts;
    541 	size_t		i, n;
    542 	int		nerrors, error;
    543 
    544 	/* check that we're dealing with a kq */
    545 	fp = fd_getfile(p->p_fd, SCARG(uap, fd));
    546 	if (!fp || fp->f_type != DTYPE_KQUEUE)
    547 		return (EBADF);
    548 
    549 	FILE_USE(fp);
    550 
    551 	if (SCARG(uap, timeout) != NULL) {
    552 		error = copyin(SCARG(uap, timeout), &ts, sizeof(ts));
    553 		if (error)
    554 			goto done;
    555 		SCARG(uap, timeout) = &ts;
    556 	}
    557 
    558 	kq = (struct kqueue *)fp->f_data;
    559 	nerrors = 0;
    560 
    561 	/* traverse list of events to register */
    562 	while (SCARG(uap, nchanges) > 0) {
     563 		/* copyin a maximum of KQ_NEVENTS at each pass */
    564 		n = MIN(SCARG(uap, nchanges), KQ_NEVENTS);
    565 		error = copyin(SCARG(uap, changelist), kq->kq_kev,
    566 		    n * sizeof(struct kevent));
    567 		if (error)
    568 			goto done;
    569 		for (i = 0; i < n; i++) {
    570 			kevp = &kq->kq_kev[i];
    571 			kevp->flags &= ~EV_SYSFLAGS;
    572 			/* register each knote */
    573 			error = kqueue_register(kq, kevp, p);
    574 			if (error) {
    575 				if (SCARG(uap, nevents) != 0) {
    576 					kevp->flags = EV_ERROR;
    577 					kevp->data = error;
    578 					error = copyout((caddr_t)kevp,
    579 					    (caddr_t)SCARG(uap, eventlist),
    580 					    sizeof(*kevp));
    581 					if (error)
    582 						goto done;
    583 					SCARG(uap, eventlist)++;
    584 					SCARG(uap, nevents)--;
    585 					nerrors++;
    586 				} else {
    587 					goto done;
    588 				}
    589 			}
    590 		}
    591 		SCARG(uap, nchanges) -= n;	/* update the results */
    592 		SCARG(uap, changelist) += n;
    593 	}
    594 	if (nerrors) {
    595 		*retval = nerrors;
    596 		error = 0;
    597 		goto done;
    598 	}
    599 
    600 	/* actually scan through the events */
    601 	error = kqueue_scan(fp, SCARG(uap, nevents), SCARG(uap, eventlist),
    602 	    SCARG(uap, timeout), p, retval);
    603  done:
    604 	FILE_UNUSE(fp, p);
    605 	return (error);
    606 }
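
/*
 * Illustrative userland sketch (not part of this file; `fd' is a
 * hypothetical descriptor to monitor, headers such as <sys/event.h>
 * omitted): the two system calls above are normally used together,
 * registering changes and collecting events in one kevent(2) call:
 *
 *	struct kevent ev, res;
 *	int kq, n;
 *
 *	if ((kq = kqueue()) == -1)
 *		err(1, "kqueue");
 *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *	n = kevent(kq, &ev, 1, &res, 1, NULL);
 *	if (n > 0)
 *		printf("%ld bytes readable\n", (long)res.data);
 */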
    607 
    608 /*
    609  * Register a given kevent kev onto the kqueue
    610  */
    611 int
    612 kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
    613 {
    614 	const struct kfilter *kfilter;
    615 	struct filedesc	*fdp;
    616 	struct file	*fp;
    617 	struct knote	*kn;
    618 	int		s, error;
    619 
    620 	fdp = kq->kq_fdp;
    621 	fp = NULL;
    622 	kn = NULL;
    623 	error = 0;
    624 	kfilter = kfilter_byfilter(kev->filter);
    625 	if (kfilter == NULL || kfilter->filtops == NULL) {
     626 		/* filter not found or not implemented */
    627 		return (EINVAL);
    628 	}
    629 
    630 	/* search if knote already exists */
    631 	if (kfilter->filtops->f_isfd) {
    632 		/* monitoring a file descriptor */
    633 		if ((fp = fd_getfile(fdp, kev->ident)) == NULL)
    634 			return (EBADF);	/* validate descriptor */
    635 		FILE_USE(fp);
    636 
    637 		if (kev->ident < fdp->fd_knlistsize) {
    638 			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
    639 				if (kq == kn->kn_kq &&
    640 				    kev->filter == kn->kn_filter)
    641 					break;
    642 		}
    643 	} else {
    644 		/*
    645 		 * not monitoring a file descriptor, so
    646 		 * lookup knotes in internal hash table
    647 		 */
    648 		if (fdp->fd_knhashmask != 0) {
    649 			struct klist *list;
    650 
    651 			list = &fdp->fd_knhash[
    652 			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
    653 			SLIST_FOREACH(kn, list, kn_link)
    654 				if (kev->ident == kn->kn_id &&
    655 				    kq == kn->kn_kq &&
    656 				    kev->filter == kn->kn_filter)
    657 					break;
    658 		}
    659 	}
    660 
    661 	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
     662 		error = ENOENT;		/* matching knote not found */
    663 		goto done;
    664 	}
    665 
    666 	/*
    667 	 * kn now contains the matching knote, or NULL if no match
    668 	 */
    669 	if (kev->flags & EV_ADD) {
    670 		/* add knote */
    671 
    672 		if (kn == NULL) {
    673 			/* create new knote */
    674 			kn = pool_get(&knote_pool, PR_WAITOK);
    675 			if (kn == NULL) {
    676 				error = ENOMEM;
    677 				goto done;
    678 			}
    679 			kn->kn_fp = fp;
    680 			kn->kn_kq = kq;
    681 			kn->kn_fop = kfilter->filtops;
    682 
    683 			/*
    684 			 * apply reference count to knote structure, and
    685 			 * do not release it at the end of this routine.
    686 			 */
    687 			fp = NULL;
    688 
    689 			kn->kn_sfflags = kev->fflags;
    690 			kn->kn_sdata = kev->data;
    691 			kev->fflags = 0;
    692 			kev->data = 0;
    693 			kn->kn_kevent = *kev;
    694 
    695 			knote_attach(kn, fdp);
    696 			if ((error = kfilter->filtops->f_attach(kn)) != 0) {
    697 				knote_drop(kn, p);
    698 				goto done;
    699 			}
    700 		} else {
    701 			/* modify existing knote */
    702 
    703 			/*
    704 			 * The user may change some filter values after the
    705 			 * initial EV_ADD, but doing so will not reset any
     706 			 * filters which have already been triggered.
    707 			 */
    708 			kn->kn_sfflags = kev->fflags;
    709 			kn->kn_sdata = kev->data;
    710 			kn->kn_kevent.udata = kev->udata;
    711 		}
    712 
    713 		s = splhigh();
    714 		if (kn->kn_fop->f_event(kn, 0))
    715 			KNOTE_ACTIVATE(kn);
    716 		splx(s);
    717 
    718 	} else if (kev->flags & EV_DELETE) {	/* delete knote */
    719 		kn->kn_fop->f_detach(kn);
    720 		knote_drop(kn, p);
    721 		goto done;
    722 	}
    723 
    724 	/* disable knote */
    725 	if ((kev->flags & EV_DISABLE) &&
    726 	    ((kn->kn_status & KN_DISABLED) == 0)) {
    727 		s = splhigh();
    728 		kn->kn_status |= KN_DISABLED;
    729 		splx(s);
    730 	}
    731 
    732 	/* enable knote */
    733 	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
    734 		s = splhigh();
    735 		kn->kn_status &= ~KN_DISABLED;
    736 		if ((kn->kn_status & KN_ACTIVE) &&
    737 		    ((kn->kn_status & KN_QUEUED) == 0))
    738 			knote_enqueue(kn);
    739 		splx(s);
    740 	}
    741 
    742  done:
    743 	if (fp != NULL)
    744 		FILE_UNUSE(fp, p);
    745 	return (error);
    746 }
    747 
    748 /*
    749  * Scan through the list of events on fp (for a maximum of maxevents),
     750  * returning the results into ulistp. The timeout is determined by tsp:
     751  * if NULL, wait indefinitely; if zero-valued, perform a poll; otherwise
     752  * wait for at most the specified time.
    753  */
    754 static int
    755 kqueue_scan(struct file *fp, size_t maxevents, struct kevent *ulistp,
    756 	const struct timespec *tsp, struct proc *p, register_t *retval)
    757 {
    758 	struct kqueue	*kq;
    759 	struct kevent	*kevp;
    760 	struct timeval	atv;
    761 	struct knote	*kn, marker;
    762 	size_t		count, nkev;
    763 	int		s, timeout, error;
    764 
    765 	kq = (struct kqueue *)fp->f_data;
    766 	count = maxevents;
    767 	nkev = error = 0;
    768 	if (count == 0)
    769 		goto done;
    770 
    771 	if (tsp != NULL) {			/* timeout supplied */
    772 		TIMESPEC_TO_TIMEVAL(&atv, tsp);
    773 		if (itimerfix(&atv)) {
    774 			error = EINVAL;
    775 			goto done;
    776 		}
    777 		s = splclock();
    778 		timeradd(&atv, &time, &atv);	/* calc. time to wait until */
    779 		splx(s);
    780 		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
    781 			timeout = -1;		/* perform a poll */
    782 		else
    783 			timeout = hzto(&atv);	/* calculate hz till timeout */
    784 	} else {
    785 		atv.tv_sec = 0;			/* no timeout, wait forever */
    786 		atv.tv_usec = 0;
    787 		timeout = 0;
    788 	}
    789 	goto start;
    790 
    791  retry:
    792 	if (atv.tv_sec || atv.tv_usec) {	/* timeout requested */
    793 		s = splclock();
    794 		if (timercmp(&time, &atv, >=)) {
    795 			splx(s);
    796 			goto done;		/* timeout reached */
    797 		}
    798 		splx(s);
    799 		timeout = hzto(&atv);		/* recalc. timeout remaining */
    800 	}
    801 
    802  start:
    803 	kevp = kq->kq_kev;
    804 	s = splhigh();
    805 	if (kq->kq_count == 0) {
    806 		if (timeout < 0) {
    807 			error = EWOULDBLOCK;
    808 		} else {
    809 			kq->kq_state |= KQ_SLEEP;
    810 			error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout);
    811 		}
    812 		splx(s);
    813 		if (error == 0)
    814 			goto retry;
    815 		/* don't restart after signals... */
    816 		if (error == ERESTART)
    817 			error = EINTR;
    818 		else if (error == EWOULDBLOCK)
    819 			error = 0;
    820 		goto done;
    821 	}
    822 
    823 	/* mark end of knote list */
    824 	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
    825 
    826 	while (count) {				/* while user wants data ... */
    827 		kn = TAILQ_FIRST(&kq->kq_head);	/* get next knote */
    828 		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
    829 		if (kn == &marker) {		/* if it's our marker, stop */
    830 			splx(s);
    831 			if (count == maxevents)
    832 				goto retry;
    833 			goto done;
    834 		}
    835 		if (kn->kn_status & KN_DISABLED) {
    836 			/* don't want disabled events */
    837 			kn->kn_status &= ~KN_QUEUED;
    838 			kq->kq_count--;
    839 			continue;
    840 		}
    841 		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
    842 		    kn->kn_fop->f_event(kn, 0) == 0) {
    843 			/*
    844 			 * non-ONESHOT event that hasn't
    845 			 * triggered again, so de-queue.
    846 			 */
    847 			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
    848 			kq->kq_count--;
    849 			continue;
    850 		}
    851 		*kevp = kn->kn_kevent;
    852 		kevp++;
    853 		nkev++;
    854 		if (kn->kn_flags & EV_ONESHOT) {
    855 			/* delete ONESHOT events after retrieval */
    856 			kn->kn_status &= ~KN_QUEUED;
    857 			kq->kq_count--;
    858 			splx(s);
    859 			kn->kn_fop->f_detach(kn);
    860 			knote_drop(kn, p);
    861 			s = splhigh();
    862 		} else if (kn->kn_flags & EV_CLEAR) {
    863 			/* clear state after retrieval */
    864 			kn->kn_data = 0;
    865 			kn->kn_fflags = 0;
    866 			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
    867 			kq->kq_count--;
    868 		} else {
    869 			/* add event back on list */
    870 			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
    871 		}
    872 		count--;
    873 		if (nkev == KQ_NEVENTS) {
    874 			/* do copyouts in KQ_NEVENTS chunks */
    875 			splx(s);
    876 			error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
    877 			    sizeof(struct kevent) * nkev);
    878 			ulistp += nkev;
    879 			nkev = 0;
    880 			kevp = kq->kq_kev;
    881 			s = splhigh();
    882 			if (error)
    883 				break;
    884 		}
    885 	}
    886 
    887 	/* remove marker */
    888 	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
    889 	splx(s);
    890  done:
    891 	if (nkev != 0) {
    892 		/* copyout remaining events */
    893 		error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
    894 		    sizeof(struct kevent) * nkev);
    895 	}
    896 	*retval = maxevents - count;
    897 
    898 	return (error);
    899 }
    900 
    901 /*
    902  * struct fileops read method for a kqueue descriptor.
    903  * Not implemented.
    904  * XXX: This could be expanded to call kqueue_scan, if desired.
    905  */
    906 /*ARGSUSED*/
    907 static int
    908 kqueue_read(struct file *fp, off_t *offset, struct uio *uio,
    909 	struct ucred *cred, int flags)
    910 {
    911 
    912 	return (ENXIO);
    913 }
    914 
    915 /*
    916  * struct fileops write method for a kqueue descriptor.
    917  * Not implemented.
    918  */
    919 /*ARGSUSED*/
    920 static int
    921 kqueue_write(struct file *fp, off_t *offset, struct uio *uio,
    922 	struct ucred *cred, int flags)
    923 {
    924 
    925 	return (ENXIO);
    926 }
    927 
    928 /*
    929  * struct fileops ioctl method for a kqueue descriptor.
    930  *
    931  * Two ioctls are currently supported. They both use struct kfilter_mapping:
     932  *	KFILTER_BYFILTER	find name for filter, and return result in
     933  *				name, which is of size len.
     934  *	KFILTER_BYNAME		find filter for name. len is ignored.
    935  */
    936 /*ARGSUSED*/
    937 static int
    938 kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
    939 {
    940 	struct kfilter_mapping	*km;
    941 	const struct kfilter	*kfilter;
    942 	char			*name;
    943 	int			error;
    944 
    945 	km = (struct kfilter_mapping *)data;
    946 	error = 0;
    947 
    948 	switch (com) {
    949 	case KFILTER_BYFILTER:	/* convert filter -> name */
    950 		kfilter = kfilter_byfilter(km->filter);
    951 		if (kfilter != NULL)
    952 			error = copyoutstr(kfilter->name, km->name, km->len,
    953 			    NULL);
    954 		else
    955 			error = ENOENT;
    956 		break;
    957 
    958 	case KFILTER_BYNAME:	/* convert name -> filter */
    959 		MALLOC(name, char *, KFILTER_MAXNAME, M_KEVENT, M_WAITOK);
    960 		error = copyinstr(km->name, name, KFILTER_MAXNAME, NULL);
    961 		if (error) {
    962 			FREE(name, M_KEVENT);
    963 			break;
    964 		}
    965 		kfilter = kfilter_byname(name);
    966 		if (kfilter != NULL)
    967 			km->filter = kfilter->filter;
    968 		else
    969 			error = ENOENT;
    970 		FREE(name, M_KEVENT);
    971 		break;
    972 
    973 	default:
    974 		error = ENOTTY;
    975 
    976 	}
    977 	return (error);
    978 }
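
/*
 * Illustrative userland sketch (not part of this file; `kq' is a
 * hypothetical kqueue descriptor): resolving a filter name to its
 * numeric id with KFILTER_BYNAME might look like:
 *
 *	struct kfilter_mapping km;
 *	char name[] = "EVFILT_MYDEV";
 *
 *	km.name = name;
 *	km.len = 0;
 *	if (ioctl(kq, KFILTER_BYNAME, &km) == -1)
 *		err(1, "KFILTER_BYNAME");
 *
 * On success, km.filter holds the filter id, which can then be used as
 * the filter argument to EV_SET().
 */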
    979 
    980 /*
    981  * struct fileops fcntl method for a kqueue descriptor.
    982  * Not implemented.
    983  */
    984 /*ARGSUSED*/
    985 static int
    986 kqueue_fcntl(struct file *fp, u_int com, caddr_t data, struct proc *p)
    987 {
    988 
    989 	return (ENOTTY);
    990 }
    991 
    992 /*
    993  * struct fileops poll method for a kqueue descriptor.
    994  * Determine if kqueue has events pending.
    995  */
    996 static int
    997 kqueue_poll(struct file *fp, int events, struct proc *p)
    998 {
    999 	struct kqueue	*kq;
   1000 	int		revents;
   1001 
   1002 	kq = (struct kqueue *)fp->f_data;
   1003 	revents = 0;
   1004 	if (events & (POLLIN | POLLRDNORM)) {
   1005 		if (kq->kq_count) {
   1006 			revents |= events & (POLLIN | POLLRDNORM);
   1007 		} else {
   1008 			selrecord(p, &kq->kq_sel);
   1009 		}
   1010 	}
   1011 	return (revents);
   1012 }
   1013 
   1014 /*
   1015  * struct fileops stat method for a kqueue descriptor.
   1016  * Returns dummy info, with st_size being number of events pending.
   1017  */
   1018 static int
   1019 kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
   1020 {
   1021 	struct kqueue	*kq;
   1022 
   1023 	kq = (struct kqueue *)fp->f_data;
   1024 	memset((void *)st, 0, sizeof(*st));
   1025 	st->st_size = kq->kq_count;
   1026 	st->st_blksize = sizeof(struct kevent);
   1027 	st->st_mode = S_IFIFO;
   1028 	return (0);
   1029 }
   1030 
   1031 /*
   1032  * struct fileops close method for a kqueue descriptor.
   1033  * Cleans up kqueue.
   1034  */
   1035 static int
   1036 kqueue_close(struct file *fp, struct proc *p)
   1037 {
   1038 	struct kqueue	*kq;
   1039 	struct filedesc	*fdp;
   1040 	struct knote	**knp, *kn, *kn0;
   1041 	int		i;
   1042 
   1043 	kq = (struct kqueue *)fp->f_data;
   1044 	fdp = p->p_fd;
   1045 	for (i = 0; i < fdp->fd_knlistsize; i++) {
   1046 		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
   1047 		kn = *knp;
   1048 		while (kn != NULL) {
   1049 			kn0 = SLIST_NEXT(kn, kn_link);
   1050 			if (kq == kn->kn_kq) {
   1051 				kn->kn_fop->f_detach(kn);
   1052 				FILE_UNUSE(kn->kn_fp, p);
   1053 				pool_put(&knote_pool, kn);
   1054 				*knp = kn0;
   1055 			} else {
   1056 				knp = &SLIST_NEXT(kn, kn_link);
   1057 			}
   1058 			kn = kn0;
   1059 		}
   1060 	}
   1061 	if (fdp->fd_knhashmask != 0) {
   1062 		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
   1063 			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
   1064 			kn = *knp;
   1065 			while (kn != NULL) {
   1066 				kn0 = SLIST_NEXT(kn, kn_link);
   1067 				if (kq == kn->kn_kq) {
   1068 					kn->kn_fop->f_detach(kn);
   1069 					/* XXX non-fd release of kn->kn_ptr */
   1070 					pool_put(&knote_pool, kn);
   1071 					*knp = kn0;
   1072 				} else {
   1073 					knp = &SLIST_NEXT(kn, kn_link);
   1074 				}
   1075 				kn = kn0;
   1076 			}
   1077 		}
   1078 	}
   1079 	pool_put(&kqueue_pool, kq);
   1080 	fp->f_data = NULL;
   1081 
   1082 	return (0);
   1083 }
   1084 
   1085 /*
   1086  * wakeup a kqueue
   1087  */
   1088 static void
   1089 kqueue_wakeup(struct kqueue *kq)
   1090 {
   1091 
   1092 	if (kq->kq_state & KQ_SLEEP) {		/* if currently sleeping ...  */
   1093 		kq->kq_state &= ~KQ_SLEEP;
   1094 		wakeup(kq);			/* ... wakeup */
   1095 	}
   1096 
   1097 	/* Notify select/poll and kevent. */
   1098 	selnotify(&kq->kq_sel, 0);
   1099 }
   1100 
   1101 /*
   1102  * struct fileops kqfilter method for a kqueue descriptor.
   1103  * Event triggered when monitored kqueue changes.
   1104  */
   1105 /*ARGSUSED*/
   1106 static int
   1107 kqueue_kqfilter(struct file *fp, struct knote *kn)
   1108 {
   1109 	struct kqueue *kq;
   1110 
   1111 	KASSERT(fp == kn->kn_fp);
   1112 	kq = (struct kqueue *)kn->kn_fp->f_data;
   1113 	if (kn->kn_filter != EVFILT_READ)
   1114 		return (1);
   1115 	kn->kn_fop = &kqread_filtops;
   1116 	SLIST_INSERT_HEAD(&kq->kq_sel.si_klist, kn, kn_selnext);
   1117 	return (0);
   1118 }
   1119 
   1120 
   1121 /*
   1122  * Walk down a list of knotes, activating them if their event has triggered.
   1123  */
   1124 void
   1125 knote(struct klist *list, long hint)
   1126 {
   1127 	struct knote *kn;
   1128 
   1129 	SLIST_FOREACH(kn, list, kn_selnext)
   1130 		if (kn->kn_fop->f_event(kn, hint))
   1131 			KNOTE_ACTIVATE(kn);
   1132 }
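
/*
 * Illustrative sketch (hypothetical driver names): a driver delivers
 * events by calling KNOTE() on the klist its knotes were attached to,
 * for example from its receive interrupt:
 *
 *	sc->sc_bytes_ready = count;
 *	KNOTE(&sc->sc_rsel.si_klist, 0);
 *
 * Each attached knote's f_event routine then runs with the given hint,
 * and the knote is queued on its kqueue if the filter reports it active.
 */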
   1133 
   1134 /*
   1135  * Remove all knotes from a specified klist
   1136  */
   1137 void
   1138 knote_remove(struct proc *p, struct klist *list)
   1139 {
   1140 	struct knote *kn;
   1141 
   1142 	while ((kn = SLIST_FIRST(list)) != NULL) {
   1143 		kn->kn_fop->f_detach(kn);
   1144 		knote_drop(kn, p);
   1145 	}
   1146 }
   1147 
   1148 /*
   1149  * Remove all knotes referencing a specified fd
   1150  */
   1151 void
   1152 knote_fdclose(struct proc *p, int fd)
   1153 {
   1154 	struct filedesc	*fdp;
   1155 	struct klist	*list;
   1156 
   1157 	fdp = p->p_fd;
   1158 	list = &fdp->fd_knlist[fd];
   1159 	knote_remove(p, list);
   1160 }
   1161 
   1162 /*
   1163  * Attach a new knote to a file descriptor
   1164  */
   1165 static void
   1166 knote_attach(struct knote *kn, struct filedesc *fdp)
   1167 {
   1168 	struct klist	*list;
   1169 	int		size;
   1170 
   1171 	if (! kn->kn_fop->f_isfd) {
   1172 		/* if knote is not on an fd, store on internal hash table */
   1173 		if (fdp->fd_knhashmask == 0)
   1174 			fdp->fd_knhash = hashinit(KN_HASHSIZE, HASH_LIST,
   1175 			    M_KEVENT, M_WAITOK, &fdp->fd_knhashmask);
   1176 		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
   1177 		goto done;
   1178 	}
   1179 
   1180 	/*
   1181 	 * otherwise, knote is on an fd.
   1182 	 * knotes are stored in fd_knlist indexed by kn->kn_id.
   1183 	 */
   1184 	if (fdp->fd_knlistsize <= kn->kn_id) {
   1185 		/* expand list, it's too small */
   1186 		size = fdp->fd_knlistsize;
   1187 		while (size <= kn->kn_id) {
   1188 			/* grow in KQ_EXTENT chunks */
   1189 			size += KQ_EXTENT;
   1190 		}
   1191 		list = malloc(size * sizeof(struct klist *), M_KEVENT,M_WAITOK);
   1192 		if (fdp->fd_knlist) {
   1193 			/* copy existing knlist */
   1194 			memcpy((caddr_t)list, (caddr_t)fdp->fd_knlist,
   1195 			    fdp->fd_knlistsize * sizeof(struct klist *));
   1196 		}
   1197 		/*
   1198 		 * Zero new memory. Stylistically, SLIST_INIT() should be
    1199 		 * used here, but that does the same thing as the memset() anyway.
   1200 		 */
   1201 		memset(&list[fdp->fd_knlistsize], 0,
   1202 		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
   1203 
   1204 		/* switch to new knlist */
   1205 		if (fdp->fd_knlist != NULL)
   1206 			free(fdp->fd_knlist, M_KEVENT);
   1207 		fdp->fd_knlistsize = size;
   1208 		fdp->fd_knlist = list;
   1209 	}
   1210 
   1211 	/* get list head for this fd */
   1212 	list = &fdp->fd_knlist[kn->kn_id];
   1213  done:
   1214 	/* add new knote */
   1215 	SLIST_INSERT_HEAD(list, kn, kn_link);
   1216 	kn->kn_status = 0;
   1217 }
   1218 
   1219 /*
   1220  * Drop knote.
   1221  * Should be called at spl == 0, since we don't want to hold spl
   1222  * while calling FILE_UNUSE and free.
   1223  */
   1224 static void
   1225 knote_drop(struct knote *kn, struct proc *p)
   1226 {
   1227 	struct filedesc	*fdp;
   1228 	struct klist	*list;
   1229 
   1230 	fdp = p->p_fd;
   1231 	if (kn->kn_fop->f_isfd)
   1232 		list = &fdp->fd_knlist[kn->kn_id];
   1233 	else
   1234 		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
   1235 
   1236 	SLIST_REMOVE(list, kn, knote, kn_link);
   1237 	if (kn->kn_status & KN_QUEUED)
   1238 		knote_dequeue(kn);
   1239 	if (kn->kn_fop->f_isfd)
   1240 		FILE_UNUSE(kn->kn_fp, p);
   1241 	pool_put(&knote_pool, kn);
   1242 }
   1243 
   1244 
   1245 /*
   1246  * Queue new event for knote.
   1247  */
   1248 static void
   1249 knote_enqueue(struct knote *kn)
   1250 {
   1251 	struct kqueue	*kq;
   1252 	int		s;
   1253 
   1254 	kq = kn->kn_kq;
   1255 	s = splhigh();
   1256 	KASSERT((kn->kn_status & KN_QUEUED) == 0);
   1257 
   1258 	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
   1259 	kn->kn_status |= KN_QUEUED;
   1260 	kq->kq_count++;
   1261 	splx(s);
   1262 	kqueue_wakeup(kq);
   1263 }
   1264 
   1265 /*
   1266  * Dequeue event for knote.
   1267  */
   1268 static void
   1269 knote_dequeue(struct knote *kn)
   1270 {
   1271 	struct kqueue	*kq;
   1272 	int		s;
   1273 
   1274 	kq = kn->kn_kq;
   1275 	s = splhigh();
   1276 	KASSERT(kn->kn_status & KN_QUEUED);
   1277 
   1278 	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
   1279 	kn->kn_status &= ~KN_QUEUED;
   1280 	kq->kq_count--;
   1281 	splx(s);
   1282 }
   1283