      1 /*	$NetBSD: kern_event.c,v 1.6 2003/01/18 10:06:25 thorpej Exp $	*/
      2 /*-
      3  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon (at) FreeBSD.org>
      4  * All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     25  * SUCH DAMAGE.
     26  *
     27  * $FreeBSD: src/sys/kern/kern_event.c,v 1.27 2001/07/05 17:10:44 rwatson Exp $
     28  */
     29 
     30 #include <sys/param.h>
     31 #include <sys/systm.h>
     32 #include <sys/kernel.h>
     33 #include <sys/proc.h>
     34 #include <sys/malloc.h>
     35 #include <sys/unistd.h>
     36 #include <sys/file.h>
     37 #include <sys/fcntl.h>
     38 #include <sys/select.h>
     39 #include <sys/queue.h>
     40 #include <sys/event.h>
     41 #include <sys/eventvar.h>
     42 #include <sys/poll.h>
     43 #include <sys/pool.h>
     44 #include <sys/protosw.h>
     45 #include <sys/socket.h>
     46 #include <sys/socketvar.h>
     47 #include <sys/stat.h>
     48 #include <sys/uio.h>
     49 #include <sys/mount.h>
     50 #include <sys/filedesc.h>
     51 #include <sys/sa.h>
     52 #include <sys/syscallargs.h>
     53 
     54 static int	kqueue_scan(struct file *fp, size_t maxevents,
     55 		    struct kevent *ulistp, const struct timespec *timeout,
     56 		    struct proc *p, register_t *retval);
     57 static void	kqueue_wakeup(struct kqueue *kq);
     58 
     59 static int	kqueue_read(struct file *fp, off_t *offset, struct uio *uio,
     60 		    struct ucred *cred, int flags);
     61 static int	kqueue_write(struct file *fp, off_t *offset, struct uio *uio,
     62 		    struct ucred *cred, int flags);
     63 static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
     64 		    struct proc *p);
     65 static int	kqueue_fcntl(struct file *fp, u_int com, caddr_t data,
     66 		    struct proc *p);
     67 static int	kqueue_poll(struct file *fp, int events, struct proc *p);
     68 static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
     69 static int	kqueue_stat(struct file *fp, struct stat *sp, struct proc *p);
     70 static int	kqueue_close(struct file *fp, struct proc *p);
     71 
     72 static struct fileops kqueueops = {
     73 	kqueue_read, kqueue_write, kqueue_ioctl, kqueue_fcntl, kqueue_poll,
     74 	kqueue_stat, kqueue_close, kqueue_kqfilter
     75 };
     76 
     77 static void	knote_attach(struct knote *kn, struct filedesc *fdp);
     78 static void	knote_drop(struct knote *kn, struct proc *p,
     79 		    struct filedesc *fdp);
     80 static void	knote_enqueue(struct knote *kn);
     81 static void	knote_dequeue(struct knote *kn);
     82 
     83 static void	filt_kqdetach(struct knote *kn);
     84 static int	filt_kqueue(struct knote *kn, long hint);
     85 static int	filt_procattach(struct knote *kn);
     86 static void	filt_procdetach(struct knote *kn);
     87 static int	filt_proc(struct knote *kn, long hint);
     88 static int	filt_fileattach(struct knote *kn);
     89 
     90 static const struct filterops kqread_filtops =
     91 	{ 1, NULL, filt_kqdetach, filt_kqueue };
     92 static const struct filterops proc_filtops =
     93 	{ 0, filt_procattach, filt_procdetach, filt_proc };
     94 static const struct filterops file_filtops =
     95 	{ 1, filt_fileattach, NULL, NULL };
     96 
     97 struct pool	kqueue_pool;
     98 struct pool	knote_pool;
     99 
    100 #define	KNOTE_ACTIVATE(kn)						\
    101 do {									\
    102 	kn->kn_status |= KN_ACTIVE;					\
    103 	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
    104 		knote_enqueue(kn);					\
    105 } while(0)
    106 
    107 #define	KN_HASHSIZE		64		/* XXX should be tunable */
    108 #define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
    109 
    110 extern const struct filterops sig_filtops;
    111 
    112 /*
     113  * Table for all system-defined filters.
    114  * These should be listed in the numeric order of the EVFILT_* defines.
    115  * If filtops is NULL, the filter isn't implemented in NetBSD.
    116  * End of list is when name is NULL.
    117  */
    118 struct kfilter {
    119 	const char	 *name;		/* name of filter */
    120 	uint32_t	  filter;	/* id of filter */
    121 	const struct filterops *filtops;/* operations for filter */
    122 };
    123 
    124 		/* System defined filters */
    125 static const struct kfilter sys_kfilters[] = {
    126 	{ "EVFILT_READ",	EVFILT_READ,	&file_filtops },
    127 	{ "EVFILT_WRITE",	EVFILT_WRITE,	&file_filtops },
    128 	{ "EVFILT_AIO",		EVFILT_AIO,	NULL },
    129 	{ "EVFILT_VNODE",	EVFILT_VNODE,	&file_filtops },
    130 	{ "EVFILT_PROC",	EVFILT_PROC,	&proc_filtops },
    131 	{ "EVFILT_SIGNAL",	EVFILT_SIGNAL,	&sig_filtops },
    132 	{ NULL,			0,		NULL },	/* end of list */
    133 };
    134 
    135 		/* User defined kfilters */
    136 static struct kfilter	*user_kfilters;		/* array */
    137 static int		user_kfilterc;		/* current offset */
    138 static int		user_kfiltermaxc;	/* max size so far */
    139 
    140 /*
    141  * kqueue_init:
    142  *
    143  *	Initialize the kqueue/knote facility.
    144  */
    145 void
    146 kqueue_init(void)
    147 {
    148 
    149 	pool_init(&kqueue_pool, sizeof(struct kqueue), 0, 0, 0, "kqueuepl",
    150 	    NULL);
    151 	pool_init(&knote_pool, sizeof(struct knote), 0, 0, 0, "knotepl",
    152 	    NULL);
    153 }
    154 
    155 /*
    156  * Find kfilter entry by name, or NULL if not found.
    157  */
    158 static const struct kfilter *
    159 kfilter_byname_sys(const char *name)
    160 {
    161 	int i;
    162 
    163 	for (i = 0; sys_kfilters[i].name != NULL; i++) {
    164 		if (strcmp(name, sys_kfilters[i].name) == 0)
    165 			return (&sys_kfilters[i]);
    166 	}
    167 	return (NULL);
    168 }
    169 
    170 static struct kfilter *
    171 kfilter_byname_user(const char *name)
    172 {
    173 	int i;
    174 
    175 	/* user_kfilters[] could be NULL if no filters were registered */
    176 	if (!user_kfilters)
    177 		return (NULL);
    178 
    179 	for (i = 0; user_kfilters[i].name != NULL; i++) {
     180 		if (user_kfilters[i].name[0] != '\0' &&
    181 		    strcmp(name, user_kfilters[i].name) == 0)
    182 			return (&user_kfilters[i]);
    183 	}
    184 	return (NULL);
    185 }
    186 
    187 static const struct kfilter *
    188 kfilter_byname(const char *name)
    189 {
    190 	const struct kfilter *kfilter;
    191 
    192 	if ((kfilter = kfilter_byname_sys(name)) != NULL)
    193 		return (kfilter);
    194 
    195 	return (kfilter_byname_user(name));
    196 }
    197 
    198 /*
    199  * Find kfilter entry by filter id, or NULL if not found.
    200  * Assumes entries are indexed in filter id order, for speed.
    201  */
    202 static const struct kfilter *
    203 kfilter_byfilter(uint32_t filter)
    204 {
    205 	const struct kfilter *kfilter;
    206 
    207 	if (filter < EVFILT_SYSCOUNT)	/* it's a system filter */
    208 		kfilter = &sys_kfilters[filter];
    209 	else if (user_kfilters != NULL &&
    210 	    filter < EVFILT_SYSCOUNT + user_kfilterc)
    211 					/* it's a user filter */
    212 		kfilter = &user_kfilters[filter - EVFILT_SYSCOUNT];
    213 	else
    214 		return (NULL);		/* out of range */
    215 	KASSERT(kfilter->filter == filter);	/* sanity check! */
    216 	return (kfilter);
    217 }
    218 
    219 /*
    220  * Register a new kfilter. Stores the entry in user_kfilters.
    221  * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
    222  * If retfilter != NULL, the new filterid is returned in it.
    223  */
    224 int
    225 kfilter_register(const char *name, const struct filterops *filtops,
    226     int *retfilter)
    227 {
    228 	struct kfilter *kfilter;
    229 	void *space;
    230 	int len;
    231 
    232 	if (name == NULL || name[0] == '\0' || filtops == NULL)
    233 		return (EINVAL);	/* invalid args */
    234 	if (kfilter_byname(name) != NULL)
    235 		return (EEXIST);	/* already exists */
    236 	if (user_kfilterc > 0xffffffff - EVFILT_SYSCOUNT)
    237 		return (EINVAL);	/* too many */
    238 
    239 	/* check if need to grow user_kfilters */
    240 	if (user_kfilterc + 1 > user_kfiltermaxc) {
    241 		/*
    242 		 * Grow in KFILTER_EXTENT chunks. Use malloc(9), because we
    243 		 * want to traverse user_kfilters as an array.
    244 		 */
    245 		user_kfiltermaxc += KFILTER_EXTENT;
     246 		kfilter = malloc(user_kfiltermaxc * sizeof(struct kfilter),
    247 		    M_KEVENT, M_WAITOK);
    248 
    249 		/* copy existing user_kfilters */
    250 		if (user_kfilters != NULL)
    251 			memcpy((caddr_t)kfilter, (caddr_t)user_kfilters,
     252 			    user_kfilterc * sizeof(struct kfilter));
    253 					/* zero new sections */
    254 		memset((caddr_t)kfilter +
     255 		    user_kfilterc * sizeof(struct kfilter), 0,
     256 		    (user_kfiltermaxc - user_kfilterc) *
     257 		    sizeof(struct kfilter));
    258 					/* switch to new kfilter */
    259 		if (user_kfilters != NULL)
    260 			free(user_kfilters, M_KEVENT);
    261 		user_kfilters = kfilter;
    262 	}
    263 	len = strlen(name) + 1;		/* copy name */
    264 	space = malloc(len, M_KEVENT, M_WAITOK);
    265 	memcpy(space, name, len);
    266 	user_kfilters[user_kfilterc].name = space;
    267 
    268 	user_kfilters[user_kfilterc].filter = user_kfilterc + EVFILT_SYSCOUNT;
    269 
    270 	len = sizeof(struct filterops);	/* copy filtops */
    271 	space = malloc(len, M_KEVENT, M_WAITOK);
    272 	memcpy(space, filtops, len);
    273 	user_kfilters[user_kfilterc].filtops = space;
    274 
    275 	if (retfilter != NULL)
    276 		*retfilter = user_kfilters[user_kfilterc].filter;
    277 	user_kfilterc++;		/* finally, increment count */
    278 	return (0);
    279 }
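
         /*
          * Example (illustrative only): a kernel subsystem could register
          * its own filter along these lines; the example_* names are
          * hypothetical, and the filterops initializer order matches the
          * { f_isfd, f_attach, f_detach, f_event } layout used above.
          *
          *	static const struct filterops example_filtops =
          *		{ 0, example_attach, example_detach, example_event };
          *	int example_filter;
          *
          *	error = kfilter_register("EVFILT_EXAMPLE", &example_filtops,
          *	    &example_filter);
          */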
    280 
    281 /*
    282  * Unregister a kfilter previously registered with kfilter_register.
    283  * This retains the filter id, but clears the name and frees filtops (filter
    284  * operations), so that the number isn't reused during a boot.
    285  * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
    286  */
    287 int
    288 kfilter_unregister(const char *name)
    289 {
    290 	struct kfilter *kfilter;
    291 
    292 	if (name == NULL || name[0] == '\0')
    293 		return (EINVAL);	/* invalid name */
    294 
    295 	if (kfilter_byname_sys(name) != NULL)
    296 		return (EINVAL);	/* can't detach system filters */
    297 
    298 	kfilter = kfilter_byname_user(name);
    299 	if (kfilter == NULL)		/* not found */
    300 		return (ENOENT);
    301 
    302 	if (kfilter->name[0] != '\0') {
     303 		/* XXX Cast away const (but we know it's safe). */
    304 		free((void *) kfilter->name, M_KEVENT);
    305 		kfilter->name = "";	/* mark as `not implemented' */
    306 	}
    307 	if (kfilter->filtops != NULL) {
     308 		/* XXX Cast away const (but we know it's safe). */
    309 		free((void *) kfilter->filtops, M_KEVENT);
    310 		kfilter->filtops = NULL; /* mark as `not implemented' */
    311 	}
    312 	return (0);
    313 }
    314 
    315 
    316 /*
    317  * Filter attach method for EVFILT_READ and EVFILT_WRITE on normal file
    318  * descriptors. Calls struct fileops kqfilter method for given file descriptor.
    319  */
    320 static int
    321 filt_fileattach(struct knote *kn)
    322 {
    323 	struct file *fp;
    324 
    325 	fp = kn->kn_fp;
    326 	return ((*fp->f_ops->fo_kqfilter)(fp, kn));
    327 }
    328 
    329 /*
    330  * Filter detach method for EVFILT_READ on kqueue descriptor.
    331  */
    332 static void
    333 filt_kqdetach(struct knote *kn)
    334 {
    335 	struct kqueue *kq;
    336 
    337 	kq = (struct kqueue *)kn->kn_fp->f_data;
    338 	SLIST_REMOVE(&kq->kq_sel.sel_klist, kn, knote, kn_selnext);
    339 }
    340 
    341 /*
    342  * Filter event method for EVFILT_READ on kqueue descriptor.
    343  */
    344 /*ARGSUSED*/
    345 static int
    346 filt_kqueue(struct knote *kn, long hint)
    347 {
    348 	struct kqueue *kq;
    349 
    350 	kq = (struct kqueue *)kn->kn_fp->f_data;
    351 	kn->kn_data = kq->kq_count;
    352 	return (kn->kn_data > 0);
    353 }
    354 
    355 /*
    356  * Filter attach method for EVFILT_PROC.
    357  */
    358 static int
    359 filt_procattach(struct knote *kn)
    360 {
    361 	struct proc *p;
    362 
    363 	p = pfind(kn->kn_id);
    364 	if (p == NULL)
    365 		return (ESRCH);
    366 
    367 	/*
    368 	 * Fail if it's not owned by you, or the last exec gave us
    369 	 * setuid/setgid privs (unless you're root).
    370 	 */
    371 	if ((p->p_cred->p_ruid != curproc->p_cred->p_ruid ||
    372 		(p->p_flag & P_SUGID))
    373 	    && suser(curproc->p_ucred, &curproc->p_acflag) != 0)
    374 		return (EACCES);
    375 
    376 	kn->kn_ptr.p_proc = p;
    377 	kn->kn_flags |= EV_CLEAR;	/* automatically set */
    378 
    379 	/*
    380 	 * internal flag indicating registration done by kernel
    381 	 */
    382 	if (kn->kn_flags & EV_FLAG1) {
    383 		kn->kn_data = kn->kn_sdata;	/* ppid */
    384 		kn->kn_fflags = NOTE_CHILD;
    385 		kn->kn_flags &= ~EV_FLAG1;
    386 	}
    387 
    388 	/* XXXSMP lock the process? */
    389 	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
    390 
    391 	return (0);
    392 }
    393 
    394 /*
    395  * Filter detach method for EVFILT_PROC.
    396  *
    397  * The knote may be attached to a different process, which may exit,
    398  * leaving nothing for the knote to be attached to.  So when the process
    399  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
    400  * it will be deleted when read out.  However, as part of the knote deletion,
    401  * this routine is called, so a check is needed to avoid actually performing
    402  * a detach, because the original process might not exist any more.
    403  */
    404 static void
    405 filt_procdetach(struct knote *kn)
    406 {
    407 	struct proc *p;
    408 
    409 	if (kn->kn_status & KN_DETACHED)
    410 		return;
    411 
    412 	p = kn->kn_ptr.p_proc;
    413 	KASSERT(p->p_stat == SDEAD || pfind(kn->kn_id) == p);
    414 
    415 	/* XXXSMP lock the process? */
    416 	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
    417 }
    418 
    419 /*
    420  * Filter event method for EVFILT_PROC.
    421  */
    422 static int
    423 filt_proc(struct knote *kn, long hint)
    424 {
    425 	u_int event;
    426 
    427 	/*
    428 	 * mask off extra data
    429 	 */
    430 	event = (u_int)hint & NOTE_PCTRLMASK;
    431 
    432 	/*
    433 	 * if the user is interested in this event, record it.
    434 	 */
    435 	if (kn->kn_sfflags & event)
    436 		kn->kn_fflags |= event;
    437 
    438 	/*
    439 	 * process is gone, so flag the event as finished.
    440 	 */
    441 	if (event == NOTE_EXIT) {
    442 		/*
     443 		 * Detach the knote from the watched process and mark
     444 		 * it as such. We can't leave this to kqueue_scan(),
     445 		 * since the process might not exist by then. And we
     446 		 * have to do this now: the KNOTE() in psignal() is
     447 		 * also called for zombies, so we might end up reading
     448 		 * freed memory if the kevent had already been picked
     449 		 * up and the knote g/c'ed.
    450 		 */
    451 		kn->kn_fop->f_detach(kn);
    452 		kn->kn_status |= KN_DETACHED;
    453 
     454 		/* Mark as ONESHOT, so that the knote is g/c'ed when read */
    455 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
    456 		return (1);
    457 	}
    458 
    459 	/*
    460 	 * process forked, and user wants to track the new process,
    461 	 * so attach a new knote to it, and immediately report an
    462 	 * event with the parent's pid.
    463 	 */
    464 	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
    465 		struct kevent kev;
    466 		int error;
    467 
    468 		/*
    469 		 * register knote with new process.
    470 		 */
    471 		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
    472 		kev.filter = kn->kn_filter;
    473 		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
    474 		kev.fflags = kn->kn_sfflags;
    475 		kev.data = kn->kn_id;			/* parent */
    476 		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
    477 		error = kqueue_register(kn->kn_kq, &kev, NULL);
    478 		if (error)
    479 			kn->kn_fflags |= NOTE_TRACKERR;
    480 	}
    481 
    482 	return (kn->kn_fflags != 0);
    483 }
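
         /*
          * Example (illustrative only): a userland watcher that wants to
          * follow a process and its children could register with something
          * like the following (the pid variable is hypothetical):
          *
          *	EV_SET(&ev, pid, EVFILT_PROC, EV_ADD,
          *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, 0);
          */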
    484 
    485 /*
    486  * filt_seltrue:
    487  *
    488  *	This filter "event" routine simulates seltrue().
    489  */
    490 int
    491 filt_seltrue(struct knote *kn, long hint)
    492 {
    493 
    494 	/*
    495 	 * We don't know how much data can be read/written,
    496 	 * but we know that it *can* be.  This is about as
    497 	 * good as select/poll does as well.
    498 	 */
    499 	kn->kn_data = 0;
    500 	return (1);
    501 }
    502 
    503 /*
     504  * This provides a full kqfilter entry for device switch tables, which
     505  * has the same effect as a filter using filt_seltrue() as its event method.
    506  */
    507 static void
    508 filt_seltruedetach(struct knote *kn)
    509 {
    510 	/* Nothing to do */
    511 }
    512 
    513 static const struct filterops seltrue_filtops =
    514 	{ 1, NULL, filt_seltruedetach, filt_seltrue };
    515 
    516 int
    517 seltrue_kqfilter(dev_t dev, struct knote *kn)
    518 {
    519 	switch (kn->kn_filter) {
    520 	case EVFILT_READ:
    521 	case EVFILT_WRITE:
    522 		kn->kn_fop = &seltrue_filtops;
    523 		break;
    524 	default:
    525 		return (1);
    526 	}
    527 
    528 	/* Nothing more to do */
    529 	return (0);
    530 }
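
         /*
          * Example (illustrative only): a driver with no filter logic of its
          * own can point its kqfilter entry at seltrue_kqfilter, or wrap it
          * (the exampledev name is hypothetical):
          *
          *	int
          *	exampledev_kqfilter(dev_t dev, struct knote *kn)
          *	{
          *
          *		return (seltrue_kqfilter(dev, kn));
          *	}
          */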
    531 
    532 /*
    533  * kqueue(2) system call.
    534  */
    535 int
    536 sys_kqueue(struct lwp *l, void *v, register_t *retval)
    537 {
    538 	struct filedesc	*fdp;
    539 	struct kqueue	*kq;
    540 	struct file	*fp;
    541 	struct proc	*p;
    542 	int		fd, error;
    543 
    544 	p = l->l_proc;
    545 	fdp = p->p_fd;
    546 	error = falloc(p, &fp, &fd);	/* setup a new file descriptor */
    547 	if (error)
    548 		return (error);
    549 	fp->f_flag = FREAD | FWRITE;
    550 	fp->f_type = DTYPE_KQUEUE;
    551 	fp->f_ops = &kqueueops;
    552 	kq = pool_get(&kqueue_pool, PR_WAITOK);
    553 	memset((char *)kq, 0, sizeof(struct kqueue));
    554 	TAILQ_INIT(&kq->kq_head);
    555 	fp->f_data = (caddr_t)kq;	/* store the kqueue with the fp */
    556 	*retval = fd;
    557 	if (fdp->fd_knlistsize < 0)
    558 		fdp->fd_knlistsize = 0;	/* this process has a kq */
    559 	kq->kq_fdp = fdp;
    560 	FILE_SET_MATURE(fp);
    561 	FILE_UNUSE(fp, p);		/* falloc() does FILE_USE() */
    562 	return (error);
    563 }
    564 
    565 /*
    566  * kevent(2) system call.
    567  */
    568 int
    569 sys_kevent(struct lwp *l, void *v, register_t *retval)
    570 {
    571 	struct sys_kevent_args /* {
    572 		syscallarg(int) fd;
    573 		syscallarg(const struct kevent *) changelist;
    574 		syscallarg(size_t) nchanges;
    575 		syscallarg(struct kevent *) eventlist;
    576 		syscallarg(size_t) nevents;
    577 		syscallarg(const struct timespec *) timeout;
    578 	} */ *uap = v;
    579 	struct kevent	*kevp;
    580 	struct kqueue	*kq;
    581 	struct file	*fp;
    582 	struct timespec	ts;
    583 	struct proc	*p;
    584 	size_t		i, n;
    585 	int		nerrors, error;
    586 
    587 	p = l->l_proc;
    588 	/* check that we're dealing with a kq */
    589 	fp = fd_getfile(p->p_fd, SCARG(uap, fd));
    590 	if (!fp || fp->f_type != DTYPE_KQUEUE)
    591 		return (EBADF);
    592 
    593 	FILE_USE(fp);
    594 
    595 	if (SCARG(uap, timeout) != NULL) {
    596 		error = copyin(SCARG(uap, timeout), &ts, sizeof(ts));
    597 		if (error)
    598 			goto done;
    599 		SCARG(uap, timeout) = &ts;
    600 	}
    601 
    602 	kq = (struct kqueue *)fp->f_data;
    603 	nerrors = 0;
    604 
    605 	/* traverse list of events to register */
    606 	while (SCARG(uap, nchanges) > 0) {
     607 		/* copyin a maximum of KQ_NEVENTS at each pass */
    608 		n = MIN(SCARG(uap, nchanges), KQ_NEVENTS);
    609 		error = copyin(SCARG(uap, changelist), kq->kq_kev,
    610 		    n * sizeof(struct kevent));
    611 		if (error)
    612 			goto done;
    613 		for (i = 0; i < n; i++) {
    614 			kevp = &kq->kq_kev[i];
    615 			kevp->flags &= ~EV_SYSFLAGS;
    616 			/* register each knote */
    617 			error = kqueue_register(kq, kevp, p);
    618 			if (error) {
    619 				if (SCARG(uap, nevents) != 0) {
    620 					kevp->flags = EV_ERROR;
    621 					kevp->data = error;
    622 					error = copyout((caddr_t)kevp,
    623 					    (caddr_t)SCARG(uap, eventlist),
    624 					    sizeof(*kevp));
    625 					if (error)
    626 						goto done;
    627 					SCARG(uap, eventlist)++;
    628 					SCARG(uap, nevents)--;
    629 					nerrors++;
    630 				} else {
    631 					goto done;
    632 				}
    633 			}
    634 		}
    635 		SCARG(uap, nchanges) -= n;	/* update the results */
    636 		SCARG(uap, changelist) += n;
    637 	}
    638 	if (nerrors) {
    639 		*retval = nerrors;
    640 		error = 0;
    641 		goto done;
    642 	}
    643 
    644 	/* actually scan through the events */
    645 	error = kqueue_scan(fp, SCARG(uap, nevents), SCARG(uap, eventlist),
    646 	    SCARG(uap, timeout), p, retval);
    647  done:
    648 	FILE_UNUSE(fp, p);
    649 	return (error);
    650 }
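
         /*
          * Example (illustrative only): typical userland usage of the two
          * system calls above, waiting for a descriptor to become readable.
          * The fd variable and error handling are hypothetical.
          *
          *	struct kevent ev;
          *	int kq;
          *
          *	kq = kqueue();
          *	EV_SET(&ev, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);
          *	(void)kevent(kq, &ev, 1, NULL, 0, NULL);    (register)
          *	(void)kevent(kq, NULL, 0, &ev, 1, NULL);    (wait for event)
          */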
    651 
    652 /*
    653  * Register a given kevent kev onto the kqueue
    654  */
    655 int
    656 kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
    657 {
    658 	const struct kfilter *kfilter;
    659 	struct filedesc	*fdp;
    660 	struct file	*fp;
    661 	struct knote	*kn;
    662 	int		s, error;
    663 
    664 	fdp = kq->kq_fdp;
    665 	fp = NULL;
    666 	kn = NULL;
    667 	error = 0;
    668 	kfilter = kfilter_byfilter(kev->filter);
    669 	if (kfilter == NULL || kfilter->filtops == NULL) {
    670 		/* filter not found nor implemented */
    671 		return (EINVAL);
    672 	}
    673 
    674 	/* search if knote already exists */
    675 	if (kfilter->filtops->f_isfd) {
    676 		/* monitoring a file descriptor */
    677 		if ((fp = fd_getfile(fdp, kev->ident)) == NULL)
    678 			return (EBADF);	/* validate descriptor */
    679 		FILE_USE(fp);
    680 
    681 		if (kev->ident < fdp->fd_knlistsize) {
    682 			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
    683 				if (kq == kn->kn_kq &&
    684 				    kev->filter == kn->kn_filter)
    685 					break;
    686 		}
    687 	} else {
    688 		/*
    689 		 * not monitoring a file descriptor, so
    690 		 * lookup knotes in internal hash table
    691 		 */
    692 		if (fdp->fd_knhashmask != 0) {
    693 			struct klist *list;
    694 
    695 			list = &fdp->fd_knhash[
    696 			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
    697 			SLIST_FOREACH(kn, list, kn_link)
    698 				if (kev->ident == kn->kn_id &&
    699 				    kq == kn->kn_kq &&
    700 				    kev->filter == kn->kn_filter)
    701 					break;
    702 		}
    703 	}
    704 
    705 	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
    706 		error = ENOENT;		/* filter not found */
    707 		goto done;
    708 	}
    709 
    710 	/*
    711 	 * kn now contains the matching knote, or NULL if no match
    712 	 */
    713 	if (kev->flags & EV_ADD) {
    714 		/* add knote */
    715 
    716 		if (kn == NULL) {
    717 			/* create new knote */
    718 			kn = pool_get(&knote_pool, PR_WAITOK);
    719 			if (kn == NULL) {
    720 				error = ENOMEM;
    721 				goto done;
    722 			}
    723 			kn->kn_fp = fp;
    724 			kn->kn_kq = kq;
    725 			kn->kn_fop = kfilter->filtops;
    726 
    727 			/*
    728 			 * apply reference count to knote structure, and
    729 			 * do not release it at the end of this routine.
    730 			 */
    731 			fp = NULL;
    732 
    733 			kn->kn_sfflags = kev->fflags;
    734 			kn->kn_sdata = kev->data;
    735 			kev->fflags = 0;
    736 			kev->data = 0;
    737 			kn->kn_kevent = *kev;
    738 
    739 			knote_attach(kn, fdp);
    740 			if ((error = kfilter->filtops->f_attach(kn)) != 0) {
    741 				knote_drop(kn, p, fdp);
    742 				goto done;
    743 			}
    744 		} else {
    745 			/* modify existing knote */
    746 
    747 			/*
    748 			 * The user may change some filter values after the
    749 			 * initial EV_ADD, but doing so will not reset any
    750 			 * filter which have already been triggered.
    751 			 */
    752 			kn->kn_sfflags = kev->fflags;
    753 			kn->kn_sdata = kev->data;
    754 			kn->kn_kevent.udata = kev->udata;
    755 		}
    756 
    757 		s = splhigh();
    758 		if (kn->kn_fop->f_event(kn, 0))
    759 			KNOTE_ACTIVATE(kn);
    760 		splx(s);
    761 
    762 	} else if (kev->flags & EV_DELETE) {	/* delete knote */
    763 		kn->kn_fop->f_detach(kn);
    764 		knote_drop(kn, p, fdp);
    765 		goto done;
    766 	}
    767 
    768 	/* disable knote */
    769 	if ((kev->flags & EV_DISABLE) &&
    770 	    ((kn->kn_status & KN_DISABLED) == 0)) {
    771 		s = splhigh();
    772 		kn->kn_status |= KN_DISABLED;
    773 		splx(s);
    774 	}
    775 
    776 	/* enable knote */
    777 	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
    778 		s = splhigh();
    779 		kn->kn_status &= ~KN_DISABLED;
    780 		if ((kn->kn_status & KN_ACTIVE) &&
    781 		    ((kn->kn_status & KN_QUEUED) == 0))
    782 			knote_enqueue(kn);
    783 		splx(s);
    784 	}
    785 
    786  done:
    787 	if (fp != NULL)
    788 		FILE_UNUSE(fp, p);
    789 	return (error);
    790 }
    791 
    792 /*
    793  * Scan through the list of events on fp (for a maximum of maxevents),
     794  * returning the results into ulistp. The timeout is determined by tsp:
     795  * if NULL, wait indefinitely; if zero-valued, perform a poll; otherwise
     796  * wait as appropriate.
    797  */
    798 static int
    799 kqueue_scan(struct file *fp, size_t maxevents, struct kevent *ulistp,
    800 	const struct timespec *tsp, struct proc *p, register_t *retval)
    801 {
    802 	struct kqueue	*kq;
    803 	struct kevent	*kevp;
    804 	struct timeval	atv;
    805 	struct knote	*kn, marker;
    806 	size_t		count, nkev;
    807 	int		s, timeout, error;
    808 
    809 	kq = (struct kqueue *)fp->f_data;
    810 	count = maxevents;
    811 	nkev = error = 0;
    812 	if (count == 0)
    813 		goto done;
    814 
    815 	if (tsp != NULL) {			/* timeout supplied */
    816 		TIMESPEC_TO_TIMEVAL(&atv, tsp);
    817 		if (itimerfix(&atv)) {
    818 			error = EINVAL;
    819 			goto done;
    820 		}
    821 		s = splclock();
    822 		timeradd(&atv, &time, &atv);	/* calc. time to wait until */
    823 		splx(s);
    824 		if (tsp->tv_sec == 0 && tsp->tv_nsec < 1000 /*<1us*/)
    825 			timeout = -1;		/* perform a poll */
    826 		else
    827 			timeout = hzto(&atv);	/* calculate hz till timeout */
    828 	} else {
    829 		atv.tv_sec = 0;			/* no timeout, wait forever */
    830 		atv.tv_usec = 0;
    831 		timeout = 0;
    832 	}
    833 	goto start;
    834 
    835  retry:
    836 	if (atv.tv_sec || atv.tv_usec) {	/* timeout requested */
    837 		s = splclock();
    838 		if (timercmp(&time, &atv, >=)) {
    839 			splx(s);
    840 			goto done;		/* timeout reached */
    841 		}
    842 		splx(s);
    843 		timeout = hzto(&atv);		/* recalc. timeout remaining */
    844 	}
    845 
    846  start:
    847 	kevp = kq->kq_kev;
    848 	s = splhigh();
    849 	if (kq->kq_count == 0) {
    850 		if (timeout < 0) {
    851 			error = EWOULDBLOCK;
    852 		} else {
    853 			kq->kq_state |= KQ_SLEEP;
    854 			error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout);
    855 		}
    856 		splx(s);
    857 		if (error == 0)
    858 			goto retry;
    859 		/* don't restart after signals... */
    860 		if (error == ERESTART)
    861 			error = EINTR;
    862 		else if (error == EWOULDBLOCK)
    863 			error = 0;
    864 		goto done;
    865 	}
    866 
     867 	/* mark end of knote list; the scan stops at this marker */
    868 	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
    869 
    870 	while (count) {				/* while user wants data ... */
    871 		kn = TAILQ_FIRST(&kq->kq_head);	/* get next knote */
    872 		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
    873 		if (kn == &marker) {		/* if it's our marker, stop */
    874 			splx(s);
    875 			if (count == maxevents)
    876 				goto retry;
    877 			goto done;
    878 		}
    879 		if (kn->kn_status & KN_DISABLED) {
    880 			/* don't want disabled events */
    881 			kn->kn_status &= ~KN_QUEUED;
    882 			kq->kq_count--;
    883 			continue;
    884 		}
    885 		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
    886 		    kn->kn_fop->f_event(kn, 0) == 0) {
    887 			/*
    888 			 * non-ONESHOT event that hasn't
    889 			 * triggered again, so de-queue.
    890 			 */
    891 			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
    892 			kq->kq_count--;
    893 			continue;
    894 		}
    895 		*kevp = kn->kn_kevent;
    896 		kevp++;
    897 		nkev++;
    898 		if (kn->kn_flags & EV_ONESHOT) {
    899 			/* delete ONESHOT events after retrieval */
    900 			kn->kn_status &= ~KN_QUEUED;
    901 			kq->kq_count--;
    902 			splx(s);
    903 			kn->kn_fop->f_detach(kn);
    904 			knote_drop(kn, p, p->p_fd);
    905 			s = splhigh();
    906 		} else if (kn->kn_flags & EV_CLEAR) {
    907 			/* clear state after retrieval */
    908 			kn->kn_data = 0;
    909 			kn->kn_fflags = 0;
    910 			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
    911 			kq->kq_count--;
    912 		} else {
    913 			/* add event back on list */
    914 			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
    915 		}
    916 		count--;
    917 		if (nkev == KQ_NEVENTS) {
    918 			/* do copyouts in KQ_NEVENTS chunks */
    919 			splx(s);
    920 			error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
    921 			    sizeof(struct kevent) * nkev);
    922 			ulistp += nkev;
    923 			nkev = 0;
    924 			kevp = kq->kq_kev;
    925 			s = splhigh();
    926 			if (error)
    927 				break;
    928 		}
    929 	}
    930 
    931 	/* remove marker */
    932 	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
    933 	splx(s);
    934  done:
    935 	if (nkev != 0) {
    936 		/* copyout remaining events */
    937 		error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
    938 		    sizeof(struct kevent) * nkev);
    939 	}
    940 	*retval = maxevents - count;
    941 
    942 	return (error);
    943 }
    944 
    945 /*
    946  * struct fileops read method for a kqueue descriptor.
    947  * Not implemented.
    948  * XXX: This could be expanded to call kqueue_scan, if desired.
    949  */
    950 /*ARGSUSED*/
    951 static int
    952 kqueue_read(struct file *fp, off_t *offset, struct uio *uio,
    953 	struct ucred *cred, int flags)
    954 {
    955 
    956 	return (ENXIO);
    957 }
    958 
    959 /*
    960  * struct fileops write method for a kqueue descriptor.
    961  * Not implemented.
    962  */
    963 /*ARGSUSED*/
    964 static int
    965 kqueue_write(struct file *fp, off_t *offset, struct uio *uio,
    966 	struct ucred *cred, int flags)
    967 {
    968 
    969 	return (ENXIO);
    970 }
    971 
    972 /*
    973  * struct fileops ioctl method for a kqueue descriptor.
    974  *
    975  * Two ioctls are currently supported. They both use struct kfilter_mapping:
    976  *	KFILTER_BYNAME		find name for filter, and return result in
    977  *				name, which is of size len.
    978  *	KFILTER_BYFILTER	find filter for name. len is ignored.
    979  */
    980 /*ARGSUSED*/
    981 static int
    982 kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
    983 {
    984 	struct kfilter_mapping	*km;
    985 	const struct kfilter	*kfilter;
    986 	char			*name;
    987 	int			error;
    988 
    989 	km = (struct kfilter_mapping *)data;
    990 	error = 0;
    991 
    992 	switch (com) {
    993 	case KFILTER_BYFILTER:	/* convert filter -> name */
    994 		kfilter = kfilter_byfilter(km->filter);
    995 		if (kfilter != NULL)
    996 			error = copyoutstr(kfilter->name, km->name, km->len,
    997 			    NULL);
    998 		else
    999 			error = ENOENT;
   1000 		break;
   1001 
   1002 	case KFILTER_BYNAME:	/* convert name -> filter */
   1003 		MALLOC(name, char *, KFILTER_MAXNAME, M_KEVENT, M_WAITOK);
   1004 		error = copyinstr(km->name, name, KFILTER_MAXNAME, NULL);
   1005 		if (error) {
   1006 			FREE(name, M_KEVENT);
   1007 			break;
   1008 		}
   1009 		kfilter = kfilter_byname(name);
   1010 		if (kfilter != NULL)
   1011 			km->filter = kfilter->filter;
   1012 		else
   1013 			error = ENOENT;
   1014 		FREE(name, M_KEVENT);
   1015 		break;
   1016 
   1017 	default:
   1018 		error = ENOTTY;
   1019 
   1020 	}
   1021 	return (error);
   1022 }
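
         /*
          * Example (illustrative only): userland can map a filter name to its
          * numeric id by issuing the ioctl on a kqueue descriptor (the kq
          * variable is hypothetical); on success km.filter holds the id.
          *
          *	struct kfilter_mapping km;
          *
          *	km.name = "EVFILT_EXAMPLE";
          *	if (ioctl(kq, KFILTER_BYNAME, &km) == 0)
          *		... use km.filter ...
          */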
   1023 
   1024 /*
   1025  * struct fileops fcntl method for a kqueue descriptor.
   1026  * Not implemented.
   1027  */
   1028 /*ARGSUSED*/
   1029 static int
   1030 kqueue_fcntl(struct file *fp, u_int com, caddr_t data, struct proc *p)
   1031 {
   1032 
   1033 	return (ENOTTY);
   1034 }
   1035 
   1036 /*
   1037  * struct fileops poll method for a kqueue descriptor.
   1038  * Determine if kqueue has events pending.
   1039  */
   1040 static int
   1041 kqueue_poll(struct file *fp, int events, struct proc *p)
   1042 {
   1043 	struct kqueue	*kq;
   1044 	int		revents;
   1045 
   1046 	kq = (struct kqueue *)fp->f_data;
   1047 	revents = 0;
   1048 	if (events & (POLLIN | POLLRDNORM)) {
   1049 		if (kq->kq_count) {
   1050 			revents |= events & (POLLIN | POLLRDNORM);
   1051 		} else {
   1052 			selrecord(p, &kq->kq_sel);
   1053 		}
   1054 	}
   1055 	return (revents);
   1056 }
   1057 
   1058 /*
   1059  * struct fileops stat method for a kqueue descriptor.
   1060  * Returns dummy info, with st_size being number of events pending.
   1061  */
   1062 static int
   1063 kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
   1064 {
   1065 	struct kqueue	*kq;
   1066 
   1067 	kq = (struct kqueue *)fp->f_data;
   1068 	memset((void *)st, 0, sizeof(*st));
   1069 	st->st_size = kq->kq_count;
   1070 	st->st_blksize = sizeof(struct kevent);
   1071 	st->st_mode = S_IFIFO;
   1072 	return (0);
   1073 }
   1074 
   1075 /*
   1076  * struct fileops close method for a kqueue descriptor.
   1077  * Cleans up kqueue.
   1078  */
   1079 static int
   1080 kqueue_close(struct file *fp, struct proc *p)
   1081 {
   1082 	struct kqueue	*kq;
   1083 	struct filedesc	*fdp;
   1084 	struct knote	**knp, *kn, *kn0;
   1085 	int		i;
   1086 
   1087 	kq = (struct kqueue *)fp->f_data;
   1088 	fdp = p->p_fd;
   1089 	for (i = 0; i < fdp->fd_knlistsize; i++) {
   1090 		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
   1091 		kn = *knp;
   1092 		while (kn != NULL) {
   1093 			kn0 = SLIST_NEXT(kn, kn_link);
   1094 			if (kq == kn->kn_kq) {
   1095 				kn->kn_fop->f_detach(kn);
   1096 				FILE_UNUSE(kn->kn_fp, p);
   1097 				pool_put(&knote_pool, kn);
   1098 				*knp = kn0;
   1099 			} else {
   1100 				knp = &SLIST_NEXT(kn, kn_link);
   1101 			}
   1102 			kn = kn0;
   1103 		}
   1104 	}
   1105 	if (fdp->fd_knhashmask != 0) {
   1106 		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
   1107 			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
   1108 			kn = *knp;
   1109 			while (kn != NULL) {
   1110 				kn0 = SLIST_NEXT(kn, kn_link);
   1111 				if (kq == kn->kn_kq) {
   1112 					kn->kn_fop->f_detach(kn);
   1113 					/* XXX non-fd release of kn->kn_ptr */
   1114 					pool_put(&knote_pool, kn);
   1115 					*knp = kn0;
   1116 				} else {
   1117 					knp = &SLIST_NEXT(kn, kn_link);
   1118 				}
   1119 				kn = kn0;
   1120 			}
   1121 		}
   1122 	}
   1123 	pool_put(&kqueue_pool, kq);
   1124 	fp->f_data = NULL;
   1125 
   1126 	return (0);
   1127 }
   1128 
   1129 /*
   1130  * wakeup a kqueue
   1131  */
   1132 static void
   1133 kqueue_wakeup(struct kqueue *kq)
   1134 {
   1135 
   1136 	if (kq->kq_state & KQ_SLEEP) {		/* if currently sleeping ...  */
   1137 		kq->kq_state &= ~KQ_SLEEP;
   1138 		wakeup(kq);			/* ... wakeup */
   1139 	}
   1140 
   1141 	/* Notify select/poll and kevent. */
   1142 	selnotify(&kq->kq_sel, 0);
   1143 }
   1144 
   1145 /*
   1146  * struct fileops kqfilter method for a kqueue descriptor.
   1147  * Event triggered when monitored kqueue changes.
   1148  */
   1149 /*ARGSUSED*/
   1150 static int
   1151 kqueue_kqfilter(struct file *fp, struct knote *kn)
   1152 {
   1153 	struct kqueue *kq;
   1154 
   1155 	KASSERT(fp == kn->kn_fp);
   1156 	kq = (struct kqueue *)kn->kn_fp->f_data;
   1157 	if (kn->kn_filter != EVFILT_READ)
   1158 		return (1);
   1159 	kn->kn_fop = &kqread_filtops;
   1160 	SLIST_INSERT_HEAD(&kq->kq_sel.sel_klist, kn, kn_selnext);
   1161 	return (0);
   1162 }
   1163 
   1164 
   1165 /*
   1166  * Walk down a list of knotes, activating them if their event has triggered.
   1167  */
   1168 void
   1169 knote(struct klist *list, long hint)
   1170 {
   1171 	struct knote *kn;
   1172 
   1173 	SLIST_FOREACH(kn, list, kn_selnext)
   1174 		if (kn->kn_fop->f_event(kn, hint))
   1175 			KNOTE_ACTIVATE(kn);
   1176 }
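
         /*
          * Event sources normally reach this through the KNOTE() macro on
          * their klist whenever their state changes, e.g. (hypothetical
          * call site):
          *
          *	KNOTE(&sc->sc_rsel.sel_klist, 0);
          */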
   1177 
   1178 /*
   1179  * Remove all knotes from a specified klist
   1180  */
   1181 void
   1182 knote_remove(struct proc *p, struct klist *list)
   1183 {
   1184 	struct knote *kn;
   1185 
   1186 	while ((kn = SLIST_FIRST(list)) != NULL) {
   1187 		kn->kn_fop->f_detach(kn);
   1188 		knote_drop(kn, p, p->p_fd);
   1189 	}
   1190 }
   1191 
   1192 /*
   1193  * Remove all knotes referencing a specified fd
   1194  */
   1195 void
   1196 knote_fdclose(struct proc *p, int fd)
   1197 {
   1198 	struct filedesc	*fdp;
   1199 	struct klist	*list;
   1200 
   1201 	fdp = p->p_fd;
   1202 	list = &fdp->fd_knlist[fd];
   1203 	knote_remove(p, list);
   1204 }
   1205 
   1206 /*
   1207  * Attach a new knote to a file descriptor
   1208  */
   1209 static void
   1210 knote_attach(struct knote *kn, struct filedesc *fdp)
   1211 {
   1212 	struct klist	*list;
   1213 	int		size;
   1214 
   1215 	if (! kn->kn_fop->f_isfd) {
   1216 		/* if knote is not on an fd, store on internal hash table */
   1217 		if (fdp->fd_knhashmask == 0)
   1218 			fdp->fd_knhash = hashinit(KN_HASHSIZE, HASH_LIST,
   1219 			    M_KEVENT, M_WAITOK, &fdp->fd_knhashmask);
   1220 		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
   1221 		goto done;
   1222 	}
   1223 
   1224 	/*
   1225 	 * otherwise, knote is on an fd.
   1226 	 * knotes are stored in fd_knlist indexed by kn->kn_id.
   1227 	 */
   1228 	if (fdp->fd_knlistsize <= kn->kn_id) {
   1229 		/* expand list, it's too small */
   1230 		size = fdp->fd_knlistsize;
   1231 		while (size <= kn->kn_id) {
   1232 			/* grow in KQ_EXTENT chunks */
   1233 			size += KQ_EXTENT;
   1234 		}
   1235 		list = malloc(size * sizeof(struct klist *), M_KEVENT,M_WAITOK);
   1236 		if (fdp->fd_knlist) {
   1237 			/* copy existing knlist */
   1238 			memcpy((caddr_t)list, (caddr_t)fdp->fd_knlist,
   1239 			    fdp->fd_knlistsize * sizeof(struct klist *));
   1240 		}
   1241 		/*
   1242 		 * Zero new memory. Stylistically, SLIST_INIT() should be
    1243 		 * used here, but that does the same thing as the memset() anyway.
   1244 		 */
   1245 		memset(&list[fdp->fd_knlistsize], 0,
   1246 		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
   1247 
   1248 		/* switch to new knlist */
   1249 		if (fdp->fd_knlist != NULL)
   1250 			free(fdp->fd_knlist, M_KEVENT);
   1251 		fdp->fd_knlistsize = size;
   1252 		fdp->fd_knlist = list;
   1253 	}
   1254 
   1255 	/* get list head for this fd */
   1256 	list = &fdp->fd_knlist[kn->kn_id];
   1257  done:
   1258 	/* add new knote */
   1259 	SLIST_INSERT_HEAD(list, kn, kn_link);
   1260 	kn->kn_status = 0;
   1261 }
   1262 
   1263 /*
   1264  * Drop knote.
   1265  * Should be called at spl == 0, since we don't want to hold spl
   1266  * while calling FILE_UNUSE and free.
   1267  */
   1268 static void
   1269 knote_drop(struct knote *kn, struct proc *p, struct filedesc *fdp)
   1270 {
   1271 	struct klist	*list;
   1272 
   1273 	if (kn->kn_fop->f_isfd)
   1274 		list = &fdp->fd_knlist[kn->kn_id];
   1275 	else
   1276 		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
   1277 
   1278 	SLIST_REMOVE(list, kn, knote, kn_link);
   1279 	if (kn->kn_status & KN_QUEUED)
   1280 		knote_dequeue(kn);
   1281 	if (kn->kn_fop->f_isfd)
   1282 		FILE_UNUSE(kn->kn_fp, p);
   1283 	pool_put(&knote_pool, kn);
   1284 }
   1285 
   1286 
   1287 /*
   1288  * Queue new event for knote.
   1289  */
   1290 static void
   1291 knote_enqueue(struct knote *kn)
   1292 {
   1293 	struct kqueue	*kq;
   1294 	int		s;
   1295 
   1296 	kq = kn->kn_kq;
   1297 	s = splhigh();
   1298 	KASSERT((kn->kn_status & KN_QUEUED) == 0);
   1299 
   1300 	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
   1301 	kn->kn_status |= KN_QUEUED;
   1302 	kq->kq_count++;
   1303 	splx(s);
   1304 	kqueue_wakeup(kq);
   1305 }
   1306 
   1307 /*
   1308  * Dequeue event for knote.
   1309  */
   1310 static void
   1311 knote_dequeue(struct knote *kn)
   1312 {
   1313 	struct kqueue	*kq;
   1314 	int		s;
   1315 
   1316 	kq = kn->kn_kq;
   1317 	s = splhigh();
   1318 	KASSERT(kn->kn_status & KN_QUEUED);
   1319 
   1320 	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
   1321 	kn->kn_status &= ~KN_QUEUED;
   1322 	kq->kq_count--;
   1323 	splx(s);
   1324 }
   1325