      1 /*	$NetBSD: kern_event.c,v 1.1.1.1.2.6 2001/09/08 02:33:48 thorpej Exp $	*/
      2 /*-
      3  * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon (at) FreeBSD.org>
      4  * All rights reserved.
      5  *
      6  * Redistribution and use in source and binary forms, with or without
      7  * modification, are permitted provided that the following conditions
      8  * are met:
      9  * 1. Redistributions of source code must retain the above copyright
     10  *    notice, this list of conditions and the following disclaimer.
     11  * 2. Redistributions in binary form must reproduce the above copyright
     12  *    notice, this list of conditions and the following disclaimer in the
     13  *    documentation and/or other materials provided with the distribution.
     14  *
     15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
     16  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     17  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     18  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
     19  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     20  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     21  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     22  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     23  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     24  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     25  * SUCH DAMAGE.
     26  *
     27  * $FreeBSD: src/sys/kern/kern_event.c,v 1.27 2001/07/05 17:10:44 rwatson Exp $
     28  */
     29 
     30 #include <sys/param.h>
     31 #include <sys/systm.h>
     32 #include <sys/kernel.h>
     33 #include <sys/proc.h>
     34 #include <sys/malloc.h>
     35 #include <sys/unistd.h>
     36 #include <sys/file.h>
     37 #include <sys/fcntl.h>
     38 #include <sys/select.h>
     39 #include <sys/queue.h>
     40 #include <sys/event.h>
     41 #include <sys/eventvar.h>
     42 #include <sys/poll.h>
     43 #include <sys/pool.h>
     44 #include <sys/protosw.h>
     45 #include <sys/socket.h>
     46 #include <sys/socketvar.h>
     47 #include <sys/stat.h>
     48 #include <sys/uio.h>
     49 #include <sys/mount.h>
     50 #include <sys/filedesc.h>
     51 #include <sys/syscallargs.h>
     52 
     53 static int	kqueue_scan(struct file *fp, int maxevents,
     54 		    struct kevent *ulistp, const struct timespec *timeout,
     55 		    struct proc *p, register_t *retval);
     56 static void	kqueue_wakeup(struct kqueue *kq);
     57 
     58 static int	kqueue_read(struct file *fp, off_t *offset, struct uio *uio,
     59 		    struct ucred *cred, int flags);
     60 static int	kqueue_write(struct file *fp, off_t *offset, struct uio *uio,
     61 		    struct ucred *cred, int flags);
     62 static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
     63 		    struct proc *p);
     64 static int	kqueue_fcntl(struct file *fp, u_int com, caddr_t data,
     65 		    struct proc *p);
     66 static int	kqueue_poll(struct file *fp, int events, struct proc *p);
     67 static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
     68 static int	kqueue_stat(struct file *fp, struct stat *sp, struct proc *p);
     69 static int	kqueue_close(struct file *fp, struct proc *p);
     70 
     71 static struct fileops kqueueops = {
     72 	kqueue_read, kqueue_write, kqueue_ioctl, kqueue_fcntl, kqueue_poll,
     73 	kqueue_stat, kqueue_close, kqueue_kqfilter
     74 };
     75 
     76 static void	knote_attach(struct knote *kn, struct filedesc *fdp);
     77 static void	knote_drop(struct knote *kn, struct proc *p);
     78 static void	knote_enqueue(struct knote *kn);
     79 static void	knote_dequeue(struct knote *kn);
     80 
     81 static void	filt_kqdetach(struct knote *kn);
     82 static int	filt_kqueue(struct knote *kn, long hint);
     83 static int	filt_procattach(struct knote *kn);
     84 static void	filt_procdetach(struct knote *kn);
     85 static int	filt_proc(struct knote *kn, long hint);
     86 static int	filt_fileattach(struct knote *kn);
     87 
     88 static const struct filterops kqread_filtops =
     89 	{ 1, NULL, filt_kqdetach, filt_kqueue };
     90 static const struct filterops proc_filtops =
     91 	{ 0, filt_procattach, filt_procdetach, filt_proc };
     92 static const struct filterops file_filtops =
     93 	{ 1, filt_fileattach, NULL, NULL };
     94 
     95 struct pool	kqueue_pool;
     96 struct pool	knote_pool;
     97 
     98 #define	KNOTE_ACTIVATE(kn)						\
     99 do {									\
    100 	kn->kn_status |= KN_ACTIVE;					\
    101 	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
    102 		knote_enqueue(kn);					\
     103 } while (0)
    104 
    105 #define	KN_HASHSIZE		64		/* XXX should be tunable */
    106 #define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))
    107 
    108 extern const struct filterops sig_filtops;
    109 
    110 /*
     111  * Table for all system-defined filters.
    112  * These should be listed in the numeric order of the EVFILT_* defines.
    113  * If filtops is NULL, the filter isn't implemented in NetBSD.
    114  * End of list is when name is NULL.
    115  */
    116 struct kfilter {
    117 	const char	 *name;		/* name of filter */
    118 	uint32_t	  filter;	/* id of filter */
    119 	const struct filterops *filtops;/* operations for filter */
    120 };
    121 
    122 		/* System defined filters */
    123 static const struct kfilter sys_kfilters[] = {
    124 	{ "EVFILT_READ",	EVFILT_READ,	&file_filtops },
    125 	{ "EVFILT_WRITE",	EVFILT_WRITE,	&file_filtops },
    126 	{ "EVFILT_AIO",		EVFILT_AIO,	NULL },
    127 	{ "EVFILT_VNODE",	EVFILT_VNODE,	&file_filtops },
    128 	{ "EVFILT_PROC",	EVFILT_PROC,	&proc_filtops },
    129 	{ "EVFILT_SIGNAL",	EVFILT_SIGNAL,	&sig_filtops },
    130 	{ NULL,			0,		NULL },	/* end of list */
    131 };
    132 
    133 		/* User defined kfilters */
    134 static struct kfilter	*user_kfilters;		/* array */
    135 static int		user_kfilterc;		/* current offset */
    136 static int		user_kfiltermaxc;	/* max size so far */
    137 
    138 /*
    139  * kqueue_init:
    140  *
    141  *	Initialize the kqueue/knote facility.
    142  */
    143 void
    144 kqueue_init(void)
    145 {
    146 
    147 	pool_init(&kqueue_pool, sizeof(struct kqueue), 0, 0, 0, "kqueuepl",
    148 	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_KEVENT);
    149 	pool_init(&knote_pool, sizeof(struct knote), 0, 0, 0, "knotepl",
    150 	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_KEVENT);
    151 }
    152 
    153 /*
    154  * Find kfilter entry by name, or NULL if not found.
    155  */
    156 static const struct kfilter *
    157 kfilter_byname_sys(const char *name)
    158 {
    159 	int i;
    160 
    161 	for (i = 0; sys_kfilters[i].name != NULL; i++) {
    162 		if (strcmp(name, sys_kfilters[i].name) == 0)
    163 			return (&sys_kfilters[i]);
    164 	}
    165 	return (NULL);
    166 }
    167 
    168 static struct kfilter *
    169 kfilter_byname_user(const char *name)
    170 {
    171 	int i;
    172 
     173 	for (i = 0; i < user_kfilterc; i++) {
     174 		if (user_kfilters[i].name[0] != '\0' &&
     175 		    strcmp(name, user_kfilters[i].name) == 0)
     176 			return (&user_kfilters[i]);
     177 	}
    178 	return (NULL);
    179 }
    180 
    181 static const struct kfilter *
    182 kfilter_byname(const char *name)
    183 {
    184 	const struct kfilter *kfilter;
    185 
    186 	if ((kfilter = kfilter_byname_sys(name)) != NULL)
    187 		return (kfilter);
    188 
    189 	return (kfilter_byname_user(name));
    190 }
    191 
    192 /*
    193  * Find kfilter entry by filter id, or NULL if not found.
    194  * Assumes entries are indexed in filter id order, for speed.
    195  */
    196 static const struct kfilter *
    197 kfilter_byfilter(uint32_t filter)
    198 {
    199 	const struct kfilter *kfilter;
    200 
    201 	if (filter < EVFILT_SYSCOUNT)	/* it's a system filter */
    202 		kfilter = &sys_kfilters[filter];
    203 	else if (user_kfilters != NULL &&
    204 	    filter < EVFILT_SYSCOUNT + user_kfilterc)
    205 					/* it's a user filter */
    206 		kfilter = &user_kfilters[filter - EVFILT_SYSCOUNT];
    207 	else
    208 		return (NULL);		/* out of range */
    209 	KASSERT(kfilter->filter == filter);	/* sanity check! */
    210 	return (kfilter);
    211 }
    212 
    213 /*
    214  * Register a new kfilter. Stores the entry in user_kfilters.
    215  * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
     216  * If retfilter != NULL, the new filter id is returned in it.
    217  */
    218 int
    219 kfilter_register(const char *name, const struct filterops *filtops,
    220     int *retfilter)
    221 {
    222 	struct kfilter *kfilter;
    223 	void *space;
    224 	int len;
    225 
    226 	if (name == NULL || name[0] == '\0' || filtops == NULL)
    227 		return (EINVAL);	/* invalid args */
    228 	if (kfilter_byname(name) != NULL)
    229 		return (EEXIST);	/* already exists */
    230 	if (user_kfilterc > 0xffffffff - EVFILT_SYSCOUNT)
    231 		return (EINVAL);	/* too many */
    232 
    233 					/* need to grow user_kfilters */
    234 	if (user_kfilterc + 1 > user_kfiltermaxc) {
    235 					/*
    236 					 * grow in KFILTER_EXTENT chunks. use
    237 					 * malloc(9), because we want to
    238 					 * traverse user_kfilters as an array.
    239 					 */
    240 		user_kfiltermaxc += KFILTER_EXTENT;
     241 		kfilter = malloc(user_kfiltermaxc * sizeof(struct kfilter),
     242 		    M_KEVENT, M_WAITOK);
     243 					/* copy existing user_kfilters */
     244 		if (user_kfilters != NULL)
     245 			memcpy((caddr_t)kfilter, (caddr_t)user_kfilters,
     246 			    user_kfilterc * sizeof(struct kfilter));
     247 					/* zero new sections */
     248 		memset((caddr_t)kfilter +
     249 		    user_kfilterc * sizeof(struct kfilter), 0,
     250 		    (user_kfiltermaxc - user_kfilterc) *
     251 		    sizeof(struct kfilter));
    252 					/* switch to new kfilter */
    253 		if (user_kfilters != NULL)
    254 			FREE(user_kfilters, M_KEVENT);
    255 		user_kfilters = kfilter;
    256 	}
    257 	len = strlen(name) + 1;		/* copy name */
    258 	space = malloc(len, M_KEVENT, M_WAITOK);
    259 	memcpy(space, name, len);
    260 	user_kfilters[user_kfilterc].name = space;
    261 
    262 	user_kfilters[user_kfilterc].filter = user_kfilterc + EVFILT_SYSCOUNT;
    263 
    264 	len = sizeof(struct filterops);	/* copy filtops */
    265 	space = malloc(len, M_KEVENT, M_WAITOK);
    266 	memcpy(space, filtops, len);
    267 	user_kfilters[user_kfilterc].filtops = space;
    268 
    269 	if (retfilter != NULL)
    270 		*retfilter = user_kfilters[user_kfilterc].filter;
    271 	user_kfilterc++;		/* finally, increment count */
    272 	return (0);
    273 }
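
/*
 * Example (a sketch; the "myfilt_*" names are hypothetical): how a
 * kernel component might register, and later unregister, its own
 * filter.  The filterops initializer follows the
 * { f_isfd, f_attach, f_detach, f_event } layout used above.
 *
 *	static const struct filterops myfilt_filtops =
 *		{ 1, myfilt_attach, myfilt_detach, myfilt_event };
 *	int myfilt_id, error;
 *
 *	error = kfilter_register("EVFILT_MYFILT", &myfilt_filtops,
 *	    &myfilt_id);
 *	...
 *	error = kfilter_unregister("EVFILT_MYFILT");
 */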
    274 
    275 /*
    276  * Unregister a kfilter previously registered with kfilter_register.
    277  * This retains the filter id, but clears the name and frees filtops (filter
    278  * operations), so that the number isn't reused during a boot.
    279  * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
    280  */
    281 int
    282 kfilter_unregister(const char *name)
    283 {
    284 	struct kfilter *kfilter;
    285 
    286 	if (name == NULL || name[0] == '\0')
    287 		return (EINVAL);	/* invalid name */
    288 
    289 	if (kfilter_byname_sys(name) != NULL)
     290 		return (EINVAL);	/* can't unregister system filters */
    291 
    292 	kfilter = kfilter_byname_user(name);
    293 	if (kfilter == NULL)		/* not found */
    294 		return (ENOENT);
    295 
    296 	if (kfilter->name[0] != '\0') {
     297 		/* XXX Cast away const (but we know it's safe). */
    298 		free((void *) kfilter->name, M_KEVENT);
    299 		kfilter->name = "";	/* mark as `not implemented' */
    300 	}
    301 	if (kfilter->filtops != NULL) {
     302 		/* XXX Cast away const (but we know it's safe). */
    303 		free((void *) kfilter->filtops, M_KEVENT);
    304 		kfilter->filtops = NULL; /* mark as `not implemented' */
    305 	}
    306 	return (0);
    307 }
    308 
    309 
    310 /*
    311  * Filter attach method for EVFILT_READ and EVFILT_WRITE on normal file
    312  * descriptors. Calls struct fileops kqfilter method for given file descriptor.
    313  */
    314 static int
    315 filt_fileattach(struct knote *kn)
    316 {
    317 	struct file *fp;
    318 
    319 	fp = kn->kn_fp;
    320 	return ((*fp->f_ops->fo_kqfilter)(fp, kn));
    321 }
    322 
    323 /*
    324  * Filter detach method for EVFILT_READ on kqueue descriptor.
    325  */
    326 static void
    327 filt_kqdetach(struct knote *kn)
    328 {
    329 	struct kqueue *kq;
    330 
    331 	kq = (struct kqueue *)kn->kn_fp->f_data;
    332 	SLIST_REMOVE(&kq->kq_sel.si_klist, kn, knote, kn_selnext);
    333 }
    334 
    335 /*
    336  * Filter event method for EVFILT_READ on kqueue descriptor.
    337  */
    338 /*ARGSUSED*/
    339 static int
    340 filt_kqueue(struct knote *kn, long hint)
    341 {
    342 	struct kqueue *kq;
    343 
    344 	kq = (struct kqueue *)kn->kn_fp->f_data;
    345 	kn->kn_data = kq->kq_count;
    346 	return (kn->kn_data > 0);
    347 }
    348 
    349 /*
    350  * Filter attach method for EVFILT_PROC.
    351  */
    352 static int
    353 filt_procattach(struct knote *kn)
    354 {
    355 	struct proc *p;
    356 
    357 	p = pfind(kn->kn_id);
    358 	if (p == NULL)
    359 		return (ESRCH);
    360 
    361 	kn->kn_ptr.p_proc = p;
    362 	kn->kn_flags |= EV_CLEAR;	/* automatically set */
    363 
    364 	/*
    365 	 * internal flag indicating registration done by kernel
    366 	 */
    367 	if (kn->kn_flags & EV_FLAG1) {
    368 		kn->kn_data = kn->kn_sdata;	/* ppid */
    369 		kn->kn_fflags = NOTE_CHILD;
    370 		kn->kn_flags &= ~EV_FLAG1;
    371 	}
    372 
    373 	/* XXXLUKEM */
    374 	/* XXX lock the proc here while adding to the list? */
    375 	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
    376 
    377 	return (0);
    378 }
    379 
    380 /*
    381  * Filter detach method for EVFILT_PROC.
    382  *
    383  * The knote may be attached to a different process, which may exit,
    384  * leaving nothing for the knote to be attached to.  So when the process
    385  * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
    386  * it will be deleted when read out.  However, as part of the knote deletion,
    387  * this routine is called, so a check is needed to avoid actually performing
    388  * a detach, because the original process does not exist any more.
    389  */
    390 static void
    391 filt_procdetach(struct knote *kn)
    392 {
    393 	struct proc *p;
    394 
    395 	p = kn->kn_ptr.p_proc;
    396 	if (kn->kn_status & KN_DETACHED)
    397 		return;
    398 
    399 	/* XXXLUKEM */
    400 	/* XXX locking?  this might modify another process. */
    401 	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
    402 }
    403 
    404 /*
    405  * Filter event method for EVFILT_PROC.
    406  */
    407 static int
    408 filt_proc(struct knote *kn, long hint)
    409 {
    410 	u_int event;
    411 
    412 	/*
    413 	 * mask off extra data
    414 	 */
    415 	event = (u_int)hint & NOTE_PCTRLMASK;
    416 
    417 	/*
    418 	 * if the user is interested in this event, record it.
    419 	 */
    420 	if (kn->kn_sfflags & event)
    421 		kn->kn_fflags |= event;
    422 
    423 	/*
    424 	 * process is gone, so flag the event as finished.
    425 	 */
    426 	if (event == NOTE_EXIT) {
    427 		kn->kn_status |= KN_DETACHED;
    428 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
    429 		return (1);
    430 	}
    431 
    432 	/*
    433 	 * process forked, and user wants to track the new process,
    434 	 * so attach a new knote to it, and immediately report an
    435 	 * event with the parent's pid.
    436 	 */
    437 	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
    438 		struct kevent kev;
    439 		int error;
    440 
    441 		/*
    442 		 * register knote with new process.
    443 		 */
    444 		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
    445 		kev.filter = kn->kn_filter;
    446 		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
    447 		kev.fflags = kn->kn_sfflags;
    448 		kev.data = kn->kn_id;			/* parent */
    449 		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
    450 		error = kqueue_register(kn->kn_kq, &kev, NULL);
    451 		if (error)
    452 			kn->kn_fflags |= NOTE_TRACKERR;
    453 	}
    454 
    455 	return (kn->kn_fflags != 0);
    456 }
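
/*
 * Example (a userland sketch, assuming a kqueue descriptor "kq" and a
 * target "pid"): watch a process for exit and track its children,
 * which exercises the NOTE_FORK/NOTE_TRACK path above.
 *
 *	struct kevent kev;
 *
 *	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ENABLE,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *	(void)kevent(kq, &kev, 1, NULL, 0, NULL);
 */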
    457 
    458 /*
    459  * kqueue(2) system call.
    460  */
    461 int
    462 sys_kqueue(struct proc *p, void *v, register_t *retval)
    463 {
    464 	struct filedesc	*fdp;
    465 	struct kqueue	*kq;
    466 	struct file	*fp;
    467 	int		fd, error;
    468 
    469 	fdp = p->p_fd;
    470 	error = falloc(p, &fp, &fd);	/* setup a new file descriptor */
    471 	if (error)
    472 		return (error);
    473 	fp->f_flag = FREAD | FWRITE;
    474 	fp->f_type = DTYPE_KQUEUE;
    475 	fp->f_ops = &kqueueops;
    476 	kq = pool_get(&kqueue_pool, PR_WAITOK);
    477 	memset((char *)kq, 0, sizeof(struct kqueue));
    478 	TAILQ_INIT(&kq->kq_head);
    479 	fp->f_data = (caddr_t)kq;	/* store the kqueue with the fp */
    480 	*retval = fd;
    481 	if (fdp->fd_knlistsize < 0)
    482 		fdp->fd_knlistsize = 0;	/* this process has a kq */
    483 	kq->kq_fdp = fdp;
    484 	FILE_SET_MATURE(fp);
    485 	FILE_UNUSE(fp, p);		/* falloc() does FILE_USE() */
    486 	return (error);
    487 }
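
/*
 * Example (a userland sketch): creating the descriptor that the code
 * above sets up.
 *
 *	int kq;
 *
 *	if ((kq = kqueue()) == -1)
 *		err(1, "kqueue");
 */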
    488 
    489 /*
    490  * kevent(2) system call.
    491  */
    492 int
    493 sys_kevent(struct proc *p, void *v, register_t *retval)
    494 {
    495 	struct sys_kevent_args /* {
    496 		syscallarg(int) fd;
    497 		syscallarg(const struct kevent *) changelist;
    498 		syscallarg(int) nchanges;
    499 		syscallarg(struct kevent *) eventlist;
    500 		syscallarg(int) nevents;
    501 		syscallarg(const struct timespec *) timeout;
    502 	} */ *uap = v;
    503 	struct filedesc	*fdp;
    504 	struct kevent	*kevp;
    505 	struct kqueue	*kq;
    506 	struct file	*fp;
    507 	struct timespec	ts;
    508 	int		i, n, nerrors, error;
    509 
    510 	fdp = p->p_fd;			/* check that we're dealing with a kq */
    511 	if ((u_int)SCARG(uap, fd) >= fdp->fd_nfiles ||
    512 	    (fp = fdp->fd_ofiles[SCARG(uap, fd)]) == NULL ||
    513 	    (fp->f_type != DTYPE_KQUEUE))
    514 		return (EBADF);
    515 
    516 	FILE_USE(fp);
    517 
    518 	if (SCARG(uap, timeout) != NULL) {
    519 		error = copyin(SCARG(uap, timeout), &ts, sizeof(ts));
    520 		if (error)
    521 			goto done;
    522 		SCARG(uap, timeout) = &ts;
    523 	}
    524 
    525 	kq = (struct kqueue *)fp->f_data;
    526 	nerrors = 0;
    527 
    528 				/* traverse list of events to register */
    529 	while (SCARG(uap, nchanges) > 0) {
     530 				/* copyin a maximum of KQ_NEVENTS at each pass */
    531 		n = MIN(SCARG(uap, nchanges), KQ_NEVENTS);
    532 		error = copyin(SCARG(uap, changelist), kq->kq_kev,
    533 		    n * sizeof(struct kevent));
    534 		if (error)
    535 			goto done;
    536 		for (i = 0; i < n; i++) {
    537 			kevp = &kq->kq_kev[i];
    538 			kevp->flags &= ~EV_SYSFLAGS;
    539 					/* register each knote */
    540 			error = kqueue_register(kq, kevp, p);
    541 			if (error) {
    542 				if (SCARG(uap, nevents) != 0) {
    543 					kevp->flags = EV_ERROR;
    544 					kevp->data = error;
    545 					error = copyout((caddr_t)kevp,
    546 					    (caddr_t)SCARG(uap, eventlist),
    547 					    sizeof(*kevp));
    548 					if (error)
    549 						goto done;
    550 					SCARG(uap, eventlist)++;
    551 					SCARG(uap, nevents)--;
    552 					nerrors++;
    553 				} else {
    554 					goto done;
    555 				}
    556 			}
    557 		}
    558 		SCARG(uap, nchanges) -= n;	/* update the results */
    559 		SCARG(uap, changelist) += n;
    560 	}
    561 	if (nerrors) {
    562 		*retval = nerrors;
    563 		error = 0;
    564 		goto done;
    565 	}
    566 
    567 					/* actually scan through the events */
    568 	error = kqueue_scan(fp, SCARG(uap, nevents), SCARG(uap, eventlist),
    569 	    SCARG(uap, timeout), p, retval);
    570  done:
    571 	FILE_UNUSE(fp, p);
    572 	return (error);
    573 }
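
/*
 * Example (a userland sketch, assuming valid descriptors "kq" and
 * "fd"): register interest in reads on fd, then block until one event
 * is returned.
 *
 *	struct kevent chg, ev;
 *	int n;
 *
 *	EV_SET(&chg, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, NULL);
 *	n = kevent(kq, &chg, 1, &ev, 1, NULL);
 *	if (n == 1)
 *		printf("%lld bytes readable\n", (long long)ev.data);
 */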
    574 
    575 /*
    576  * Register a given kevent kev onto the kqueue
    577  */
    578 int
    579 kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
    580 {
    581 	const struct kfilter *kfilter;
    582 	struct filedesc	*fdp;
    583 	struct file	*fp;
    584 	struct knote	*kn;
    585 	int		s, error;
    586 
    587 	fdp = kq->kq_fdp;
    588 	fp = NULL;
    589 	kn = NULL;
    590 	error = 0;
    591 	kfilter = kfilter_byfilter(kev->filter);
    592 	if (kfilter == NULL || kfilter->filtops == NULL)
     593 		return (EINVAL);	/* filter not found or not implemented */
    594 
    595 					/* search if knote already exists */
    596 	if (kfilter->filtops->f_isfd) {	/* monitoring a file descriptor */
    597 		if ((u_int)kev->ident >= fdp->fd_nfiles ||
    598 		    (fp = fdp->fd_ofiles[kev->ident]) == NULL)
    599 			return (EBADF);	/* validate descriptor */
    600 		FILE_USE(fp);
    601 
    602 		if (kev->ident < fdp->fd_knlistsize) {
    603 			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
    604 				if (kq == kn->kn_kq &&
    605 				    kev->filter == kn->kn_filter)
    606 					break;
    607 		}
    608 	} else {
    609 					/*
    610 					 * not monitoring a file descriptor, so
    611 					 * lookup knotes in internal hash table
    612 					 */
    613 		if (fdp->fd_knhashmask != 0) {
    614 			struct klist *list;
    615 
    616 			list = &fdp->fd_knhash[
    617 			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
    618 			SLIST_FOREACH(kn, list, kn_link)
    619 				if (kev->ident == kn->kn_id &&
    620 				    kq == kn->kn_kq &&
    621 				    kev->filter == kn->kn_filter)
    622 					break;
    623 		}
    624 	}
    625 
    626 	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
    627 		error = ENOENT;		/* filter not found */
    628 		goto done;
    629 	}
    630 
    631 	/*
    632 	 * kn now contains the matching knote, or NULL if no match
    633 	 */
    634 	if (kev->flags & EV_ADD) {		/* add knote */
    635 
    636 		if (kn == NULL) {		/* create new knote */
    637 			kn = pool_get(&knote_pool, PR_WAITOK);
    638 			if (kn == NULL) {
    639 				error = ENOMEM;
    640 				goto done;
    641 			}
    642 			kn->kn_fp = fp;
    643 			kn->kn_kq = kq;
    644 			kn->kn_fop = kfilter->filtops;
    645 
    646 			/*
    647 			 * apply reference count to knote structure, and
    648 			 * do not release it at the end of this routine.
    649 			 */
    650 			fp = NULL;
    651 
    652 			kn->kn_sfflags = kev->fflags;
    653 			kn->kn_sdata = kev->data;
    654 			kev->fflags = 0;
    655 			kev->data = 0;
    656 			kn->kn_kevent = *kev;
    657 
    658 			knote_attach(kn, fdp);
    659 			if ((error = kfilter->filtops->f_attach(kn)) != 0) {
    660 				knote_drop(kn, p);
    661 				goto done;
    662 			}
    663 		} else {			/* modify existing knote */
    664 			/*
    665 			 * The user may change some filter values after the
     666 			 * initial EV_ADD, but doing so will not reset any
     667 			 * filters that have already been triggered.
    668 			 */
    669 			kn->kn_sfflags = kev->fflags;
    670 			kn->kn_sdata = kev->data;
    671 			kn->kn_kevent.udata = kev->udata;
    672 		}
    673 
    674 		s = splhigh();
    675 		if (kn->kn_fop->f_event(kn, 0))
    676 			KNOTE_ACTIVATE(kn);
    677 		splx(s);
    678 
    679 	} else if (kev->flags & EV_DELETE) {	/* delete knote */
    680 		kn->kn_fop->f_detach(kn);
    681 		knote_drop(kn, p);
    682 		goto done;
    683 	}
    684 
    685 						/* disable knote */
    686 	if ((kev->flags & EV_DISABLE) &&
    687 	    ((kn->kn_status & KN_DISABLED) == 0)) {
    688 		s = splhigh();
    689 		kn->kn_status |= KN_DISABLED;
    690 		splx(s);
    691 	}
    692 
    693 						/* enable knote */
    694 	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
    695 		s = splhigh();
    696 		kn->kn_status &= ~KN_DISABLED;
    697 		if ((kn->kn_status & KN_ACTIVE) &&
    698 		    ((kn->kn_status & KN_QUEUED) == 0))
    699 			knote_enqueue(kn);
    700 		splx(s);
    701 	}
    702 
    703  done:
    704 	if (fp != NULL)
    705 		FILE_UNUSE(fp, p);
    706 	return (error);
    707 }
    708 
    709 /*
    710  * Scan through the list of events on fp (for a maximum of maxevents),
     711  * returning the results into ulistp. The timeout is determined by tsp:
     712  * if NULL, wait indefinitely; if zero-valued, perform a poll; otherwise
     713  * wait for at most the given duration.
    714  */
    715 static int
    716 kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
    717 	const struct timespec *tsp, struct proc *p, register_t *retval)
    718 {
    719 	struct kqueue	*kq;
    720 	struct kevent	*kevp;
    721 	struct timeval	atv;
    722 	struct knote	*kn, marker;
    723 	int		s, count, timeout, nkev, error;
    724 
    725 	kq = (struct kqueue *)fp->f_data;
    726 	count = maxevents;
    727 	nkev = error = 0;
    728 	if (count == 0)
    729 		goto done;
    730 
    731 	if (tsp != NULL) {			/* timeout supplied */
    732 		TIMESPEC_TO_TIMEVAL(&atv, tsp);
    733 		if (itimerfix(&atv)) {
    734 			error = EINVAL;
    735 			goto done;
    736 		}
    737 		s = splclock();
    738 		timeradd(&atv, &time, &atv);	/* calc. time to wait until */
    739 		splx(s);
    740 		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
    741 			timeout = -1;		/* perform a poll */
    742 		else
    743 			timeout = hzto(&atv);	/* calculate hz till timeout */
    744 	} else {
    745 		atv.tv_sec = 0;			/* no timeout, wait forever */
    746 		atv.tv_usec = 0;
    747 		timeout = 0;
    748 	}
    749 	goto start;
    750 
    751  retry:
    752 	if (atv.tv_sec || atv.tv_usec) {	/* timeout requested */
    753 		s = splclock();
    754 		if (timercmp(&time, &atv, >=)) {
    755 			splx(s);
    756 			goto done;		/* timeout reached */
    757 		}
    758 		splx(s);
    759 		timeout = hzto(&atv);		/* recalc. timeout remaining */
    760 	}
    761 
    762  start:
    763 	kevp = kq->kq_kev;
    764 	s = splhigh();
    765 	if (kq->kq_count == 0) {
    766 		if (timeout < 0) {
    767 			error = EWOULDBLOCK;
    768 		} else {
    769 			kq->kq_state |= KQ_SLEEP;
    770 			error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout);
    771 		}
    772 		splx(s);
    773 		if (error == 0)
    774 			goto retry;
    775 		/* don't restart after signals... */
    776 		if (error == ERESTART)
    777 			error = EINTR;
    778 		else if (error == EWOULDBLOCK)
    779 			error = 0;
    780 		goto done;
    781 	}
    782 
    783 	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
    784 						/* mark end of knote list */
    785 	while (count) {				/* while user wants data ... */
    786 		kn = TAILQ_FIRST(&kq->kq_head);	/* get next knote */
    787 		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
    788 		if (kn == &marker) {		/* if it's our marker, stop */
    789 			splx(s);
    790 			if (count == maxevents)
    791 				goto retry;
    792 			goto done;
    793 		}
    794 		if (kn->kn_status & KN_DISABLED) {
    795 						/* don't want disabled events */
    796 			kn->kn_status &= ~KN_QUEUED;
    797 			kq->kq_count--;
    798 			continue;
    799 		}
    800 		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
    801 		    kn->kn_fop->f_event(kn, 0) == 0) {
    802 					/*
    803 					 * non-ONESHOT event that hasn't
    804 					 * triggered again, so de-queue.
    805 					 */
    806 			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
    807 			kq->kq_count--;
    808 			continue;
    809 		}
    810 		*kevp = kn->kn_kevent;
    811 		kevp++;
    812 		nkev++;
    813 		if (kn->kn_flags & EV_ONESHOT) {
    814 				/* delete ONESHOT events after retrieval */
    815 			kn->kn_status &= ~KN_QUEUED;
    816 			kq->kq_count--;
    817 			splx(s);
    818 			kn->kn_fop->f_detach(kn);
    819 			knote_drop(kn, p);
    820 			s = splhigh();
    821 		} else if (kn->kn_flags & EV_CLEAR) {
    822 				/* clear state after retrieval */
    823 			kn->kn_data = 0;
    824 			kn->kn_fflags = 0;
    825 			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
    826 			kq->kq_count--;
    827 		} else {
    828 				/* add event back on list */
    829 			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
    830 		}
    831 		count--;
    832 		if (nkev == KQ_NEVENTS) {
    833 					/* do copyouts in KQ_NEVENTS chunks */
    834 			splx(s);
    835 			error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
    836 			    sizeof(struct kevent) * nkev);
    837 			ulistp += nkev;
    838 			nkev = 0;
    839 			kevp = kq->kq_kev;
    840 			s = splhigh();
    841 			if (error)
    842 				break;
    843 		}
    844 	}
    845 					/* remove marker */
    846 	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
    847 	splx(s);
    848  done:
    849 	if (nkev != 0)			/* copyout remaining events */
    850 		error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
    851 		    sizeof(struct kevent) * nkev);
    852 	*retval = maxevents - count;
    853 	return (error);
    854 }
    855 
    856 /*
    857  * struct fileops read method for a kqueue descriptor.
    858  * Not implemented.
    859  * XXX: This could be expanded to call kqueue_scan, if desired.
    860  */
    861 /*ARGSUSED*/
    862 static int
    863 kqueue_read(struct file *fp, off_t *offset, struct uio *uio,
    864 	struct ucred *cred, int flags)
    865 {
    866 
    867 	return (ENXIO);
    868 }
    869 
    870 /*
    871  * struct fileops write method for a kqueue descriptor.
    872  * Not implemented.
    873  */
    874 /*ARGSUSED*/
    875 static int
    876 kqueue_write(struct file *fp, off_t *offset, struct uio *uio,
    877 	struct ucred *cred, int flags)
    878 {
    879 
    880 	return (ENXIO);
    881 }
    882 
    883 /*
    884  * struct fileops ioctl method for a kqueue descriptor.
    885  *
    886  * Two ioctls are currently supported. They both use struct kfilter_mapping:
    887  *	KFILTER_BYNAME		find name for filter, and return result in
    888  *				name, which is of size len.
    889  *	KFILTER_BYFILTER	find filter for name. len is ignored.
    890  */
    891 /*ARGSUSED*/
    892 static int
    893 kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
    894 {
    895 	struct kfilter_mapping	*km;
    896 	const struct kfilter	*kfilter;
    897 	char			*name;
    898 	int			error;
    899 
    900 	km = (struct kfilter_mapping *)data;
    901 	error = 0;
    902 
    903 	switch (com) {
    904 	case KFILTER_BYFILTER:	/* convert filter -> name */
    905 		kfilter = kfilter_byfilter(km->filter);
    906 		if (kfilter != NULL)
    907 			error = copyoutstr(kfilter->name, km->name, km->len,
    908 			    NULL);
    909 		else
    910 			error = ENOENT;
    911 		break;
    912 
    913 	case KFILTER_BYNAME:	/* convert name -> filter */
    914 		MALLOC(name, char *, KFILTER_MAXNAME, M_KEVENT, M_WAITOK);
    915 		error = copyinstr(km->name, name, KFILTER_MAXNAME, NULL);
    916 		if (error) {
    917 			free(name, M_KEVENT);
    918 			break;
    919 		}
    920 		kfilter = kfilter_byname(name);
    921 		if (kfilter != NULL)
    922 			km->filter = kfilter->filter;
    923 		else
    924 			error = ENOENT;
    925 		free(name, M_KEVENT);
    926 		break;
    927 
    928 #if 1		/* XXXLUKEM - test register & unregister */
    929 	case KFILTER_REGISTER:
    930 	case KFILTER_UNREGISTER:
    931 		MALLOC(name, char *, KFILTER_MAXNAME, M_KEVENT, M_WAITOK);
    932 		error = copyinstr(km->name, name, KFILTER_MAXNAME, NULL);
    933 		if (error) {
    934 			free(name, M_KEVENT);
    935 			break;
    936 		}
    937 		if (com == KFILTER_REGISTER) {
    938 			kfilter = kfilter_byfilter(km->filter);
    939 			if (kfilter != NULL) {
    940 				error = kfilter_register(name,
    941 				    kfilter->filtops, &km->filter);
    942 			} else
    943 				error = ENOENT;
    944 		} else
    945 			error = kfilter_unregister(name);
    946 		free(name, M_KEVENT);
    947 		break;
    948 #endif
    949 
     950 	default:
     951 		error = ENOTTY;
     952 		break;
     953 	}
    954 	return (error);
    955 }
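
/*
 * Example (a userland sketch, assuming a kqueue descriptor "kq"): map
 * a filter id back to its name with KFILTER_BYFILTER.  The member
 * names follow the km->name/km->len/km->filter usage above.
 *
 *	struct kfilter_mapping km;
 *	char buf[KFILTER_MAXNAME];
 *
 *	km.filter = EVFILT_READ;
 *	km.name = buf;
 *	km.len = sizeof(buf);
 *	if (ioctl(kq, KFILTER_BYFILTER, &km) == 0)
 *		printf("filter %u is %s\n", km.filter, buf);
 */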
    956 
    957 /*
    958  * struct fileops fcntl method for a kqueue descriptor.
    959  * Not implemented.
    960  */
    961 /*ARGSUSED*/
    962 static int
    963 kqueue_fcntl(struct file *fp, u_int com, caddr_t data, struct proc *p)
    964 {
    965 
    966 	return (ENOTTY);
    967 }
    968 
    969 /*
    970  * struct fileops poll method for a kqueue descriptor.
    971  * Determine if kqueue has events pending.
    972  */
    973 /*ARGSUSED*/
    974 static int
    975 kqueue_poll(struct file *fp, int events, struct proc *p)
    976 {
    977 	struct kqueue	*kq;
    978 	int		revents, s;
    979 
    980 	kq = (struct kqueue *)fp->f_data;
    981 	revents = 0;
    982 	s = splnet();		/* XXXLUKEM: is this correct? */
    983 	if (events & (POLLIN | POLLRDNORM)) {
    984 		if (kq->kq_count) {
    985 			revents |= events & (POLLIN | POLLRDNORM);
    986 		} else {
    987 				/* XXXLUKEM: splsched() for next? */
    988 			selrecord(p, &kq->kq_sel);
    989 		}
    990 	}
    991 	splx(s);
    992 	return (revents);
    993 }
    994 
    995 /*
    996  * struct fileops stat method for a kqueue descriptor.
    997  * Returns dummy info, with st_size being number of events pending.
    998  */
    999 /*ARGSUSED*/
   1000 static int
   1001 kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
   1002 {
   1003 	struct kqueue	*kq;
   1004 
   1005 	kq = (struct kqueue *)fp->f_data;
   1006 	memset((void *)st, 0, sizeof(*st));
   1007 	st->st_size = kq->kq_count;
   1008 	st->st_blksize = sizeof(struct kevent);
   1009 	st->st_mode = S_IFIFO;
   1010 	return (0);
   1011 }
   1012 
   1013 /*
   1014  * struct fileops close method for a kqueue descriptor.
   1015  * Cleans up kqueue.
   1016  */
   1017 /*ARGSUSED*/
   1018 static int
   1019 kqueue_close(struct file *fp, struct proc *p)
   1020 {
   1021 	struct kqueue	*kq;
   1022 	struct filedesc	*fdp;
   1023 	struct knote	**knp, *kn, *kn0;
   1024 	int		i;
   1025 
   1026 	kq = (struct kqueue *)fp->f_data;
   1027 	fdp = p->p_fd;
   1028 	for (i = 0; i < fdp->fd_knlistsize; i++) {
   1029 		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
   1030 		kn = *knp;
   1031 		while (kn != NULL) {
   1032 			kn0 = SLIST_NEXT(kn, kn_link);
   1033 			if (kq == kn->kn_kq) {
   1034 				kn->kn_fop->f_detach(kn);
   1035 				FILE_UNUSE(kn->kn_fp, p);
   1036 				pool_put(&knote_pool, kn);
   1037 				*knp = kn0;
   1038 			} else {
   1039 				knp = &SLIST_NEXT(kn, kn_link);
   1040 			}
   1041 			kn = kn0;
   1042 		}
   1043 	}
   1044 	if (fdp->fd_knhashmask != 0) {
   1045 		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
   1046 			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
   1047 			kn = *knp;
   1048 			while (kn != NULL) {
   1049 				kn0 = SLIST_NEXT(kn, kn_link);
   1050 				if (kq == kn->kn_kq) {
   1051 					kn->kn_fop->f_detach(kn);
   1052 		/* XXX non-fd release of kn->kn_ptr */
   1053 					pool_put(&knote_pool, kn);
   1054 					*knp = kn0;
   1055 				} else {
   1056 					knp = &SLIST_NEXT(kn, kn_link);
   1057 				}
   1058 				kn = kn0;
   1059 			}
   1060 		}
   1061 	}
   1062 	pool_put(&kqueue_pool, kq);
   1063 	fp->f_data = NULL;
   1064 
   1065 	return (0);
   1066 }
   1067 
   1068 /*
   1069  * wakeup a kqueue
   1070  */
   1071 static void
   1072 kqueue_wakeup(struct kqueue *kq)
   1073 {
   1074 
   1075 	if (kq->kq_state & KQ_SLEEP) {		/* if currently sleeping ...  */
   1076 		kq->kq_state &= ~KQ_SLEEP;
   1077 		wakeup(kq);			/* ... wakeup */
   1078 	}
   1079 
   1080 	/* Notify select/poll and kevent. */
   1081 	selnotify(&kq->kq_sel, 0);
   1082 }
   1083 
   1084 /*
   1085  * struct fileops kqfilter method for a kqueue descriptor.
   1086  * Event triggered when monitored kqueue changes.
   1087  */
   1088 /*ARGSUSED*/
   1089 static int
   1090 kqueue_kqfilter(struct file *fp, struct knote *kn)
   1091 {
   1092 	struct kqueue *kq;
   1093 
   1094 	kq = (struct kqueue *)kn->kn_fp->f_data;
   1095 	if (kn->kn_filter != EVFILT_READ)
   1096 		return (1);
   1097 	kn->kn_fop = &kqread_filtops;
   1098 	SLIST_INSERT_HEAD(&kq->kq_sel.si_klist, kn, kn_selnext);
   1099 	return (0);
   1100 }
   1101 
   1102 
   1103 /*
   1104  * Walk down a list of knotes, activating them if their event has triggered.
   1105  */
   1106 void
   1107 knote(struct klist *list, long hint)
   1108 {
   1109 	struct knote *kn;
   1110 
   1111 	SLIST_FOREACH(kn, list, kn_selnext)
   1112 		if (kn->kn_fop->f_event(kn, hint))
   1113 			KNOTE_ACTIVATE(kn);
   1114 }
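
/*
 * Example (a sketch; "sc" and its "sc_rsel" selinfo member are
 * hypothetical): a driver delivering events from its interrupt handler
 * once new data arrives.  si_klist is the same klist used via kq_sel
 * above.
 *
 *	knote(&sc->sc_rsel.si_klist, 0);
 */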
   1115 
   1116 /*
   1117  * Remove all knotes from a specified klist
   1118  */
   1119 void
   1120 knote_remove(struct proc *p, struct klist *list)
   1121 {
   1122 	struct knote *kn;
   1123 
   1124 	while ((kn = SLIST_FIRST(list)) != NULL) {
   1125 		kn->kn_fop->f_detach(kn);
   1126 		knote_drop(kn, p);
   1127 	}
   1128 }
   1129 
   1130 /*
   1131  * Remove all knotes referencing a specified fd
   1132  */
   1133 void
   1134 knote_fdclose(struct proc *p, int fd)
   1135 {
   1136 	struct filedesc	*fdp;
   1137 	struct klist	*list;
   1138 
   1139 	fdp = p->p_fd;
   1140 	list = &fdp->fd_knlist[fd];
   1141 	knote_remove(p, list);
   1142 }
   1143 
   1144 /*
   1145  * Attach a new knote to a file descriptor
   1146  */
   1147 static void
   1148 knote_attach(struct knote *kn, struct filedesc *fdp)
   1149 {
   1150 	struct klist	*list;
   1151 	int		size;
   1152 
   1153 	if (! kn->kn_fop->f_isfd) {
   1154 					/*
   1155 					 * if knote is not on an fd, store
   1156 					 * on internal hash table.
   1157 					 */
   1158 		if (fdp->fd_knhashmask == 0)
   1159 			fdp->fd_knhash = hashinit(KN_HASHSIZE, HASH_LIST,
   1160 			    M_KEVENT, M_WAITOK, &fdp->fd_knhashmask);
   1161 		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
   1162 		goto done;
   1163 	}
   1164 
   1165 					/*
   1166 					 * otherwise, knote is on an fd.
   1167 					 * knotes are stored in fd_knlist
   1168 					 * indexed by kn->kn_id.
   1169 					 */
   1170 	if (fdp->fd_knlistsize <= kn->kn_id) {
   1171 						/* expand list if too small */
   1172 		size = fdp->fd_knlistsize;
   1173 		while (size <= kn->kn_id)
   1174 			size += KQ_EXTENT;	/* grow in KQ_EXTENT chunks */
    1175 		list = malloc(size * sizeof(struct klist), M_KEVENT, M_WAITOK);
    1176 						/* copy existing knlist */
    1177 		memcpy((caddr_t)list, (caddr_t)fdp->fd_knlist,
    1178 		    fdp->fd_knlistsize * sizeof(struct klist));
    1179 						/* zero new sections */
    1180 		memset((caddr_t)list +
    1181 		    fdp->fd_knlistsize * sizeof(struct klist), 0,
    1182 		    (size - fdp->fd_knlistsize) * sizeof(struct klist));
   1183 		if (fdp->fd_knlist != NULL)	/* switch to new knlist */
   1184 			FREE(fdp->fd_knlist, M_KEVENT);
   1185 		fdp->fd_knlistsize = size;
   1186 		fdp->fd_knlist = list;
   1187 	}
   1188 	list = &fdp->fd_knlist[kn->kn_id];	/* get list head for this fd */
   1189  done:
   1190 	SLIST_INSERT_HEAD(list, kn, kn_link);	/* add new knote */
   1191 	kn->kn_status = 0;
   1192 }
   1193 
   1194 /*
   1195  * Drop knote.
   1196  * Should be called at spl == 0, since we don't want to hold spl
   1197  * while calling FILE_UNUSE and free.
   1198  */
   1199 static void
   1200 knote_drop(struct knote *kn, struct proc *p)
   1201 {
   1202 	struct filedesc	*fdp;
   1203 	struct klist	*list;
   1204 
   1205 	fdp = p->p_fd;
   1206 	if (kn->kn_fop->f_isfd)
   1207 		list = &fdp->fd_knlist[kn->kn_id];
   1208 	else
   1209 		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
   1210 
   1211 	SLIST_REMOVE(list, kn, knote, kn_link);
   1212 	if (kn->kn_status & KN_QUEUED)
   1213 		knote_dequeue(kn);
   1214 	if (kn->kn_fop->f_isfd)
   1215 		FILE_UNUSE(kn->kn_fp, p);
   1216 	pool_put(&knote_pool, kn);
   1217 }
   1218 
   1219 
   1220 /*
   1221  * Queue new event for knote.
   1222  */
   1223 static void
   1224 knote_enqueue(struct knote *kn)
   1225 {
   1226 	struct kqueue	*kq;
   1227 	int		s;
   1228 
   1229 	kq = kn->kn_kq;
   1230 	s = splhigh();
   1231 	KASSERT((kn->kn_status & KN_QUEUED) == 0);
   1232 
   1233 	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
   1234 	kn->kn_status |= KN_QUEUED;
   1235 	kq->kq_count++;
   1236 	splx(s);
   1237 	kqueue_wakeup(kq);
   1238 }
   1239 
   1240 /*
   1241  * Dequeue event for knote.
   1242  */
   1243 static void
   1244 knote_dequeue(struct knote *kn)
   1245 {
   1246 	struct kqueue	*kq;
   1247 	int		s;
   1248 
   1249 	kq = kn->kn_kq;
   1250 	s = splhigh();
   1251 	KASSERT(kn->kn_status & KN_QUEUED);
   1252 
   1253 	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
   1254 	kn->kn_status &= ~KN_QUEUED;
   1255 	kq->kq_count--;
   1256 	splx(s);
   1257 }
   1258