kern_event.c revision 1.1
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.27 2001/07/05 17:10:44 rwatson Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/selinfo.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <vm/vm_zone.h>

static int	kqueue_scan(struct file *fp, int maxevents,
		    struct kevent *ulistp, const struct timespec *timeout,
		    struct proc *p);
static int	kqueue_read(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags, struct proc *p);
static int	kqueue_write(struct file *fp, struct uio *uio,
		    struct ucred *cred, int flags, struct proc *p);
static int	kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
		    struct proc *p);
static int	kqueue_poll(struct file *fp, int events, struct ucred *cred,
		    struct proc *p);
static int	kqueue_kqfilter(struct file *fp, struct knote *kn);
static int	kqueue_stat(struct file *fp, struct stat *st, struct proc *p);
static int	kqueue_close(struct file *fp, struct proc *p);
static void	kqueue_wakeup(struct kqueue *kq);

static struct fileops kqueueops = {
	kqueue_read,
	kqueue_write,
	kqueue_ioctl,
	kqueue_poll,
	kqueue_kqfilter,
	kqueue_stat,
	kqueue_close
};

static void	knote_attach(struct knote *kn, struct filedesc *fdp);
static void	knote_drop(struct knote *kn, struct proc *p);
static void	knote_enqueue(struct knote *kn);
static void	knote_dequeue(struct knote *kn);
static void	knote_init(void);
static struct	knote *knote_alloc(void);
static void	knote_free(struct knote *kn);

static void	filt_kqdetach(struct knote *kn);
static int	filt_kqueue(struct knote *kn, long hint);
static int	filt_procattach(struct knote *kn);
static void	filt_procdetach(struct knote *kn);
static int	filt_proc(struct knote *kn, long hint);
static int	filt_fileattach(struct knote *kn);

static struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
static struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };

static vm_zone_t	knote_zone;

#define KNOTE_ACTIVATE(kn) do {						\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64		/* XXX should be tunable */
#define KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern struct filterops aio_filtops;
extern struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 */
static struct filterops *sysfilt_ops[] = {
	&file_filtops,			/* EVFILT_READ */
	&file_filtops,			/* EVFILT_WRITE */
	&aio_filtops,			/* EVFILT_AIO */
	&file_filtops,			/* EVFILT_VNODE */
	&proc_filtops,			/* EVFILT_PROC */
	&sig_filtops,			/* EVFILT_SIGNAL */
};
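
/*
 * The EVFILT_* constants are small negative integers (EVFILT_READ is -1,
 * EVFILT_WRITE is -2, and so on), so kqueue_register() converts a filter
 * value into an index for this table with ~kev->filter; e.g. ~(-1) == 0
 * selects the EVFILT_READ slot.
 */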

static int
filt_fileattach(struct knote *kn)
{

	return (fo_kqfilter(kn->kn_fp, kn));
}

/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	if (kn->kn_filter != EVFILT_READ)
		return (1);

	kn->kn_fop = &kqread_filtops;
	SLIST_INSERT_HEAD(&kq->kq_sel.si_note, kn, kn_selnext);
	return (0);
}

static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	SLIST_REMOVE(&kq->kq_sel.si_note, kn, knote, kn_selnext);
}

/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq = (struct kqueue *)kn->kn_fp->f_data;

	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

static int
filt_procattach(struct knote *kn)
{
	struct proc *p;
	int error;

	p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);
	if ((error = p_cansee(curproc, p))) {
		PROC_UNLOCK(p);
		return (error);
	}

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;		/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;		/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);
	PROC_UNLOCK(p);

	return (0);
}

/*
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to.  So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out.  However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process does not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p = kn->kn_ptr.p_proc;

	if (kn->kn_status & KN_DETACHED)
		return;

	PROC_LOCK(p);
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
	PROC_UNLOCK(p);
}

static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		kn->kn_status |= KN_DETACHED;
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
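
/*
 * Usage sketch, for illustration only: roughly how a userland caller might
 * drive the NOTE_TRACK machinery above.  The helper name watch_proc() and
 * its error handling are hypothetical; the EVFILT_PROC filter and the
 * NOTE_* flags are the interface implemented by this file.
 *
 *	#include <sys/types.h>
 *	#include <sys/event.h>
 *	#include <stdio.h>
 *	#include <err.h>
 *
 *	static void
 *	watch_proc(pid_t pid)
 *	{
 *		struct kevent kev;
 *		int kq;
 *
 *		if ((kq = kqueue()) == -1)
 *			err(1, "kqueue");
 *		EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ENABLE,
 *		    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
 *		if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
 *			err(1, "kevent register");
 *		for (;;) {
 *			if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
 *				err(1, "kevent wait");
 *			if (kev.fflags & NOTE_CHILD)
 *				printf("new child %lu\n", (u_long)kev.ident);
 *			if (kev.fflags & NOTE_EXIT) {
 *				printf("pid %lu exited\n", (u_long)kev.ident);
 *				if (kev.ident == (uintptr_t)pid)
 *					break;
 *			}
 *		}
 *	}
 */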

int
kqueue(struct proc *p, struct kqueue_args *uap)
{
	struct filedesc *fdp = p->p_fd;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	error = falloc(p, &fp, &fd);
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = malloc(sizeof(struct kqueue), M_TEMP, M_WAITOK | M_ZERO);
	TAILQ_INIT(&kq->kq_head);
	fp->f_data = (caddr_t)kq;
	p->p_retval[0] = fd;
	if (fdp->fd_knlistsize < 0)
		fdp->fd_knlistsize = 0;		/* this process has a kq */
	kq->kq_fdp = fdp;
	return (error);
}

#ifndef _SYS_SYSPROTO_H_
struct kevent_args {
	int	fd;
	const struct kevent *changelist;
	int	nchanges;
	struct	kevent *eventlist;
	int	nevents;
	const struct timespec *timeout;
};
#endif
int
kevent(struct proc *p, struct kevent_args *uap)
{
	struct filedesc* fdp = p->p_fd;
	struct kevent *kevp;
	struct kqueue *kq;
	struct file *fp = NULL;
	struct timespec ts;
	int i, n, nerrors, error;

	if (((u_int)uap->fd) >= fdp->fd_nfiles ||
	    (fp = fdp->fd_ofiles[uap->fd]) == NULL ||
	    (fp->f_type != DTYPE_KQUEUE))
		return (EBADF);

	fhold(fp);

	if (uap->timeout != NULL) {
		error = copyin(uap->timeout, &ts, sizeof(ts));
		if (error)
			goto done;
		uap->timeout = &ts;
	}

	kq = (struct kqueue *)fp->f_data;
	nerrors = 0;

	while (uap->nchanges > 0) {
		n = uap->nchanges > KQ_NEVENTS ? KQ_NEVENTS : uap->nchanges;
		error = copyin(uap->changelist, kq->kq_kev,
		    n * sizeof(struct kevent));
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &kq->kq_kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			error = kqueue_register(kq, kevp, p);
			if (error) {
				if (uap->nevents != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					(void) copyout((caddr_t)kevp,
					    (caddr_t)uap->eventlist,
					    sizeof(*kevp));
					uap->eventlist++;
					uap->nevents--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		uap->nchanges -= n;
		uap->changelist += n;
	}
	if (nerrors) {
		p->p_retval[0] = nerrors;
		error = 0;
		goto done;
	}

	error = kqueue_scan(fp, uap->nevents, uap->eventlist, uap->timeout, p);
done:
	if (fp != NULL)
		fdrop(fp, p);
	return (error);
}
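
/*
 * Usage sketch, for illustration only: because the loop above reports a
 * per-change failure as an EV_ERROR event whenever room is left in the
 * eventlist, a caller can submit several changes at once and pick out the
 * ones that were rejected.  The kqueue descriptor kq and the file
 * descriptors fd0/fd1 are assumed to have been set up by the caller.
 *
 *	struct kevent chg[2], ev[4];
 *	struct timespec ts = { 0, 0 };
 *	int i, n;
 *
 *	EV_SET(&chg[0], fd0, EVFILT_READ, EV_ADD, 0, 0, NULL);
 *	EV_SET(&chg[1], fd1, EVFILT_WRITE, EV_ADD, 0, 0, NULL);
 *	if ((n = kevent(kq, chg, 2, ev, 4, &ts)) == -1)
 *		err(1, "kevent");
 *	for (i = 0; i < n; i++)
 *		if (ev[i].flags & EV_ERROR)
 *			warnc((int)ev[i].data, "change %d rejected", i);
 */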

int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
{
	struct filedesc *fdp = kq->kq_fdp;
	struct filterops *fops;
	struct file *fp = NULL;
	struct knote *kn = NULL;
	int s, error = 0;

	if (kev->filter < 0) {
		if (kev->filter + EVFILT_SYSCOUNT < 0)
			return (EINVAL);
		fops = sysfilt_ops[~kev->filter];	/* to 0-base index */
	} else {
		/*
		 * XXX
		 * filter attach routine is responsible for ensuring that
		 * the identifier can be attached to it.
		 */
		printf("unknown filter: %d\n", kev->filter);
		return (EINVAL);
	}

	if (fops->f_isfd) {
		/* validate descriptor */
		if ((u_int)kev->ident >= fdp->fd_nfiles ||
		    (fp = fdp->fd_ofiles[kev->ident]) == NULL)
			return (EBADF);
		fhold(fp);

		if (kev->ident < fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {

		if (kn == NULL) {
			kn = knote_alloc();
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = fops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn, fdp);
			if ((error = fops->f_attach(kn)) != 0) {
				knote_drop(kn, p);
				goto done;
			}
		} else {
			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters that have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		s = splhigh();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		splx(s);

	} else if (kev->flags & EV_DELETE) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
		goto done;
	}

	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		s = splhigh();
		kn->kn_status |= KN_DISABLED;
		splx(s);
	}

	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		s = splhigh();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		splx(s);
	}

done:
	if (fp != NULL)
		fdrop(fp, p);
	return (error);
}

static int
kqueue_scan(struct file *fp, int maxevents, struct kevent *ulistp,
	const struct timespec *tsp, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct kevent *kevp;
	struct timeval atv, rtv, ttv;
	struct knote *kn, marker;
	int s, count, timeout, nkev = 0, error = 0;

	count = maxevents;
	if (count == 0)
		goto done;

	if (tsp != NULL) {
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			timeout = -1;
		else
			timeout = atv.tv_sec > 24 * 60 * 60 ?
			    24 * 60 * 60 * hz : tvtohz(&atv);
		getmicrouptime(&rtv);
		timevaladd(&atv, &rtv);
	} else {
		atv.tv_sec = 0;
		atv.tv_usec = 0;
		timeout = 0;
	}
	goto start;

retry:
	if (atv.tv_sec || atv.tv_usec) {
		getmicrouptime(&rtv);
		if (timevalcmp(&rtv, &atv, >=))
			goto done;
		ttv = atv;
		timevalsub(&ttv, &rtv);
		timeout = ttv.tv_sec > 24 * 60 * 60 ?
			24 * 60 * 60 * hz : tvtohz(&ttv);
	}

start:
	kevp = kq->kq_kev;
	s = splhigh();
	if (kq->kq_count == 0) {
		if (timeout < 0) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout);
		}
		splx(s);
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

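	/*
	 * Bound this pass with a marker knote appended at the tail:
	 * knotes that are requeued below, or freshly enqueued while spl
	 * is dropped, land behind the marker and are not rescanned until
	 * a later pass.
	 */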
	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);
	while (count) {
		kn = TAILQ_FIRST(&kq->kq_head);
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if (kn == &marker) {
			splx(s);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		if (kn->kn_status & KN_DISABLED) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
			continue;
		}
		*kevp = kn->kn_kevent;
		kevp++;
		nkev++;
		if (kn->kn_flags & EV_ONESHOT) {
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			splx(s);
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, p);
			s = splhigh();
		} else if (kn->kn_flags & EV_CLEAR) {
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
		} else {
			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		}
		count--;
		if (nkev == KQ_NEVENTS) {
			splx(s);
			error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
			    sizeof(struct kevent) * nkev);
			ulistp += nkev;
			nkev = 0;
			kevp = kq->kq_kev;
			s = splhigh();
			if (error)
				break;
		}
	}
	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
	splx(s);
done:
	if (nkev != 0)
		error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
		    sizeof(struct kevent) * nkev);
	p->p_retval[0] = maxevents - count;
	return (error);
}

/*
 * XXX
 * This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, struct uio *uio, struct ucred *cred,
	int flags, struct proc *p)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_write(struct file *fp, struct uio *uio, struct ucred *cred,
	 int flags, struct proc *p)
{
	return (ENXIO);
}

/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	return (ENOTTY);
}

/*ARGSUSED*/
static int
kqueue_poll(struct file *fp, int events, struct ucred *cred, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	int revents = 0;
	int s = splnet();

	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(p, &kq->kq_sel);
			kq->kq_state |= KQ_SEL;
		}
	}
	splx(s);
	return (revents);
}

/*ARGSUSED*/
static int
kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;

	bzero((void *)st, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

/*ARGSUSED*/
static int
kqueue_close(struct file *fp, struct proc *p)
{
	struct kqueue *kq = (struct kqueue *)fp->f_data;
	struct filedesc *fdp = p->p_fd;
	struct knote **knp, *kn, *kn0;
	int i;

	for (i = 0; i < fdp->fd_knlistsize; i++) {
		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
		kn = *knp;
		while (kn != NULL) {
			kn0 = SLIST_NEXT(kn, kn_link);
			if (kq == kn->kn_kq) {
				kn->kn_fop->f_detach(kn);
				fdrop(kn->kn_fp, p);
				knote_free(kn);
				*knp = kn0;
			} else {
				knp = &SLIST_NEXT(kn, kn_link);
			}
			kn = kn0;
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
			kn = *knp;
			while (kn != NULL) {
				kn0 = SLIST_NEXT(kn, kn_link);
				if (kq == kn->kn_kq) {
					kn->kn_fop->f_detach(kn);
		/* XXX non-fd release of kn->kn_ptr */
					knote_free(kn);
					*knp = kn0;
				} else {
					knp = &SLIST_NEXT(kn, kn_link);
				}
				kn = kn0;
			}
		}
	}
	free(kq, M_TEMP);
	fp->f_data = NULL;

	return (0);
}

static void
kqueue_wakeup(struct kqueue *kq)
{

	if (kq->kq_state & KQ_SLEEP) {
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);
	}
	if (kq->kq_state & KQ_SEL) {
		kq->kq_state &= ~KQ_SEL;
		selwakeup(&kq->kq_sel);
	}
	KNOTE(&kq->kq_sel.si_note, 0);
}

/*
 * walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}
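
/*
 * Producer-side sketch, for illustration only: a facility that wants to
 * raise events keeps a klist in the object being watched, has its
 * filterops attach/detach routines link knotes onto it, and calls KNOTE()
 * whenever the object's state changes -- the same pattern used above with
 * kq_sel.si_note and, in the proc filter, with p_klist.  The struct foo
 * object and its field names are hypothetical.
 *
 *	struct foo {
 *		struct klist	foo_note;
 *	};
 *
 *	attach:	SLIST_INSERT_HEAD(&fo->foo_note, kn, kn_selnext);
 *	detach:	SLIST_REMOVE(&fo->foo_note, kn, knote, kn_selnext);
 *	change:	KNOTE(&fo->foo_note, hint);
 */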

/*
 * remove all knotes from a specified klist
 */
void
knote_remove(struct proc *p, struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p);
	}
}

/*
 * remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list = &fdp->fd_knlist[fd];

	knote_remove(p, list);
}

static void
knote_attach(struct knote *kn, struct filedesc *fdp)
{
	struct klist *list;
	int size;

	if (! kn->kn_fop->f_isfd) {
		if (fdp->fd_knhashmask == 0)
			fdp->fd_knhash = hashinit(KN_HASHSIZE, M_TEMP,
			    &fdp->fd_knhashmask);
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		goto done;
	}

	if (fdp->fd_knlistsize <= kn->kn_id) {
		size = fdp->fd_knlistsize;
		while (size <= kn->kn_id)
			size += KQEXTENT;
		MALLOC(list, struct klist *,
		    size * sizeof(struct klist *), M_TEMP, M_WAITOK);
		bcopy((caddr_t)fdp->fd_knlist, (caddr_t)list,
		    fdp->fd_knlistsize * sizeof(struct klist *));
		bzero((caddr_t)list +
		    fdp->fd_knlistsize * sizeof(struct klist *),
		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));
		if (fdp->fd_knlist != NULL)
			FREE(fdp->fd_knlist, M_TEMP);
		fdp->fd_knlistsize = size;
		fdp->fd_knlist = list;
	}
	list = &fdp->fd_knlist[kn->kn_id];
done:
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kn->kn_status = 0;
}

/*
 * should be called at spl == 0, since we don't want to hold spl
 * while calling fdrop and free.
 */
static void
knote_drop(struct knote *kn, struct proc *p)
{
	struct filedesc *fdp = p->p_fd;
	struct klist *list;

	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd)
		fdrop(kn->kn_fp, p);
	knote_free(kn);
}


static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT((kn->kn_status & KN_QUEUED) == 0, ("knote already queued"));

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	splx(s);
	kqueue_wakeup(kq);
}

static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq = kn->kn_kq;
	int s = splhigh();

	KASSERT(kn->kn_status & KN_QUEUED, ("knote not queued"));

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	splx(s);
}

static void
knote_init(void)
{
	knote_zone = zinit("KNOTE", sizeof(struct knote), 0, 0, 1);
}
SYSINIT(knote, SI_SUB_PSEUDO, SI_ORDER_ANY, knote_init, NULL)

static struct knote *
knote_alloc(void)
{
	return ((struct knote *)zalloc(knote_zone));
}

static void
knote_free(struct knote *kn)
{
	zfree(knote_zone, kn);
}