/*	$NetBSD: kern_event.c,v 1.1.1.1.2.17 2002/10/02 18:46:44 jdolecek Exp $	*/
/*-
 * Copyright (c) 1999,2000,2001 Jonathan Lemon <jlemon (at) FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/kern_event.c,v 1.27 2001/07/05 17:10:44 rwatson Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/unistd.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/select.h>
#include <sys/queue.h>
#include <sys/event.h>
#include <sys/eventvar.h>
#include <sys/poll.h>
#include <sys/pool.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <sys/mount.h>
#include <sys/filedesc.h>
#include <sys/syscallargs.h>

static int kqueue_scan(struct file *fp, size_t maxevents,
	    struct kevent *ulistp, const struct timespec *timeout,
	    struct proc *p, register_t *retval);
static void kqueue_wakeup(struct kqueue *kq);

static int kqueue_read(struct file *fp, off_t *offset, struct uio *uio,
	    struct ucred *cred, int flags);
static int kqueue_write(struct file *fp, off_t *offset, struct uio *uio,
	    struct ucred *cred, int flags);
static int kqueue_ioctl(struct file *fp, u_long com, caddr_t data,
	    struct proc *p);
static int kqueue_fcntl(struct file *fp, u_int com, caddr_t data,
	    struct proc *p);
static int kqueue_poll(struct file *fp, int events, struct proc *p);
static int kqueue_kqfilter(struct file *fp, struct knote *kn);
static int kqueue_stat(struct file *fp, struct stat *sp, struct proc *p);
static int kqueue_close(struct file *fp, struct proc *p);

static struct fileops kqueueops = {
	kqueue_read, kqueue_write, kqueue_ioctl, kqueue_fcntl, kqueue_poll,
	kqueue_stat, kqueue_close, kqueue_kqfilter
};

static void knote_attach(struct knote *kn, struct filedesc *fdp);
static void knote_drop(struct knote *kn, struct proc *p,
	    struct filedesc *fdp);
static void knote_enqueue(struct knote *kn);
static void knote_dequeue(struct knote *kn);

static void filt_kqdetach(struct knote *kn);
static int filt_kqueue(struct knote *kn, long hint);
static int filt_procattach(struct knote *kn);
static void filt_procdetach(struct knote *kn);
static int filt_proc(struct knote *kn, long hint);
static int filt_fileattach(struct knote *kn);

static const struct filterops kqread_filtops =
	{ 1, NULL, filt_kqdetach, filt_kqueue };
static const struct filterops proc_filtops =
	{ 0, filt_procattach, filt_procdetach, filt_proc };
static const struct filterops file_filtops =
	{ 1, filt_fileattach, NULL, NULL };

struct pool kqueue_pool;
struct pool knote_pool;

#define	KNOTE_ACTIVATE(kn)						\
do {									\
	kn->kn_status |= KN_ACTIVE;					\
	if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0)		\
		knote_enqueue(kn);					\
} while(0)

#define	KN_HASHSIZE		64	/* XXX should be tunable */
#define	KN_HASH(val, mask)	(((val) ^ (val >> 8)) & (mask))

extern const struct filterops sig_filtops;

/*
 * Table for all system-defined filters.
 * These should be listed in the numeric order of the EVFILT_* defines.
 * If filtops is NULL, the filter isn't implemented in NetBSD.
 * End of list is when name is NULL.
 */
struct kfilter {
	const char	*name;		/* name of filter */
	uint32_t	filter;		/* id of filter */
	const struct filterops *filtops;/* operations for filter */
};

/* System defined filters */
static const struct kfilter sys_kfilters[] = {
	{ "EVFILT_READ",	EVFILT_READ,	&file_filtops },
	{ "EVFILT_WRITE",	EVFILT_WRITE,	&file_filtops },
	{ "EVFILT_AIO",		EVFILT_AIO,	NULL },
	{ "EVFILT_VNODE",	EVFILT_VNODE,	&file_filtops },
	{ "EVFILT_PROC",	EVFILT_PROC,	&proc_filtops },
	{ "EVFILT_SIGNAL",	EVFILT_SIGNAL,	&sig_filtops },
	{ NULL,			0,		NULL },	/* end of list */
};

/* User defined kfilters */
static struct kfilter *user_kfilters;	/* array */
static int user_kfilterc;		/* current offset */
static int user_kfiltermaxc;		/* max size so far */

/*
 * kqueue_init:
 *
 *	Initialize the kqueue/knote facility.
 */
void
kqueue_init(void)
{

	pool_init(&kqueue_pool, sizeof(struct kqueue), 0, 0, 0, "kqueuepl",
	    NULL);
	pool_init(&knote_pool, sizeof(struct knote), 0, 0, 0, "knotepl",
	    NULL);
}

/*
 * Find kfilter entry by name, or NULL if not found.
 */
static const struct kfilter *
kfilter_byname_sys(const char *name)
{
	int i;

	for (i = 0; sys_kfilters[i].name != NULL; i++) {
		if (strcmp(name, sys_kfilters[i].name) == 0)
			return (&sys_kfilters[i]);
	}
	return (NULL);
}

static struct kfilter *
kfilter_byname_user(const char *name)
{
	int i;

	/* user_kfilters[] could be NULL if no filters were registered */
	if (!user_kfilters)
		return (NULL);

	for (i = 0; user_kfilters[i].name != NULL; i++) {
		if (user_kfilters[i].name[0] != '\0' &&
		    strcmp(name, user_kfilters[i].name) == 0)
			return (&user_kfilters[i]);
	}
	return (NULL);
}

static const struct kfilter *
kfilter_byname(const char *name)
{
	const struct kfilter *kfilter;

	if ((kfilter = kfilter_byname_sys(name)) != NULL)
		return (kfilter);

	return (kfilter_byname_user(name));
}

/*
 * Find kfilter entry by filter id, or NULL if not found.
 * Assumes entries are indexed in filter id order, for speed.
 */
static const struct kfilter *
kfilter_byfilter(uint32_t filter)
{
	const struct kfilter *kfilter;

	if (filter < EVFILT_SYSCOUNT)	/* it's a system filter */
		kfilter = &sys_kfilters[filter];
	else if (user_kfilters != NULL &&
	    filter < EVFILT_SYSCOUNT + user_kfilterc)
		/* it's a user filter */
		kfilter = &user_kfilters[filter - EVFILT_SYSCOUNT];
	else
		return (NULL);		/* out of range */
	KASSERT(kfilter->filter == filter);	/* sanity check! */
	return (kfilter);
}

/*
 * Register a new kfilter. Stores the entry in user_kfilters.
 * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
 * If retfilter != NULL, the new filterid is returned in it.
 */
int
kfilter_register(const char *name, const struct filterops *filtops,
    int *retfilter)
{
	struct kfilter *kfilter;
	void *space;
	int len;

	if (name == NULL || name[0] == '\0' || filtops == NULL)
		return (EINVAL);	/* invalid args */
	if (kfilter_byname(name) != NULL)
		return (EEXIST);	/* already exists */
	if (user_kfilterc > 0xffffffff - EVFILT_SYSCOUNT)
		return (EINVAL);	/* too many */

	/* check if need to grow user_kfilters */
	if (user_kfilterc + 1 > user_kfiltermaxc) {
		/*
		 * Grow in KFILTER_EXTENT chunks. Use malloc(9), because we
		 * want to traverse user_kfilters as an array.
		 */
		user_kfiltermaxc += KFILTER_EXTENT;
		kfilter = malloc(user_kfiltermaxc * sizeof(struct kfilter),
		    M_KEVENT, M_WAITOK);

		/* copy existing user_kfilters */
		if (user_kfilters != NULL)
			memcpy((caddr_t)kfilter, (caddr_t)user_kfilters,
			    user_kfilterc * sizeof(struct kfilter));
		/* zero new sections */
		memset((caddr_t)kfilter +
		    user_kfilterc * sizeof(struct kfilter), 0,
		    (user_kfiltermaxc - user_kfilterc) *
		    sizeof(struct kfilter));
		/* switch to new kfilter */
		if (user_kfilters != NULL)
			free(user_kfilters, M_KEVENT);
		user_kfilters = kfilter;
	}
	len = strlen(name) + 1;		/* copy name */
	space = malloc(len, M_KEVENT, M_WAITOK);
	memcpy(space, name, len);
	user_kfilters[user_kfilterc].name = space;

	user_kfilters[user_kfilterc].filter = user_kfilterc + EVFILT_SYSCOUNT;

	len = sizeof(struct filterops);	/* copy filtops */
	space = malloc(len, M_KEVENT, M_WAITOK);
	memcpy(space, filtops, len);
	user_kfilters[user_kfilterc].filtops = space;

	if (retfilter != NULL)
		*retfilter = user_kfilters[user_kfilterc].filter;
	user_kfilterc++;		/* finally, increment count */
	return (0);
}
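
/*
 * Example (an illustrative sketch, not part of this file): a driver
 * could register a private filter type at attach time with
 * kfilter_register() and release it again with kfilter_unregister().
 * The name "EVFILT_MYDEV" and the filt_mydev_* routines are
 * hypothetical.
 *
 *	static const struct filterops mydev_filtops =
 *		{ 1, filt_mydev_attach, filt_mydev_detach, filt_mydev_event };
 *	int mydev_filter, error;
 *
 *	error = kfilter_register("EVFILT_MYDEV", &mydev_filtops,
 *	    &mydev_filter);
 *	(later, at detach time:)
 *	error = kfilter_unregister("EVFILT_MYDEV");
 */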

/*
 * Unregister a kfilter previously registered with kfilter_register.
 * This retains the filter id, but clears the name and frees filtops (filter
 * operations), so that the number isn't reused during a boot.
 * Returns 0 if operation succeeded, or an appropriate errno(2) otherwise.
 */
int
kfilter_unregister(const char *name)
{
	struct kfilter *kfilter;

	if (name == NULL || name[0] == '\0')
		return (EINVAL);	/* invalid name */

	if (kfilter_byname_sys(name) != NULL)
		return (EINVAL);	/* can't detach system filters */

	kfilter = kfilter_byname_user(name);
	if (kfilter == NULL)		/* not found */
		return (ENOENT);

	if (kfilter->name[0] != '\0') {
		/* XXX Cast away const (but we know it's safe). */
		free((void *) kfilter->name, M_KEVENT);
		kfilter->name = "";	/* mark as `not implemented' */
	}
	if (kfilter->filtops != NULL) {
		/* XXX Cast away const (but we know it's safe). */
		free((void *) kfilter->filtops, M_KEVENT);
		kfilter->filtops = NULL; /* mark as `not implemented' */
	}
	return (0);
}


/*
 * Filter attach method for EVFILT_READ and EVFILT_WRITE on normal file
 * descriptors. Calls struct fileops kqfilter method for given file descriptor.
 */
static int
filt_fileattach(struct knote *kn)
{
	struct file *fp;

	fp = kn->kn_fp;
	return ((*fp->f_ops->fo_kqfilter)(fp, kn));
}

/*
 * Filter detach method for EVFILT_READ on kqueue descriptor.
 */
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq;

	kq = (struct kqueue *)kn->kn_fp->f_data;
	SLIST_REMOVE(&kq->kq_sel.si_klist, kn, knote, kn_selnext);
}

/*
 * Filter event method for EVFILT_READ on kqueue descriptor.
 */
/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq;

	kq = (struct kqueue *)kn->kn_fp->f_data;
	kn->kn_data = kq->kq_count;
	return (kn->kn_data > 0);
}

/*
 * Filter attach method for EVFILT_PROC.
 */
static int
filt_procattach(struct knote *kn)
{
	struct proc *p;

	p = pfind(kn->kn_id);
	if (p == NULL)
		return (ESRCH);

	/*
	 * Fail if it's not owned by you, or the last exec gave us
	 * setuid/setgid privs (unless you're root).
	 */
	if ((p->p_cred->p_ruid != curproc->p_cred->p_ruid ||
	    (p->p_flag & P_SUGID))
	    && suser(curproc->p_ucred, &curproc->p_acflag) != 0)
		return (EACCES);

	kn->kn_ptr.p_proc = p;
	kn->kn_flags |= EV_CLEAR;	/* automatically set */

	/*
	 * internal flag indicating registration done by kernel
	 */
	if (kn->kn_flags & EV_FLAG1) {
		kn->kn_data = kn->kn_sdata;	/* ppid */
		kn->kn_fflags = NOTE_CHILD;
		kn->kn_flags &= ~EV_FLAG1;
	}

	/* XXXSMP lock the process? */
	SLIST_INSERT_HEAD(&p->p_klist, kn, kn_selnext);

	return (0);
}

/*
 * Filter detach method for EVFILT_PROC.
 *
 * The knote may be attached to a different process, which may exit,
 * leaving nothing for the knote to be attached to. So when the process
 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
 * it will be deleted when read out. However, as part of the knote deletion,
 * this routine is called, so a check is needed to avoid actually performing
 * a detach, because the original process might not exist any more.
 */
static void
filt_procdetach(struct knote *kn)
{
	struct proc *p;

	if (kn->kn_status & KN_DETACHED)
		return;

	p = kn->kn_ptr.p_proc;
	KASSERT(p->p_stat == SDEAD || pfind(kn->kn_id) == p);

	/* XXXSMP lock the process? */
	SLIST_REMOVE(&p->p_klist, kn, knote, kn_selnext);
}

/*
 * Filter event method for EVFILT_PROC.
 */
static int
filt_proc(struct knote *kn, long hint)
{
	u_int event;

	/*
	 * mask off extra data
	 */
	event = (u_int)hint & NOTE_PCTRLMASK;

	/*
	 * if the user is interested in this event, record it.
	 */
	if (kn->kn_sfflags & event)
		kn->kn_fflags |= event;

	/*
	 * process is gone, so flag the event as finished.
	 */
	if (event == NOTE_EXIT) {
		/*
		 * Detach the knote from watched process and mark
		 * it as such. We can't leave this to kqueue_scan(),
		 * since the process might not exist by then. And we
		 * have to do this now, since the KNOTE() in psignal()
		 * is called for zombies as well, and we could end up
		 * reading freed memory if the kevent had already been
		 * picked up and the knote g/c'ed.
		 */
		kn->kn_fop->f_detach(kn);
		kn->kn_status |= KN_DETACHED;

		/* Mark as ONESHOT, so that the knote is g/c'ed when read */
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/*
	 * process forked, and user wants to track the new process,
	 * so attach a new knote to it, and immediately report an
	 * event with the parent's pid.
	 */
	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
		struct kevent kev;
		int error;

		/*
		 * register knote with new process.
		 */
		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
		kev.filter = kn->kn_filter;
		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
		kev.fflags = kn->kn_sfflags;
		kev.data = kn->kn_id;			/* parent */
		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
		error = kqueue_register(kn->kn_kq, &kev, NULL);
		if (error)
			kn->kn_fflags |= NOTE_TRACKERR;
	}

	return (kn->kn_fflags != 0);
}
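
/*
 * Illustrative userland sketch (not kernel code) of the NOTE_TRACK
 * machinery above: watch a process for fork and exit, letting the
 * kernel attach knotes to new descendants via the EV_FLAG1 path.
 * "pid" and the error handling are assumptions of the example.
 *
 *	struct kevent ev;
 *	int kq;
 *
 *	kq = kqueue();
 *	EV_SET(&ev, pid, EVFILT_PROC, EV_ADD | EV_ENABLE,
 *	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, 0);
 *	if (kevent(kq, &ev, 1, NULL, 0, NULL) == -1)
 *		err(1, "kevent");
 */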

/*
 * filt_seltrue:
 *
 *	This filter "event" routine simulates seltrue().
 */
int
filt_seltrue(struct knote *kn, long hint)
{

	/*
	 * We don't know how much data can be read/written,
	 * but we know that it *can* be. This is about as
	 * good as select/poll does.
	 */
	kn->kn_data = 0;
	return (1);
}
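
/*
 * Example (sketch): a driver whose descriptor is always ready for
 * writing can plug filt_seltrue in directly as its event routine.
 * The filt_mydev_detach routine here is hypothetical.
 *
 *	static const struct filterops mydev_write_filtops =
 *		{ 1, NULL, filt_mydev_detach, filt_seltrue };
 */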

/*
 * kqueue(2) system call.
 */
int
sys_kqueue(struct proc *p, void *v, register_t *retval)
{
	struct filedesc *fdp;
	struct kqueue *kq;
	struct file *fp;
	int fd, error;

	fdp = p->p_fd;
	error = falloc(p, &fp, &fd);	/* setup a new file descriptor */
	if (error)
		return (error);
	fp->f_flag = FREAD | FWRITE;
	fp->f_type = DTYPE_KQUEUE;
	fp->f_ops = &kqueueops;
	kq = pool_get(&kqueue_pool, PR_WAITOK);
	memset((char *)kq, 0, sizeof(struct kqueue));
	TAILQ_INIT(&kq->kq_head);
	fp->f_data = (caddr_t)kq;	/* store the kqueue with the fp */
	*retval = fd;
	if (fdp->fd_knlistsize < 0)
		fdp->fd_knlistsize = 0;	/* this process has a kq */
	kq->kq_fdp = fdp;
	FILE_SET_MATURE(fp);
	FILE_UNUSE(fp, p);		/* falloc() does FILE_USE() */
	return (error);
}

/*
 * kevent(2) system call.
 */
int
sys_kevent(struct proc *p, void *v, register_t *retval)
{
	struct sys_kevent_args /* {
		syscallarg(int) fd;
		syscallarg(const struct kevent *) changelist;
		syscallarg(size_t) nchanges;
		syscallarg(struct kevent *) eventlist;
		syscallarg(size_t) nevents;
		syscallarg(const struct timespec *) timeout;
	} */ *uap = v;
	struct kevent *kevp;
	struct kqueue *kq;
	struct file *fp;
	struct timespec ts;
	size_t i, n;
	int nerrors, error;

	/* check that we're dealing with a kq */
	fp = fd_getfile(p->p_fd, SCARG(uap, fd));
	if (!fp || fp->f_type != DTYPE_KQUEUE)
		return (EBADF);

	FILE_USE(fp);

	if (SCARG(uap, timeout) != NULL) {
		error = copyin(SCARG(uap, timeout), &ts, sizeof(ts));
		if (error)
			goto done;
		SCARG(uap, timeout) = &ts;
	}

	kq = (struct kqueue *)fp->f_data;
	nerrors = 0;

	/* traverse list of events to register */
	while (SCARG(uap, nchanges) > 0) {
		/* copyin a maximum of KQ_NEVENTS at each pass */
		n = MIN(SCARG(uap, nchanges), KQ_NEVENTS);
		error = copyin(SCARG(uap, changelist), kq->kq_kev,
		    n * sizeof(struct kevent));
		if (error)
			goto done;
		for (i = 0; i < n; i++) {
			kevp = &kq->kq_kev[i];
			kevp->flags &= ~EV_SYSFLAGS;
			/* register each knote */
			error = kqueue_register(kq, kevp, p);
			if (error) {
				if (SCARG(uap, nevents) != 0) {
					kevp->flags = EV_ERROR;
					kevp->data = error;
					error = copyout((caddr_t)kevp,
					    (caddr_t)SCARG(uap, eventlist),
					    sizeof(*kevp));
					if (error)
						goto done;
					SCARG(uap, eventlist)++;
					SCARG(uap, nevents)--;
					nerrors++;
				} else {
					goto done;
				}
			}
		}
		SCARG(uap, nchanges) -= n;	/* update the results */
		SCARG(uap, changelist) += n;
	}
	if (nerrors) {
		*retval = nerrors;
		error = 0;
		goto done;
	}

	/* actually scan through the events */
	error = kqueue_scan(fp, SCARG(uap, nevents), SCARG(uap, eventlist),
	    SCARG(uap, timeout), p, retval);
 done:
	FILE_UNUSE(fp, p);
	return (error);
}
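
/*
 * Illustrative userland sketch of the two system calls above: create
 * a queue, register interest in reading from an (assumed open)
 * descriptor fd, and wait for one event. A NULL timeout blocks until
 * an event arrives.
 *
 *	struct kevent change, result;
 *	int kq, n;
 *
 *	if ((kq = kqueue()) == -1)
 *		err(1, "kqueue");
 *	EV_SET(&change, fd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, 0);
 *	n = kevent(kq, &change, 1, &result, 1, NULL);
 *	if (n > 0)
 *		printf("%ld bytes readable\n", (long)result.data);
 */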

/*
 * Register a given kevent kev onto the kqueue
 */
int
kqueue_register(struct kqueue *kq, struct kevent *kev, struct proc *p)
{
	const struct kfilter *kfilter;
	struct filedesc *fdp;
	struct file *fp;
	struct knote *kn;
	int s, error;

	fdp = kq->kq_fdp;
	fp = NULL;
	kn = NULL;
	error = 0;
	kfilter = kfilter_byfilter(kev->filter);
	if (kfilter == NULL || kfilter->filtops == NULL) {
		/* filter not found nor implemented */
		return (EINVAL);
	}

	/* search if knote already exists */
	if (kfilter->filtops->f_isfd) {
		/* monitoring a file descriptor */
		if ((fp = fd_getfile(fdp, kev->ident)) == NULL)
			return (EBADF);	/* validate descriptor */
		FILE_USE(fp);

		if (kev->ident < fdp->fd_knlistsize) {
			SLIST_FOREACH(kn, &fdp->fd_knlist[kev->ident], kn_link)
				if (kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	} else {
		/*
		 * not monitoring a file descriptor, so
		 * lookup knotes in internal hash table
		 */
		if (fdp->fd_knhashmask != 0) {
			struct klist *list;

			list = &fdp->fd_knhash[
			    KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
			SLIST_FOREACH(kn, list, kn_link)
				if (kev->ident == kn->kn_id &&
				    kq == kn->kn_kq &&
				    kev->filter == kn->kn_filter)
					break;
		}
	}

	if (kn == NULL && ((kev->flags & EV_ADD) == 0)) {
		error = ENOENT;		/* filter not found */
		goto done;
	}

	/*
	 * kn now contains the matching knote, or NULL if no match
	 */
	if (kev->flags & EV_ADD) {
		/* add knote */

		if (kn == NULL) {
			/* create new knote */
			kn = pool_get(&knote_pool, PR_WAITOK);
			if (kn == NULL) {
				error = ENOMEM;
				goto done;
			}
			kn->kn_fp = fp;
			kn->kn_kq = kq;
			kn->kn_fop = kfilter->filtops;

			/*
			 * apply reference count to knote structure, and
			 * do not release it at the end of this routine.
			 */
			fp = NULL;

			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kev->fflags = 0;
			kev->data = 0;
			kn->kn_kevent = *kev;

			knote_attach(kn, fdp);
			if ((error = kfilter->filtops->f_attach(kn)) != 0) {
				knote_drop(kn, p, fdp);
				goto done;
			}
		} else {
			/* modify existing knote */

			/*
			 * The user may change some filter values after the
			 * initial EV_ADD, but doing so will not reset any
			 * filters that have already been triggered.
			 */
			kn->kn_sfflags = kev->fflags;
			kn->kn_sdata = kev->data;
			kn->kn_kevent.udata = kev->udata;
		}

		s = splhigh();
		if (kn->kn_fop->f_event(kn, 0))
			KNOTE_ACTIVATE(kn);
		splx(s);

	} else if (kev->flags & EV_DELETE) {	/* delete knote */
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p, fdp);
		goto done;
	}

	/* disable knote */
	if ((kev->flags & EV_DISABLE) &&
	    ((kn->kn_status & KN_DISABLED) == 0)) {
		s = splhigh();
		kn->kn_status |= KN_DISABLED;
		splx(s);
	}

	/* enable knote */
	if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) {
		s = splhigh();
		kn->kn_status &= ~KN_DISABLED;
		if ((kn->kn_status & KN_ACTIVE) &&
		    ((kn->kn_status & KN_QUEUED) == 0))
			knote_enqueue(kn);
		splx(s);
	}

 done:
	if (fp != NULL)
		FILE_UNUSE(fp, p);
	return (error);
}

/*
 * Scan through the list of events on fp (for a maximum of maxevents),
 * returning the results into ulistp. Timeout is determined by tsp; if
 * NULL, wait indefinitely; if 0 valued, perform a poll; otherwise wait
 * as appropriate.
 */
static int
kqueue_scan(struct file *fp, size_t maxevents, struct kevent *ulistp,
    const struct timespec *tsp, struct proc *p, register_t *retval)
{
	struct kqueue *kq;
	struct kevent *kevp;
	struct timeval atv;
	struct knote *kn, marker;
	size_t count, nkev;
	int s, timeout, error;

	kq = (struct kqueue *)fp->f_data;
	count = maxevents;
	nkev = error = 0;
	if (count == 0)
		goto done;

	if (tsp != NULL) {			/* timeout supplied */
		TIMESPEC_TO_TIMEVAL(&atv, tsp);
		if (itimerfix(&atv)) {
			error = EINVAL;
			goto done;
		}
		s = splclock();
		timeradd(&atv, &time, &atv);	/* calc. time to wait until */
		splx(s);
		if (tsp->tv_sec == 0 && tsp->tv_nsec == 0)
			timeout = -1;		/* perform a poll */
		else
			timeout = hzto(&atv);	/* calculate hz till timeout */
	} else {
		atv.tv_sec = 0;			/* no timeout, wait forever */
		atv.tv_usec = 0;
		timeout = 0;
	}
	goto start;

 retry:
	if (atv.tv_sec || atv.tv_usec) {	/* timeout requested */
		s = splclock();
		if (timercmp(&time, &atv, >=)) {
			splx(s);
			goto done;		/* timeout reached */
		}
		splx(s);
		timeout = hzto(&atv);		/* recalc. timeout remaining */
	}

 start:
	kevp = kq->kq_kev;
	s = splhigh();
	if (kq->kq_count == 0) {
		if (timeout < 0) {
			error = EWOULDBLOCK;
		} else {
			kq->kq_state |= KQ_SLEEP;
			error = tsleep(kq, PSOCK | PCATCH, "kqread", timeout);
		}
		splx(s);
		if (error == 0)
			goto retry;
		/* don't restart after signals... */
		if (error == ERESTART)
			error = EINTR;
		else if (error == EWOULDBLOCK)
			error = 0;
		goto done;
	}

	/* mark end of knote list */
	TAILQ_INSERT_TAIL(&kq->kq_head, &marker, kn_tqe);

	while (count) {			/* while user wants data ... */
		kn = TAILQ_FIRST(&kq->kq_head);	/* get next knote */
		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
		if (kn == &marker) {		/* if it's our marker, stop */
			splx(s);
			if (count == maxevents)
				goto retry;
			goto done;
		}
		if (kn->kn_status & KN_DISABLED) {
			/* don't want disabled events */
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			continue;
		}
		if ((kn->kn_flags & EV_ONESHOT) == 0 &&
		    kn->kn_fop->f_event(kn, 0) == 0) {
			/*
			 * non-ONESHOT event that hasn't
			 * triggered again, so de-queue.
			 */
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
			continue;
		}
		*kevp = kn->kn_kevent;
		kevp++;
		nkev++;
		if (kn->kn_flags & EV_ONESHOT) {
			/* delete ONESHOT events after retrieval */
			kn->kn_status &= ~KN_QUEUED;
			kq->kq_count--;
			splx(s);
			kn->kn_fop->f_detach(kn);
			knote_drop(kn, p, p->p_fd);
			s = splhigh();
		} else if (kn->kn_flags & EV_CLEAR) {
			/* clear state after retrieval */
			kn->kn_data = 0;
			kn->kn_fflags = 0;
			kn->kn_status &= ~(KN_QUEUED | KN_ACTIVE);
			kq->kq_count--;
		} else {
			/* add event back on list */
			TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
		}
		count--;
		if (nkev == KQ_NEVENTS) {
			/* do copyouts in KQ_NEVENTS chunks */
			splx(s);
			error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
			    sizeof(struct kevent) * nkev);
			ulistp += nkev;
			nkev = 0;
			kevp = kq->kq_kev;
			s = splhigh();
			if (error)
				break;
		}
	}

	/* remove marker */
	TAILQ_REMOVE(&kq->kq_head, &marker, kn_tqe);
	splx(s);
 done:
	if (nkev != 0) {
		/* copyout remaining events */
		error = copyout((caddr_t)&kq->kq_kev, (caddr_t)ulistp,
		    sizeof(struct kevent) * nkev);
	}
	*retval = maxevents - count;

	return (error);
}
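
/*
 * Sketch of the tsp convention implemented above, seen from userland:
 * a NULL timespec sleeps until an event arrives, an all-zero timespec
 * polls without sleeping, and any other value bounds the wait.
 *
 *	static const struct timespec ts_poll = { 0, 0 };
 *	static const struct timespec ts_1sec = { 1, 0 };
 *
 *	kevent(kq, NULL, 0, &result, 1, NULL);		(wait indefinitely)
 *	kevent(kq, NULL, 0, &result, 1, &ts_poll);	(poll)
 *	kevent(kq, NULL, 0, &result, 1, &ts_1sec);	(wait at most 1s)
 */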

/*
 * struct fileops read method for a kqueue descriptor.
 * Not implemented.
 * XXX: This could be expanded to call kqueue_scan, if desired.
 */
/*ARGSUSED*/
static int
kqueue_read(struct file *fp, off_t *offset, struct uio *uio,
    struct ucred *cred, int flags)
{

	return (ENXIO);
}

/*
 * struct fileops write method for a kqueue descriptor.
 * Not implemented.
 */
/*ARGSUSED*/
static int
kqueue_write(struct file *fp, off_t *offset, struct uio *uio,
    struct ucred *cred, int flags)
{

	return (ENXIO);
}

/*
 * struct fileops ioctl method for a kqueue descriptor.
 *
 * Two ioctls are currently supported. They both use struct kfilter_mapping:
 *	KFILTER_BYFILTER	find name for filter, and return result in
 *				name, which is of size len.
 *	KFILTER_BYNAME		find filter for name. len is ignored.
 */
/*ARGSUSED*/
static int
kqueue_ioctl(struct file *fp, u_long com, caddr_t data, struct proc *p)
{
	struct kfilter_mapping *km;
	const struct kfilter *kfilter;
	char *name;
	int error;

	km = (struct kfilter_mapping *)data;
	error = 0;

	switch (com) {
	case KFILTER_BYFILTER:	/* convert filter -> name */
		kfilter = kfilter_byfilter(km->filter);
		if (kfilter != NULL)
			error = copyoutstr(kfilter->name, km->name, km->len,
			    NULL);
		else
			error = ENOENT;
		break;

	case KFILTER_BYNAME:	/* convert name -> filter */
		MALLOC(name, char *, KFILTER_MAXNAME, M_KEVENT, M_WAITOK);
		error = copyinstr(km->name, name, KFILTER_MAXNAME, NULL);
		if (error) {
			FREE(name, M_KEVENT);
			break;
		}
		kfilter = kfilter_byname(name);
		if (kfilter != NULL)
			km->filter = kfilter->filter;
		else
			error = ENOENT;
		FREE(name, M_KEVENT);
		break;

	default:
		error = ENOTTY;
		break;
	}
	return (error);
}
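
/*
 * Illustrative userland use of the mapping ioctls above: look up the
 * filter id for a filter name. "kq" is assumed to come from kqueue(2),
 * and the buffer handling is an assumption of the example.
 *
 *	struct kfilter_mapping km;
 *	char buf[KFILTER_MAXNAME];
 *
 *	strlcpy(buf, "EVFILT_READ", sizeof(buf));
 *	km.name = buf;
 *	km.len = sizeof(buf);
 *	if (ioctl(kq, KFILTER_BYNAME, &km) == 0)
 *		printf("filter id %u\n", km.filter);
 */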

/*
 * struct fileops fcntl method for a kqueue descriptor.
 * Not implemented.
 */
/*ARGSUSED*/
static int
kqueue_fcntl(struct file *fp, u_int com, caddr_t data, struct proc *p)
{

	return (ENOTTY);
}

/*
 * struct fileops poll method for a kqueue descriptor.
 * Determine if kqueue has events pending.
 */
static int
kqueue_poll(struct file *fp, int events, struct proc *p)
{
	struct kqueue *kq;
	int revents;

	kq = (struct kqueue *)fp->f_data;
	revents = 0;
	if (events & (POLLIN | POLLRDNORM)) {
		if (kq->kq_count) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			selrecord(p, &kq->kq_sel);
		}
	}
	return (revents);
}

/*
 * struct fileops stat method for a kqueue descriptor.
 * Returns dummy info, with st_size being number of events pending.
 */
static int
kqueue_stat(struct file *fp, struct stat *st, struct proc *p)
{
	struct kqueue *kq;

	kq = (struct kqueue *)fp->f_data;
	memset((void *)st, 0, sizeof(*st));
	st->st_size = kq->kq_count;
	st->st_blksize = sizeof(struct kevent);
	st->st_mode = S_IFIFO;
	return (0);
}

/*
 * struct fileops close method for a kqueue descriptor.
 * Cleans up kqueue.
 */
static int
kqueue_close(struct file *fp, struct proc *p)
{
	struct kqueue *kq;
	struct filedesc *fdp;
	struct knote **knp, *kn, *kn0;
	int i;

	kq = (struct kqueue *)fp->f_data;
	fdp = p->p_fd;
	for (i = 0; i < fdp->fd_knlistsize; i++) {
		knp = &SLIST_FIRST(&fdp->fd_knlist[i]);
		kn = *knp;
		while (kn != NULL) {
			kn0 = SLIST_NEXT(kn, kn_link);
			if (kq == kn->kn_kq) {
				kn->kn_fop->f_detach(kn);
				FILE_UNUSE(kn->kn_fp, p);
				pool_put(&knote_pool, kn);
				*knp = kn0;
			} else {
				knp = &SLIST_NEXT(kn, kn_link);
			}
			kn = kn0;
		}
	}
	if (fdp->fd_knhashmask != 0) {
		for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
			knp = &SLIST_FIRST(&fdp->fd_knhash[i]);
			kn = *knp;
			while (kn != NULL) {
				kn0 = SLIST_NEXT(kn, kn_link);
				if (kq == kn->kn_kq) {
					kn->kn_fop->f_detach(kn);
					/* XXX non-fd release of kn->kn_ptr */
					pool_put(&knote_pool, kn);
					*knp = kn0;
				} else {
					knp = &SLIST_NEXT(kn, kn_link);
				}
				kn = kn0;
			}
		}
	}
	pool_put(&kqueue_pool, kq);
	fp->f_data = NULL;

	return (0);
}

/*
 * wakeup a kqueue
 */
static void
kqueue_wakeup(struct kqueue *kq)
{

	if (kq->kq_state & KQ_SLEEP) {	/* if currently sleeping ... */
		kq->kq_state &= ~KQ_SLEEP;
		wakeup(kq);		/* ... wakeup */
	}

	/* Notify select/poll and kevent. */
	selnotify(&kq->kq_sel, 0);
}

/*
 * struct fileops kqfilter method for a kqueue descriptor.
 * Event triggered when monitored kqueue changes.
 */
/*ARGSUSED*/
static int
kqueue_kqfilter(struct file *fp, struct knote *kn)
{
	struct kqueue *kq;

	KASSERT(fp == kn->kn_fp);
	kq = (struct kqueue *)kn->kn_fp->f_data;
	if (kn->kn_filter != EVFILT_READ)
		return (1);
	kn->kn_fop = &kqread_filtops;
	SLIST_INSERT_HEAD(&kq->kq_sel.si_klist, kn, kn_selnext);
	return (0);
}
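
/*
 * The method above is what makes a kqueue itself pollable with
 * EVFILT_READ. Illustrative userland sketch: one kqueue watching
 * another, where "inner" is a kqueue descriptor that already has
 * events registered on it.
 *
 *	struct kevent ev;
 *	int outer = kqueue();
 *
 *	EV_SET(&ev, inner, EVFILT_READ, EV_ADD, 0, 0, 0);
 *	kevent(outer, &ev, 1, NULL, 0, NULL);
 *
 * An event reported on "outer" then carries, in its data field, the
 * number of events pending on "inner", as computed by filt_kqueue().
 */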


/*
 * Walk down a list of knotes, activating them if their event has triggered.
 */
void
knote(struct klist *list, long hint)
{
	struct knote *kn;

	SLIST_FOREACH(kn, list, kn_selnext)
		if (kn->kn_fop->f_event(kn, hint))
			KNOTE_ACTIVATE(kn);
}
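
/*
 * Sketch of the producer side: subsystems deliver events by invoking
 * the KNOTE() macro from <sys/event.h> on their klist, which calls
 * knote() above when the list is non-empty. The softc layout here is
 * hypothetical; compare the kq_sel.si_klist usage elsewhere in this
 * file.
 *
 *	struct mydev_softc *sc;
 *
 *	sc->sc_bytes_ready = n;
 *	KNOTE(&sc->sc_rsel.si_klist, 0);
 */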

/*
 * Remove all knotes from a specified klist
 */
void
knote_remove(struct proc *p, struct klist *list)
{
	struct knote *kn;

	while ((kn = SLIST_FIRST(list)) != NULL) {
		kn->kn_fop->f_detach(kn);
		knote_drop(kn, p, p->p_fd);
	}
}

/*
 * Remove all knotes referencing a specified fd
 */
void
knote_fdclose(struct proc *p, int fd)
{
	struct filedesc *fdp;
	struct klist *list;

	fdp = p->p_fd;
	list = &fdp->fd_knlist[fd];
	knote_remove(p, list);
}

/*
 * Attach a new knote to a file descriptor
 */
static void
knote_attach(struct knote *kn, struct filedesc *fdp)
{
	struct klist *list;
	int size;

	if (! kn->kn_fop->f_isfd) {
		/* if knote is not on an fd, store on internal hash table */
		if (fdp->fd_knhashmask == 0)
			fdp->fd_knhash = hashinit(KN_HASHSIZE, HASH_LIST,
			    M_KEVENT, M_WAITOK, &fdp->fd_knhashmask);
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
		goto done;
	}

	/*
	 * otherwise, knote is on an fd.
	 * knotes are stored in fd_knlist indexed by kn->kn_id.
	 */
	if (fdp->fd_knlistsize <= kn->kn_id) {
		/* expand list, it's too small */
		size = fdp->fd_knlistsize;
		while (size <= kn->kn_id) {
			/* grow in KQ_EXTENT chunks */
			size += KQ_EXTENT;
		}
		list = malloc(size * sizeof(struct klist *), M_KEVENT,
		    M_WAITOK);
		if (fdp->fd_knlist) {
			/* copy existing knlist */
			memcpy((caddr_t)list, (caddr_t)fdp->fd_knlist,
			    fdp->fd_knlistsize * sizeof(struct klist *));
		}
		/*
		 * Zero new memory. Stylistically, SLIST_INIT() should be
		 * used here, but that does the same thing as the memset()
		 * anyway.
		 */
		memset(&list[fdp->fd_knlistsize], 0,
		    (size - fdp->fd_knlistsize) * sizeof(struct klist *));

		/* switch to new knlist */
		if (fdp->fd_knlist != NULL)
			free(fdp->fd_knlist, M_KEVENT);
		fdp->fd_knlistsize = size;
		fdp->fd_knlist = list;
	}

	/* get list head for this fd */
	list = &fdp->fd_knlist[kn->kn_id];
 done:
	/* add new knote */
	SLIST_INSERT_HEAD(list, kn, kn_link);
	kn->kn_status = 0;
}

/*
 * Drop knote.
 * Should be called at spl == 0, since we don't want to hold spl
 * while calling FILE_UNUSE and free.
 */
static void
knote_drop(struct knote *kn, struct proc *p, struct filedesc *fdp)
{
	struct klist *list;

	if (kn->kn_fop->f_isfd)
		list = &fdp->fd_knlist[kn->kn_id];
	else
		list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];

	SLIST_REMOVE(list, kn, knote, kn_link);
	if (kn->kn_status & KN_QUEUED)
		knote_dequeue(kn);
	if (kn->kn_fop->f_isfd)
		FILE_UNUSE(kn->kn_fp, p);
	pool_put(&knote_pool, kn);
}


/*
 * Queue new event for knote.
 */
static void
knote_enqueue(struct knote *kn)
{
	struct kqueue *kq;
	int s;

	kq = kn->kn_kq;
	s = splhigh();
	KASSERT((kn->kn_status & KN_QUEUED) == 0);

	TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
	kn->kn_status |= KN_QUEUED;
	kq->kq_count++;
	splx(s);
	kqueue_wakeup(kq);
}

/*
 * Dequeue event for knote.
 */
static void
knote_dequeue(struct knote *kn)
{
	struct kqueue *kq;
	int s;

	kq = kn->kn_kq;
	s = splhigh();
	KASSERT(kn->kn_status & KN_QUEUED);

	TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
	kn->kn_status &= ~KN_QUEUED;
	kq->kq_count--;
	splx(s);
}
