
Lines matching defs:kn — definitions and uses of struct knote *kn throughout the kernel's kqueue implementation. Each fragment below is prefixed with its line number in the source file.

153 knote_free(struct knote *kn)
155 struct knote_impl *ki = KNOTE_TO_KIMPL(kn);
162 knote_foplock_enter(struct knote *kn)
164 mutex_enter(&KNOTE_TO_KIMPL(kn)->ki_foplock);
168 knote_foplock_exit(struct knote *kn)
170 mutex_exit(&KNOTE_TO_KIMPL(kn)->ki_foplock);
174 knote_foplock_owned(struct knote *kn)
176 return mutex_owned(&KNOTE_TO_KIMPL(kn)->ki_foplock);
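
knote_foplock_enter()/knote_foplock_exit() bracket every call through kn->kn_fop: the ops pointer can be swapped to nop_filtops by klist_fini() (lines 3009-3017 below), so a caller pins it with the per-knote ki_foplock first. A minimal sketch of the calling pattern, mirroring the attach path at lines 2023-2045:

    knote_foplock_enter(kn);    /* pin kn->kn_fop against klist_fini() */
    error = filter_attach(kn);  /* note: f_attach() may change kn_fop */
    knote_foplock_exit(kn);
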
194 filt_nopdetach(struct knote *kn __unused)
199 filt_nopevent(struct knote *kn __unused, long hint __unused)
395 kn_in_flux(struct knote *kn)
397 KASSERT(mutex_owned(&kn->kn_kq->kq_lock));
398 return KNOTE_TO_KIMPL(kn)->ki_influx != 0;
402 kn_enter_flux(struct knote *kn)
404 KASSERT(mutex_owned(&kn->kn_kq->kq_lock));
406 if (kn->kn_status & KN_WILLDETACH) {
410 struct knote_impl *ki = KNOTE_TO_KIMPL(kn);
418 kn_leave_flux(struct knote *kn)
420 KASSERT(mutex_owned(&kn->kn_kq->kq_lock));
422 struct knote_impl *ki = KNOTE_TO_KIMPL(kn);
429 kn_wait_flux(struct knote *kn, bool can_loop)
431 struct knote_impl *ki = KNOTE_TO_KIMPL(kn);
434 KASSERT(mutex_owned(&kn->kn_kq->kq_lock));
442 KQ_FLUX_WAIT(kn->kn_kq);
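
A knote is "in flux" while some thread works on it with kq_lock temporarily dropped; ki_influx counts such owners and kn_wait_flux() sleeps until the count drains. A sketch of the handshake implied by the asserts above (all transitions happen under kq->kq_lock):

    mutex_enter(&kq->kq_lock);
    if (kn_in_flux(kn))             /* another thread owns the knote */
        kn_wait_flux(kn, true);     /* sleep via KQ_FLUX_WAIT until clear */
    (void)kn_enter_flux(kn);        /* claim it; refused if KN_WILLDETACH */
    /* ... kq_lock may now be dropped while the knote stays pinned ... */
    kn_leave_flux(kn);              /* drop ki_influx, wake waiters */
    mutex_exit(&kq->kq_lock);
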
446 #define KNOTE_WILLDETACH(kn) \
448 (kn)->kn_status |= KN_WILLDETACH; \
449 (kn)->kn_kevent.udata = curlwp; \
458 knote_detach_quiesce(struct knote *kn)
460 struct kqueue *kq = kn->kn_kq;
486 if ((kn->kn_status & KN_WILLDETACH) != 0 &&
487 kn->kn_kevent.udata != curlwp) {
496 if (kn_in_flux(kn)) {
497 kn_wait_flux(kn, false);
510 KASSERT((kn->kn_status & KN_WILLDETACH) == 0 ||
511 kn->kn_kevent.udata == curlwp);
518 KASSERT((kn->kn_status & KN_WILLDETACH) == 0 ||
519 kn_in_flux(kn) == false);
520 KNOTE_WILLDETACH(kn);
521 if (kn_in_flux(kn)) {
523 kn_wait_flux(kn, true);
530 KASSERT(kn_in_flux(kn) == false);
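
KNOTE_WILLDETACH() stamps curlwp into kn_kevent.udata so the owner of a pending detach can be identified (the check at lines 486-487), and knote_detach_quiesce() then waits out any flux. Its return value tells the caller whether locks were dropped along the way; a sketch of the caller pattern, modeled on the close path at lines 2659-2670 below:

    /* Restart the walk whenever knote_detach_quiesce() dropped locks. */
 again:
    for (kn = SLIST_FIRST(list); kn != NULL;) {
        if (knote_detach_quiesce(kn)) {
            mutex_enter(&fdp->fd_lock);  /* was released; state may be stale */
            goto again;
        }
        knote_detach(kn, fdp, true);     /* quiesced: safe to detach */
        mutex_enter(&fdp->fd_lock);
        kn = SLIST_FIRST(list);
    }
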
552 filter_attach(struct knote *kn)
556 KASSERT(knote_foplock_owned(kn));
557 KASSERT(kn->kn_fop != NULL);
558 KASSERT(kn->kn_fop->f_attach != NULL);
561 * N.B. that kn->kn_fop may change as the result of calling
562 * f_attach(). After f_attach() returns, kn->kn_fop may not
565 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
566 rv = kn->kn_fop->f_attach(kn);
569 rv = kn->kn_fop->f_attach(kn);
577 filter_detach(struct knote *kn)
580 KASSERT(knote_foplock_owned(kn));
581 KASSERT(kn->kn_fop != NULL);
582 KASSERT(kn->kn_fop->f_detach != NULL);
584 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
585 kn->kn_fop->f_detach(kn);
588 kn->kn_fop->f_detach(kn);
594 filter_event(struct knote *kn, long hint, bool submitting)
599 KASSERT(submitting || knote_foplock_owned(kn));
600 KASSERT(kn->kn_fop != NULL);
601 KASSERT(kn->kn_fop->f_event != NULL);
603 if (kn->kn_fop->f_flags & FILTEROP_MPSAFE) {
604 rv = kn->kn_fop->f_event(kn, hint);
607 rv = kn->kn_fop->f_event(kn, hint);
615 filter_touch(struct knote *kn, struct kevent *kev, long type)
627 KASSERT(kn->kn_fop != NULL);
628 KASSERT(kn->kn_fop->f_touch != NULL);
630 return kn->kn_fop->f_touch(kn, kev, type);
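
filter_attach()/filter_detach()/filter_event() are the only sanctioned ways to call through kn->kn_fop; each takes a global lock around the call unless the filter declares FILTEROP_MPSAFE. For orientation, here is a hypothetical fd-backed filter in the same style — the mysrc_* names, softc layout, and device hookup are invented for illustration (kn_hook is the filter's private slot, as the timer filter uses at line 1461):

    struct mysrc_softc {                    /* hypothetical device state */
        kmutex_t        sc_lock;
        struct klist    sc_klist;           /* knotes watching this device */
        size_t          sc_bytes_ready;
    };

    static void
    filt_mysrcdetach(struct knote *kn)
    {
        struct mysrc_softc *sc = kn->kn_hook;

        mutex_enter(&sc->sc_lock);
        klist_remove(&sc->sc_klist, kn);    /* cf. lines 3035-3037 */
        mutex_exit(&sc->sc_lock);
    }

    static int
    filt_mysrcevent(struct knote *kn, long hint)
    {
        struct mysrc_softc *sc = kn->kn_hook;

        /* A real driver must reconcile locking with its interrupt path. */
        kn->kn_data = sc->sc_bytes_ready;   /* surfaces as kev.data */
        return kn->kn_data > 0;             /* nonzero activates the knote */
    }

    static const struct filterops mysrc_filtops = {
        .f_flags = FILTEROP_ISFD | FILTEROP_MPSAFE,
        .f_detach = filt_mysrcdetach,
        .f_event = filt_mysrcevent,
    };

    /* The device's kqfilter entry point installs the filter, replacing
     * the generic filter that filt_fileattach() dispatched through
     * (line 861) -- this is why kn_fop "may change" during f_attach(). */
    static int
    mysrc_kqfilter(dev_t dev, struct knote *kn)
    {
        struct mysrc_softc *sc =
            device_lookup_private(&mysrc_cd, minor(dev));

        if (kn->kn_filter != EVFILT_READ)
            return EINVAL;
        kn->kn_fop = &mysrc_filtops;
        kn->kn_hook = sc;
        mutex_enter(&sc->sc_lock);
        klist_insert(&sc->sc_klist, kn);    /* cf. lines 3025-3027 */
        mutex_exit(&sc->sc_lock);
        return 0;
    }
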
855 filt_fileattach(struct knote *kn)
859 fp = kn->kn_obj;
861 return (*fp->f_ops->fo_kqfilter)(fp, kn);
868 filt_kqdetach(struct knote *kn)
872 kq = ((file_t *)kn->kn_obj)->f_kqueue;
875 selremove_knote(&kq->kq_sel, kn);
884 filt_kqueue(struct knote *kn, long hint)
889 kq = ((file_t *)kn->kn_obj)->f_kqueue;
893 kn->kn_data = KQ_COUNT(kq);
894 rv = (kn->kn_data > 0);
905 filt_procattach(struct knote *kn)
910 p = proc_find(kn->kn_id);
928 kn->kn_obj = p;
929 kn->kn_flags |= EV_CLEAR; /* automatically set */
935 kn->kn_sfflags &= ~NOTE_CHILD;
937 klist_insert(&p->p_klist, kn);
954 filt_procdetach(struct knote *kn)
956 struct kqueue *kq = kn->kn_kq;
962 * because we can't be sure kn->kn_obj is valid unless
967 if ((kn->kn_status & KN_DETACHED) == 0) {
968 p = kn->kn_obj;
974 kn->kn_status |= KN_DETACHED;
975 klist_remove(&p->p_klist, kn);
989 filt_proc(struct knote *kn, long hint)
991 struct kqueue *kq = kn->kn_kq;
1002 fflags = kn->kn_fflags;
1011 struct knote *kn, *tmpkn;
1017 SLIST_FOREACH_SAFE(kn, &p->p_klist, kn_selnext, tmpkn) {
1019 if (kn->kn_fop == &sig_filtops) {
1022 KASSERT(kn->kn_fop == &proc_filtops);
1024 kq = kn->kn_kq;
1026 fflags = (kn->kn_fflags |= (kn->kn_sfflags & NOTE_EXEC));
1028 knote_activate_locked(kn);
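
filt_procattach() resolves kn_id to a process, forces EV_CLEAR (line 929), and hooks the knote onto p_klist; exec/fork/exit hooks like the one above then activate it. The same machinery seen from userland, as a minimal sketch:

    #include <sys/event.h>
    #include <err.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        struct kevent kev;
        pid_t pid;
        int kq;

        if ((kq = kqueue()) == -1)
            err(1, "kqueue");
        if ((pid = fork()) == -1)
            err(1, "fork");
        if (pid == 0) {                 /* child: exit after a moment */
            sleep(1);
            _exit(7);
        }
        EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
            NOTE_EXIT | NOTE_FORK | NOTE_EXEC, 0, 0);
        if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
            err(1, "kevent: register");
        if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
            err(1, "kevent: wait");
        if (kev.fflags & NOTE_EXIT)
            /* kev.data is the wait status (P_WAITSTATUS, line 1269) */
            printf("pid %ld exited, status %#jx\n",
                (long)pid, (uintmax_t)kev.data);
        return 0;
    }
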
1206 struct knote *kn;
1223 SLIST_FOREACH(kn, &p1->p_klist, kn_selnext) {
1225 if (kn->kn_fop == &sig_filtops) {
1228 KASSERT(kn->kn_fop == &proc_filtops);
1230 kq = kn->kn_kq;
1232 kn->kn_fflags |= (kn->kn_sfflags & NOTE_FORK);
1233 if (__predict_false(kn->kn_sfflags & NOTE_TRACK)) {
1238 if (knote_proc_fork_track(p1, p2, kn)) {
1239 kn->kn_fflags |= NOTE_TRACKERR;
1244 fflags = kn->kn_fflags;
1246 knote_activate_locked(kn);
1257 struct knote *kn;
1263 kn = SLIST_FIRST(&p->p_klist);
1264 kq = kn->kn_kq;
1266 KASSERT(kn->kn_obj == p);
1269 kn->kn_data = P_WAITSTATUS(p);
1274 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
1275 kn->kn_fflags |= kn->kn_sfflags & NOTE_EXIT;
1286 KASSERT(kn->kn_fop == &proc_filtops);
1287 if ((kn->kn_status & KN_DETACHED) == 0) {
1288 kn->kn_status |= KN_DETACHED;
1297 knote_activate_locked(kn);
1398 struct knote *kn = knx;
1399 struct kqueue *kq = kn->kn_kq;
1402 kn->kn_data++;
1403 knote_activate_locked(kn);
1404 if (kn->kn_sdata != FILT_TIMER_NOSCHED) {
1405 KASSERT(kn->kn_sdata > 0);
1406 KASSERT(kn->kn_sdata <= INT_MAX);
1407 callout_schedule((callout_t *)kn->kn_hook,
1408 (int)kn->kn_sdata);
1414 filt_timerstart(struct knote *kn, uintptr_t tticks)
1416 callout_t *calloutp = kn->kn_hook;
1418 KASSERT(mutex_owned(&kn->kn_kq->kq_lock));
1422 kn->kn_data = 1;
1425 callout_reset(calloutp, (int)tticks, filt_timerexpire, kn);
1430 filt_timerattach(struct knote *kn)
1438 .flags = kn->kn_flags,
1439 .fflags = kn->kn_sfflags,
1440 .data = kn->kn_sdata,
1455 kq = kn->kn_kq;
1458 kn->kn_sdata = kev.data;
1459 kn->kn_flags = kev.flags;
1460 KASSERT(kn->kn_sfflags == kev.fflags);
1461 kn->kn_hook = calloutp;
1463 filt_timerstart(kn, tticks);
1471 filt_timerdetach(struct knote *kn)
1474 struct kqueue *kq = kn->kn_kq;
1478 kn->kn_sdata = FILT_TIMER_NOSCHED;
1481 calloutp = (callout_t *)kn->kn_hook;
1495 filt_timertouch(struct knote *kn, struct kevent *kev, long type)
1497 struct kqueue *kq = kn->kn_kq;
1522 calloutp = kn->kn_hook;
1525 knote_deactivate_locked(kn);
1526 kn->kn_data = 0;
1532 kn->kn_sdata = kev->data;
1533 kn->kn_flags = kev->flags;
1534 kn->kn_sfflags = kev->fflags;
1535 filt_timerstart(kn, tticks);
1539 *kev = kn->kn_kevent;
1550 filt_timer(struct knote *kn, long hint)
1552 struct kqueue *kq = kn->kn_kq;
1556 rv = (kn->kn_data != 0);
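
In the kernel the timer filter is just a callout: filt_timerexpire() bumps kn_data, activates the knote, and reschedules itself unless kn_sdata was parked at FILT_TIMER_NOSCHED. A minimal userland counterpart (data is the period, in milliseconds by default):

    #include <sys/event.h>
    #include <err.h>
    #include <stdio.h>

    int
    main(void)
    {
        struct kevent kev;
        int i, kq;

        if ((kq = kqueue()) == -1)
            err(1, "kqueue");
        EV_SET(&kev, 1, EVFILT_TIMER, EV_ADD, 0, 500, 0);  /* 500 ms */
        if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
            err(1, "kevent: register");
        for (i = 0; i < 3; i++) {
            if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
                err(1, "kevent: wait");
            /* kev.data = expirations since last delivery
             * (the kn_data++ in filt_timerexpire) */
            printf("tick: %jd expiration(s)\n", (intmax_t)kev.data);
        }
        return 0;
    }
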
1563 filt_userattach(struct knote *kn)
1565 struct kqueue *kq = kn->kn_kq;
1571 kn->kn_hook = NULL;
1572 if (kn->kn_fflags & NOTE_TRIGGER)
1573 kn->kn_hookid = 1;
1575 kn->kn_hookid = 0;
1581 filt_userdetach(struct knote *kn)
1590 filt_user(struct knote *kn, long hint)
1592 struct kqueue *kq = kn->kn_kq;
1596 hookid = kn->kn_hookid;
1603 filt_usertouch(struct knote *kn, struct kevent *kev, long type)
1607 KASSERT(mutex_owned(&kn->kn_kq->kq_lock));
1612 kn->kn_hookid = 1;
1621 kn->kn_sfflags &= kev->fflags;
1625 kn->kn_sfflags |= kev->fflags;
1629 kn->kn_sfflags = kev->fflags;
1636 kn->kn_sdata = kev->data;
1638 kn->kn_hookid = 0;
1639 kn->kn_data = 0;
1640 kn->kn_fflags = 0;
1645 *kev = kn->kn_kevent;
1646 kev->fflags = kn->kn_sfflags;
1647 kev->data = kn->kn_sdata;
1648 if (kn->kn_flags & EV_CLEAR) {
1649 kn->kn_hookid = 0;
1650 kn->kn_data = 0;
1651 kn->kn_fflags = 0;
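
filt_userattach() keeps the triggered state in kn_hookid, and filt_usertouch() folds incoming fflags in (the &=, |= and = branches above correspond to NOTE_FFAND, NOTE_FFOR and NOTE_FFCOPY) before honoring NOTE_TRIGGER. A small self-trigger sketch from userland:

    #include <sys/event.h>
    #include <err.h>
    #include <stdio.h>

    int
    main(void)
    {
        struct kevent kev;
        int kq;

        if ((kq = kqueue()) == -1)
            err(1, "kqueue");
        EV_SET(&kev, 1, EVFILT_USER, EV_ADD | EV_CLEAR, 0, 0, 0);
        if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
            err(1, "kevent: add");
        /* NOTE_TRIGGER lands in filt_usertouch(), which sets
         * kn_hookid = 1 and leaves the knote activated. */
        EV_SET(&kev, 1, EVFILT_USER, 0, NOTE_TRIGGER, 0, 0);
        if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
            err(1, "kevent: trigger");
        if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
            err(1, "kevent: wait");
        printf("user event %lu fired\n", (unsigned long)kev.ident);
        return 0;
    }
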
1669 filt_seltrue(struct knote *kn, long hint)
1677 kn->kn_data = 0;
1686 filt_seltruedetach(struct knote *kn)
1699 seltrue_kqfilter(dev_t dev, struct knote *kn)
1701 switch (kn->kn_filter) {
1704 kn->kn_fop = &seltrue_filtops;
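
seltrue_kqfilter() provides always-ready semantics: filt_seltrue() reports true with kn_data pinned to 0, and filt_seltruedetach() has nothing to undo. A device with no real notification source can, hypothetically, point its cdevsw entry straight at it (mydev_* is invented; assumes the usual d_kqfilter member):

    const struct cdevsw mydev_cdevsw = {
        .d_open = mydev_open,
        .d_close = mydev_close,
        .d_read = mydev_read,
        .d_write = mydev_write,
        .d_kqfilter = seltrue_kqfilter,  /* reads/writes always ready */
        .d_flag = D_OTHER,
        /* remaining entry points omitted */
    };
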
1894 struct knote *kn, *newkn;
1900 kn = NULL;
1932 SLIST_FOREACH(kn, &ff->ff_knlist, kn_link) {
1933 if (kq == kn->kn_kq &&
1934 kev->filter == kn->kn_filter)
1947 SLIST_FOREACH(kn, list, kn_link) {
1948 if (kev->ident == kn->kn_id &&
1949 kq == kn->kn_kq &&
1950 kev->filter == kn->kn_filter)
1961 * kn now contains the matching knote, or NULL if no match
1963 if (kn == NULL) {
1966 kn = newkn;
1968 kn->kn_obj = fp;
1969 kn->kn_id = kev->ident;
1970 kn->kn_kq = kq;
1971 kn->kn_fop = kfilter->filtops;
1972 kn->kn_kfilter = kfilter;
1973 kn->kn_sfflags = kev->fflags;
1974 kn->kn_sdata = kev->data;
1977 kn->kn_kevent = *kev;
1979 KASSERT(kn->kn_fop != NULL);
1984 if (kn->kn_fop->f_touch != NULL &&
1985 kn->kn_fop != &timer_filtops &&
1986 kn->kn_fop != &user_filtops) {
1997 if (!(kn->kn_fop->f_flags & FILTEROP_ISFD)) {
2008 list = &fdp->fd_knhash[KN_HASH(kn->kn_id,
2013 &fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist;
2014 if ((int)kn->kn_id > fdp->fd_lastkqfile)
2015 fdp->fd_lastkqfile = kn->kn_id;
2017 SLIST_INSERT_HEAD(list, kn, kn_link);
2020 * N.B. kn->kn_fop may change as the result
2023 knote_foplock_enter(kn);
2024 error = filter_attach(kn);
2028 const file_t *ft = kn->kn_obj;
2033 kn->kn_filter, ft ? ft->f_type : -1,
2045 knote_foplock_exit(kn);
2047 KNOTE_WILLDETACH(kn);
2048 KASSERT(kn_in_flux(kn) == false);
2050 knote_detach(kn, fdp, false);
2069 if (kn->kn_status & KN_WILLDETACH) {
2077 KNOTE_WILLDETACH(kn);
2078 if (kn_in_flux(kn)) {
2085 kn_wait_flux(kn, true);
2086 KASSERT(kn_in_flux(kn) == false);
2094 knote_detach(kn, fdp, true);
2103 knote_foplock_enter(kn);
2104 kn->kn_kevent.udata = kev->udata;
2105 KASSERT(kn->kn_fop != NULL);
2106 if (!(kn->kn_fop->f_flags & FILTEROP_ISFD) &&
2107 kn->kn_fop->f_touch != NULL) {
2109 error = filter_touch(kn, kev, EVENT_REGISTER);
2114 knote_foplock_exit(kn);
2118 kn->kn_sfflags = kev->fflags;
2119 kn->kn_sdata = kev->data;
2129 rv = filter_event(kn, 0, false);
2131 knote_activate(kn);
2133 knote_foplock_exit(kn);
2138 if ((kn->kn_status & KN_DISABLED) == 0)
2139 kn->kn_status |= KN_DISABLED;
2145 knote_enqueue(kn);
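
Tracing the registration path above from userland: an EV_ADD for an fd-backed filter finds or allocates the knote on ff_knlist (lines 1932-2017) and attaches it under the foplock (lines 2023-2045); a second EV_ADD for the same (ident, filter) pair falls through to the update path instead (lines 2103-2119). A minimal sketch:

    #include <sys/event.h>
    #include <err.h>
    #include <unistd.h>

    int
    main(void)
    {
        struct kevent kev;
        int fds[2], kq;

        if ((kq = kqueue()) == -1 || pipe(fds) == -1)
            err(1, "setup");
        /* First EV_ADD: no knote exists yet, so the register path
         * allocates one and calls filter_attach(). */
        EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD, 0, 0, 0);
        if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
            err(1, "kevent: register");
        (void)write(fds[1], "x", 1);    /* makes the knote fire */
        if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)
            err(1, "kevent: wait");
        /* kev.data: bytes readable, as reported by the fd filter */
        return 0;
    }
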
2158 #define KN_FMT(buf, kn) \
2159 (snprintb((buf), sizeof(buf), __KN_FLAG_BITS, (kn)->kn_status), buf)
2165 const struct knote *kn;
2176 TAILQ_FOREACH(kn, &kq->kq_head, kn_tqe) {
2177 if (kn->kn_status & KN_MARKER) {
2183 kn, kn->kn_kq, KN_FMT(buf, kn));
2185 (u_long)kn->kn_id, (u_long)kn->kn_id, kn->kn_filter);
2186 if (kn->kn_kq != kq) {
2187 (*pr)(" !!! kn->kn_kq != kq\n");
2201 const struct knote *kn;
2210 TAILQ_FOREACH(kn, &kq->kq_head, kn_tqe) {
2211 if ((kn->kn_status & (KN_MARKER | KN_QUEUED)) == 0) {
2212 panic("%s,%zu: kq=%p kn=%p !(MARKER|QUEUED) %s",
2213 func, line, kq, kn, KN_FMT(buf, kn));
2215 if ((kn->kn_status & KN_MARKER) == 0) {
2216 if (kn->kn_kq != kq) {
2217 panic("%s,%zu: kq=%p kn(%p) != kn->kq(%p): %s",
2218 func, line, kq, kn, kn->kn_kq,
2219 KN_FMT(buf, kn));
2221 if ((kn->kn_status & KN_ACTIVE) == 0) {
2222 panic("%s,%zu: kq=%p kn=%p: !ACTIVE %s",
2223 func, line, kq, kn, KN_FMT(buf, kn));
2276 struct knote *kn, *marker;
2355 kn = TAILQ_FIRST(&kq->kq_head);
2358 (kn->kn_status & KN_MARKER) != 0 && kn != marker;
2359 bool kn_is_detaching = (kn->kn_status & KN_WILLDETACH) != 0;
2360 bool kn_is_in_flux = kn_in_flux(kn);
2389 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2390 if (kn == marker) {
2399 KASSERT((kn->kn_status & KN_BUSY) == 0);
2402 kn->kn_status &= ~KN_QUEUED;
2403 kn->kn_status |= KN_BUSY;
2405 if (kn->kn_status & KN_DISABLED) {
2406 kn->kn_status &= ~KN_BUSY;
2411 if ((kn->kn_flags & EV_ONESHOT) == 0) {
2414 knote_foplock_enter(kn);
2415 rv = filter_event(kn, 0, false);
2416 knote_foplock_exit(kn);
2419 if ((kn->kn_status & KN_QUEUED) != 0) {
2420 kn->kn_status &= ~KN_BUSY;
2431 kn->kn_status &= ~(KN_ACTIVE|KN_BUSY);
2442 KASSERT(kn->kn_fop != NULL);
2443 touch = (!(kn->kn_fop->f_flags & FILTEROP_ISFD) &&
2444 kn->kn_fop->f_touch != NULL);
2446 KASSERT((kn->kn_status & KN_WILLDETACH) == 0);
2448 (void)filter_touch(kn, kevp, EVENT_PROCESS);
2450 *kevp = kn->kn_kevent;
2455 if (kn->kn_flags & EV_ONESHOT) {
2457 KNOTE_WILLDETACH(kn);
2458 kn->kn_status &= ~KN_BUSY;
2460 KASSERT(kn_in_flux(kn) == false);
2461 KASSERT((kn->kn_status & KN_WILLDETACH) != 0);
2462 KASSERT(kn->kn_kevent.udata == curlwp);
2464 knote_detach(kn, fdp, true);
2467 } else if (kn->kn_flags & EV_CLEAR) {
2469 kn->kn_data = 0;
2470 kn->kn_fflags = 0;
2476 kn->kn_data = 0;
2477 kn->kn_fflags = 0;
2479 kn->kn_status &= ~(KN_ACTIVE|KN_BUSY);
2481 } else if (kn->kn_flags & EV_DISPATCH) {
2482 kn->kn_status |= KN_DISABLED;
2483 kn->kn_status &= ~(KN_ACTIVE|KN_BUSY);
2488 kn->kn_status |= KN_QUEUED;
2489 kn->kn_status &= ~KN_BUSY;
2490 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
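
Those are kqueue_scan()'s post-delivery dispositions: EV_ONESHOT detaches the knote outright (lines 2455-2464), EV_CLEAR zeroes kn_data/kn_fflags and deactivates it (2467-2479), EV_DISPATCH disables it until re-enabled (2481-2483), and the default requeues it (2488-2490). The EV_DISPATCH case from userland, as a sketch:

    #include <sys/event.h>
    #include <err.h>
    #include <unistd.h>

    int
    main(void)
    {
        struct kevent kev;
        int fds[2], kq;

        if ((kq = kqueue()) == -1 || pipe(fds) == -1)
            err(1, "setup");
        EV_SET(&kev, fds[0], EVFILT_READ, EV_ADD | EV_DISPATCH, 0, 0, 0);
        if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
            err(1, "kevent: register");
        (void)write(fds[1], "x", 1);
        if (kevent(kq, NULL, 0, &kev, 1, NULL) == -1)  /* delivered once */
            err(1, "kevent: wait");
        /* KN_DISABLED is now set; nothing more arrives until: */
        EV_SET(&kev, fds[0], EVFILT_READ, EV_ENABLE, 0, 0, 0);
        if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)
            err(1, "kevent: re-enable");
        return 0;
    }
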
2651 struct knote *kn;
2659 for (kn = SLIST_FIRST(list); kn != NULL;) {
2660 if (kq != kn->kn_kq) {
2661 kn = SLIST_NEXT(kn, kn_link);
2664 if (knote_detach_quiesce(kn)) {
2668 knote_detach(kn, fdp, true);
2670 kn = SLIST_FIRST(list);
2744 kqueue_kqfilter(file_t *fp, struct knote *kn)
2748 kq = ((file_t *)kn->kn_obj)->f_kqueue;
2750 KASSERT(fp == kn->kn_obj);
2752 if (kn->kn_filter != EVFILT_READ)
2755 kn->kn_fop = &kqread_filtops;
2757 selrecord_knote(&kq->kq_sel, kn);
2772 struct knote *kn, *tmpkn;
2774 SLIST_FOREACH_SAFE(kn, list, kn_selnext, tmpkn) {
2783 if (filter_event(kn, hint, true)) {
2784 knote_activate(kn);
2796 struct knote *kn;
2803 while ((kn = SLIST_FIRST(list)) != NULL) {
2804 if (knote_detach_quiesce(kn)) {
2807 knote_detach(kn, fdp, true);
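
The knote() walk at lines 2774-2784 is what runs when a subsystem announces a state change, normally via the KNOTE() wrapper macro. Continuing the hypothetical mysrc driver sketched earlier, the producing side might look like:

    static void
    mysrc_rxintr(struct mysrc_softc *sc, size_t len)
    {
        mutex_enter(&sc->sc_lock);
        sc->sc_bytes_ready += len;
        /* Runs filter_event() on every attached knote and activates
         * each one whose f_event returns nonzero. */
        KNOTE(&sc->sc_klist, 0);
        mutex_exit(&sc->sc_lock);
    }
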
2818 knote_detach(struct knote *kn, filedesc_t *fdp, bool dofop)
2823 kq = kn->kn_kq;
2825 KASSERT((kn->kn_status & KN_MARKER) == 0);
2826 KASSERT((kn->kn_status & KN_WILLDETACH) != 0);
2827 KASSERT(kn->kn_fop != NULL);
2832 knote_foplock_enter(kn);
2833 filter_detach(kn);
2834 knote_foplock_exit(kn);
2838 if (kn->kn_fop->f_flags & FILTEROP_ISFD)
2839 list = (struct klist *)&fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist;
2841 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
2843 SLIST_REMOVE(list, kn, knote, kn_link);
2848 KASSERT(kn_in_flux(kn) == false);
2849 if ((kn->kn_status & KN_QUEUED) != 0) {
2853 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2854 kn->kn_status &= ~KN_QUEUED;
2856 } else if (kn->kn_status & KN_BUSY) {
2863 if (kn->kn_fop->f_flags & FILTEROP_ISFD)
2864 fd_putfile(kn->kn_id);
2865 atomic_dec_uint(&kn->kn_kfilter->refcnt);
2866 knote_free(kn);
2873 knote_enqueue(struct knote *kn)
2877 KASSERT((kn->kn_status & KN_MARKER) == 0);
2879 kq = kn->kn_kq;
2882 if (__predict_false(kn->kn_status & KN_WILLDETACH)) {
2886 if ((kn->kn_status & KN_DISABLED) != 0) {
2887 kn->kn_status &= ~KN_DISABLED;
2889 if ((kn->kn_status & (KN_ACTIVE | KN_QUEUED)) == KN_ACTIVE) {
2891 kn->kn_status |= KN_QUEUED;
2892 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2906 knote_activate_locked(struct knote *kn)
2910 KASSERT((kn->kn_status & KN_MARKER) == 0);
2912 kq = kn->kn_kq;
2914 if (__predict_false(kn->kn_status & KN_WILLDETACH)) {
2918 kn->kn_status |= KN_ACTIVE;
2919 if ((kn->kn_status & (KN_QUEUED | KN_DISABLED)) == 0) {
2921 kn->kn_status |= KN_QUEUED;
2922 TAILQ_INSERT_TAIL(&kq->kq_head, kn, kn_tqe);
2932 knote_activate(struct knote *kn)
2934 struct kqueue *kq = kn->kn_kq;
2937 knote_activate_locked(kn);
2942 knote_deactivate_locked(struct knote *kn)
2944 struct kqueue *kq = kn->kn_kq;
2946 if (kn->kn_status & KN_QUEUED) {
2948 kn->kn_status &= ~KN_QUEUED;
2949 TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
2954 kn->kn_status &= ~KN_ACTIVE;
2962 knote_set_eof(struct knote *kn, uint32_t flags)
2964 struct kqueue *kq = kn->kn_kq;
2967 kn->kn_flags |= EV_EOF | flags;
2975 knote_clear_eof(struct knote *kn)
2977 struct kqueue *kq = kn->kn_kq;
2980 kn->kn_flags &= ~EV_EOF;
2999 struct knote *kn;
3009 SLIST_FOREACH(kn, list, kn_selnext) {
3010 knote_foplock_enter(kn);
3011 KASSERT(kn->kn_fop != NULL);
3012 if (kn->kn_fop->f_flags & FILTEROP_ISFD) {
3013 kn->kn_fop = &nop_fd_filtops;
3015 kn->kn_fop = &nop_filtops;
3017 knote_foplock_exit(kn);
3025 klist_insert(struct klist *list, struct knote *kn)
3027 SLIST_INSERT_HEAD(list, kn, kn_selnext);
3035 klist_remove(struct klist *list, struct knote *kn)
3037 SLIST_REMOVE(list, kn, knote, kn_selnext);
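
klist_fini() (the loop at lines 3009-3017) is what lets an object vanish while knotes still reference it: under the foplock, each knote's kn_fop is swapped to nop_filtops or nop_fd_filtops, so later detach and event calls become harmless no-ops. The hypothetical mysrc teardown is then safe even with watchers outstanding:

    static void
    mysrc_destroy(struct mysrc_softc *sc)
    {
        klist_fini(&sc->sc_klist);   /* defang any remaining knotes */
        mutex_destroy(&sc->sc_lock);
        kmem_free(sc, sizeof(*sc));
    }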