Lines Matching defs:knote
94 static int kqueue_kqfilter(file_t *, struct knote *);
102 static void knote_detach(struct knote *, filedesc_t *fdp, bool);
103 static void knote_enqueue(struct knote *);
104 static void knote_activate(struct knote *);
105 static void knote_activate_locked(struct knote *);
106 static void knote_deactivate_locked(struct knote *);
108 static void filt_kqdetach(struct knote *);
109 static int filt_kqueue(struct knote *, long hint);
110 static int filt_procattach(struct knote *);
111 static void filt_procdetach(struct knote *);
112 static int filt_proc(struct knote *, long hint);
113 static int filt_fileattach(struct knote *);
115 static int filt_timerattach(struct knote *);
116 static void filt_timerdetach(struct knote *);
117 static int filt_timer(struct knote *, long hint);
118 static int filt_timertouch(struct knote *, struct kevent *, long type);
119 static int filt_userattach(struct knote *);
120 static void filt_userdetach(struct knote *);
121 static int filt_user(struct knote *, long hint);
122 static int filt_usertouch(struct knote *, struct kevent *, long type);
125 * Private knote state that should never be exposed outside
133 struct knote ki_knote;
141 static inline struct knote *
153 knote_free(struct knote *kn)
162 knote_foplock_enter(struct knote *kn)
168 knote_foplock_exit(struct knote *kn)
174 knote_foplock_owned(struct knote *kn)
194 filt_nopdetach(struct knote *kn __unused)
199 filt_nopevent(struct knote *kn __unused, long hint __unused)
235 * however, will override the knote's filterops, and thus will
314 * -> knote foplock (if taken)
321 * f_attach: fdp->fd_lock -> knote foplock ->
324 * f_detach: fdp->fd_lock -> knote foplock ->
327 * f_event via kevent: fdp->fd_lock -> knote foplock ->
332 * f_event via knote (via backing object): Whatever caller guarantees.
339 * N.B. the knote foplock will **not** be acquired in this case. The
341 * with knote().
344 * N.B. knote foplock is **not** acquired in this case and
352 * of each knote on the klist.
356 * There are some situations where knote submission may require dropping
358 * to mark a knote as being 'in-flux'. Such a knote is guaranteed not to
362 * detach an in-flux knote must wait until the knote is no longer in-flux.
363 * When this happens, the knote is marked for death (KN_WILLDETACH) and the
364 * LWP who gets to finish the detach operation is recorded in the knote's
366 * a knote is so marked). Code paths that lead to knote_detach() must ensure
368 * the in-flux status of the knote to clear. Note that once a knote is
372 * acquired in the proper order (object lock -> kq_lock), the knote taken
379 * When kqueue_scan() encounters an in-flux knote, the situation is
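The in-flux discipline excerpted above (source lines 356-379) reduces to a counter plus a wait channel: a submission path bumps the counter before it has to drop locks, and a would-be detacher claims the knote with KN_WILLDETACH and sleeps until the counter returns to zero. Below is a minimal userland sketch of that pattern using pthreads; the names (influx_note, note_enter_flux, and so on) are hypothetical stand-ins for the kernel's kn_status bits, kq_lock and condition-variable wait, not the real implementation.

    #include <pthread.h>
    #include <stdbool.h>

    /*
     * Model of the "in-flux" protocol: a note that is in flux may not be
     * detached or freed; a detacher claims the note, then waits for the
     * flux count to drain.  Fields are assumed to be initialized with
     * PTHREAD_MUTEX_INITIALIZER / PTHREAD_COND_INITIALIZER.
     */
    struct influx_note {
        pthread_mutex_t n_lock;       /* stands in for kq_lock */
        pthread_cond_t  n_cv;         /* wait channel for flux changes */
        unsigned        n_influx;     /* > 0 while the note is in flux */
        bool            n_willdetach; /* a detach has been claimed */
    };

    static void
    note_enter_flux(struct influx_note *n)
    {
        pthread_mutex_lock(&n->n_lock);
        n->n_influx++;
        pthread_mutex_unlock(&n->n_lock);
    }

    static void
    note_leave_flux(struct influx_note *n)
    {
        pthread_mutex_lock(&n->n_lock);
        if (--n->n_influx == 0)
            pthread_cond_broadcast(&n->n_cv);   /* wake a waiting detacher */
        pthread_mutex_unlock(&n->n_lock);
    }

    /* Claim the detach, then wait until the note is quiescent. */
    static bool
    note_detach_quiesce(struct influx_note *n)
    {
        pthread_mutex_lock(&n->n_lock);
        if (n->n_willdetach) {
            /* Someone else already owns the detach; back off. */
            pthread_mutex_unlock(&n->n_lock);
            return false;
        }
        n->n_willdetach = true;
        while (n->n_influx != 0)
            pthread_cond_wait(&n->n_cv, &n->n_lock);
        pthread_mutex_unlock(&n->n_lock);
        return true;
    }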
395 kn_in_flux(struct knote *kn)
402 kn_enter_flux(struct knote *kn)
418 kn_leave_flux(struct knote *kn)
429 kn_wait_flux(struct knote *kn, bool can_loop)
437 * It may not be safe for us to touch the knote again after
453 * Wait until the specified knote is in a quiescent state and
458 knote_detach_quiesce(struct knote *kn)
469 * 1. Someone else has already started detaching the knote but
477 * knote (which will never be used again for its usual purpose
481 * Otherwise, once we have claimed the knote for ourselves, we
483 * where touching a detaching knote is safe after dropping the
489 * N.B. it is NOT safe for us to touch the knote again
492 * the knote is in-flux, we want to block to minimize
525 * It is safe for us to touch the knote again after
544 * knote foplock before calling into the filter ops. When a driver
546 * each knote, acquires its foplock, and replaces the filterops with a
547 * nop stub, allowing knote detach (when descriptors are closed) to safely
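The foplock arrangement described around source lines 544-552 can be illustrated with a small, self-contained model: every note keeps a per-note lock and a pointer to its ops table, and a departing backing object swaps that pointer to a do-nothing table while holding the lock, so a later detach call lands on the stub instead of on freed driver state. The names below (note_ops, note_disarm, and so on) are hypothetical and are not the kernel's struct filterops or filter_detach().

    #include <pthread.h>
    #include <stddef.h>

    /* Hypothetical ops table standing in for a filterops-style vector. */
    struct note_ops {
        void (*o_detach)(void *cookie);
        int  (*o_event)(void *cookie, long hint);
    };

    struct note {
        pthread_mutex_t        n_foplock; /* serializes ops calls vs. replacement */
        const struct note_ops *n_ops;
        void                  *n_cookie;  /* backing-object state */
    };

    /* Do-nothing stubs installed when the backing object goes away. */
    static void nop_detach(void *cookie) { (void)cookie; }
    static int  nop_event(void *cookie, long hint) { (void)cookie; (void)hint; return 0; }
    static const struct note_ops nop_ops = { nop_detach, nop_event };

    /* Backing-object teardown: neuter the note so later calls are harmless. */
    static void
    note_disarm(struct note *n)
    {
        pthread_mutex_lock(&n->n_foplock);
        n->n_ops = &nop_ops;
        n->n_cookie = NULL;
        pthread_mutex_unlock(&n->n_foplock);
    }

    /*
     * Caller-side detach: always goes through the per-note lock, so it
     * sees either the real ops or the stub, never a half-torn-down object.
     */
    static void
    note_detach_op(struct note *n)
    {
        pthread_mutex_lock(&n->n_foplock);
        n->n_ops->o_detach(n->n_cookie);
        pthread_mutex_unlock(&n->n_foplock);
    }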
552 filter_attach(struct knote *kn)
577 filter_detach(struct knote *kn)
594 filter_event(struct knote *kn, long hint, bool submitting)
598 /* See knote(). */
615 filter_touch(struct knote *kn, struct kevent *kev, long type)
619 * XXX We cannot assert that the knote foplock is held here
855 filt_fileattach(struct knote *kn)
868 filt_kqdetach(struct knote *kn)
884 filt_kqueue(struct knote *kn, long hint)
905 filt_procattach(struct knote *kn)
946 * The knote may be attached to a different process, which may exit,
947 * leaving nothing for the knote to be attached to. So when the process
948 * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
949 * it will be deleted when read out. However, as part of the knote deletion,
954 filt_procdetach(struct knote *kn)
985 * entry points for delivering knote submissions. filt_proc() is used
989 filt_proc(struct knote *kn, long hint)
1011 struct knote *kn, *tmpkn;
1037 knote_proc_fork_track(struct proc *p1, struct proc *p2, struct knote *okn)
1045 * We're going to put this knote into flux while we drop
1046 * the locks and create and attach a new knote to track the
1047 * child. If we are not able to enter flux, then this knote
1066 * If we only register a single knote, then it's possible for
1087 * ==> Directly attaches the new tracking knote to the child
1091 * knote is held in-flux, and to avoid doing extra work in general
1096 struct knote *knchild, *kntrack;
1137 * attaching the knote if so. Normally, this isn't necessary
1206 struct knote *kn;
1214 * don't want to pre-fetch the next knote; in the event we
1215 * have to drop p_lock, we will have put the knote in-flux,
1217 * have taken the knote out of flux. However, that does
1257 struct knote *kn;
1271 * Mark as ONESHOT, so that the knote is g/c'ed
1278 * Detach the knote from the process and mark it as such.
1293 * Always activate the knote for NOTE_EXIT regardless
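Seen from userland, the process-filter behaviour excerpted above (the knote is detached and flagged ONESHOT on NOTE_EXIT; NOTE_TRACK asks for a tracking knote to be attached to forked children) corresponds to an ordinary EVFILT_PROC registration. A minimal usage sketch, assuming a BSD-style kqueue(2)/kevent(2) and omitting error handling:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <stdio.h>
    #include <unistd.h>

    int
    main(void)
    {
        int kq = kqueue();
        pid_t pid = fork();

        if (pid == 0) {
            sleep(1);       /* child: linger briefly, then exit */
            _exit(7);
        }

        /*
         * Watch the child for exit and fork events; NOTE_TRACK asks the
         * kernel to follow the child across fork() as well.
         */
        struct kevent kev;
        EV_SET(&kev, pid, EVFILT_PROC, EV_ADD | EV_ENABLE,
            NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, 0);
        kevent(kq, &kev, 1, NULL, 0, NULL);

        /*
         * Block until an event fires; on NOTE_EXIT the kernel has already
         * detached the knote and marked it ONESHOT, so it is reclaimed
         * once this read returns it.
         */
        struct kevent ev;
        if (kevent(kq, NULL, 0, &ev, 1, NULL) == 1 &&
            (ev.fflags & NOTE_EXIT) != 0)
            printf("pid %d exited, wait(2) status %ld\n",
                (int)ev.ident, (long)ev.data);
        return 0;
    }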
1398 struct knote *kn = knx;
1414 filt_timerstart(struct knote *kn, uintptr_t tticks)
1430 filt_timerattach(struct knote *kn)
1471 filt_timerdetach(struct knote *kn)
1495 filt_timertouch(struct knote *kn, struct kevent *kev, long type)
1550 filt_timer(struct knote *kn, long hint)
1563 filt_userattach(struct knote *kn)
1581 filt_userdetach(struct knote *kn)
1590 filt_user(struct knote *kn, long hint)
1603 filt_usertouch(struct knote *kn, struct kevent *kev, long type)
1669 filt_seltrue(struct knote *kn, long hint)
1686 filt_seltruedetach(struct knote *kn)
1699 seltrue_kqfilter(dev_t dev, struct knote *kn)
1851 /* register each knote */
1894 struct knote *kn, *newkn;
1915 /* search if knote already exists */
1961 * kn now contains the matching knote, or NULL if no match
1965 /* create new knote */
1992 * apply reference count to knote structure, and
1999 * If knote is not on an fd, store on
2011 /* Otherwise, knote is on an fd. */
2056 /* No matching knote and the EV_ADD flag is not set. */
2064 * Let the world know that this knote is about to go
2071 * This knote is already on its way out,
2082 * this knote to settle because we know we'll
2112 /* Never a new knote (which would consume newkn). */
2135 /* disable knote */
2143 /* enable knote */
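The registration path excerpted above first looks for an existing knote for the (ident, filter) pair and only allocates a new one when EV_ADD asks for it; EV_DISABLE and EV_ENABLE merely flip state on the knote that was found. From userland this means repeated kevent(2) changes against the same descriptor are cheap. A small usage sketch, assuming a kqueue descriptor kq and a readable descriptor rfd, with error handling omitted:

    #include <sys/types.h>
    #include <sys/event.h>
    #include <sys/time.h>
    #include <stddef.h>

    /*
     * Register, disable and re-enable a read filter on one descriptor.
     * Each change after the initial EV_ADD matches the already-attached
     * knote in the kernel instead of creating a new one.
     */
    static void
    toggle_read_filter(int kq, int rfd)
    {
        struct kevent kev;

        EV_SET(&kev, rfd, EVFILT_READ, EV_ADD, 0, 0, 0);     /* creates the knote */
        kevent(kq, &kev, 1, NULL, 0, NULL);

        EV_SET(&kev, rfd, EVFILT_READ, EV_DISABLE, 0, 0, 0); /* same knote, now disabled */
        kevent(kq, &kev, 1, NULL, 0, NULL);

        EV_SET(&kev, rfd, EVFILT_READ, EV_ENABLE, 0, 0, 0);  /* same knote, re-enabled */
        kevent(kq, &kev, 1, NULL, 0, NULL);
    }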
2165 const struct knote *kn;
2182 (*pr)(" knote %p: kq=%p status=%s\n",
2201 const struct knote *kn;
2276 struct knote *kn, *marker;
2337 /* mark end of knote list */
2352 * Get next knote. We are guaranteed this will never
2363 * If we found a marker that's not ours, or this knote
2651 struct knote *kn;
2698 * need to fail, lest they sneak in to attach a knote after
2744 kqueue_kqfilter(file_t *fp, struct knote *kn)
2770 knote(struct klist *list, long hint)
2772 struct knote *kn, *tmpkn;
2778 * so acquiring the knote foplock would create a
2796 struct knote *kn;
2814 * Drop knote. Called with fdp->fd_lock held, and will drop before
2818 knote_detach(struct knote *kn, filedesc_t *fdp, bool dofop)
2843 SLIST_REMOVE(list, kn, knote, kn_link);
2870 * Queue new event for knote.
2873 knote_enqueue(struct knote *kn)
2883 /* Don't bother enqueueing a dying knote. */
2903 * Queue new event for knote.
2906 knote_activate_locked(struct knote *kn)
2915 /* Don't bother enqueueing a dying knote. */
2932 knote_activate(struct knote *kn)
2942 knote_deactivate_locked(struct knote *kn)
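knote_activate() and knote_enqueue() as excerpted above follow one rule: mark the knote active, then queue it for delivery unless it is disabled, already queued, or on its way out. The fragment below models that rule with hypothetical status bits and a TAILQ; the real code keeps this state in kn_status under kq_lock.

    #include <sys/queue.h>

    /* Hypothetical status bits mirroring the idea of kn_status. */
    #define N_ACTIVE    0x01    /* the event has fired */
    #define N_QUEUED    0x02    /* sitting on the pending queue */
    #define N_DISABLED  0x04    /* user asked for EV_DISABLE */
    #define N_DYING     0x08    /* detach in progress, don't queue */

    struct mini_note {
        TAILQ_ENTRY(mini_note) n_pend;
        unsigned               n_status;
    };
    TAILQ_HEAD(pendq, mini_note);

    /* Queue the note unless it is disabled, dying or already queued. */
    static void
    mini_enqueue(struct pendq *q, struct mini_note *n)
    {
        if (n->n_status & (N_QUEUED | N_DISABLED | N_DYING))
            return;
        n->n_status |= N_QUEUED;
        TAILQ_INSERT_TAIL(q, n, n_pend);
    }

    /* Activation marks the note active first, then tries to queue it. */
    static void
    mini_activate(struct pendq *q, struct mini_note *n)
    {
        n->n_status |= N_ACTIVE;
        mini_enqueue(q, n);
    }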
2958 * Set EV_EOF on the specified knote. Also allows additional
2962 knote_set_eof(struct knote *kn, uint32_t flags)
2972 * Clear EV_EOF on the specified knote.
2975 knote_clear_eof(struct knote *kn)
2999 struct knote *kn;
3006 * with acquiring the knote foplock), and that we can traverse
3022 * Insert a knote into a klist.
3025 klist_insert(struct klist *list, struct knote *kn)
3031 * Remove a knote from a klist. Returns true if the last
3032 * knote was removed and the list is now empty.
3035 klist_remove(struct klist *list, struct knote *kn)
3037 SLIST_REMOVE(list, kn, knote, kn_selnext);
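klist_insert() and klist_remove() at the end are thin wrappers over a singly-linked list threaded through each knote, with remove reporting whether the list just became empty so the caller knows nobody is watching the object any more. A self-contained model using <sys/queue.h>, with hypothetical names rather than the kernel's own types:

    #include <sys/queue.h>
    #include <stdbool.h>

    struct mini_knote {
        SLIST_ENTRY(mini_knote) k_selnext;  /* linkage through the watcher list */
    };
    SLIST_HEAD(mini_klist, mini_knote);

    /* Attach a note to the object's list of watchers. */
    static void
    mini_klist_insert(struct mini_klist *list, struct mini_knote *kn)
    {
        SLIST_INSERT_HEAD(list, kn, k_selnext);
    }

    /*
     * Detach a note; report whether it was the last one, so the caller
     * can stop generating events for this object.
     */
    static bool
    mini_klist_remove(struct mini_klist *list, struct mini_knote *kn)
    {
        SLIST_REMOVE(list, kn, mini_knote, k_selnext);
        return SLIST_EMPTY(list);
    }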