Lines Matching defs:fdp
102 static void knote_detach(struct knote *, filedesc_t *fdp, bool);
321 * f_attach: fdp->fd_lock -> knote foplock ->
324 * f_detach: fdp->fd_lock -> knote foplock ->
327 * f_event via kevent: fdp->fd_lock -> knote foplock ->
343 * f_touch: fdp->fd_lock -> kn_kq->kq_lock (spin lock)
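
The comment fragments at source lines 321-343 record the lock order for the filter callbacks: fdp->fd_lock is taken before the knote's foplock for f_attach, f_detach and f_event via kevent, and before the kqueue's spin lock kq_lock for f_touch. Below is a minimal user-space sketch of that acquire/release order, assuming simplified stand-in types and a hypothetical filter_attach() helper; it only illustrates the ordering, not the kernel's real structures or callbacks.

/*
 * Minimal user-space sketch of the lock order recorded above:
 *     fdp->fd_lock -> knote foplock      (f_attach / f_detach / f_event)
 * pthread mutexes stand in for the kernel locks; filter_attach() is a
 * hypothetical helper for illustration only.
 */
#include <pthread.h>
#include <stdio.h>

struct filedesc {                       /* stand-in for filedesc_t */
	pthread_mutex_t fd_lock;
};

struct knote {                          /* stand-in for struct knote */
	pthread_mutex_t kn_foplock;
	int kn_id;
};

static void
filter_attach(struct filedesc *fdp, struct knote *kn)
{
	pthread_mutex_lock(&fdp->fd_lock);      /* 1st: fdp->fd_lock */
	pthread_mutex_lock(&kn->kn_foplock);    /* 2nd: knote foplock */

	printf("f_attach-style op on knote %d under both locks\n", kn->kn_id);

	pthread_mutex_unlock(&kn->kn_foplock);  /* release in reverse order */
	pthread_mutex_unlock(&fdp->fd_lock);
}

int
main(void)
{
	struct filedesc fdp;
	struct knote kn = { .kn_id = 3 };

	pthread_mutex_init(&fdp.fd_lock, NULL);
	pthread_mutex_init(&kn.kn_foplock, NULL);
	filter_attach(&fdp, &kn);
	pthread_mutex_destroy(&kn.kn_foplock);
	pthread_mutex_destroy(&fdp.fd_lock);
	return 0;
}

Taking fd_lock first in every path keeps the order consistent and so avoids deadlock between the registration, detach and event-delivery paths.
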
461 filedesc_t *fdp = kq->kq_fdp;
463 KASSERT(mutex_owned(&fdp->fd_lock));
495 mutex_exit(&fdp->fd_lock);
522 mutex_exit(&fdp->fd_lock);
1095 filedesc_t *fdp = kq->kq_fdp;
1133 mutex_enter(&fdp->fd_lock);
1146 mutex_exit(&fdp->fd_lock);
1162 mutex_exit(&fdp->fd_lock);
1169 KASSERT(fdp->fd_knhashmask != 0);
1170 KASSERT(fdp->fd_knhash != NULL);
1171 struct klist *list = &fdp->fd_knhash[KN_HASH(kntrack->kn_id,
1172 fdp->fd_knhashmask)];
1184 mutex_exit(&fdp->fd_lock);
1516 * because fdp->fd_lock will be held throughout,
1891 filedesc_t *fdp;
1898 fdp = kq->kq_fdp;
1925 mutex_enter(&fdp->fd_lock);
1926 ff = fdp->fd_dt->dt_ff[fd];
1931 if (fd <= fdp->fd_lastkqfile) {
1943 mutex_enter(&fdp->fd_lock);
1944 if (fdp->fd_knhashmask != 0) {
1945 list = &fdp->fd_knhash[
1946 KN_HASH((u_long)kev->ident, fdp->fd_knhashmask)];
1957 KASSERT(mutex_owned(&fdp->fd_lock));
2002 if (fdp->fd_knhashmask == 0) {
2004 fdp->fd_knhash = hashinit(KN_HASHSIZE,
2006 &fdp->fd_knhashmask);
2008 list = &fdp->fd_knhash[KN_HASH(kn->kn_id,
2009 fdp->fd_knhashmask)];
2013 &fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist;
2014 if ((int)kn->kn_id > fdp->fd_lastkqfile)
2015 fdp->fd_lastkqfile = kn->kn_id;
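
The hits around source lines 2002-2015 are where the registration path picks the klist for a new knote: non-file-descriptor idents go into the lazily created fd_knhash (allocated with hashinit on first use and indexed with KN_HASH and fd_knhashmask), while fd-backed knotes hang off the descriptor's ff_knlist, with fd_lastkqfile tracking the highest descriptor that has knotes. The sketch below models that selection with simplified stand-in types; knote_list_for() and the mask-based kn_hash() are assumptions for illustration, not the kernel's code.

/*
 * Sketch of the klist selection seen above: a lazily created,
 * mask-indexed hash table for non-fd idents, a per-descriptor list
 * for fd idents.  All types and helpers here are simplified stand-ins.
 */
#include <stdio.h>
#include <stdlib.h>

#define KN_HASHSIZE 64                    /* assumed bucket count */

struct klist { int nknotes; };            /* placeholder knote list */

struct filedesc {
	struct klist *fd_knhash;          /* lazily allocated buckets */
	unsigned long fd_knhashmask;      /* bucket count - 1, 0 if unset */
	struct klist fd_perfd[16];        /* stand-in for dt_ff[i]->ff_knlist */
	int fd_lastkqfile;
};

static unsigned long
kn_hash(unsigned long id, unsigned long mask)
{
	return id & mask;                 /* KN_HASH() in the source is mask-based */
}

static struct klist *
knote_list_for(struct filedesc *fdp, unsigned long ident, int fd_backed)
{
	if (!fd_backed) {
		if (fdp->fd_knhashmask == 0) {
			/* Lazy init, mirroring hashinit(KN_HASHSIZE, ...). */
			fdp->fd_knhash = calloc(KN_HASHSIZE, sizeof(*fdp->fd_knhash));
			fdp->fd_knhashmask = KN_HASHSIZE - 1;
		}
		return &fdp->fd_knhash[kn_hash(ident, fdp->fd_knhashmask)];
	}
	if ((int)ident > fdp->fd_lastkqfile)
		fdp->fd_lastkqfile = (int)ident;
	return &fdp->fd_perfd[ident];
}

int
main(void)
{
	struct filedesc fdp = { 0 };

	knote_list_for(&fdp, 12345, 0)->nknotes++;   /* e.g. a PID ident */
	knote_list_for(&fdp, 3, 1)->nknotes++;       /* an fd-backed knote */
	printf("lastkqfile %d, hash mask %lu\n",
	    fdp.fd_lastkqfile, fdp.fd_knhashmask);
	free(fdp.fd_knhash);
	return 0;
}
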
2043 * knote_detach() drops fdp->fd_lock
2050 knote_detach(kn, fdp, false);
2079 mutex_exit(&fdp->fd_lock);
2088 mutex_enter(&fdp->fd_lock);
2093 /* knote_detach() drops fdp->fd_lock */
2094 knote_detach(kn, fdp, true);
2148 mutex_exit(&fdp->fd_lock);
2280 filedesc_t *fdp;
2282 fdp = curlwp->l_fd;
2342 * Acquire the fdp->fd_lock interlock to avoid races with
2347 mutex_enter(&fdp->fd_lock);
2372 mutex_exit(&fdp->fd_lock);
2394 mutex_exit(&fdp->fd_lock);
2413 KASSERT(mutex_owned(&fdp->fd_lock));
2464 knote_detach(kn, fdp, true);
2465 mutex_enter(&fdp->fd_lock);
2499 mutex_exit(&fdp->fd_lock);
2503 mutex_enter(&fdp->fd_lock);
2518 mutex_exit(&fdp->fd_lock);
2652 filedesc_t *fdp;
2654 fdp = kq->kq_fdp;
2656 KASSERT(mutex_owned(&fdp->fd_lock));
2665 mutex_enter(&fdp->fd_lock);
2668 knote_detach(kn, fdp, true);
2669 mutex_enter(&fdp->fd_lock);
2681 filedesc_t *fdp;
2688 fdp = curlwp->l_fd;
2690 KASSERT(kq->kq_fdp == fdp);
2692 mutex_enter(&fdp->fd_lock);
2711 for (i = 0; i <= fdp->fd_lastkqfile; i++) {
2712 if ((ff = fdp->fd_dt->dt_ff[i]) == NULL)
2716 if (fdp->fd_knhashmask != 0) {
2717 for (i = 0; i < fdp->fd_knhashmask + 1; i++) {
2718 kqueue_doclose(kq, &fdp->fd_knhash[i], -1);
2722 mutex_exit(&fdp->fd_lock);
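
The loop at source lines 2711-2718 is the close-time sweep: with fd_lock held, every per-descriptor knote list up to fd_lastkqfile is visited (skipping descriptors that have no file entry) and then, if the hash table was ever created, every fd_knhash bucket is handed to kqueue_doclose(), as the call at 2718 shows. A small stand-alone sketch of that visiting pattern follows; visit_list() and the struct layouts are simplified assumptions, not the kernel's kqueue_doclose().

/*
 * Sketch of the "visit every klist" sweep shown above: per-fd lists
 * first (0 .. fd_lastkqfile), then all hash buckets if a hash table
 * exists.  Types and visit_list() are simplified stand-ins.
 */
#include <stddef.h>
#include <stdio.h>

struct klist { int nknotes; };

struct filedesc {
	struct klist *fd_perfd[8];        /* stand-in for dt_ff[i]->ff_knlist */
	int fd_lastkqfile;
	struct klist *fd_knhash;          /* hash buckets, or NULL */
	unsigned long fd_knhashmask;      /* 0 means the table was never made */
};

static void
visit_list(struct klist *list, int ident)
{
	printf("list for ident %d: %d knotes\n", ident, list->nknotes);
}

static void
sweep_all_klists(struct filedesc *fdp)
{
	for (int i = 0; i <= fdp->fd_lastkqfile; i++) {
		if (fdp->fd_perfd[i] == NULL)
			continue;                  /* no knotes on this fd */
		visit_list(fdp->fd_perfd[i], i);
	}
	if (fdp->fd_knhashmask != 0) {
		for (unsigned long i = 0; i < fdp->fd_knhashmask + 1; i++)
			visit_list(&fdp->fd_knhash[i], -1);
	}
}

int
main(void)
{
	struct klist l0 = { 2 }, l3 = { 1 }, buckets[4] = { { 0 } };
	struct filedesc fdp = {
		.fd_perfd = { [0] = &l0, [3] = &l3 },
		.fd_lastkqfile = 3,
		.fd_knhash = buckets,
		.fd_knhashmask = 3,
	};

	sweep_all_klists(&fdp);
	return 0;
}
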
2797 filedesc_t *fdp;
2800 fdp = curlwp->l_fd;
2801 mutex_enter(&fdp->fd_lock);
2802 list = (struct klist *)&fdp->fd_dt->dt_ff[fd]->ff_knlist;
2807 knote_detach(kn, fdp, true);
2808 mutex_enter(&fdp->fd_lock);
2810 mutex_exit(&fdp->fd_lock);
2814 * Drop knote. Called with fdp->fd_lock held, and will drop before
2818 knote_detach(struct knote *kn, filedesc_t *fdp, bool dofop)
2828 KASSERT(mutex_owned(&fdp->fd_lock));
2839 list = (struct klist *)&fdp->fd_dt->dt_ff[kn->kn_id]->ff_knlist;
2841 list = &fdp->fd_knhash[KN_HASH(kn->kn_id, fdp->fd_knhashmask)];
2862 mutex_exit(&fdp->fd_lock);
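
knote_detach() (source line 2818 onward) is entered with fdp->fd_lock held, unlinks the knote from either the descriptor's ff_knlist or the matching fd_knhash bucket, presumably runs the filter's detach op when dofop is true, and releases fd_lock before returning, which is why the callers at 2464-2465 and 2668-2669 immediately re-enter the lock after each call. The sketch below only models that enter-locked/return-unlocked contract with pthread mutexes; detach_one() and drain() are hypothetical names for illustration.

/*
 * Sketch of the knote_detach() locking contract: the caller holds
 * fdp->fd_lock on entry, the function drops it before returning, so
 * callers looping over knotes re-acquire it after each call.
 * pthread mutexes and the function names are stand-ins.
 */
#include <pthread.h>

struct filedesc {
	pthread_mutex_t fd_lock;
};

struct knote {
	int kn_id;
};

/* Entered with fd_lock held; returns with it released. */
static void
detach_one(struct knote *kn, struct filedesc *fdp, int dofop)
{
	/* ... unlink kn from its klist, run the detach op if dofop ... */
	(void)kn;
	(void)dofop;
	pthread_mutex_unlock(&fdp->fd_lock);    /* contract: drop before return */
}

/* Caller pattern from the hits at 2464-2465 and 2668-2669. */
static void
drain(struct filedesc *fdp, struct knote *kns, int n)
{
	pthread_mutex_lock(&fdp->fd_lock);
	for (int i = 0; i < n; i++) {
		detach_one(&kns[i], fdp, 1);        /* drops fd_lock */
		pthread_mutex_lock(&fdp->fd_lock);  /* take it back for the next knote */
	}
	pthread_mutex_unlock(&fdp->fd_lock);
}

int
main(void)
{
	struct filedesc fdp;
	struct knote kns[2] = { { .kn_id = 0 }, { .kn_id = 1 } };

	pthread_mutex_init(&fdp.fd_lock, NULL);
	drain(&fdp, kns, 2);
	pthread_mutex_destroy(&fdp.fd_lock);
	return 0;
}

Dropping the lock inside the detach path lets it block on per-object locks without holding fd_lock, at the cost of forcing every looping caller to revalidate its list after re-acquiring the lock.
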