
Lines Matching refs:vp

146  * Called with vp->v_uobj.vmobjlock held; return with it held.
149 wait_for_page(struct vnode *vp, struct vm_page *pg, const char *label)
151 KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
171 uvm_pagewait(pg, vp->v_uobj.vmobjlock, "lfsput");
172 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
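
The lines matched in wait_for_page() (146-172) spell out its locking contract: the caller enters with vp->v_uobj.vmobjlock write-held, uvm_pagewait() releases that lock while sleeping on the busy page, and the function re-acquires it before returning so the "return with it held" rule in the comment holds. Below is a minimal sketch of that pattern; wait_for_busy_page() is a hypothetical name and the real function's diagnostic handling is omitted.

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <sys/vnode.h>
#include <uvm/uvm.h>

/*
 * Sketch only: honour the "vmobjlock write-held on entry and on
 * return" rule from the comment at line 146.
 */
static void
wait_for_busy_page(struct vnode *vp, struct vm_page *pg)
{
        KASSERT(rw_write_held(vp->v_uobj.vmobjlock));

        /* uvm_pagewait() sleeps on the busy page and drops vmobjlock... */
        uvm_pagewait(pg, vp->v_uobj.vmobjlock, "lfsput");

        /* ...so take it back before returning to the caller. */
        rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
        KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
}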
184 * Called with vp->v_uobj.vmobjlock held; return with it held.
188 write_and_wait(struct lfs *fs, struct vnode *vp, struct vm_page *pg,
191 KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
193 struct inode *ip = VTOI(vp);
201 pg->uobject == &vp->v_uobj) {
202 rw_exit(vp->v_uobj.vmobjlock);
212 KASSERT(sp->vp == vp);
217 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
218 wait_for_page(vp, pg, label);
228 KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
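
write_and_wait() (184-228) follows the complementary pattern: it is also entered with the vmobjlock write-held, but it must release that lock around the segment write before re-acquiring it and waiting on the busy page via wait_for_page(). A hedged sketch of that shape, using the same kernel headers as the sketch above; write_current_segment() is a placeholder, not an LFS function, standing in for the write step that is not among the matched lines.

#include <ufs/lfs/lfs.h>                /* struct lfs */

static void write_current_segment(struct lfs *, struct vnode *);  /* placeholder */

/*
 * Sketch only: drop the object lock around the write, then re-lock
 * and wait, as in lines 191-218.
 */
static void
write_then_wait(struct lfs *fs, struct vnode *vp, struct vm_page *pg,
    const char *label)
{
        KASSERT(rw_write_held(vp->v_uobj.vmobjlock));

        if (pg != NULL && pg->uobject == &vp->v_uobj) {
                /* The segment write cannot be done with vmobjlock held. */
                rw_exit(vp->v_uobj.vmobjlock);
                write_current_segment(fs, vp);
                rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);

                /* Now wait for the page as in the sketch above. */
                wait_for_page(vp, pg, label);
        }

        KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
}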
241 check_dirty(struct lfs *fs, struct vnode *vp,
256 KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
271 KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
273 pgs[i] = pg = uvm_pagelookup(&vp->v_uobj, off);
293 KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
298 wait_for_page(vp, pg, NULL);
299 KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
302 KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
323 KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
332 KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
365 KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
390 * (2) We are called with vp->v_uobj.vmobjlock held; we must return with it
423 struct vnode *vp;
439 vp = ap->a_vp;
440 ip = VTOI(vp);
447 KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
450 if (vp == fs->lfs_ivnode || vp->v_type != VREG) {
451 rw_exit(vp->v_uobj.vmobjlock);
459 if (vp->v_uobj.uo_npages == 0) {
460 mutex_enter(vp->v_interlock);
461 if ((vp->v_iflag & VI_ONWORKLST) &&
462 LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
463 vn_syncer_remove_from_worklist(vp);
465 mutex_exit(vp->v_interlock);
468 rw_exit(vp->v_uobj.vmobjlock);
478 KASSERT(!rw_write_held(vp->v_uobj.vmobjlock));
497 pg = uvm_pagelookup(&vp->v_uobj, off);
500 uvm_pagewait(pg, vp->v_uobj.vmobjlock, "lfsput2");
501 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
504 uvm_pagelookup(&vp->v_uobj, off));
512 rw_exit(vp->v_uobj.vmobjlock);
538 rw_exit(vp->v_uobj.vmobjlock);
553 vp, (int)ip->i_number, ap->a_flags));
555 KASSERT(!rw_write_held(vp->v_uobj.vmobjlock));
562 trans_mp = vp->v_mount;
565 rw_exit(vp->v_uobj.vmobjlock);
574 rw_exit(vp->v_uobj.vmobjlock);
575 trans_mp = vp->v_mount;
577 if (vp->v_mount != trans_mp) {
582 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
598 KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
601 r = check_dirty(fs, vp, startoffset, endoffset, blkeof,
605 rw_exit(vp->v_uobj.vmobjlock);
619 r = genfs_do_putpages(vp, startoffset, endoffset,
623 KASSERT(!rw_write_held(vp->v_uobj.vmobjlock));
629 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
630 wait_for_page(vp, busypg, "dirtyclean");
649 * XXX We must drop the vp->interlock before taking the lfs_lock or we
653 rw_exit(vp->v_uobj.vmobjlock);
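
The XXX comment at line 649 records a lock-ordering rule: the per-vnode locks (v_interlock, and per line 653 the vmobjlock as well) must be released before the global lfs_lock is taken, otherwise a deadlock is possible. A minimal illustration of that ordering, assuming the standard NetBSD primitives; do_under_lfs_lock() is a hypothetical helper and the work done under lfs_lock is a placeholder.

#include <sys/mutex.h>

extern kmutex_t lfs_lock;       /* the global LFS lock used by the matched code */

/*
 * Sketch only: release the per-vnode lock before acquiring the
 * global lfs_lock, per the ordering noted at line 649.
 */
static void
do_under_lfs_lock(struct vnode *vp)
{
        /* Caller holds vmobjlock (write) here; drop it first. */
        rw_exit(vp->v_uobj.vmobjlock);

        /* Only then is it safe to take the global lock. */
        mutex_enter(&lfs_lock);
        /* ... segment accounting / flush decisions (placeholder) ... */
        mutex_exit(&lfs_lock);
}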
662 KASSERT(!rw_write_held(vp->v_uobj.vmobjlock));
674 (vp->v_uflag & VU_DIROP)) {
683 rw_exit(vp->v_uobj.vmobjlock);
687 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
721 * Account for a new FIP in the segment header, and set sp->vp.
726 rw_exit(vp->v_uobj.vmobjlock);
729 KASSERT(!rw_write_held(vp->v_uobj.vmobjlock));
732 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
736 KASSERT(sp->vp == NULL);
737 sp->vp = vp;
740 mutex_enter(vp->v_interlock);
741 if (vdead_check(vp, VDEAD_NOWAIT) != 0) {
745 mutex_exit(vp->v_interlock);
751 if (!seglocked && vp->v_uflag & VU_DIROP) {
769 KASSERT(rw_write_held(vp->v_uobj.vmobjlock));
770 if (check_dirty(fs, vp, startoffset, endoffset, blkeof,
772 write_and_wait(fs, vp, busypg, seglocked, NULL);
774 rw_exit(vp->v_uobj.vmobjlock);
777 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
779 sp->vp = NULL;
786 error = genfs_do_putpages(vp, startoffset, endoffset,
797 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
798 write_and_wait(fs, vp, busypg, seglocked, "again");
799 rw_exit(vp->v_uobj.vmobjlock);
816 DLOG((DLOG_PAGE, "vp %p ino %d vi_flags %x a_flags %x avoiding vclean panic\n",
817 vp, (int)ip->i_number, vp->v_iflag, ap->a_flags));
818 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
821 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
828 KASSERT(sp != NULL && sp->vp == vp);
830 sp->vp = NULL;
833 lfs_gather(fs, fs->lfs_sp, vp, lfs_match_indir);
834 lfs_gather(fs, fs->lfs_sp, vp, lfs_match_dindir);
835 lfs_gather(fs, fs->lfs_sp, vp, lfs_match_tindir);
837 KASSERT(sp->vp == NULL);
838 sp->vp = vp;
846 KASSERT(sp->vp == vp);
847 sp->vp = NULL;
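
The comment at line 721 and the sp->vp assignments matched around it show how lfs_putpages() uses sp->vp to mark the vnode the segment writer is currently working on: it is claimed at lines 736-737 (asserting it was NULL beforehand), temporarily relinquished around the lfs_gather() calls at 828-838 (which, judging from those assertions, manage sp->vp themselves), and finally cleared at 846-847. A hedged sketch of the middle step, mirroring those lines; gather_indirect_blocks() is a hypothetical wrapper name.

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>         /* lfs_gather(), lfs_match_*() */

/*
 * Sketch only: the sp->vp claim/release discipline around the
 * indirect-block gathering calls (lines 828-838).
 */
static void
gather_indirect_blocks(struct lfs *fs, struct segment *sp, struct vnode *vp)
{
        /* The caller has claimed sp->vp for this vnode (lines 736-737). */
        KASSERT(sp->vp == vp);

        /* lfs_gather() expects sp->vp to be free, so relinquish the claim... */
        sp->vp = NULL;
        lfs_gather(fs, sp, vp, lfs_match_indir);
        lfs_gather(fs, sp, vp, lfs_match_dindir);
        lfs_gather(fs, sp, vp, lfs_match_tindir);

        /* ...and take it back afterwards, as at lines 837-838. */
        KASSERT(sp->vp == NULL);
        sp->vp = vp;
}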
854 KASSERT(!rw_write_held(vp->v_uobj.vmobjlock));
888 mutex_enter(vp->v_interlock);
889 while (vp->v_numoutput > 0) {
891 " num %d\n", ip->i_number, vp->v_numoutput));
892 cv_wait(&vp->v_cv, vp->v_interlock);
894 mutex_exit(vp->v_interlock);
900 KASSERT(!rw_write_held(vp->v_uobj.vmobjlock));
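
Taken together, the lfs_putpages() matches (390 onward) document an asymmetric lock contract: the routine is entered with vp->v_uobj.vmobjlock write-held (the KASSERT at line 447), and the KASSERTs at lines 854 and 900 show the lock is no longer write-held when it returns, with every early-exit path doing its own rw_exit(). From a caller's point of view this is the usual VOP_PUTPAGES() convention; a minimal sketch of such a caller follows, using the same kernel headers as the first sketch. flush_vnode_pages() is a hypothetical helper and the flags chosen are illustrative.

/*
 * Sketch only: a caller's view of the putpages contract.  The callee
 * releases vmobjlock, so the caller must not unlock it afterwards.
 */
static int
flush_vnode_pages(struct vnode *vp)
{
        int error;

        rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
        /* offlo == offhi == 0 with PGO_ALLPAGES covers the whole object. */
        error = VOP_PUTPAGES(vp, 0, 0, PGO_ALLPAGES | PGO_CLEANIT | PGO_SYNCIO);
        /* vmobjlock has been dropped inside VOP_PUTPAGES(); no rw_exit() here. */

        return error;
}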