Lines matching references to vp (struct vnode *); the leading number on each hit is its line in the source file:
176 struct vnode *vp = NULL;
229 vp = nd.ni_vp;
241 if (vp == NULL) {
250 vput(vp);
251 vp = NULL;
254 vput(vp);
255 vp = NULL;
278 vp = nd.ni_vp;
279 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
288 vp = nd.ni_vp;
303 vp = nd.ni_vp;
307 vp = nd.ni_vp;
309 if (vp->v_type == VSOCK) {
319 error = vn_openchk(vp, cred, fmode);
327 error = VOP_SETATTR(vp, &va, cred);
331 if ((error = VOP_OPEN(vp, fmode, cred)) != 0)
334 mutex_enter(vp->v_interlock);
335 vp->v_writecount++;
336 mutex_exit(vp->v_interlock);
341 vput(vp);
342 vp = NULL;
361 KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
362 *ret_vp = vp;
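
The vn_open hits above (lines 176-362) share one invariant worth spelling out. A minimal sketch of the pattern, assuming only what those hits show: every failure path releases the vnode with vput() and clears the pointer, while the success path hands back vp still locked exclusive.

	if (error != 0) {
		vput(vp);	/* drops both the lock and the reference */
		vp = NULL;
	} else {
		/* success: caller receives a referenced, exclusively locked vnode */
		KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
		*ret_vp = vp;
	}
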
374 vn_writechk(struct vnode *vp)
381 if (vp->v_iflag & VI_TEXT)
387 vn_openchk(struct vnode *vp, kauth_cred_t cred, int fflags)
392 if (vp->v_type == VNON || vp->v_type == VBAD)
395 if ((fflags & O_DIRECTORY) != 0 && vp->v_type != VDIR)
398 if ((fflags & O_REGULAR) != 0 && vp->v_type != VREG)
409 if (vp->v_type == VDIR) {
413 error = vn_writechk(vp);
417 error = VOP_ACCESS(vp, permbits, cred);
426 vn_markexec(struct vnode *vp)
429 if ((vp->v_iflag & VI_EXECMAP) != 0) {
434 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
435 mutex_enter(vp->v_interlock);
436 if ((vp->v_iflag & VI_EXECMAP) == 0) {
437 cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
438 vp->v_iflag |= VI_EXECMAP;
440 mutex_exit(vp->v_interlock);
441 rw_exit(vp->v_uobj.vmobjlock);
449 vn_marktext(struct vnode *vp)
452 if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP)) == (VI_TEXT|VI_EXECMAP)) {
457 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
458 mutex_enter(vp->v_interlock);
459 if (vp->v_writecount != 0) {
460 KASSERT((vp->v_iflag & VI_TEXT) == 0);
461 mutex_exit(vp->v_interlock);
462 rw_exit(vp->v_uobj.vmobjlock);
465 if ((vp->v_iflag & VI_EXECMAP) == 0) {
466 cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
468 vp->v_iflag |= (VI_TEXT | VI_EXECMAP);
469 mutex_exit(vp->v_interlock);
470 rw_exit(vp->v_uobj.vmobjlock);
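
Both vn_markexec (lines 426-441) and vn_marktext (lines 449-470) use the same double-checked update: an unlocked fast-path test of the flag, then a retest under vp->v_uobj.vmobjlock and v_interlock before setting it, so the common already-marked case takes no locks. A condensed sketch, using only calls visible in the hits above:

	if ((vp->v_iflag & VI_EXECMAP) != 0)
		return;				/* fast path: no locks taken */
	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_EXECMAP) == 0) {	/* recheck under the locks */
		cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
		vp->v_iflag |= VI_EXECMAP;
	}
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);
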
480 vn_close(struct vnode *vp, int flags, kauth_cred_t cred)
484 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
486 mutex_enter(vp->v_interlock);
487 KASSERT(vp->v_writecount > 0);
488 vp->v_writecount--;
489 mutex_exit(vp->v_interlock);
491 error = VOP_CLOSE(vp, flags, cred);
492 vput(vp);
497 enforce_rlimit_fsize(struct vnode *vp, struct uio *uio, int ioflag)
502 if (uio->uio_rw != UIO_WRITE || vp->v_type != VREG)
505 KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
507 testoff = vp->v_size;
526 vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
536 vn_lock(vp, LK_SHARED | LK_RETRY);
538 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
554 if ((error = enforce_rlimit_fsize(vp, &auio, ioflg)) != 0)
558 error = VOP_READ(vp, &auio, ioflg, cred);
560 error = VOP_WRITE(vp, &auio, ioflg, cred);
571 VOP_UNLOCK(vp);
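
The vn_rdwr() hits (lines 526-571) show the locked read/write helper: shared lock for reads, exclusive for writes, an rlimit check before VOP_WRITE, and an unlock on the way out. A hedged usage sketch; the parameters after offset (segment flag, ioflags, credential, residual, lwp) are assumed from the conventional prototype, since the declaration is truncated at line 526:

	char buf[512];
	size_t resid;
	int error;

	/* read 512 bytes from offset 0 into a kernel-space buffer */
	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0,
	    UIO_SYSSPACE, 0, cred, &resid, curlwp);
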
580 struct vnode *vp = fp->f_vnode;
589 if (vp->v_type != VDIR)
603 vn_lock(vp, LK_SHARED | LK_RETRY);
607 error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
612 VOP_UNLOCK(vp);
617 struct vnode *ovp = vp;
619 vp, fp, l);
622 if (vp != ovp)
626 if (count == auio.uio_resid && (vp->v_vflag & VV_ROOT) &&
627 (vp->v_mount->mnt_flag & MNT_UNION)) {
628 struct vnode *tvp = vp;
629 vp = vp->v_mount->mnt_vnodecovered;
630 vref(vp);
632 fp->f_vnode = vp;
649 struct vnode *vp = fp->f_vnode;
664 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
666 vn_lock(vp, LK_SHARED | LK_RETRY);
667 if (__predict_false(vp->v_type == VDIR) &&
671 if (__predict_false(vp->v_type == VDIR) &&
675 error = VOP_READ(vp, uio, ioflag, cred);
678 VOP_UNLOCK(vp);
689 struct vnode *vp = fp->f_vnode;
695 if (vp->v_type == VREG && (fflag & O_APPEND))
700 (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
708 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
712 if ((error = enforce_rlimit_fsize(vp, uio, ioflag)) != 0)
715 error = VOP_WRITE(vp, uio, ioflag, cred);
734 VOP_UNLOCK(vp);
744 struct vnode *vp = fp->f_vnode;
747 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
748 error = vn_stat(vp, sb);
749 VOP_UNLOCK(vp);
754 vn_stat(struct vnode *vp, struct stat *sb)
761 error = VOP_GETATTR(vp, &va, kauth_cred_get());
771 switch (vp->v_type) {
819 struct vnode *vp = fp->f_vnode;
822 error = VOP_FCNTL(vp, com, data, fp->f_flag, kauth_cred_get());
832 struct vnode *vp = fp->f_vnode, *ovp;
836 switch (vp->v_type) {
841 vn_lock(vp, LK_SHARED | LK_RETRY);
842 error = VOP_GETATTR(vp, &vattr, kauth_cred_get());
844 if (vp->v_type == VDIR)
847 if (vp->v_type == VDIR)
850 VOP_UNLOCK(vp);
870 vn_lock(vp, LK_SHARED | LK_RETRY);
871 error = VOP_BMAP(vp, *block, NULL, block, NULL);
872 VOP_UNLOCK(vp);
881 vn_lock(vp, LK_SHARED | LK_RETRY);
882 error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
883 VOP_UNLOCK(vp);
893 error = VOP_IOCTL(vp, com, data, fp->f_flag, kauth_cred_get());
895 vref(vp);
898 curproc->p_session->s_ttyvp = vp;
935 struct vnode *vp;
951 vp = fp->f_vnode;
952 if (vp->v_type != VREG && vp->v_type != VCHR &&
953 vp->v_type != VBLK) {
957 if (vp->v_type != VCHR && off < 0) {
961 if (vp->v_type != VCHR && size > __type_max(off_t)) {
965 if (vp->v_type != VCHR && off > __type_max(off_t) - size) {
971 if (vp->v_type == VCHR &&
972 (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
989 vp->v_type == VCHR ? "MAP_SHARED" : "MAP_PRIVATE",
993 if (vp->v_type == VCHR)
1004 if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
1028 vn_lock(vp, LK_SHARED | LK_RETRY);
1029 error = VOP_GETATTR(vp, &va, l->l_cred);
1030 VOP_UNLOCK(vp);
1053 (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
1057 if (vp->v_type != VCHR) {
1058 error = VOP_MMAP(vp, prot, curlwp->l_cred);
1062 vref(vp);
1063 uobj = &vp->v_uobj;
1070 vn_markexec(vp);
1081 uobj = udv_attach(vp->v_rdev,
1100 needwritemap = (vp->v_iflag & VI_WRMAP) == 0 &&
1103 if ((vp->v_vflag & VV_MAPPED) == 0 || needwritemap) {
1104 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1105 vp->v_vflag |= VV_MAPPED;
1107 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
1108 mutex_enter(vp->v_interlock);
1109 vp->v_iflag |= VI_WRMAP;
1110 mutex_exit(vp->v_interlock);
1111 rw_exit(vp->v_uobj.vmobjlock);
1113 VOP_UNLOCK(vp);
1124 if (veriexec_verify(l, vp, "(mmap)", VERIEXEC_INDIRECT,
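
Within the mmap-path hits, lines 957-965 validate the request range without ever forming off + size, since that sum could itself overflow off_t. A sketch of the idiom; the errno is illustrative, as the actual return value is not among the hits:

	/* reject before computing off + size, which could overflow off_t */
	if (vp->v_type != VCHR && off > __type_max(off_t) - size)
		return EINVAL;	/* assumed errno, not shown in the listing */
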
1157 struct vnode *vp = fp->f_vnode;
1161 if (vp->v_type == VFIFO)
1165 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1167 vn_lock(vp, LK_SHARED | LK_RETRY);
1170 if (vp->v_type == VDIR && (flags & FOF_UPDATE_OFFSET) == 0)
1173 if (vp->v_type == VDIR && (flags & FOF_UPDATE_OFFSET) == 0)
1191 error = VOP_GETATTR(vp, &vattr, cred);
1210 error = VOP_SEEK(vp, oldoff, newoff, cred);
1221 out: VOP_UNLOCK(vp);
1228 struct vnode *const vp = fp->f_vnode;
1231 vn_lock(vp, LK_SHARED | LK_RETRY);
1233 VOP_UNLOCK(vp);
1236 return VOP_ADVLOCK(vp, id, op, fl, flags);
1242 struct vnode *const vp = fp->f_vnode;
1245 vn_lock(vp, LK_SHARED | LK_RETRY);
1246 error = VOP_PATHCONF(vp, name, retval);
1247 VOP_UNLOCK(vp);
1256 struct vnode *vp = fp->f_vnode;
1278 if (vp->v_type != VREG && vp->v_type != VBLK)
1298 error = uvm_readahead(&vp->v_uobj, offset, endoffset - offset);
1311 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
1312 error = VOP_PUTPAGES(vp,
1336 struct vnode *vp;
1344 vp = fp->f_vnode;
1345 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1346 if (vp->v_type == VDIR)
1348 else if ((error = vn_writechk(vp)) == 0) {
1351 error = VOP_SETATTR(vp
1353 VOP_UNLOCK(vp);
1364 vn_lock(struct vnode *vp, int flags)
1369 KASSERT(vrefcnt(vp) > 0);
1372 KASSERT((flags & LK_NOWAIT) != 0 || !mutex_owned(vp->v_interlock));
1375 if (wapbl_vphaswapbl(vp))
1376 WAPBL_JUNLOCK_ASSERT(wapbl_vptomp(vp));
1384 error = VOP_LOCK(vp, flags);
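
vn_lock() (lines 1364-1384) is the bracket used by nearly every wrapper in this listing: take the vnode lock, issue the VOPs, drop it with VOP_UNLOCK(). The minimal shape, copied from the GETATTR hits at lines 1028-1030:

	vn_lock(vp, LK_SHARED | LK_RETRY);	/* LK_EXCLUSIVE when modifying */
	error = VOP_GETATTR(vp, &va, cred);
	VOP_UNLOCK(vp);
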
1422 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
1440 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1442 error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL,
1446 VOP_UNLOCK(vp);
1458 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
1476 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1479 error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NOCRED);
1482 VOP_UNLOCK(vp);
1489 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
1495 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1498 error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NOCRED);
1500 error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
1504 VOP_UNLOCK(vp);
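
The three extattr wrappers (lines 1422-1504) each take the exclusive lock, call the corresponding VOP, and unlock. A hedged call sketch for vn_extattr_get(); the parameters after attrnamespace follow the conventional prototype (attribute name, in/out buffer length, buffer, lwp), which the truncated declaration at line 1422 does not confirm, and the attribute name is illustrative:

	char buf[64];
	size_t buflen = sizeof(buf);
	int error;

	/* ioflg 0: let the helper take and release the vnode lock itself */
	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_USER,
	    "example", &buflen, buf, l);
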
1550 struct vnode *vp;
1555 error = vn_open(NULL, pb, 0, FREAD | FWRITE, 0, &vp, NULL, NULL);
1559 vt = vp->v_type;
1561 dev = vp->v_rdev;
1563 VOP_UNLOCK(vp);
1564 (void) vn_close(vp, FREAD | FWRITE, l->l_cred);
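
Lines 1550-1564 are themselves a compact example of the vn_open() contract established earlier: open by pathbuf, inspect the locked vnode, unlock, then vn_close() the still-referenced vnode. Reassembled as one hedged sketch, with the surrounding declarations filled in:

	struct vnode *vp;
	enum vtype vt;
	dev_t dev;
	int error;

	error = vn_open(NULL, pb, 0, FREAD | FWRITE, 0, &vp, NULL, NULL);
	if (error == 0) {
		vt = vp->v_type;	/* vnode type, e.g. VBLK */
		dev = vp->v_rdev;	/* device number */
		VOP_UNLOCK(vp);		/* keep the reference, drop the lock */
		(void)vn_close(vp, FREAD | FWRITE, l->l_cred);
	}
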
1598 vn_knote_attach(struct vnode *vp, struct knote *kn)
1600 struct vnode_klist *vk = vp->v_klist;
1607 KASSERT(kn->kn_hook == vp);
1608 KASSERT(vp->v_klist == &VNODE_TO_VIMPL(vp)->vi_klist);
1618 mutex_enter(vp->v_interlock);
1624 mutex_exit(vp->v_interlock);
1628 vn_knote_detach(struct vnode *vp, struct knote *kn)
1630 struct vnode_klist *vk = vp->v_klist;
1634 KASSERT(kn->kn_hook == vp);
1635 KASSERT(vp->v_klist == &VNODE_TO_VIMPL(vp)->vi_klist);
1648 mutex_enter(vp->v_interlock);
1671 mutex_exit(vp->v_interlock);