Home | History | Annotate | Line # | Download | only in kern
vfs_vnops.c revision 1.235.4.3
      1 /*	$NetBSD: vfs_vnops.c,v 1.235.4.3 2025/07/12 10:57:57 martin Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2009 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Copyright (c) 1982, 1986, 1989, 1993
     34  *	The Regents of the University of California.  All rights reserved.
     35  * (c) UNIX System Laboratories, Inc.
     36  * All or some portions of this file are derived from material licensed
     37  * to the University of California by American Telephone and Telegraph
     38  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     39  * the permission of UNIX System Laboratories, Inc.
     40  *
     41  * Redistribution and use in source and binary forms, with or without
     42  * modification, are permitted provided that the following conditions
     43  * are met:
     44  * 1. Redistributions of source code must retain the above copyright
     45  *    notice, this list of conditions and the following disclaimer.
     46  * 2. Redistributions in binary form must reproduce the above copyright
     47  *    notice, this list of conditions and the following disclaimer in the
     48  *    documentation and/or other materials provided with the distribution.
     49  * 3. Neither the name of the University nor the names of its contributors
     50  *    may be used to endorse or promote products derived from this software
     51  *    without specific prior written permission.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     63  * SUCH DAMAGE.
     64  *
     65  *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
     66  */
     67 
     68 #include <sys/cdefs.h>
     69 __KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.235.4.3 2025/07/12 10:57:57 martin Exp $");
     70 
     71 #include "veriexec.h"
     72 
     73 #include <sys/param.h>
     74 #include <sys/systm.h>
     75 #include <sys/kernel.h>
     76 #include <sys/file.h>
     77 #include <sys/stat.h>
     78 #include <sys/buf.h>
     79 #include <sys/proc.h>
     80 #include <sys/mount.h>
     81 #include <sys/namei.h>
     82 #include <sys/vnode_impl.h>
     83 #include <sys/ioctl.h>
     84 #include <sys/tty.h>
     85 #include <sys/poll.h>
     86 #include <sys/kauth.h>
     87 #include <sys/syslog.h>
     88 #include <sys/fstrans.h>
     89 #include <sys/atomic.h>
     90 #include <sys/filedesc.h>
     91 #include <sys/wapbl.h>
     92 #include <sys/mman.h>
     93 
     94 #include <miscfs/specfs/specdev.h>
     95 #include <miscfs/fifofs/fifo.h>
     96 
     97 #include <uvm/uvm_extern.h>
     98 #include <uvm/uvm_readahead.h>
     99 #include <uvm/uvm_device.h>
    100 
    101 #ifdef UNION
    102 #include <fs/union/union.h>
    103 #endif
    104 
    105 #ifndef COMPAT_ZERODEV
    106 #define COMPAT_ZERODEV(dev)	(0)
    107 #endif
    108 
    109 int (*vn_union_readdir_hook)(struct vnode **, struct file *, struct lwp *);
    110 
    111 #include <sys/verified_exec.h>
    112 
    113 static int vn_read(file_t *fp, off_t *offset, struct uio *uio,
    114     kauth_cred_t cred, int flags);
    115 static int vn_write(file_t *fp, off_t *offset, struct uio *uio,
    116     kauth_cred_t cred, int flags);
    117 static int vn_closefile(file_t *fp);
    118 static int vn_poll(file_t *fp, int events);
    119 static int vn_fcntl(file_t *fp, u_int com, void *data);
    120 static int vn_statfile(file_t *fp, struct stat *sb);
    121 static int vn_ioctl(file_t *fp, u_long com, void *data);
    122 static int vn_mmap(struct file *, off_t *, size_t, int, int *, int *,
    123     struct uvm_object **, int *);
    124 static int vn_seek(struct file *, off_t, int, off_t *, int);
    125 
    126 const struct fileops vnops = {
    127 	.fo_name = "vn",
    128 	.fo_read = vn_read,
    129 	.fo_write = vn_write,
    130 	.fo_ioctl = vn_ioctl,
    131 	.fo_fcntl = vn_fcntl,
    132 	.fo_poll = vn_poll,
    133 	.fo_stat = vn_statfile,
    134 	.fo_close = vn_closefile,
    135 	.fo_kqfilter = vn_kqfilter,
    136 	.fo_restart = fnullop_restart,
    137 	.fo_mmap = vn_mmap,
    138 	.fo_seek = vn_seek,
    139 };
    140 
    141 /*
    142  * Common code for vnode open operations.
    143  * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
    144  *
    145  * at_dvp is the directory for openat(), if any.
    146  * pb is the path.
    147  * nmode is additional namei flags, restricted to TRYEMULROOT and NOCHROOT.
    148  * fmode is the open flags, converted from O_* to F*
    149  * cmode is the creation file permissions.
    150  *
    151  * XXX shouldn't cmode be mode_t?
    152  *
    153  * On success produces either a vnode in *ret_vp, or if that is NULL,
    154  * a file descriptor number in ret_fd.
    155  *
    156  * The caller may pass NULL for ret_fd (and ret_domove), in which case
    157  * EOPNOTSUPP will be produced in the cases that would otherwise return
    158  * a file descriptor.
    159  *
    160  * Note that callers that want no-follow behavior should pass
    161  * O_NOFOLLOW in fmode. Neither FOLLOW nor NOFOLLOW in nmode is
    162  * honored.
    163  */
    164 int
    165 vn_open(struct vnode *at_dvp, struct pathbuf *pb,
    166 	int nmode, int fmode, int cmode,
    167 	struct vnode **ret_vp, bool *ret_domove, int *ret_fd)
    168 {
    169 	struct nameidata nd;
    170 	struct vnode *vp = NULL;
    171 	struct lwp *l = curlwp;
    172 	kauth_cred_t cred = l->l_cred;
    173 	struct vattr va;
    174 	int error;
    175 	const char *pathstring;
    176 
    177 	KASSERT((nmode & (TRYEMULROOT | NOCHROOT)) == nmode);
    178 
    179 	KASSERT(ret_vp != NULL);
    180 	KASSERT((ret_domove == NULL) == (ret_fd == NULL));
    181 
    182 	if ((fmode & (O_CREAT | O_DIRECTORY)) == (O_CREAT | O_DIRECTORY))
    183 		return EINVAL;
    184 
    185 	NDINIT(&nd, LOOKUP, nmode, pb);
    186 	if (at_dvp != NULL)
    187 		NDAT(&nd, at_dvp);
    188 
    189 	nd.ni_cnd.cn_flags &= TRYEMULROOT | NOCHROOT;
    190 
    191 	if (fmode & O_CREAT) {
    192 		nd.ni_cnd.cn_nameiop = CREATE;
    193 		nd.ni_cnd.cn_flags |= LOCKPARENT | LOCKLEAF;
    194 		if ((fmode & O_EXCL) == 0 &&
    195 		    ((fmode & O_NOFOLLOW) == 0))
    196 			nd.ni_cnd.cn_flags |= FOLLOW;
    197 		if ((fmode & O_EXCL) == 0)
    198 			nd.ni_cnd.cn_flags |= NONEXCLHACK;
    199 	} else {
    200 		nd.ni_cnd.cn_nameiop = LOOKUP;
    201 		nd.ni_cnd.cn_flags |= LOCKLEAF;
    202 		if ((fmode & O_NOFOLLOW) == 0)
    203 			nd.ni_cnd.cn_flags |= FOLLOW;
    204 	}
    205 
    206 	pathstring = pathbuf_stringcopy_get(nd.ni_pathbuf);
    207 	if (pathstring == NULL) {
    208 		return ENOMEM;
    209 	}
    210 
    211 	/*
    212 	 * When this "interface" was exposed to do_open() it used
    213 	 * to initialize l_dupfd to -newfd-1 (thus passing in the
    214 	 * new file handle number to use)... but nothing in the
    215 	 * kernel uses that value. So just send 0.
    216 	 */
    217 	l->l_dupfd = 0;
    218 
    219 	error = namei(&nd);
    220 	if (error)
    221 		goto out;
    222 
    223 	vp = nd.ni_vp;
    224 
    225 #if NVERIEXEC > 0
    226 	error = veriexec_openchk(l, nd.ni_vp, pathstring, fmode);
    227 	if (error) {
    228 		/* We have to release the locks ourselves */
    229 		/*
    230 		 * 20210604 dholland passing NONEXCLHACK means we can
    231 		 * get ni_dvp == NULL back if ni_vp exists, and we should
    232 		 * treat that like the non-O_CREAT case.
    233 		 */
    234 		if ((fmode & O_CREAT) != 0 && nd.ni_dvp != NULL) {
    235 			if (vp == NULL) {
    236 				vput(nd.ni_dvp);
    237 			} else {
    238 				VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
    239 				if (nd.ni_dvp == nd.ni_vp)
    240 					vrele(nd.ni_dvp);
    241 				else
    242 					vput(nd.ni_dvp);
    243 				nd.ni_dvp = NULL;
    244 				vput(vp);
    245 			}
    246 		} else {
    247 			vput(vp);
    248 		}
    249 		goto out;
    250 	}
    251 #endif /* NVERIEXEC > 0 */
    252 
    253 	/*
    254 	 * 20210604 dholland ditto
    255 	 */
    256 	if ((fmode & O_CREAT) != 0 && nd.ni_dvp != NULL) {
    257 		if (nd.ni_vp == NULL) {
    258 			vattr_null(&va);
    259 			va.va_type = VREG;
    260 			va.va_mode = cmode;
    261 			if (fmode & O_EXCL)
    262 				 va.va_vaflags |= VA_EXCLUSIVE;
    263 			error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp,
    264 					   &nd.ni_cnd, &va);
    265 			if (error) {
    266 				vput(nd.ni_dvp);
    267 				goto out;
    268 			}
    269 			fmode &= ~O_TRUNC;
    270 			vp = nd.ni_vp;
    271 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    272 			vput(nd.ni_dvp);
    273 		} else {
    274 			VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
    275 			if (nd.ni_dvp == nd.ni_vp)
    276 				vrele(nd.ni_dvp);
    277 			else
    278 				vput(nd.ni_dvp);
    279 			nd.ni_dvp = NULL;
    280 			vp = nd.ni_vp;
    281 			if (fmode & O_EXCL) {
    282 				error = EEXIST;
    283 				goto bad;
    284 			}
    285 			fmode &= ~O_CREAT;
    286 		}
    287 	} else if ((fmode & O_CREAT) != 0) {
    288 		/*
    289 		 * 20210606 dholland passing NONEXCLHACK means this
    290 		 * case exists; it is the same as the following one
    291 		 * but also needs to do things in the second (exists)
    292 		 * half of the following block. (Besides handle
    293 		 * ni_dvp, anyway.)
    294 		 */
    295 		vp = nd.ni_vp;
    296 		KASSERT((fmode & O_EXCL) == 0);
    297 		fmode &= ~O_CREAT;
    298 	} else {
    299 		vp = nd.ni_vp;
    300 	}
    301 	if (vp->v_type == VSOCK) {
    302 		error = EOPNOTSUPP;
    303 		goto bad;
    304 	}
    305 	if (nd.ni_vp->v_type == VLNK) {
    306 		error = EFTYPE;
    307 		goto bad;
    308 	}
    309 
    310 	if ((fmode & O_CREAT) == 0) {
    311 		error = vn_openchk(vp, cred, fmode);
    312 		if (error != 0)
    313 			goto bad;
    314 	}
    315 
    316 	if (fmode & O_TRUNC) {
    317 		vattr_null(&va);
    318 		va.va_size = 0;
    319 		error = VOP_SETATTR(vp, &va, cred);
    320 		if (error != 0)
    321 			goto bad;
    322 	}
    323 	if ((error = VOP_OPEN(vp, fmode, cred)) != 0)
    324 		goto bad;
    325 	if (fmode & FWRITE) {
    326 		mutex_enter(vp->v_interlock);
    327 		vp->v_writecount++;
    328 		mutex_exit(vp->v_interlock);
    329 	}
    330 
    331 bad:
    332 	if (error)
    333 		vput(vp);
    334 out:
    335 	pathbuf_stringcopy_put(nd.ni_pathbuf, pathstring);
    336 
    337 	switch (error) {
    338 	case EDUPFD:
    339 	case EMOVEFD:
    340 		/* if the caller isn't prepared to handle fds, fail for them */
    341 		if (ret_fd == NULL) {
    342 			error = EOPNOTSUPP;
    343 			break;
    344 		}
    345 		*ret_vp = NULL;
    346 		*ret_domove = error == EMOVEFD;
    347 		*ret_fd = l->l_dupfd;
    348 		error = 0;
    349 		break;
    350 	case 0:
    351 		*ret_vp = vp;
    352 		break;
    353 	}
    354 	l->l_dupfd = 0;
    355 	return error;
    356 }
    357 
    358 /*
    359  * Check for write permissions on the specified vnode.
    360  * Prototype text segments cannot be written.
    361  */
    362 int
    363 vn_writechk(struct vnode *vp)
    364 {
    365 
    366 	/*
    367 	 * If the vnode is in use as a process's text,
    368 	 * we can't allow writing.
    369 	 */
    370 	if (vp->v_iflag & VI_TEXT)
    371 		return ETXTBSY;
    372 	return 0;
    373 }
    374 
    375 int
    376 vn_openchk(struct vnode *vp, kauth_cred_t cred, int fflags)
    377 {
    378 	int permbits = 0;
    379 	int error;
    380 
    381 	if (vp->v_type == VNON || vp->v_type == VBAD)
    382 		return ENXIO;
    383 
    384 	if ((fflags & O_DIRECTORY) != 0 && vp->v_type != VDIR)
    385 		return ENOTDIR;
    386 
    387 	if ((fflags & O_REGULAR) != 0 && vp->v_type != VREG)
    388 		return EFTYPE;
    389 
    390 	if ((fflags & FREAD) != 0) {
    391 		permbits = VREAD;
    392 	}
    393 	if ((fflags & FEXEC) != 0) {
    394 		permbits |= VEXEC;
    395 	}
    396 	if ((fflags & (FWRITE | O_TRUNC)) != 0) {
    397 		permbits |= VWRITE;
    398 		if (vp->v_type == VDIR) {
    399 			error = EISDIR;
    400 			goto bad;
    401 		}
    402 		error = vn_writechk(vp);
    403 		if (error != 0)
    404 			goto bad;
    405 	}
    406 	error = VOP_ACCESS(vp, permbits, cred);
    407 bad:
    408 	return error;
    409 }
    410 
    411 /*
    412  * Mark a vnode as having executable mappings.
    413  */
    414 void
    415 vn_markexec(struct vnode *vp)
    416 {
    417 
    418 	if ((vp->v_iflag & VI_EXECMAP) != 0) {
    419 		/* Safe unlocked, as long as caller holds a reference. */
    420 		return;
    421 	}
    422 
    423 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
    424 	mutex_enter(vp->v_interlock);
    425 	if ((vp->v_iflag & VI_EXECMAP) == 0) {
    426 		cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
    427 		vp->v_iflag |= VI_EXECMAP;
    428 	}
    429 	mutex_exit(vp->v_interlock);
    430 	rw_exit(vp->v_uobj.vmobjlock);
    431 }
    432 
    433 /*
    434  * Mark a vnode as being the text of a process.
    435  * Fail if the vnode is currently writable.
    436  */
    437 int
    438 vn_marktext(struct vnode *vp)
    439 {
    440 
    441 	if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP)) == (VI_TEXT|VI_EXECMAP)) {
    442 		/* Safe unlocked, as long as caller holds a reference. */
    443 		return 0;
    444 	}
    445 
    446 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
    447 	mutex_enter(vp->v_interlock);
    448 	if (vp->v_writecount != 0) {
    449 		KASSERT((vp->v_iflag & VI_TEXT) == 0);
    450 		mutex_exit(vp->v_interlock);
    451 		rw_exit(vp->v_uobj.vmobjlock);
    452 		return ETXTBSY;
    453 	}
    454 	if ((vp->v_iflag & VI_EXECMAP) == 0) {
    455 		cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
    456 	}
    457 	vp->v_iflag |= (VI_TEXT | VI_EXECMAP);
    458 	mutex_exit(vp->v_interlock);
    459 	rw_exit(vp->v_uobj.vmobjlock);
    460 	return 0;
    461 }
    462 
    463 /*
    464  * Vnode close call
    465  *
    466  * Note: takes an unlocked vnode, while VOP_CLOSE takes a locked node.
    467  */
    468 int
    469 vn_close(struct vnode *vp, int flags, kauth_cred_t cred)
    470 {
    471 	int error;
    472 
    473 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    474 	if (flags & FWRITE) {
    475 		mutex_enter(vp->v_interlock);
    476 		KASSERT(vp->v_writecount > 0);
    477 		vp->v_writecount--;
    478 		mutex_exit(vp->v_interlock);
    479 	}
    480 	error = VOP_CLOSE(vp, flags, cred);
    481 	vput(vp);
    482 	return error;
    483 }
    484 
    485 static int
    486 enforce_rlimit_fsize(struct vnode *vp, struct uio *uio, int ioflag)
    487 {
    488 	struct lwp *l = curlwp;
    489 	off_t testoff;
    490 
    491 	if (uio->uio_rw != UIO_WRITE || vp->v_type != VREG)
    492 		return 0;
    493 
    494 	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
    495 	if (ioflag & IO_APPEND)
    496 		testoff = vp->v_size;
    497 	else
    498 		testoff = uio->uio_offset;
    499 
    500 	if (testoff + uio->uio_resid >
    501 	    l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
    502 		mutex_enter(&proc_lock);
    503 		psignal(l->l_proc, SIGXFSZ);
    504 		mutex_exit(&proc_lock);
    505 		return EFBIG;
    506 	}
    507 
    508 	return 0;
    509 }
    510 
    511 /*
    512  * Package up an I/O request on a vnode into a uio and do it.
    513  */
    514 int
    515 vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    516     enum uio_seg segflg, int ioflg, kauth_cred_t cred, size_t *aresid,
    517     struct lwp *l)
    518 {
    519 	struct uio auio;
    520 	struct iovec aiov;
    521 	int error;
    522 
    523 	if ((ioflg & IO_NODELOCKED) == 0) {
    524 		if (rw == UIO_READ) {
    525 			vn_lock(vp, LK_SHARED | LK_RETRY);
    526 		} else /* UIO_WRITE */ {
    527 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    528 		}
    529 	}
    530 	auio.uio_iov = &aiov;
    531 	auio.uio_iovcnt = 1;
    532 	aiov.iov_base = base;
    533 	aiov.iov_len = len;
    534 	auio.uio_resid = len;
    535 	auio.uio_offset = offset;
    536 	auio.uio_rw = rw;
    537 	if (segflg == UIO_SYSSPACE) {
    538 		UIO_SETUP_SYSSPACE(&auio);
    539 	} else {
    540 		auio.uio_vmspace = l->l_proc->p_vmspace;
    541 	}
    542 
    543 	if ((error = enforce_rlimit_fsize(vp, &auio, ioflg)) != 0)
    544 		goto out;
    545 
    546 	if (rw == UIO_READ) {
    547 		error = VOP_READ(vp, &auio, ioflg, cred);
    548 	} else {
    549 		error = VOP_WRITE(vp, &auio, ioflg, cred);
    550 	}
    551 
    552 	if (aresid)
    553 		*aresid = auio.uio_resid;
    554 	else
    555 		if (auio.uio_resid && error == 0)
    556 			error = EIO;
    557 
    558  out:
    559 	if ((ioflg & IO_NODELOCKED) == 0) {
    560 		VOP_UNLOCK(vp);
    561 	}
    562 	return error;
    563 }
    564 
    565 int
    566 vn_readdir(file_t *fp, char *bf, int segflg, u_int count, int *done,
    567     struct lwp *l, off_t **cookies, int *ncookies)
    568 {
    569 	struct vnode *vp = fp->f_vnode;
    570 	struct iovec aiov;
    571 	struct uio auio;
    572 	int error, eofflag;
    573 
    574 	/* Limit the size on any kernel buffers used by VOP_READDIR */
    575 	count = uimin(MAXBSIZE, count);
    576 
    577 unionread:
    578 	if (vp->v_type != VDIR)
    579 		return EINVAL;
    580 	aiov.iov_base = bf;
    581 	aiov.iov_len = count;
    582 	auio.uio_iov = &aiov;
    583 	auio.uio_iovcnt = 1;
    584 	auio.uio_rw = UIO_READ;
    585 	if (segflg == UIO_SYSSPACE) {
    586 		UIO_SETUP_SYSSPACE(&auio);
    587 	} else {
    588 		KASSERT(l == curlwp);
    589 		auio.uio_vmspace = l->l_proc->p_vmspace;
    590 	}
    591 	auio.uio_resid = count;
    592 	vn_lock(vp, LK_SHARED | LK_RETRY);
    593 	mutex_enter(&fp->f_lock);
    594 	auio.uio_offset = fp->f_offset;
    595 	mutex_exit(&fp->f_lock);
    596 	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
    597 	    ncookies);
    598 	mutex_enter(&fp->f_lock);
    599 	fp->f_offset = auio.uio_offset;
    600 	mutex_exit(&fp->f_lock);
    601 	VOP_UNLOCK(vp);
    602 	if (error)
    603 		return error;
    604 
    605 	if (count == auio.uio_resid && vn_union_readdir_hook) {
    606 		struct vnode *ovp = vp;
    607 
    608 		error = (*vn_union_readdir_hook)(&vp, fp, l);
    609 		if (error)
    610 			return error;
    611 		if (vp != ovp)
    612 			goto unionread;
    613 	}
    614 
    615 	if (count == auio.uio_resid && (vp->v_vflag & VV_ROOT) &&
    616 	    (vp->v_mount->mnt_flag & MNT_UNION)) {
    617 		struct vnode *tvp = vp;
    618 		vp = vp->v_mount->mnt_vnodecovered;
    619 		vref(vp);
    620 		mutex_enter(&fp->f_lock);
    621 		fp->f_vnode = vp;
    622 		fp->f_offset = 0;
    623 		mutex_exit(&fp->f_lock);
    624 		vrele(tvp);
    625 		goto unionread;
    626 	}
    627 	*done = count - auio.uio_resid;
    628 	return error;
    629 }
    630 
    631 /*
    632  * File table vnode read routine.
    633  */
    634 static int
    635 vn_read(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    636     int flags)
    637 {
    638 	struct vnode *vp = fp->f_vnode;
    639 	int error, ioflag, fflag;
    640 	size_t count;
    641 
    642 	ioflag = IO_ADV_ENCODE(fp->f_advice);
    643 	fflag = fp->f_flag;
    644 	if (fflag & FNONBLOCK)
    645 		ioflag |= IO_NDELAY;
    646 	if ((fflag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
    647 		ioflag |= IO_SYNC;
    648 	if (fflag & FALTIO)
    649 		ioflag |= IO_ALTSEMANTICS;
    650 	if (fflag & FDIRECT)
    651 		ioflag |= IO_DIRECT;
    652 	if (offset == &fp->f_offset && (flags & FOF_UPDATE_OFFSET) != 0)
    653 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    654 	else
    655 		vn_lock(vp, LK_SHARED | LK_RETRY);
    656 	if (__predict_false(vp->v_type == VDIR) &&
    657 	    offset == &fp->f_offset && (flags & FOF_UPDATE_OFFSET) == 0)
    658 		mutex_enter(&fp->f_lock);
    659 	uio->uio_offset = *offset;
    660 	if (__predict_false(vp->v_type == VDIR) &&
    661 	    offset == &fp->f_offset && (flags & FOF_UPDATE_OFFSET) == 0)
    662 		mutex_exit(&fp->f_lock);
    663 	count = uio->uio_resid;
    664 	error = VOP_READ(vp, uio, ioflag, cred);
    665 	if (flags & FOF_UPDATE_OFFSET)
    666 		*offset += count - uio->uio_resid;
    667 	VOP_UNLOCK(vp);
    668 	return error;
    669 }
    670 
    671 /*
    672  * File table vnode write routine.
    673  */
    674 static int
    675 vn_write(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    676     int flags)
    677 {
    678 	struct vnode *vp = fp->f_vnode;
    679 	int error, ioflag, fflag;
    680 	size_t count;
    681 
    682 	ioflag = IO_ADV_ENCODE(fp->f_advice) | IO_UNIT;
    683 	fflag = fp->f_flag;
    684 	if (vp->v_type == VREG && (fflag & O_APPEND))
    685 		ioflag |= IO_APPEND;
    686 	if (fflag & FNONBLOCK)
    687 		ioflag |= IO_NDELAY;
    688 	if (fflag & FFSYNC ||
    689 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
    690 		ioflag |= IO_SYNC;
    691 	else if (fflag & FDSYNC)
    692 		ioflag |= IO_DSYNC;
    693 	if (fflag & FALTIO)
    694 		ioflag |= IO_ALTSEMANTICS;
    695 	if (fflag & FDIRECT)
    696 		ioflag |= IO_DIRECT;
    697 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    698 	uio->uio_offset = *offset;
    699 	count = uio->uio_resid;
    700 
    701 	if ((error = enforce_rlimit_fsize(vp, uio, ioflag)) != 0)
    702 		goto out;
    703 
    704 	error = VOP_WRITE(vp, uio, ioflag, cred);
    705 
    706 	if (flags & FOF_UPDATE_OFFSET) {
    707 		if (ioflag & IO_APPEND) {
    708 			/*
    709 			 * SUSv3 describes behaviour for count = 0 as following:
    710 			 * "Before any action ... is taken, and if nbyte is zero
    711 			 * and the file is a regular file, the write() function
    712 			 * ... in the absence of errors ... shall return zero
    713 			 * and have no other results."
    714 			 */
    715 			if (count)
    716 				*offset = uio->uio_offset;
    717 		} else
    718 			*offset += count - uio->uio_resid;
    719 	}
    720 
    721  out:
    722 	VOP_UNLOCK(vp);
    723 	return error;
    724 }
    725 
    726 /*
    727  * File table vnode stat routine.
    728  */
    729 static int
    730 vn_statfile(file_t *fp, struct stat *sb)
    731 {
    732 	struct vnode *vp = fp->f_vnode;
    733 	int error;
    734 
    735 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    736 	error = vn_stat(vp, sb);
    737 	VOP_UNLOCK(vp);
    738 	return error;
    739 }
    740 
    741 int
    742 vn_stat(struct vnode *vp, struct stat *sb)
    743 {
    744 	struct vattr va;
    745 	int error;
    746 	mode_t mode;
    747 
    748 	memset(&va, 0, sizeof(va));
    749 	error = VOP_GETATTR(vp, &va, kauth_cred_get());
    750 	if (error)
    751 		return error;
    752 	/*
    753 	 * Copy from vattr table
    754 	 */
    755 	memset(sb, 0, sizeof(*sb));
    756 	sb->st_dev = va.va_fsid;
    757 	sb->st_ino = va.va_fileid;
    758 	mode = va.va_mode;
    759 	switch (vp->v_type) {
    760 	case VREG:
    761 		mode |= S_IFREG;
    762 		break;
    763 	case VDIR:
    764 		mode |= S_IFDIR;
    765 		break;
    766 	case VBLK:
    767 		mode |= S_IFBLK;
    768 		break;
    769 	case VCHR:
    770 		mode |= S_IFCHR;
    771 		break;
    772 	case VLNK:
    773 		mode |= S_IFLNK;
    774 		break;
    775 	case VSOCK:
    776 		mode |= S_IFSOCK;
    777 		break;
    778 	case VFIFO:
    779 		mode |= S_IFIFO;
    780 		break;
    781 	default:
    782 		return EBADF;
    783 	}
    784 	sb->st_mode = mode;
    785 	sb->st_nlink = va.va_nlink;
    786 	sb->st_uid = va.va_uid;
    787 	sb->st_gid = va.va_gid;
    788 	sb->st_rdev = va.va_rdev;
    789 	sb->st_size = va.va_size;
    790 	sb->st_atimespec = va.va_atime;
    791 	sb->st_mtimespec = va.va_mtime;
    792 	sb->st_ctimespec = va.va_ctime;
    793 	sb->st_birthtimespec = va.va_birthtime;
    794 	sb->st_blksize = va.va_blocksize;
    795 	sb->st_flags = va.va_flags;
    796 	sb->st_gen = 0;
    797 	sb->st_blocks = va.va_bytes / S_BLKSIZE;
    798 	return 0;
    799 }
    800 
    801 /*
    802  * File table vnode fcntl routine.
    803  */
    804 static int
    805 vn_fcntl(file_t *fp, u_int com, void *data)
    806 {
    807 	struct vnode *vp = fp->f_vnode;
    808 	int error;
    809 
    810 	error = VOP_FCNTL(vp, com, data, fp->f_flag, kauth_cred_get());
    811 	return error;
    812 }
    813 
    814 /*
    815  * File table vnode ioctl routine.
    816  */
    817 static int
    818 vn_ioctl(file_t *fp, u_long com, void *data)
    819 {
    820 	struct vnode *vp = fp->f_vnode, *ovp;
    821 	struct vattr vattr;
    822 	int error;
    823 
    824 	switch (vp->v_type) {
    825 
    826 	case VREG:
    827 	case VDIR:
    828 		if (com == FIONREAD) {
    829 			vn_lock(vp, LK_SHARED | LK_RETRY);
    830 			error = VOP_GETATTR(vp, &vattr, kauth_cred_get());
    831 			if (error == 0) {
    832 				if (vp->v_type == VDIR)
    833 					mutex_enter(&fp->f_lock);
    834 				*(int *)data = vattr.va_size - fp->f_offset;
    835 				if (vp->v_type == VDIR)
    836 					mutex_exit(&fp->f_lock);
    837 			}
    838 			VOP_UNLOCK(vp);
    839 			if (error)
    840 				return error;
    841 			return 0;
    842 		}
    843 		if ((com == FIONWRITE) || (com == FIONSPACE)) {
    844 			/*
    845 			 * Files don't have send queues, so there never
    846 			 * are any bytes in them, nor is there any
    847 			 * open space in them.
    848 			 */
    849 			*(int *)data = 0;
    850 			return 0;
    851 		}
    852 		if (com == FIOGETBMAP) {
    853 			daddr_t *block;
    854 
    855 			if (*(daddr_t *)data < 0)
    856 				return EINVAL;
    857 			block = (daddr_t *)data;
    858 			vn_lock(vp, LK_SHARED | LK_RETRY);
    859 			error = VOP_BMAP(vp, *block, NULL, block, NULL);
    860 			VOP_UNLOCK(vp);
    861 			return error;
    862 		}
    863 		if (com == OFIOGETBMAP) {
    864 			daddr_t ibn, obn;
    865 
    866 			if (*(int32_t *)data < 0)
    867 				return EINVAL;
    868 			ibn = (daddr_t)*(int32_t *)data;
    869 			vn_lock(vp, LK_SHARED | LK_RETRY);
    870 			error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
    871 			VOP_UNLOCK(vp);
    872 			*(int32_t *)data = (int32_t)obn;
    873 			return error;
    874 		}
    875 		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
    876 			return 0;			/* XXX */
    877 		/* FALLTHROUGH */
    878 	case VFIFO:
    879 	case VCHR:
    880 	case VBLK:
    881 		error = VOP_IOCTL(vp, com, data, fp->f_flag,
    882 		    kauth_cred_get());
    883 		if (error == 0 && com == TIOCSCTTY) {
    884 			vref(vp);
    885 			mutex_enter(&proc_lock);
    886 			ovp = curproc->p_session->s_ttyvp;
    887 			curproc->p_session->s_ttyvp = vp;
    888 			mutex_exit(&proc_lock);
    889 			if (ovp != NULL)
    890 				vrele(ovp);
    891 		}
    892 		return error;
    893 
    894 	default:
    895 		return EPASSTHROUGH;
    896 	}
    897 }
    898 
    899 /*
    900  * File table vnode poll routine.
    901  */
    902 static int
    903 vn_poll(file_t *fp, int events)
    904 {
    905 
    906 	return VOP_POLL(fp->f_vnode, events);
    907 }
    908 
    909 /*
    910  * File table vnode kqfilter routine.
    911  */
    912 int
    913 vn_kqfilter(file_t *fp, struct knote *kn)
    914 {
    915 
    916 	return VOP_KQFILTER(fp->f_vnode, kn);
    917 }
    918 
    919 static int
    920 vn_mmap(struct file *fp, off_t *offp, size_t size, int prot, int *flagsp,
    921     int *advicep, struct uvm_object **uobjp, int *maxprotp)
    922 {
    923 	struct uvm_object *uobj;
    924 	struct vnode *vp;
    925 	struct vattr va;
    926 	struct lwp *l;
    927 	vm_prot_t maxprot;
    928 	off_t off;
    929 	int error, flags;
    930 	bool needwritemap;
    931 
    932 	l = curlwp;
    933 
    934 	off = *offp;
    935 	flags = *flagsp;
    936 	maxprot = VM_PROT_EXECUTE;
    937 
    938 	KASSERT(size > 0);
    939 
    940 	vp = fp->f_vnode;
    941 	if (vp->v_type != VREG && vp->v_type != VCHR &&
    942 	    vp->v_type != VBLK) {
    943 		/* only REG/CHR/BLK support mmap */
    944 		return ENODEV;
    945 	}
    946 	if (vp->v_type != VCHR && off < 0) {
    947 		return EINVAL;
    948 	}
    949 #if SIZE_MAX > UINT32_MAX	/* XXX -Wtype-limits */
    950 	if (vp->v_type != VCHR && size > __type_max(off_t)) {
    951 		return EOVERFLOW;
    952 	}
    953 #endif
    954 	if (vp->v_type != VCHR && off > __type_max(off_t) - size) {
    955 		/* no offset wrapping */
    956 		return EOVERFLOW;
    957 	}
    958 
    959 	/* special case: catch SunOS style /dev/zero */
    960 	if (vp->v_type == VCHR &&
    961 	    (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
    962 		*uobjp = NULL;
    963 		*maxprotp = VM_PROT_ALL;
    964 		return 0;
    965 	}
    966 
    967 	/*
    968 	 * Old programs may not select a specific sharing type, so
    969 	 * default to an appropriate one.
    970 	 *
    971 	 * XXX: how does MAP_ANON fit in the picture?
    972 	 */
    973 	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
    974 #if defined(DEBUG)
    975 		struct proc *p = l->l_proc;
    976 		printf("WARNING: defaulted mmap() share type to "
    977 		       "%s (pid %d command %s)\n", vp->v_type == VCHR ?
    978 		       "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
    979 		       p->p_comm);
    980 #endif
    981 		if (vp->v_type == VCHR)
    982 			flags |= MAP_SHARED;	/* for a device */
    983 		else
    984 			flags |= MAP_PRIVATE;	/* for a file */
    985 	}
    986 
    987 	/*
    988 	 * MAP_PRIVATE device mappings don't make sense (and aren't
    989 	 * supported anyway).  However, some programs rely on this,
    990 	 * so just change it to MAP_SHARED.
    991 	 */
    992 	if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
    993 		flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
    994 	}
    995 
    996 	/*
    997 	 * now check protection
    998 	 */
    999 
   1000 	/* check read access */
   1001 	if (fp->f_flag & FREAD)
   1002 		maxprot |= VM_PROT_READ;
   1003 	else if (prot & PROT_READ) {
   1004 		return EACCES;
   1005 	}
   1006 
   1007 	/* check write access, shared case first */
   1008 	if (flags & MAP_SHARED) {
   1009 		/*
   1010 		 * if the file is writable, only add PROT_WRITE to
   1011 		 * maxprot if the file is not immutable, append-only.
   1012 		 * otherwise, if we have asked for PROT_WRITE, return
   1013 		 * EPERM.
   1014 		 */
   1015 		if (fp->f_flag & FWRITE) {
   1016 			vn_lock(vp, LK_SHARED | LK_RETRY);
   1017 			error = VOP_GETATTR(vp, &va, l->l_cred);
   1018 			VOP_UNLOCK(vp);
   1019 			if (error) {
   1020 				return error;
   1021 			}
   1022 			if ((va.va_flags &
   1023 			     (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
   1024 				maxprot |= VM_PROT_WRITE;
   1025 			else if (prot & PROT_WRITE) {
   1026 				return EPERM;
   1027 			}
   1028 		} else if (prot & PROT_WRITE) {
   1029 			return EACCES;
   1030 		}
   1031 	} else {
   1032 		/* MAP_PRIVATE mappings can always write to */
   1033 		maxprot |= VM_PROT_WRITE;
   1034 	}
   1035 
   1036 	/*
   1037 	 * Don't allow mmap for EXEC if the file system
   1038 	 * is mounted NOEXEC.
   1039 	 */
   1040 	if ((prot & PROT_EXEC) != 0 &&
   1041 	    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
   1042 		return EACCES;
   1043 	}
   1044 
   1045 	if (vp->v_type != VCHR) {
   1046 		error = VOP_MMAP(vp, prot, curlwp->l_cred);
   1047 		if (error) {
   1048 			return error;
   1049 		}
   1050 		vref(vp);
   1051 		uobj = &vp->v_uobj;
   1052 
   1053 		/*
   1054 		 * If the vnode is being mapped with PROT_EXEC,
   1055 		 * then mark it as text.
   1056 		 */
   1057 		if (prot & PROT_EXEC) {
   1058 			vn_markexec(vp);
   1059 		}
   1060 	} else {
   1061 		int i = maxprot;
   1062 
   1063 		/*
   1064 		 * XXX Some devices don't like to be mapped with
   1065 		 * XXX PROT_EXEC or PROT_WRITE, but we don't really
   1066 		 * XXX have a better way of handling this, right now
   1067 		 */
   1068 		do {
   1069 			uobj = udv_attach(vp->v_rdev,
   1070 					  (flags & MAP_SHARED) ? i :
   1071 					  (i & ~VM_PROT_WRITE), off, size);
   1072 			i--;
   1073 		} while ((uobj == NULL) && (i > 0));
   1074 		if (uobj == NULL) {
   1075 			return EINVAL;
   1076 		}
   1077 		*advicep = UVM_ADV_RANDOM;
   1078 	}
   1079 
   1080 	/*
   1081 	 * Set vnode flags to indicate the new kinds of mapping.
   1082 	 * We take the vnode lock in exclusive mode here to serialize
   1083 	 * with direct I/O.
   1084 	 *
   1085 	 * Safe to check for these flag values without a lock, as
   1086 	 * long as a reference to the vnode is held.
   1087 	 */
   1088 	needwritemap = (vp->v_iflag & VI_WRMAP) == 0 &&
   1089 		(flags & MAP_SHARED) != 0 &&
   1090 		(maxprot & VM_PROT_WRITE) != 0;
   1091 	if ((vp->v_vflag & VV_MAPPED) == 0 || needwritemap) {
   1092 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   1093 		vp->v_vflag |= VV_MAPPED;
   1094 		if (needwritemap) {
   1095 			rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
   1096 			mutex_enter(vp->v_interlock);
   1097 			vp->v_iflag |= VI_WRMAP;
   1098 			mutex_exit(vp->v_interlock);
   1099 			rw_exit(vp->v_uobj.vmobjlock);
   1100 		}
   1101 		VOP_UNLOCK(vp);
   1102 	}
   1103 
   1104 #if NVERIEXEC > 0
   1105 
   1106 	/*
   1107 	 * Check if the file can be executed indirectly.
   1108 	 *
   1109 	 * XXX: This gives false warnings about "Incorrect access type"
   1110 	 * XXX: if the mapping is not executable. Harmless, but will be
   1111 	 * XXX: fixed as part of other changes.
   1112 	 */
   1113 	if (veriexec_verify(l, vp, "(mmap)", VERIEXEC_INDIRECT,
   1114 			    NULL)) {
   1115 
   1116 		/*
   1117 		 * Don't allow executable mappings if we can't
   1118 		 * indirectly execute the file.
   1119 		 */
   1120 		if (prot & VM_PROT_EXECUTE) {
   1121 			return EPERM;
   1122 		}
   1123 
   1124 		/*
   1125 		 * Strip the executable bit from 'maxprot' to make sure
   1126 		 * it can't be made executable later.
   1127 		 */
   1128 		maxprot &= ~VM_PROT_EXECUTE;
   1129 	}
   1130 #endif /* NVERIEXEC > 0 */
   1131 
   1132 	*uobjp = uobj;
   1133 	*maxprotp = maxprot;
   1134 	*flagsp = flags;
   1135 
   1136 	return 0;
   1137 }
   1138 
   1139 static int
   1140 vn_seek(struct file *fp, off_t delta, int whence, off_t *newoffp,
   1141     int flags)
   1142 {
   1143 	const off_t OFF_MIN = __type_min(off_t);
   1144 	const off_t OFF_MAX = __type_max(off_t);
   1145 	kauth_cred_t cred = fp->f_cred;
   1146 	off_t oldoff, newoff;
   1147 	struct vnode *vp = fp->f_vnode;
   1148 	struct vattr vattr;
   1149 	int error;
   1150 
   1151 	if (vp->v_type == VFIFO)
   1152 		return ESPIPE;
   1153 
   1154 	if (flags & FOF_UPDATE_OFFSET)
   1155 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   1156 	else
   1157 		vn_lock(vp, LK_SHARED | LK_RETRY);
   1158 
   1159 	/* Compute the old and new offsets.  */
   1160 	if (vp->v_type == VDIR && (flags & FOF_UPDATE_OFFSET) == 0)
   1161 		mutex_enter(&fp->f_lock);
   1162 	oldoff = fp->f_offset;
   1163 	if (vp->v_type == VDIR && (flags & FOF_UPDATE_OFFSET) == 0)
   1164 		mutex_exit(&fp->f_lock);
   1165 	switch (whence) {
   1166 	case SEEK_CUR:
   1167 		if (delta > 0) {
   1168 			if (oldoff > 0 && delta > OFF_MAX - oldoff) {
   1169 				newoff = OFF_MAX;
   1170 				break;
   1171 			}
   1172 		} else {
   1173 			if (oldoff < 0 && delta < OFF_MIN - oldoff) {
   1174 				newoff = OFF_MIN;
   1175 				break;
   1176 			}
   1177 		}
   1178 		newoff = oldoff + delta;
   1179 		break;
   1180 	case SEEK_END:
   1181 		error = VOP_GETATTR(vp, &vattr, cred);
   1182 		if (error)
   1183 			goto out;
   1184 		if (vattr.va_size > OFF_MAX ||
   1185 		    delta > OFF_MAX - (off_t)vattr.va_size) {
   1186 			newoff = OFF_MAX;
   1187 			break;
   1188 		}
   1189 		newoff = delta + vattr.va_size;
   1190 		break;
   1191 	case SEEK_SET:
   1192 		newoff = delta;
   1193 		break;
   1194 	default:
   1195 		error = EINVAL;
   1196 		goto out;
   1197 	}
   1198 
   1199 	/* Pass the proposed change to the file system to audit.  */
   1200 	error = VOP_SEEK(vp, oldoff, newoff, cred);
   1201 	if (error)
   1202 		goto out;
   1203 
   1204 	/* Success!  */
   1205 	if (newoffp)
   1206 		*newoffp = newoff;
   1207 	if (flags & FOF_UPDATE_OFFSET)
   1208 		fp->f_offset = newoff;
   1209 	error = 0;
   1210 
   1211 out:	VOP_UNLOCK(vp);
   1212 	return error;
   1213 }
   1214 
   1215 /*
   1216  * Check that the vnode is still valid, and if so
   1217  * acquire requested lock.
   1218  */
   1219 int
   1220 vn_lock(struct vnode *vp, int flags)
   1221 {
   1222 	struct lwp *l;
   1223 	int error;
   1224 
   1225 	KASSERT(vrefcnt(vp) > 0);
   1226 	KASSERT((flags & ~(LK_SHARED|LK_EXCLUSIVE|LK_NOWAIT|LK_RETRY|
   1227 	    LK_UPGRADE|LK_DOWNGRADE)) == 0);
   1228 	KASSERT((flags & LK_NOWAIT) != 0 || !mutex_owned(vp->v_interlock));
   1229 
   1230 #ifdef DIAGNOSTIC
   1231 	if (wapbl_vphaswapbl(vp))
   1232 		WAPBL_JUNLOCK_ASSERT(wapbl_vptomp(vp));
   1233 #endif
   1234 
   1235 	/* Get a more useful report for lockstat. */
   1236 	l = curlwp;
   1237 	KASSERT(l->l_rwcallsite == 0);
   1238 	l->l_rwcallsite = (uintptr_t)__builtin_return_address(0);
   1239 
   1240 	error = VOP_LOCK(vp, flags);
   1241 
   1242 	l->l_rwcallsite = 0;
   1243 
   1244 	switch (flags & (LK_RETRY | LK_NOWAIT)) {
   1245 	case 0:
   1246 		KASSERT(error == 0 || error == ENOENT);
   1247 		break;
   1248 	case LK_RETRY:
   1249 		KASSERT(error == 0);
   1250 		break;
   1251 	case LK_NOWAIT:
   1252 		KASSERT(error == 0 || error == EBUSY || error == ENOENT);
   1253 		break;
   1254 	case LK_RETRY | LK_NOWAIT:
   1255 		KASSERT(error == 0 || error == EBUSY);
   1256 		break;
   1257 	}
   1258 
   1259 	return error;
   1260 }
   1261 
   1262 /*
   1263  * File table vnode close routine.
   1264  */
   1265 static int
   1266 vn_closefile(file_t *fp)
   1267 {
   1268 
   1269 	return vn_close(fp->f_vnode, fp->f_flag, fp->f_cred);
   1270 }
   1271 
   1272 /*
   1273  * Simplified in-kernel wrapper calls for extended attribute access.
   1274  * Both calls pass in a NULL credential, authorizing a "kernel" access.
   1275  * Set IO_NODELOCKED in ioflg if the vnode is already locked.
   1276  */
   1277 int
   1278 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
   1279     const char *attrname, size_t *buflen, void *bf, struct lwp *l)
   1280 {
   1281 	struct uio auio;
   1282 	struct iovec aiov;
   1283 	int error;
   1284 
   1285 	aiov.iov_len = *buflen;
   1286 	aiov.iov_base = bf;
   1287 
   1288 	auio.uio_iov = &aiov;
   1289 	auio.uio_iovcnt = 1;
   1290 	auio.uio_rw = UIO_READ;
   1291 	auio.uio_offset = 0;
   1292 	auio.uio_resid = *buflen;
   1293 	UIO_SETUP_SYSSPACE(&auio);
   1294 
   1295 	if ((ioflg & IO_NODELOCKED) == 0)
   1296 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   1297 
   1298 	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL,
   1299 	    NOCRED);
   1300 
   1301 	if ((ioflg & IO_NODELOCKED) == 0)
   1302 		VOP_UNLOCK(vp);
   1303 
   1304 	if (error == 0)
   1305 		*buflen = *buflen - auio.uio_resid;
   1306 
   1307 	return error;
   1308 }
   1309 
   1310 /*
   1311  * XXX Failure mode if partially written?
   1312  */
   1313 int
   1314 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
   1315     const char *attrname, size_t buflen, const void *bf, struct lwp *l)
   1316 {
   1317 	struct uio auio;
   1318 	struct iovec aiov;
   1319 	int error;
   1320 
   1321 	aiov.iov_len = buflen;
   1322 	aiov.iov_base = __UNCONST(bf);		/* XXXUNCONST kills const */
   1323 
   1324 	auio.uio_iov = &aiov;
   1325 	auio.uio_iovcnt = 1;
   1326 	auio.uio_rw = UIO_WRITE;
   1327 	auio.uio_offset = 0;
   1328 	auio.uio_resid = buflen;
   1329 	UIO_SETUP_SYSSPACE(&auio);
   1330 
   1331 	if ((ioflg & IO_NODELOCKED) == 0) {
   1332 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   1333 	}
   1334 
   1335 	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NOCRED);
   1336 
   1337 	if ((ioflg & IO_NODELOCKED) == 0) {
   1338 		VOP_UNLOCK(vp);
   1339 	}
   1340 
   1341 	return error;
   1342 }
   1343 
   1344 int
   1345 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
   1346     const char *attrname, struct lwp *l)
   1347 {
   1348 	int error;
   1349 
   1350 	if ((ioflg & IO_NODELOCKED) == 0) {
   1351 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   1352 	}
   1353 
   1354 	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NOCRED);
   1355 	if (error == EOPNOTSUPP)
   1356 		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
   1357 		    NOCRED);
   1358 
   1359 	if ((ioflg & IO_NODELOCKED) == 0) {
   1360 		VOP_UNLOCK(vp);
   1361 	}
   1362 
   1363 	return error;
   1364 }
   1365 
   1366 int
   1367 vn_fifo_bypass(void *v)
   1368 {
   1369 	struct vop_generic_args *ap = v;
   1370 
   1371 	return VOCALL(fifo_vnodeop_p, ap->a_desc->vdesc_offset, v);
   1372 }
   1373 
   1374 /*
   1375  * Open block device by device number
   1376  */
   1377 int
   1378 vn_bdev_open(dev_t dev, struct vnode **vpp, struct lwp *l)
   1379 {
   1380 	int     error;
   1381 
   1382 	if ((error = bdevvp(dev, vpp)) != 0)
   1383 		return error;
   1384 
   1385 	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
   1386 	if ((error = VOP_OPEN(*vpp, FREAD | FWRITE, l->l_cred)) != 0) {
   1387 		vput(*vpp);
   1388 		return error;
   1389 	}
   1390 	mutex_enter((*vpp)->v_interlock);
   1391 	(*vpp)->v_writecount++;
   1392 	mutex_exit((*vpp)->v_interlock);
   1393 	VOP_UNLOCK(*vpp);
   1394 
   1395 	return 0;
   1396 }
   1397 
   1398 /*
   1399  * Lookup the provided name in the filesystem.  If the file exists,
   1400  * is a valid block device, and isn't being used by anyone else,
   1401  * set *vpp to the file's vnode.
   1402  */
   1403 int
   1404 vn_bdev_openpath(struct pathbuf *pb, struct vnode **vpp, struct lwp *l)
   1405 {
   1406 	struct vnode *vp;
   1407 	dev_t dev;
   1408 	enum vtype vt;
   1409 	int     error;
   1410 
   1411 	error = vn_open(NULL, pb, 0, FREAD | FWRITE, 0, &vp, NULL, NULL);
   1412 	if (error != 0)
   1413 		return error;
   1414 
   1415 	vt = vp->v_type;
   1416 	if (vt == VBLK)
   1417 		dev = vp->v_rdev;
   1418 
   1419 	VOP_UNLOCK(vp);
   1420 	(void) vn_close(vp, FREAD | FWRITE, l->l_cred);
   1421 
   1422 	if (vt != VBLK)
   1423 		return ENOTBLK;
   1424 
   1425 	return vn_bdev_open(dev, vpp, l);
   1426 }
   1427 
   1428 static long
   1429 vn_knote_to_interest(const struct knote *kn)
   1430 {
   1431 	switch (kn->kn_filter) {
   1432 	case EVFILT_READ:
   1433 		/*
   1434 		 * Writing to the file or changing its attributes can
   1435 		 * set the file size, which impacts the readability
   1436 		 * filter.
   1437 		 *
   1438 		 * (No need to set NOTE_EXTEND here; it's only ever
   1439 		 * send with other hints; see vnode_if.c.)
   1440 		 */
   1441 		return NOTE_WRITE | NOTE_ATTRIB;
   1442 
   1443 	case EVFILT_VNODE:
   1444 		return kn->kn_sfflags;
   1445 
   1446 	case EVFILT_WRITE:
   1447 	default:
   1448 		return 0;
   1449 	}
   1450 }
   1451 
   1452 void
   1453 vn_knote_attach(struct vnode *vp, struct knote *kn)
   1454 {
   1455 	struct vnode_klist *vk = vp->v_klist;
   1456 	long interest = 0;
   1457 
   1458 	/*
   1459 	 * In the case of layered / stacked file systems, knotes
   1460 	 * should only ever be associated with the base vnode.
   1461 	 */
   1462 	KASSERT(kn->kn_hook == vp);
   1463 	KASSERT(vp->v_klist == &VNODE_TO_VIMPL(vp)->vi_klist);
   1464 
   1465 	/*
   1466 	 * We maintain a bitmask of the kevents that there is interest in,
   1467 	 * to minimize the impact of having watchers.  It's silly to have
   1468 	 * to traverse vn_klist every time a read or write happens simply
   1469 	 * because there is someone interested in knowing when the file
   1470 	 * is deleted, for example.
   1471 	 */
   1472 
   1473 	mutex_enter(vp->v_interlock);
   1474 	SLIST_INSERT_HEAD(&vk->vk_klist, kn, kn_selnext);
   1475 	SLIST_FOREACH(kn, &vk->vk_klist, kn_selnext) {
   1476 		interest |= vn_knote_to_interest(kn);
   1477 	}
   1478 	vk->vk_interest = interest;
   1479 	mutex_exit(vp->v_interlock);
   1480 }
   1481 
   1482 void
   1483 vn_knote_detach(struct vnode *vp, struct knote *kn)
   1484 {
   1485 	struct vnode_klist *vk = vp->v_klist;
   1486 	long interest = 0;
   1487 
   1488 	/* See above. */
   1489 	KASSERT(kn->kn_hook == vp);
   1490 	KASSERT(vp->v_klist == &VNODE_TO_VIMPL(vp)->vi_klist);
   1491 
   1492 	/*
   1493 	 * We special case removing the head of the list, because:
   1494 	 *
   1495 	 * 1. It's extremely likely that we're detaching the only
   1496 	 *    knote.
   1497 	 *
   1498 	 * 2. We're already traversing the whole list, so we don't
   1499 	 *    want to use the generic SLIST_REMOVE() which would
   1500 	 *    traverse it *again*.
   1501 	 */
   1502 
   1503 	mutex_enter(vp->v_interlock);
   1504 	if (__predict_true(kn == SLIST_FIRST(&vk->vk_klist))) {
   1505 		SLIST_REMOVE_HEAD(&vk->vk_klist, kn_selnext);
   1506 		SLIST_FOREACH(kn, &vk->vk_klist, kn_selnext) {
   1507 			interest |= vn_knote_to_interest(kn);
   1508 		}
   1509 		vk->vk_interest = interest;
   1510 	} else {
   1511 		struct knote *thiskn, *nextkn, *prevkn = NULL;
   1512 
   1513 		SLIST_FOREACH_SAFE(thiskn, &vk->vk_klist, kn_selnext, nextkn) {
   1514 			if (thiskn == kn) {
   1515 				KASSERT(kn != NULL);
   1516 				KASSERT(prevkn != NULL);
   1517 				SLIST_REMOVE_AFTER(prevkn, kn_selnext);
   1518 				kn = NULL;
   1519 			} else {
   1520 				interest |= vn_knote_to_interest(thiskn);
   1521 				prevkn = thiskn;
   1522 			}
   1523 		}
   1524 		vk->vk_interest = interest;
   1525 	}
   1526 	mutex_exit(vp->v_interlock);
   1527 }
   1528