      1 /*	$NetBSD: vfs_vnops.c,v 1.238 2023/04/22 11:22:36 riastradh Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2009 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Copyright (c) 1982, 1986, 1989, 1993
     34  *	The Regents of the University of California.  All rights reserved.
     35  * (c) UNIX System Laboratories, Inc.
     36  * All or some portions of this file are derived from material licensed
     37  * to the University of California by American Telephone and Telegraph
     38  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     39  * the permission of UNIX System Laboratories, Inc.
     40  *
     41  * Redistribution and use in source and binary forms, with or without
     42  * modification, are permitted provided that the following conditions
     43  * are met:
     44  * 1. Redistributions of source code must retain the above copyright
     45  *    notice, this list of conditions and the following disclaimer.
     46  * 2. Redistributions in binary form must reproduce the above copyright
     47  *    notice, this list of conditions and the following disclaimer in the
     48  *    documentation and/or other materials provided with the distribution.
     49  * 3. Neither the name of the University nor the names of its contributors
     50  *    may be used to endorse or promote products derived from this software
     51  *    without specific prior written permission.
     52  *
     53  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     54  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     55  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     56  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     57  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     58  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     59  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     60  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     61  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     62  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     63  * SUCH DAMAGE.
     64  *
     65  *	@(#)vfs_vnops.c	8.14 (Berkeley) 6/15/95
     66  */
     67 
     68 #include <sys/cdefs.h>
     69 __KERNEL_RCSID(0, "$NetBSD: vfs_vnops.c,v 1.238 2023/04/22 11:22:36 riastradh Exp $");
     70 
     71 #include "veriexec.h"
     72 
     73 #include <sys/param.h>
     74 #include <sys/systm.h>
     75 #include <sys/kernel.h>
     76 #include <sys/file.h>
     77 #include <sys/stat.h>
     78 #include <sys/buf.h>
     79 #include <sys/proc.h>
     80 #include <sys/mount.h>
     81 #include <sys/namei.h>
     82 #include <sys/vnode_impl.h>
     83 #include <sys/ioctl.h>
     84 #include <sys/tty.h>
     85 #include <sys/poll.h>
     86 #include <sys/kauth.h>
     87 #include <sys/syslog.h>
     88 #include <sys/fstrans.h>
     89 #include <sys/atomic.h>
     90 #include <sys/filedesc.h>
     91 #include <sys/wapbl.h>
     92 #include <sys/mman.h>
     93 
     94 #include <miscfs/specfs/specdev.h>
     95 #include <miscfs/fifofs/fifo.h>
     96 
     97 #include <uvm/uvm_extern.h>
     98 #include <uvm/uvm_readahead.h>
     99 #include <uvm/uvm_device.h>
    100 
    101 #ifdef UNION
    102 #include <fs/union/union.h>
    103 #endif
    104 
    105 #ifndef COMPAT_ZERODEV
    106 #define COMPAT_ZERODEV(dev)	(0)
    107 #endif
    108 
    109 int (*vn_union_readdir_hook)(struct vnode **, struct file *, struct lwp *);
    110 
    111 #include <sys/verified_exec.h>
    112 
    113 static int vn_read(file_t *fp, off_t *offset, struct uio *uio,
    114     kauth_cred_t cred, int flags);
    115 static int vn_write(file_t *fp, off_t *offset, struct uio *uio,
    116     kauth_cred_t cred, int flags);
    117 static int vn_closefile(file_t *fp);
    118 static int vn_poll(file_t *fp, int events);
    119 static int vn_fcntl(file_t *fp, u_int com, void *data);
    120 static int vn_statfile(file_t *fp, struct stat *sb);
    121 static int vn_ioctl(file_t *fp, u_long com, void *data);
    122 static int vn_mmap(struct file *, off_t *, size_t, int, int *, int *,
    123     struct uvm_object **, int *);
    124 static int vn_seek(struct file *, off_t, int, off_t *, int);
    125 
    126 const struct fileops vnops = {
    127 	.fo_name = "vn",
    128 	.fo_read = vn_read,
    129 	.fo_write = vn_write,
    130 	.fo_ioctl = vn_ioctl,
    131 	.fo_fcntl = vn_fcntl,
    132 	.fo_poll = vn_poll,
    133 	.fo_stat = vn_statfile,
    134 	.fo_close = vn_closefile,
    135 	.fo_kqfilter = vn_kqfilter,
    136 	.fo_restart = fnullop_restart,
    137 	.fo_mmap = vn_mmap,
    138 	.fo_seek = vn_seek,
    139 };
    140 
    141 /*
    142  * Common code for vnode open operations.
    143  * Check permissions, and call the VOP_OPEN or VOP_CREATE routine.
    144  *
    145  * at_dvp is the directory for openat(), if any.
    146  * pb is the path.
    147  * nmode is additional namei flags, restricted to TRYEMULROOT and NOCHROOT.
    148  * fmode is the open flags, converted from O_* to F*
    149  * cmode is the creation file permissions.
    150  *
    151  * XXX shouldn't cmode be mode_t?
    152  *
    153  * On success produces either a locked vnode in *ret_vp, or NULL in
    154  * *ret_vp and a file descriptor number in *ret_fd.
    155  *
    156  * The caller may pass NULL for ret_fd (and ret_domove), in which case
    157  * EOPNOTSUPP will be produced in the cases that would otherwise return
    158  * a file descriptor.
    159  *
    160  * Note that callers that want no-follow behavior should pass
    161  * O_NOFOLLOW in fmode. Neither FOLLOW nor NOFOLLOW in nmode is
    162  * honored.
    163  */
    164 int
    165 vn_open(struct vnode *at_dvp, struct pathbuf *pb,
    166 	int nmode, int fmode, int cmode,
    167 	struct vnode **ret_vp, bool *ret_domove, int *ret_fd)
    168 {
    169 	struct nameidata nd;
    170 	struct vnode *vp = NULL;
    171 	struct lwp *l = curlwp;
    172 	kauth_cred_t cred = l->l_cred;
    173 	struct vattr va;
    174 	int error;
    175 	const char *pathstring;
    176 
    177 	KASSERT((nmode & (TRYEMULROOT | NOCHROOT)) == nmode);
    178 
    179 	KASSERT(ret_vp != NULL);
    180 	KASSERT((ret_domove == NULL) == (ret_fd == NULL));
    181 
    182 	if ((fmode & (O_CREAT | O_DIRECTORY)) == (O_CREAT | O_DIRECTORY))
    183 		return EINVAL;
    184 
    185 	NDINIT(&nd, LOOKUP, nmode, pb);
    186 	if (at_dvp != NULL)
    187 		NDAT(&nd, at_dvp);
    188 
    189 	nd.ni_cnd.cn_flags &= TRYEMULROOT | NOCHROOT;
    190 
    191 	if (fmode & O_CREAT) {
    192 		nd.ni_cnd.cn_nameiop = CREATE;
    193 		nd.ni_cnd.cn_flags |= LOCKPARENT | LOCKLEAF;
    194 		if ((fmode & O_EXCL) == 0 &&
    195 		    ((fmode & O_NOFOLLOW) == 0))
    196 			nd.ni_cnd.cn_flags |= FOLLOW;
    197 		if ((fmode & O_EXCL) == 0)
    198 			nd.ni_cnd.cn_flags |= NONEXCLHACK;
    199 	} else {
    200 		nd.ni_cnd.cn_nameiop = LOOKUP;
    201 		nd.ni_cnd.cn_flags |= LOCKLEAF;
    202 		if ((fmode & O_NOFOLLOW) == 0)
    203 			nd.ni_cnd.cn_flags |= FOLLOW;
    204 	}
    205 
    206 	pathstring = pathbuf_stringcopy_get(nd.ni_pathbuf);
    207 	if (pathstring == NULL) {
    208 		return ENOMEM;
    209 	}
    210 
    211 	/*
    212 	 * When this "interface" was exposed to do_open() it used
    213 	 * to initialize l_dupfd to -newfd-1 (thus passing in the
    214 	 * new file handle number to use)... but nothing in the
    215 	 * kernel uses that value. So just send 0.
    216 	 */
    217 	l->l_dupfd = 0;
    218 
    219 	error = namei(&nd);
    220 	if (error)
    221 		goto out;
    222 
    223 	vp = nd.ni_vp;
    224 
    225 #if NVERIEXEC > 0
    226 	error = veriexec_openchk(l, nd.ni_vp, pathstring, fmode);
    227 	if (error) {
    228 		/* We have to release the locks ourselves */
    229 		/*
    230 		 * 20210604 dholland passing NONEXCLHACK means we can
    231 		 * get ni_dvp == NULL back if ni_vp exists, and we should
    232 		 * treat that like the non-O_CREAT case.
    233 		 */
    234 		if ((fmode & O_CREAT) != 0 && nd.ni_dvp != NULL) {
    235 			if (vp == NULL) {
    236 				vput(nd.ni_dvp);
    237 			} else {
    238 				VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
    239 				if (nd.ni_dvp == nd.ni_vp)
    240 					vrele(nd.ni_dvp);
    241 				else
    242 					vput(nd.ni_dvp);
    243 				nd.ni_dvp = NULL;
    244 				vput(vp);
    245 				vp = NULL;
    246 			}
    247 		} else {
    248 			vput(vp);
    249 			vp = NULL;
    250 		}
    251 		goto out;
    252 	}
    253 #endif /* NVERIEXEC > 0 */
    254 
    255 	/*
    256 	 * 20210604 dholland ditto
    257 	 */
    258 	if ((fmode & O_CREAT) != 0 && nd.ni_dvp != NULL) {
    259 		if (nd.ni_vp == NULL) {
    260 			vattr_null(&va);
    261 			va.va_type = VREG;
    262 			va.va_mode = cmode;
    263 			if (fmode & O_EXCL)
    264 				 va.va_vaflags |= VA_EXCLUSIVE;
    265 			error = VOP_CREATE(nd.ni_dvp, &nd.ni_vp,
    266 					   &nd.ni_cnd, &va);
    267 			if (error) {
    268 				vput(nd.ni_dvp);
    269 				goto out;
    270 			}
    271 			fmode &= ~O_TRUNC;
    272 			vp = nd.ni_vp;
    273 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    274 			vput(nd.ni_dvp);
    275 		} else {
    276 			VOP_ABORTOP(nd.ni_dvp, &nd.ni_cnd);
    277 			if (nd.ni_dvp == nd.ni_vp)
    278 				vrele(nd.ni_dvp);
    279 			else
    280 				vput(nd.ni_dvp);
    281 			nd.ni_dvp = NULL;
    282 			vp = nd.ni_vp;
    283 			if (fmode & O_EXCL) {
    284 				error = EEXIST;
    285 				goto bad;
    286 			}
    287 			fmode &= ~O_CREAT;
    288 		}
    289 	} else if ((fmode & O_CREAT) != 0) {
    290 		/*
    291 		 * 20210606 dholland passing NONEXCLHACK means this
    292 		 * case exists; it is the same as the following one
    293 		 * but also needs to do things in the second (exists)
    294 		 * half of the following block. (Besides handle
    295 		 * ni_dvp, anyway.)
    296 		 */
    297 		vp = nd.ni_vp;
    298 		KASSERT((fmode & O_EXCL) == 0);
    299 		fmode &= ~O_CREAT;
    300 	} else {
    301 		vp = nd.ni_vp;
    302 	}
    303 	if (vp->v_type == VSOCK) {
    304 		error = EOPNOTSUPP;
    305 		goto bad;
    306 	}
    307 	if (nd.ni_vp->v_type == VLNK) {
    308 		error = EFTYPE;
    309 		goto bad;
    310 	}
    311 
    312 	if ((fmode & O_CREAT) == 0) {
    313 		error = vn_openchk(vp, cred, fmode);
    314 		if (error != 0)
    315 			goto bad;
    316 	}
    317 
    318 	if (fmode & O_TRUNC) {
    319 		vattr_null(&va);
    320 		va.va_size = 0;
    321 		error = VOP_SETATTR(vp, &va, cred);
    322 		if (error != 0)
    323 			goto bad;
    324 	}
    325 	if ((error = VOP_OPEN(vp, fmode, cred)) != 0)
    326 		goto bad;
    327 	if (fmode & FWRITE) {
    328 		mutex_enter(vp->v_interlock);
    329 		vp->v_writecount++;
    330 		mutex_exit(vp->v_interlock);
    331 	}
    332 
    333 bad:
    334 	if (error) {
    335 		vput(vp);
    336 		vp = NULL;
    337 	}
    338 out:
    339 	pathbuf_stringcopy_put(nd.ni_pathbuf, pathstring);
    340 
    341 	switch (error) {
    342 	case EDUPFD:
    343 	case EMOVEFD:
    344 		/* if the caller isn't prepared to handle fds, fail for them */
    345 		if (ret_fd == NULL) {
    346 			error = EOPNOTSUPP;
    347 			break;
    348 		}
    349 		*ret_vp = NULL;
    350 		*ret_domove = error == EMOVEFD;
    351 		*ret_fd = l->l_dupfd;
    352 		error = 0;
    353 		break;
    354 	case 0:
    355 		KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
    356 		*ret_vp = vp;
    357 		break;
    358 	}
    359 	l->l_dupfd = 0;
    360 	return error;
    361 }
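
/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): the common calling pattern for vn_open(), following the
 * shape of vn_bdev_openpath() below.  "pb" is assumed to be a pathbuf
 * owned by the caller; on success the vnode is returned locked
 * LK_EXCLUSIVE with one reference held.
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = vn_open(NULL, pb, 0, FREAD, 0, &vp, NULL, NULL);
 *	if (error != 0)
 *		return error;
 *	... use the locked vnode ...
 *	VOP_UNLOCK(vp);
 *	error = vn_close(vp, FREAD, curlwp->l_cred);
 */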
    362 
    363 /*
    364  * Check for write permissions on the specified vnode.
    365  * Prototype text segments cannot be written.
    366  */
    367 int
    368 vn_writechk(struct vnode *vp)
    369 {
    370 
    371 	/*
    372 	 * If the vnode is in use as a process's text,
    373 	 * we can't allow writing.
    374 	 */
    375 	if (vp->v_iflag & VI_TEXT)
    376 		return ETXTBSY;
    377 	return 0;
    378 }
    379 
    380 int
    381 vn_openchk(struct vnode *vp, kauth_cred_t cred, int fflags)
    382 {
    383 	int permbits = 0;
    384 	int error;
    385 
    386 	if (vp->v_type == VNON || vp->v_type == VBAD)
    387 		return ENXIO;
    388 
    389 	if ((fflags & O_DIRECTORY) != 0 && vp->v_type != VDIR)
    390 		return ENOTDIR;
    391 
    392 	if ((fflags & O_REGULAR) != 0 && vp->v_type != VREG)
    393 		return EFTYPE;
    394 
    395 	if ((fflags & FREAD) != 0) {
    396 		permbits = VREAD;
    397 	}
    398 	if ((fflags & FEXEC) != 0) {
    399 		permbits |= VEXEC;
    400 	}
    401 	if ((fflags & (FWRITE | O_TRUNC)) != 0) {
    402 		permbits |= VWRITE;
    403 		if (vp->v_type == VDIR) {
    404 			error = EISDIR;
    405 			goto bad;
    406 		}
    407 		error = vn_writechk(vp);
    408 		if (error != 0)
    409 			goto bad;
    410 	}
    411 	error = VOP_ACCESS(vp, permbits, cred);
    412 bad:
    413 	return error;
    414 }
    415 
    416 /*
    417  * Mark a vnode as having executable mappings.
    418  */
    419 void
    420 vn_markexec(struct vnode *vp)
    421 {
    422 
    423 	if ((vp->v_iflag & VI_EXECMAP) != 0) {
    424 		/* Safe unlocked, as long as caller holds a reference. */
    425 		return;
    426 	}
    427 
    428 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
    429 	mutex_enter(vp->v_interlock);
    430 	if ((vp->v_iflag & VI_EXECMAP) == 0) {
    431 		cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
    432 		vp->v_iflag |= VI_EXECMAP;
    433 	}
    434 	mutex_exit(vp->v_interlock);
    435 	rw_exit(vp->v_uobj.vmobjlock);
    436 }
    437 
    438 /*
    439  * Mark a vnode as being the text of a process.
    440  * Fail if the vnode is currently writable.
    441  */
    442 int
    443 vn_marktext(struct vnode *vp)
    444 {
    445 
    446 	if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP)) == (VI_TEXT|VI_EXECMAP)) {
    447 		/* Safe unlocked, as long as caller holds a reference. */
    448 		return 0;
    449 	}
    450 
    451 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
    452 	mutex_enter(vp->v_interlock);
    453 	if (vp->v_writecount != 0) {
    454 		KASSERT((vp->v_iflag & VI_TEXT) == 0);
    455 		mutex_exit(vp->v_interlock);
    456 		rw_exit(vp->v_uobj.vmobjlock);
    457 		return ETXTBSY;
    458 	}
    459 	if ((vp->v_iflag & VI_EXECMAP) == 0) {
    460 		cpu_count(CPU_COUNT_EXECPAGES, vp->v_uobj.uo_npages);
    461 	}
    462 	vp->v_iflag |= (VI_TEXT | VI_EXECMAP);
    463 	mutex_exit(vp->v_interlock);
    464 	rw_exit(vp->v_uobj.vmobjlock);
    465 	return 0;
    466 }
    467 
    468 /*
    469  * Vnode close call
    470  *
    471  * Note: takes an unlocked vnode, while VOP_CLOSE takes a locked vnode.
    472  */
    473 int
    474 vn_close(struct vnode *vp, int flags, kauth_cred_t cred)
    475 {
    476 	int error;
    477 
    478 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    479 	if (flags & FWRITE) {
    480 		mutex_enter(vp->v_interlock);
    481 		KASSERT(vp->v_writecount > 0);
    482 		vp->v_writecount--;
    483 		mutex_exit(vp->v_interlock);
    484 	}
    485 	error = VOP_CLOSE(vp, flags, cred);
    486 	vput(vp);
    487 	return error;
    488 }
    489 
    490 static int
    491 enforce_rlimit_fsize(struct vnode *vp, struct uio *uio, int ioflag)
    492 {
    493 	struct lwp *l = curlwp;
    494 	off_t testoff;
    495 
    496 	if (uio->uio_rw != UIO_WRITE || vp->v_type != VREG)
    497 		return 0;
    498 
    499 	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
    500 	if (ioflag & IO_APPEND)
    501 		testoff = vp->v_size;
    502 	else
    503 		testoff = uio->uio_offset;
    504 
    505 	if (testoff + uio->uio_resid >
    506 	    l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
    507 		mutex_enter(&proc_lock);
    508 		psignal(l->l_proc, SIGXFSZ);
    509 		mutex_exit(&proc_lock);
    510 		return EFBIG;
    511 	}
    512 
    513 	return 0;
    514 }
    515 
    516 /*
    517  * Package up an I/O request on a vnode into a uio and do it.
    518  */
    519 int
    520 vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    521     enum uio_seg segflg, int ioflg, kauth_cred_t cred, size_t *aresid,
    522     struct lwp *l)
    523 {
    524 	struct uio auio;
    525 	struct iovec aiov;
    526 	int error;
    527 
    528 	if ((ioflg & IO_NODELOCKED) == 0) {
    529 		if (rw == UIO_READ) {
    530 			vn_lock(vp, LK_SHARED | LK_RETRY);
    531 		} else /* UIO_WRITE */ {
    532 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    533 		}
    534 	}
    535 	auio.uio_iov = &aiov;
    536 	auio.uio_iovcnt = 1;
    537 	aiov.iov_base = base;
    538 	aiov.iov_len = len;
    539 	auio.uio_resid = len;
    540 	auio.uio_offset = offset;
    541 	auio.uio_rw = rw;
    542 	if (segflg == UIO_SYSSPACE) {
    543 		UIO_SETUP_SYSSPACE(&auio);
    544 	} else {
    545 		auio.uio_vmspace = l->l_proc->p_vmspace;
    546 	}
    547 
    548 	if ((error = enforce_rlimit_fsize(vp, &auio, ioflg)) != 0)
    549 		goto out;
    550 
    551 	if (rw == UIO_READ) {
    552 		error = VOP_READ(vp, &auio, ioflg, cred);
    553 	} else {
    554 		error = VOP_WRITE(vp, &auio, ioflg, cred);
    555 	}
    556 
    557 	if (aresid)
    558 		*aresid = auio.uio_resid;
    559 	else
    560 		if (auio.uio_resid && error == 0)
    561 			error = EIO;
    562 
    563  out:
    564 	if ((ioflg & IO_NODELOCKED) == 0) {
    565 		VOP_UNLOCK(vp);
    566 	}
    567 	return error;
    568 }
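
/*
 * Illustrative sketch (editorial addition): reading the start of a
 * file into a kernel buffer with vn_rdwr().  "vp" and "cred" are
 * assumed to be a referenced vnode and a credential the caller
 * already holds; with UIO_SYSSPACE the lwp argument is unused, so
 * NULL is fine, and because IO_NODELOCKED is not set vn_rdwr() takes
 * the vnode lock itself.
 *
 *	char buf[512];
 *	size_t resid;
 *	int error;
 *
 *	error = vn_rdwr(UIO_READ, vp, buf, sizeof(buf), 0,
 *	    UIO_SYSSPACE, 0, cred, &resid, NULL);
 *
 * On success, sizeof(buf) - resid bytes were placed in buf.
 */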
    569 
    570 int
    571 vn_readdir(file_t *fp, char *bf, int segflg, u_int count, int *done,
    572     struct lwp *l, off_t **cookies, int *ncookies)
    573 {
    574 	struct vnode *vp = fp->f_vnode;
    575 	struct iovec aiov;
    576 	struct uio auio;
    577 	int error, eofflag;
    578 
    579 	/* Limit the size of any kernel buffers used by VOP_READDIR */
    580 	count = uimin(MAXBSIZE, count);
    581 
    582 unionread:
    583 	if (vp->v_type != VDIR)
    584 		return EINVAL;
    585 	aiov.iov_base = bf;
    586 	aiov.iov_len = count;
    587 	auio.uio_iov = &aiov;
    588 	auio.uio_iovcnt = 1;
    589 	auio.uio_rw = UIO_READ;
    590 	if (segflg == UIO_SYSSPACE) {
    591 		UIO_SETUP_SYSSPACE(&auio);
    592 	} else {
    593 		KASSERT(l == curlwp);
    594 		auio.uio_vmspace = l->l_proc->p_vmspace;
    595 	}
    596 	auio.uio_resid = count;
    597 	vn_lock(vp, LK_SHARED | LK_RETRY);
    598 	mutex_enter(&fp->f_lock);
    599 	auio.uio_offset = fp->f_offset;
    600 	mutex_exit(&fp->f_lock);
    601 	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
    602 	    ncookies);
    603 	mutex_enter(&fp->f_lock);
    604 	fp->f_offset = auio.uio_offset;
    605 	mutex_exit(&fp->f_lock);
    606 	VOP_UNLOCK(vp);
    607 	if (error)
    608 		return error;
    609 
    610 	if (count == auio.uio_resid && vn_union_readdir_hook) {
    611 		struct vnode *ovp = vp;
    612 
    613 		error = (*vn_union_readdir_hook)(&vp, fp, l);
    614 		if (error)
    615 			return error;
    616 		if (vp != ovp)
    617 			goto unionread;
    618 	}
    619 
    620 	if (count == auio.uio_resid && (vp->v_vflag & VV_ROOT) &&
    621 	    (vp->v_mount->mnt_flag & MNT_UNION)) {
    622 		struct vnode *tvp = vp;
    623 		vp = vp->v_mount->mnt_vnodecovered;
    624 		vref(vp);
    625 		mutex_enter(&fp->f_lock);
    626 		fp->f_vnode = vp;
    627 		fp->f_offset = 0;
    628 		mutex_exit(&fp->f_lock);
    629 		vrele(tvp);
    630 		goto unionread;
    631 	}
    632 	*done = count - auio.uio_resid;
    633 	return error;
    634 }
    635 
    636 /*
    637  * File table vnode read routine.
    638  */
    639 static int
    640 vn_read(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    641     int flags)
    642 {
    643 	struct vnode *vp = fp->f_vnode;
    644 	int error, ioflag, fflag;
    645 	size_t count;
    646 
    647 	ioflag = IO_ADV_ENCODE(fp->f_advice);
    648 	fflag = fp->f_flag;
    649 	if (fflag & FNONBLOCK)
    650 		ioflag |= IO_NDELAY;
    651 	if ((fflag & (FFSYNC | FRSYNC)) == (FFSYNC | FRSYNC))
    652 		ioflag |= IO_SYNC;
    653 	if (fflag & FALTIO)
    654 		ioflag |= IO_ALTSEMANTICS;
    655 	if (fflag & FDIRECT)
    656 		ioflag |= IO_DIRECT;
    657 	if (offset == &fp->f_offset && (flags & FOF_UPDATE_OFFSET) != 0)
    658 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    659 	else
    660 		vn_lock(vp, LK_SHARED | LK_RETRY);
    661 	if (__predict_false(vp->v_type == VDIR) &&
    662 	    offset == &fp->f_offset && (flags & FOF_UPDATE_OFFSET) == 0)
    663 		mutex_enter(&fp->f_lock);
    664 	uio->uio_offset = *offset;
    665 	if (__predict_false(vp->v_type == VDIR) &&
    666 	    offset == &fp->f_offset && (flags & FOF_UPDATE_OFFSET) == 0)
    667 		mutex_exit(&fp->f_lock);
    668 	count = uio->uio_resid;
    669 	error = VOP_READ(vp, uio, ioflag, cred);
    670 	if (flags & FOF_UPDATE_OFFSET)
    671 		*offset += count - uio->uio_resid;
    672 	VOP_UNLOCK(vp);
    673 	return error;
    674 }
    675 
    676 /*
    677  * File table vnode write routine.
    678  */
    679 static int
    680 vn_write(file_t *fp, off_t *offset, struct uio *uio, kauth_cred_t cred,
    681     int flags)
    682 {
    683 	struct vnode *vp = fp->f_vnode;
    684 	int error, ioflag, fflag;
    685 	size_t count;
    686 
    687 	ioflag = IO_ADV_ENCODE(fp->f_advice) | IO_UNIT;
    688 	fflag = fp->f_flag;
    689 	if (vp->v_type == VREG && (fflag & O_APPEND))
    690 		ioflag |= IO_APPEND;
    691 	if (fflag & FNONBLOCK)
    692 		ioflag |= IO_NDELAY;
    693 	if (fflag & FFSYNC ||
    694 	    (vp->v_mount && (vp->v_mount->mnt_flag & MNT_SYNCHRONOUS)))
    695 		ioflag |= IO_SYNC;
    696 	else if (fflag & FDSYNC)
    697 		ioflag |= IO_DSYNC;
    698 	if (fflag & FALTIO)
    699 		ioflag |= IO_ALTSEMANTICS;
    700 	if (fflag & FDIRECT)
    701 		ioflag |= IO_DIRECT;
    702 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    703 	uio->uio_offset = *offset;
    704 	count = uio->uio_resid;
    705 
    706 	if ((error = enforce_rlimit_fsize(vp, uio, ioflag)) != 0)
    707 		goto out;
    708 
    709 	error = VOP_WRITE(vp, uio, ioflag, cred);
    710 
    711 	if (flags & FOF_UPDATE_OFFSET) {
    712 		if (ioflag & IO_APPEND) {
    713 			/*
    714 			 * SUSv3 describes the behaviour for count = 0 as follows:
    715 			 * "Before any action ... is taken, and if nbyte is zero
    716 			 * and the file is a regular file, the write() function
    717 			 * ... in the absence of errors ... shall return zero
    718 			 * and have no other results."
    719 			 */
    720 			if (count)
    721 				*offset = uio->uio_offset;
    722 		} else
    723 			*offset += count - uio->uio_resid;
    724 	}
    725 
    726  out:
    727 	VOP_UNLOCK(vp);
    728 	return error;
    729 }
    730 
    731 /*
    732  * File table vnode stat routine.
    733  */
    734 static int
    735 vn_statfile(file_t *fp, struct stat *sb)
    736 {
    737 	struct vnode *vp = fp->f_vnode;
    738 	int error;
    739 
    740 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    741 	error = vn_stat(vp, sb);
    742 	VOP_UNLOCK(vp);
    743 	return error;
    744 }
    745 
    746 int
    747 vn_stat(struct vnode *vp, struct stat *sb)
    748 {
    749 	struct vattr va;
    750 	int error;
    751 	mode_t mode;
    752 
    753 	memset(&va, 0, sizeof(va));
    754 	error = VOP_GETATTR(vp, &va, kauth_cred_get());
    755 	if (error)
    756 		return error;
    757 	/*
    758 	 * Copy from vattr table
    759 	 */
    760 	memset(sb, 0, sizeof(*sb));
    761 	sb->st_dev = va.va_fsid;
    762 	sb->st_ino = va.va_fileid;
    763 	mode = va.va_mode;
    764 	switch (vp->v_type) {
    765 	case VREG:
    766 		mode |= S_IFREG;
    767 		break;
    768 	case VDIR:
    769 		mode |= S_IFDIR;
    770 		break;
    771 	case VBLK:
    772 		mode |= S_IFBLK;
    773 		break;
    774 	case VCHR:
    775 		mode |= S_IFCHR;
    776 		break;
    777 	case VLNK:
    778 		mode |= S_IFLNK;
    779 		break;
    780 	case VSOCK:
    781 		mode |= S_IFSOCK;
    782 		break;
    783 	case VFIFO:
    784 		mode |= S_IFIFO;
    785 		break;
    786 	default:
    787 		return EBADF;
    788 	}
    789 	sb->st_mode = mode;
    790 	sb->st_nlink = va.va_nlink;
    791 	sb->st_uid = va.va_uid;
    792 	sb->st_gid = va.va_gid;
    793 	sb->st_rdev = va.va_rdev;
    794 	sb->st_size = va.va_size;
    795 	sb->st_atimespec = va.va_atime;
    796 	sb->st_mtimespec = va.va_mtime;
    797 	sb->st_ctimespec = va.va_ctime;
    798 	sb->st_birthtimespec = va.va_birthtime;
    799 	sb->st_blksize = va.va_blocksize;
    800 	sb->st_flags = va.va_flags;
    801 	sb->st_gen = 0;
    802 	sb->st_blocks = va.va_bytes / S_BLKSIZE;
    803 	return 0;
    804 }
    805 
    806 /*
    807  * File table vnode fcntl routine.
    808  */
    809 static int
    810 vn_fcntl(file_t *fp, u_int com, void *data)
    811 {
    812 	struct vnode *vp = fp->f_vnode;
    813 	int error;
    814 
    815 	error = VOP_FCNTL(vp, com, data, fp->f_flag, kauth_cred_get());
    816 	return error;
    817 }
    818 
    819 /*
    820  * File table vnode ioctl routine.
    821  */
    822 static int
    823 vn_ioctl(file_t *fp, u_long com, void *data)
    824 {
    825 	struct vnode *vp = fp->f_vnode, *ovp;
    826 	struct vattr vattr;
    827 	int error;
    828 
    829 	switch (vp->v_type) {
    830 
    831 	case VREG:
    832 	case VDIR:
    833 		if (com == FIONREAD) {
    834 			vn_lock(vp, LK_SHARED | LK_RETRY);
    835 			error = VOP_GETATTR(vp, &vattr, kauth_cred_get());
    836 			if (error == 0) {
    837 				if (vp->v_type == VDIR)
    838 					mutex_enter(&fp->f_lock);
    839 				*(int *)data = vattr.va_size - fp->f_offset;
    840 				if (vp->v_type == VDIR)
    841 					mutex_exit(&fp->f_lock);
    842 			}
    843 			VOP_UNLOCK(vp);
    844 			if (error)
    845 				return error;
    846 			return 0;
    847 		}
    848 		if ((com == FIONWRITE) || (com == FIONSPACE)) {
    849 			/*
    850 			 * Files don't have send queues, so there are
    851 			 * never any bytes in them, nor is there any
    852 			 * open space in them.
    853 			 */
    854 			*(int *)data = 0;
    855 			return 0;
    856 		}
    857 		if (com == FIOGETBMAP) {
    858 			daddr_t *block;
    859 
    860 			if (*(daddr_t *)data < 0)
    861 				return EINVAL;
    862 			block = (daddr_t *)data;
    863 			vn_lock(vp, LK_SHARED | LK_RETRY);
    864 			error = VOP_BMAP(vp, *block, NULL, block, NULL);
    865 			VOP_UNLOCK(vp);
    866 			return error;
    867 		}
    868 		if (com == OFIOGETBMAP) {
    869 			daddr_t ibn, obn;
    870 
    871 			if (*(int32_t *)data < 0)
    872 				return EINVAL;
    873 			ibn = (daddr_t)*(int32_t *)data;
    874 			vn_lock(vp, LK_SHARED | LK_RETRY);
    875 			error = VOP_BMAP(vp, ibn, NULL, &obn, NULL);
    876 			VOP_UNLOCK(vp);
    877 			*(int32_t *)data = (int32_t)obn;
    878 			return error;
    879 		}
    880 		if (com == FIONBIO || com == FIOASYNC)	/* XXX */
    881 			return 0;			/* XXX */
    882 		/* FALLTHROUGH */
    883 	case VFIFO:
    884 	case VCHR:
    885 	case VBLK:
    886 		error = VOP_IOCTL(vp, com, data, fp->f_flag,
    887 		    kauth_cred_get());
    888 		if (error == 0 && com == TIOCSCTTY) {
    889 			vref(vp);
    890 			mutex_enter(&proc_lock);
    891 			ovp = curproc->p_session->s_ttyvp;
    892 			curproc->p_session->s_ttyvp = vp;
    893 			mutex_exit(&proc_lock);
    894 			if (ovp != NULL)
    895 				vrele(ovp);
    896 		}
    897 		return error;
    898 
    899 	default:
    900 		return EPASSTHROUGH;
    901 	}
    902 }
    903 
    904 /*
    905  * File table vnode poll routine.
    906  */
    907 static int
    908 vn_poll(file_t *fp, int events)
    909 {
    910 
    911 	return VOP_POLL(fp->f_vnode, events);
    912 }
    913 
    914 /*
    915  * File table vnode kqfilter routine.
    916  */
    917 int
    918 vn_kqfilter(file_t *fp, struct knote *kn)
    919 {
    920 
    921 	return VOP_KQFILTER(fp->f_vnode, kn);
    922 }
    923 
    924 static int
    925 vn_mmap(struct file *fp, off_t *offp, size_t size, int prot, int *flagsp,
    926     int *advicep, struct uvm_object **uobjp, int *maxprotp)
    927 {
    928 	struct uvm_object *uobj;
    929 	struct vnode *vp;
    930 	struct vattr va;
    931 	struct lwp *l;
    932 	vm_prot_t maxprot;
    933 	off_t off;
    934 	int error, flags;
    935 	bool needwritemap;
    936 
    937 	l = curlwp;
    938 
    939 	off = *offp;
    940 	flags = *flagsp;
    941 	maxprot = VM_PROT_EXECUTE;
    942 
    943 	KASSERT(size > 0);
    944 
    945 	vp = fp->f_vnode;
    946 	if (vp->v_type != VREG && vp->v_type != VCHR &&
    947 	    vp->v_type != VBLK) {
    948 		/* only REG/CHR/BLK support mmap */
    949 		return ENODEV;
    950 	}
    951 	if (vp->v_type != VCHR && off < 0) {
    952 		return EINVAL;
    953 	}
    954 #if SIZE_MAX > UINT32_MAX	/* XXX -Wtype-limits */
    955 	if (vp->v_type != VCHR && size > __type_max(off_t)) {
    956 		return EOVERFLOW;
    957 	}
    958 #endif
    959 	if (vp->v_type != VCHR && off > __type_max(off_t) - size) {
    960 		/* no offset wrapping */
    961 		return EOVERFLOW;
    962 	}
    963 
    964 	/* special case: catch SunOS style /dev/zero */
    965 	if (vp->v_type == VCHR &&
    966 	    (vp->v_rdev == zerodev || COMPAT_ZERODEV(vp->v_rdev))) {
    967 		*uobjp = NULL;
    968 		*maxprotp = VM_PROT_ALL;
    969 		return 0;
    970 	}
    971 
    972 	/*
    973 	 * Old programs may not select a specific sharing type, so
    974 	 * default to an appropriate one.
    975 	 *
    976 	 * XXX: how does MAP_ANON fit in the picture?
    977 	 */
    978 	if ((flags & (MAP_SHARED|MAP_PRIVATE)) == 0) {
    979 #if defined(DEBUG)
    980 		struct proc *p = l->l_proc;
    981 		printf("WARNING: defaulted mmap() share type to "
    982 		       "%s (pid %d command %s)\n", vp->v_type == VCHR ?
    983 		       "MAP_SHARED" : "MAP_PRIVATE", p->p_pid,
    984 		       p->p_comm);
    985 #endif
    986 		if (vp->v_type == VCHR)
    987 			flags |= MAP_SHARED;	/* for a device */
    988 		else
    989 			flags |= MAP_PRIVATE;	/* for a file */
    990 	}
    991 
    992 	/*
    993 	 * MAP_PRIVATE device mappings don't make sense (and aren't
    994 	 * supported anyway).  However, some programs rely on this,
    995 	 * so just change it to MAP_SHARED.
    996 	 */
    997 	if (vp->v_type == VCHR && (flags & MAP_PRIVATE) != 0) {
    998 		flags = (flags & ~MAP_PRIVATE) | MAP_SHARED;
    999 	}
   1000 
   1001 	/*
   1002 	 * now check protection
   1003 	 */
   1004 
   1005 	/* check read access */
   1006 	if (fp->f_flag & FREAD)
   1007 		maxprot |= VM_PROT_READ;
   1008 	else if (prot & PROT_READ) {
   1009 		return EACCES;
   1010 	}
   1011 
   1012 	/* check write access, shared case first */
   1013 	if (flags & MAP_SHARED) {
   1014 		/*
   1015 		 * if the file is writable, only add PROT_WRITE to
   1016 		 * maxprot if the file is not immutable, append-only.
   1017 		 * otherwise, if we have asked for PROT_WRITE, return
   1018 		 * EPERM.
   1019 		 */
   1020 		if (fp->f_flag & FWRITE) {
   1021 			vn_lock(vp, LK_SHARED | LK_RETRY);
   1022 			error = VOP_GETATTR(vp, &va, l->l_cred);
   1023 			VOP_UNLOCK(vp);
   1024 			if (error) {
   1025 				return error;
   1026 			}
   1027 			if ((va.va_flags &
   1028 			     (SF_SNAPSHOT|IMMUTABLE|APPEND)) == 0)
   1029 				maxprot |= VM_PROT_WRITE;
   1030 			else if (prot & PROT_WRITE) {
   1031 				return EPERM;
   1032 			}
   1033 		} else if (prot & PROT_WRITE) {
   1034 			return EACCES;
   1035 		}
   1036 	} else {
   1037 		/* MAP_PRIVATE mappings can always be written to */
   1038 		maxprot |= VM_PROT_WRITE;
   1039 	}
   1040 
   1041 	/*
   1042 	 * Don't allow mmap for EXEC if the file system
   1043 	 * is mounted NOEXEC.
   1044 	 */
   1045 	if ((prot & PROT_EXEC) != 0 &&
   1046 	    (vp->v_mount->mnt_flag & MNT_NOEXEC) != 0) {
   1047 		return EACCES;
   1048 	}
   1049 
   1050 	if (vp->v_type != VCHR) {
   1051 		error = VOP_MMAP(vp, prot, curlwp->l_cred);
   1052 		if (error) {
   1053 			return error;
   1054 		}
   1055 		vref(vp);
   1056 		uobj = &vp->v_uobj;
   1057 
   1058 		/*
   1059 		 * If the vnode is being mapped with PROT_EXEC,
   1060 		 * then mark it as text.
   1061 		 */
   1062 		if (prot & PROT_EXEC) {
   1063 			vn_markexec(vp);
   1064 		}
   1065 	} else {
   1066 		int i = maxprot;
   1067 
   1068 		/*
   1069 		 * XXX Some devices don't like to be mapped with
   1070 		 * XXX PROT_EXEC or PROT_WRITE, but we don't really
   1071 		 * XXX have a better way of handling this, right now
   1072 		 */
   1073 		do {
   1074 			uobj = udv_attach(vp->v_rdev,
   1075 					  (flags & MAP_SHARED) ? i :
   1076 					  (i & ~VM_PROT_WRITE), off, size);
   1077 			i--;
   1078 		} while ((uobj == NULL) && (i > 0));
   1079 		if (uobj == NULL) {
   1080 			return EINVAL;
   1081 		}
   1082 		*advicep = UVM_ADV_RANDOM;
   1083 	}
   1084 
   1085 	/*
   1086 	 * Set vnode flags to indicate the new kinds of mapping.
   1087 	 * We take the vnode lock in exclusive mode here to serialize
   1088 	 * with direct I/O.
   1089 	 *
   1090 	 * Safe to check for these flag values without a lock, as
   1091 	 * long as a reference to the vnode is held.
   1092 	 */
   1093 	needwritemap = (vp->v_iflag & VI_WRMAP) == 0 &&
   1094 		(flags & MAP_SHARED) != 0 &&
   1095 		(maxprot & VM_PROT_WRITE) != 0;
   1096 	if ((vp->v_vflag & VV_MAPPED) == 0 || needwritemap) {
   1097 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   1098 		vp->v_vflag |= VV_MAPPED;
   1099 		if (needwritemap) {
   1100 			rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
   1101 			mutex_enter(vp->v_interlock);
   1102 			vp->v_iflag |= VI_WRMAP;
   1103 			mutex_exit(vp->v_interlock);
   1104 			rw_exit(vp->v_uobj.vmobjlock);
   1105 		}
   1106 		VOP_UNLOCK(vp);
   1107 	}
   1108 
   1109 #if NVERIEXEC > 0
   1110 
   1111 	/*
   1112 	 * Check if the file can be executed indirectly.
   1113 	 *
   1114 	 * XXX: This gives false warnings about "Incorrect access type"
   1115 	 * XXX: if the mapping is not executable. Harmless, but will be
   1116 	 * XXX: fixed as part of other changes.
   1117 	 */
   1118 	if (veriexec_verify(l, vp, "(mmap)", VERIEXEC_INDIRECT,
   1119 			    NULL)) {
   1120 
   1121 		/*
   1122 		 * Don't allow executable mappings if we can't
   1123 		 * indirectly execute the file.
   1124 		 */
   1125 		if (prot & VM_PROT_EXECUTE) {
   1126 			return EPERM;
   1127 		}
   1128 
   1129 		/*
   1130 		 * Strip the executable bit from 'maxprot' to make sure
   1131 		 * it can't be made executable later.
   1132 		 */
   1133 		maxprot &= ~VM_PROT_EXECUTE;
   1134 	}
   1135 #endif /* NVERIEXEC > 0 */
   1136 
   1137 	*uobjp = uobj;
   1138 	*maxprotp = maxprot;
   1139 	*flagsp = flags;
   1140 
   1141 	return 0;
   1142 }
   1143 
   1144 static int
   1145 vn_seek(struct file *fp, off_t delta, int whence, off_t *newoffp,
   1146     int flags)
   1147 {
   1148 	const off_t OFF_MIN = __type_min(off_t);
   1149 	const off_t OFF_MAX = __type_max(off_t);
   1150 	kauth_cred_t cred = fp->f_cred;
   1151 	off_t oldoff, newoff;
   1152 	struct vnode *vp = fp->f_vnode;
   1153 	struct vattr vattr;
   1154 	int error;
   1155 
   1156 	if (vp->v_type == VFIFO)
   1157 		return ESPIPE;
   1158 
   1159 	if (flags & FOF_UPDATE_OFFSET)
   1160 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   1161 	else
   1162 		vn_lock(vp, LK_SHARED | LK_RETRY);
   1163 
   1164 	/* Compute the old and new offsets.  */
   1165 	if (vp->v_type == VDIR && (flags & FOF_UPDATE_OFFSET) == 0)
   1166 		mutex_enter(&fp->f_lock);
   1167 	oldoff = fp->f_offset;
   1168 	if (vp->v_type == VDIR && (flags & FOF_UPDATE_OFFSET) == 0)
   1169 		mutex_exit(&fp->f_lock);
   1170 	switch (whence) {
   1171 	case SEEK_CUR:
   1172 		if (delta > 0) {
   1173 			if (oldoff > 0 && delta > OFF_MAX - oldoff) {
   1174 				newoff = OFF_MAX;
   1175 				break;
   1176 			}
   1177 		} else {
   1178 			if (oldoff < 0 && delta < OFF_MIN - oldoff) {
   1179 				newoff = OFF_MIN;
   1180 				break;
   1181 			}
   1182 		}
   1183 		newoff = oldoff + delta;
   1184 		break;
   1185 	case SEEK_END:
   1186 		error = VOP_GETATTR(vp, &vattr, cred);
   1187 		if (error)
   1188 			goto out;
   1189 		if (vattr.va_size > OFF_MAX ||
   1190 		    delta > OFF_MAX - (off_t)vattr.va_size) {
   1191 			newoff = OFF_MAX;
   1192 			break;
   1193 		}
   1194 		newoff = delta + vattr.va_size;
   1195 		break;
   1196 	case SEEK_SET:
   1197 		newoff = delta;
   1198 		break;
   1199 	default:
   1200 		error = EINVAL;
   1201 		goto out;
   1202 	}
   1203 
   1204 	/* Pass the proposed change to the file system to audit.  */
   1205 	error = VOP_SEEK(vp, oldoff, newoff, cred);
   1206 	if (error)
   1207 		goto out;
   1208 
   1209 	/* Success!  */
   1210 	if (newoffp)
   1211 		*newoffp = newoff;
   1212 	if (flags & FOF_UPDATE_OFFSET)
   1213 		fp->f_offset = newoff;
   1214 	error = 0;
   1215 
   1216 out:	VOP_UNLOCK(vp);
   1217 	return error;
   1218 }
   1219 
   1220 /*
   1221  * Check that the vnode is still valid, and if so
   1222  * acquire requested lock.
   1223  */
   1224 int
   1225 vn_lock(struct vnode *vp, int flags)
   1226 {
   1227 	struct lwp *l;
   1228 	int error;
   1229 
   1230 	KASSERT(vrefcnt(vp) > 0);
   1231 	KASSERT((flags & ~(LK_SHARED|LK_EXCLUSIVE|LK_NOWAIT|LK_RETRY|
   1232 	    LK_UPGRADE|LK_DOWNGRADE)) == 0);
   1233 	KASSERT((flags & LK_NOWAIT) != 0 || !mutex_owned(vp->v_interlock));
   1234 
   1235 #ifdef DIAGNOSTIC
   1236 	if (wapbl_vphaswapbl(vp))
   1237 		WAPBL_JUNLOCK_ASSERT(wapbl_vptomp(vp));
   1238 #endif
   1239 
   1240 	/* Get a more useful report for lockstat. */
   1241 	l = curlwp;
   1242 	KASSERT(l->l_rwcallsite == 0);
   1243 	l->l_rwcallsite = (uintptr_t)__builtin_return_address(0);
   1244 
   1245 	error = VOP_LOCK(vp, flags);
   1246 
   1247 	l->l_rwcallsite = 0;
   1248 
   1249 	switch (flags & (LK_RETRY | LK_NOWAIT)) {
   1250 	case 0:
   1251 		KASSERT(error == 0 || error == ENOENT);
   1252 		break;
   1253 	case LK_RETRY:
   1254 		KASSERT(error == 0);
   1255 		break;
   1256 	case LK_NOWAIT:
   1257 		KASSERT(error == 0 || error == EBUSY || error == ENOENT);
   1258 		break;
   1259 	case LK_RETRY | LK_NOWAIT:
   1260 		KASSERT(error == 0 || error == EBUSY);
   1261 		break;
   1262 	}
   1263 
   1264 	return error;
   1265 }
   1266 
   1267 /*
   1268  * File table vnode close routine.
   1269  */
   1270 static int
   1271 vn_closefile(file_t *fp)
   1272 {
   1273 
   1274 	return vn_close(fp->f_vnode, fp->f_flag, fp->f_cred);
   1275 }
   1276 
   1277 /*
   1278  * Simplified in-kernel wrapper calls for extended attribute access.
   1279  * Both calls pass in a NULL credential, authorizing a "kernel" access.
   1280  * Set IO_NODELOCKED in ioflg if the vnode is already locked.
   1281  */
   1282 int
   1283 vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
   1284     const char *attrname, size_t *buflen, void *bf, struct lwp *l)
   1285 {
   1286 	struct uio auio;
   1287 	struct iovec aiov;
   1288 	int error;
   1289 
   1290 	aiov.iov_len = *buflen;
   1291 	aiov.iov_base = bf;
   1292 
   1293 	auio.uio_iov = &aiov;
   1294 	auio.uio_iovcnt = 1;
   1295 	auio.uio_rw = UIO_READ;
   1296 	auio.uio_offset = 0;
   1297 	auio.uio_resid = *buflen;
   1298 	UIO_SETUP_SYSSPACE(&auio);
   1299 
   1300 	if ((ioflg & IO_NODELOCKED) == 0)
   1301 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   1302 
   1303 	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL,
   1304 	    NOCRED);
   1305 
   1306 	if ((ioflg & IO_NODELOCKED) == 0)
   1307 		VOP_UNLOCK(vp);
   1308 
   1309 	if (error == 0)
   1310 		*buflen = *buflen - auio.uio_resid;
   1311 
   1312 	return error;
   1313 }
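
/*
 * Illustrative sketch (editorial addition): fetching an extended
 * attribute into a fixed-size kernel buffer with vn_extattr_get().
 * The attribute name "example" is hypothetical; IO_NODELOCKED is
 * passed on the assumption that the caller already holds the vnode
 * lock.  On success, *buflen is updated to the number of bytes read.
 *
 *	char buf[64];
 *	size_t buflen = sizeof(buf);
 *	int error;
 *
 *	error = vn_extattr_get(vp, IO_NODELOCKED,
 *	    EXTATTR_NAMESPACE_SYSTEM, "example", &buflen, buf, curlwp);
 */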
   1314 
   1315 /*
   1316  * XXX Failure mode if partially written?
   1317  */
   1318 int
   1319 vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
   1320     const char *attrname, size_t buflen, const void *bf, struct lwp *l)
   1321 {
   1322 	struct uio auio;
   1323 	struct iovec aiov;
   1324 	int error;
   1325 
   1326 	aiov.iov_len = buflen;
   1327 	aiov.iov_base = __UNCONST(bf);		/* XXXUNCONST kills const */
   1328 
   1329 	auio.uio_iov = &aiov;
   1330 	auio.uio_iovcnt = 1;
   1331 	auio.uio_rw = UIO_WRITE;
   1332 	auio.uio_offset = 0;
   1333 	auio.uio_resid = buflen;
   1334 	UIO_SETUP_SYSSPACE(&auio);
   1335 
   1336 	if ((ioflg & IO_NODELOCKED) == 0) {
   1337 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   1338 	}
   1339 
   1340 	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NOCRED);
   1341 
   1342 	if ((ioflg & IO_NODELOCKED) == 0) {
   1343 		VOP_UNLOCK(vp);
   1344 	}
   1345 
   1346 	return error;
   1347 }
   1348 
   1349 int
   1350 vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
   1351     const char *attrname, struct lwp *l)
   1352 {
   1353 	int error;
   1354 
   1355 	if ((ioflg & IO_NODELOCKED) == 0) {
   1356 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   1357 	}
   1358 
   1359 	error = VOP_DELETEEXTATTR(vp, attrnamespace, attrname, NOCRED);
   1360 	if (error == EOPNOTSUPP)
   1361 		error = VOP_SETEXTATTR(vp, attrnamespace, attrname, NULL,
   1362 		    NOCRED);
   1363 
   1364 	if ((ioflg & IO_NODELOCKED) == 0) {
   1365 		VOP_UNLOCK(vp);
   1366 	}
   1367 
   1368 	return error;
   1369 }
   1370 
   1371 int
   1372 vn_fifo_bypass(void *v)
   1373 {
   1374 	struct vop_generic_args *ap = v;
   1375 
   1376 	return VOCALL(fifo_vnodeop_p, ap->a_desc->vdesc_offset, v);
   1377 }
   1378 
   1379 /*
   1380  * Open block device by device number
   1381  */
   1382 int
   1383 vn_bdev_open(dev_t dev, struct vnode **vpp, struct lwp *l)
   1384 {
   1385 	int     error;
   1386 
   1387 	if ((error = bdevvp(dev, vpp)) != 0)
   1388 		return error;
   1389 
   1390 	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
   1391 	if ((error = VOP_OPEN(*vpp, FREAD | FWRITE, l->l_cred)) != 0) {
   1392 		vput(*vpp);
   1393 		return error;
   1394 	}
   1395 	mutex_enter((*vpp)->v_interlock);
   1396 	(*vpp)->v_writecount++;
   1397 	mutex_exit((*vpp)->v_interlock);
   1398 	VOP_UNLOCK(*vpp);
   1399 
   1400 	return 0;
   1401 }
   1402 
   1403 /*
   1404  * Look up the provided name in the filesystem.  If the file exists,
   1405  * is a valid block device, and isn't being used by anyone else,
   1406  * set *vpp to the file's vnode.
   1407  */
   1408 int
   1409 vn_bdev_openpath(struct pathbuf *pb, struct vnode **vpp, struct lwp *l)
   1410 {
   1411 	struct vnode *vp;
   1412 	dev_t dev;
   1413 	enum vtype vt;
   1414 	int     error;
   1415 
   1416 	error = vn_open(NULL, pb, 0, FREAD | FWRITE, 0, &vp, NULL, NULL);
   1417 	if (error != 0)
   1418 		return error;
   1419 
   1420 	dev = vp->v_rdev;
   1421 	vt = vp->v_type;
   1422 
   1423 	VOP_UNLOCK(vp);
   1424 	(void) vn_close(vp, FREAD | FWRITE, l->l_cred);
   1425 
   1426 	if (vt != VBLK)
   1427 		return ENOTBLK;
   1428 
   1429 	return vn_bdev_open(dev, vpp, l);
   1430 }
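
/*
 * Illustrative sketch (editorial addition): opening a block device by
 * path.  "/dev/wd0a" is a hypothetical example; the caller owns the
 * pathbuf and destroys it afterwards, since vn_open() and friends do
 * not consume it.
 *
 *	struct pathbuf *pb;
 *	struct vnode *vp;
 *	int error;
 *
 *	pb = pathbuf_create("/dev/wd0a");
 *	if (pb == NULL)
 *		return ENOMEM;
 *	error = vn_bdev_openpath(pb, &vp, curlwp);
 *	pathbuf_destroy(pb);
 */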
   1431 
   1432 static long
   1433 vn_knote_to_interest(const struct knote *kn)
   1434 {
   1435 	switch (kn->kn_filter) {
   1436 	case EVFILT_READ:
   1437 		/*
   1438 		 * Writing to the file or changing its attributes can
   1439 		 * set the file size, which impacts the readability
   1440 		 * filter.
   1441 		 *
   1442 		 * (No need to set NOTE_EXTEND here; it's only ever
   1443 		 * sent with other hints; see vnode_if.c.)
   1444 		 */
   1445 		return NOTE_WRITE | NOTE_ATTRIB;
   1446 
   1447 	case EVFILT_VNODE:
   1448 		return kn->kn_sfflags;
   1449 
   1450 	case EVFILT_WRITE:
   1451 	default:
   1452 		return 0;
   1453 	}
   1454 }
   1455 
   1456 void
   1457 vn_knote_attach(struct vnode *vp, struct knote *kn)
   1458 {
   1459 	struct vnode_klist *vk = vp->v_klist;
   1460 	long interest = 0;
   1461 
   1462 	/*
   1463 	 * In the case of layered / stacked file systems, knotes
   1464 	 * should only ever be associated with the base vnode.
   1465 	 */
   1466 	KASSERT(kn->kn_hook == vp);
   1467 	KASSERT(vp->v_klist == &VNODE_TO_VIMPL(vp)->vi_klist);
   1468 
   1469 	/*
   1470 	 * We maintain a bitmask of the kevents that there is interest in,
   1471 	 * to minimize the impact of having watchers.  It's silly to have
   1472 	 * to traverse vn_klist every time a read or write happens simply
   1473 	 * because there is someone interested in knowing when the file
   1474 	 * is deleted, for example.
   1475 	 */
   1476 
   1477 	mutex_enter(vp->v_interlock);
   1478 	SLIST_INSERT_HEAD(&vk->vk_klist, kn, kn_selnext);
   1479 	SLIST_FOREACH(kn, &vk->vk_klist, kn_selnext) {
   1480 		interest |= vn_knote_to_interest(kn);
   1481 	}
   1482 	vk->vk_interest = interest;
   1483 	mutex_exit(vp->v_interlock);
   1484 }
   1485 
   1486 void
   1487 vn_knote_detach(struct vnode *vp, struct knote *kn)
   1488 {
   1489 	struct vnode_klist *vk = vp->v_klist;
   1490 	long interest = 0;
   1491 
   1492 	/* See above. */
   1493 	KASSERT(kn->kn_hook == vp);
   1494 	KASSERT(vp->v_klist == &VNODE_TO_VIMPL(vp)->vi_klist);
   1495 
   1496 	/*
   1497 	 * We special case removing the head of the list, because:
   1498 	 *
   1499 	 * 1. It's extremely likely that we're detaching the only
   1500 	 *    knote.
   1501 	 *
   1502 	 * 2. We're already traversing the whole list, so we don't
   1503 	 *    want to use the generic SLIST_REMOVE() which would
   1504 	 *    traverse it *again*.
   1505 	 */
   1506 
   1507 	mutex_enter(vp->v_interlock);
   1508 	if (__predict_true(kn == SLIST_FIRST(&vk->vk_klist))) {
   1509 		SLIST_REMOVE_HEAD(&vk->vk_klist, kn_selnext);
   1510 		SLIST_FOREACH(kn, &vk->vk_klist, kn_selnext) {
   1511 			interest |= vn_knote_to_interest(kn);
   1512 		}
   1513 		vk->vk_interest = interest;
   1514 	} else {
   1515 		struct knote *thiskn, *nextkn, *prevkn = NULL;
   1516 
   1517 		SLIST_FOREACH_SAFE(thiskn, &vk->vk_klist, kn_selnext, nextkn) {
   1518 			if (thiskn == kn) {
   1519 				KASSERT(kn != NULL);
   1520 				KASSERT(prevkn != NULL);
   1521 				SLIST_REMOVE_AFTER(prevkn, kn_selnext);
   1522 				kn = NULL;
   1523 			} else {
   1524 				interest |= vn_knote_to_interest(thiskn);
   1525 				prevkn = thiskn;
   1526 			}
   1527 		}
   1528 		vk->vk_interest = interest;
   1529 	}
   1530 	mutex_exit(vp->v_interlock);
   1531 }
   1532