/*	$NetBSD: nfs_bio.c,v 1.202 2024/02/13 21:40:02 andvar Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_bio.c,v 1.202 2024/02/13 21:40:02 andvar Exp $");

#ifdef _KERNEL_OPT
#include "opt_nfs.h"
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/kauth.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern int nfs_commitsize;
extern struct nfsstats nfsstats;

static int nfs_doio_read(struct buf *, struct uio *);
static int nfs_doio_write(struct buf *, struct uio *);
static int nfs_doio_phys(struct buf *, struct uio *);

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(struct vnode *vp, struct uio *uio, int ioflag,
	    kauth_cred_t cred, int cflag)
{
	struct nfsnode *np = VTONFS(vp);
	struct buf *bp = NULL, *rabp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsdircache *ndp = NULL, *nndp = NULL;
	void *baddr;
	int got_buf = 0, error = 0, n = 0, on = 0, en, enn;
	int enough = 0;
	struct dirent *dp, *pdp, *edp, *ep;
	off_t curoff = 0;
	int advice;
	struct lwp *l = curlwp;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (vp->v_type != VDIR && uio->uio_offset < 0)
		return (EINVAL);
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, l);
#endif
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);

	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 *
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * nfs_attrtimeo seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */
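
	/*
	 * In outline (a sketch only; nfs_flushstalebuf() below is the
	 * real code):
	 *
	 *	error = VOP_GETATTR(vp, &vattr, cred);
	 *	if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=))
	 *		error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
	 */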

	if (vp->v_type != VLNK) {
		error = nfs_flushstalebuf(vp, cred, l,
		    NFS_FLUSHSTALEBUF_MYWRITE);
		if (error)
			return error;
	}

	do {
	    /*
	     * Don't cache symlinks.
	     */
	    if ((vp->v_vflag & VV_ROOT) && vp->v_type == VLNK) {
		return (nfs_readlinkrpc(vp, uio, cred));
	    }
	    baddr = (void *)0;
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;

		advice = IO_ADV_DECODE(ioflag);
		error = 0;
		while (uio->uio_resid > 0) {
			vsize_t bytelen;

			nfs_delayedtruncate(vp);
			if (np->n_size <= uio->uio_offset) {
				break;
			}
			bytelen =
			    MIN(np->n_size - uio->uio_offset, uio->uio_resid);
			error = ubc_uiomove(&vp->v_uobj, uio, bytelen, advice,
			    UBC_READ | UBC_PARTIALOK | UBC_VNODE_FLAGS(vp));
			if (error) {
				/*
				 * XXXkludge
				 * the file has been truncated on the server.
				 * there isn't much we can do.
				 */
				if (uio->uio_offset >= np->n_size) {
					/* end of file */
					error = 0;
				} else {
					break;
				}
			}
		}
		break;

	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, MAXPATHLEN, l);
		if (!bp)
			return (EINTR);
		if ((bp->b_oflags & BO_DONE) == 0) {
			bp->b_flags |= B_READ;
			error = nfs_doio(bp);
			if (error) {
				brelse(bp, 0);
				return (error);
			}
		}
		n = MIN(uio->uio_resid, MAXPATHLEN - bp->b_resid);
		got_buf = 1;
		on = 0;
		break;
	    case VDIR:
diragain:
		nfsstats.biocache_readdirs++;
		ndp = nfs_searchdircache(vp, uio->uio_offset,
			(nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
		if (!ndp) {
			/*
			 * We've been handed a cookie that is not
			 * in the cache. If we're not translating
			 * 32 <-> 64, it may be a value that was
			 * flushed out of the cache because it grew
			 * too big. Let the server judge if it's
			 * valid or not. In the translation case,
			 * we have no way of validating this value,
			 * so punt.
			 */
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
				return (EINVAL);
			ndp = nfs_enterdircache(vp, uio->uio_offset,
				uio->uio_offset, 0, 0);
		}

		if (NFS_EOFVALID(np) &&
		    ndp->dc_cookie == np->n_direofoffset) {
			nfs_putdircache(np, ndp);
			nfsstats.direofcache_hits++;
			return (0);
		}

		bp = nfs_getcacheblk(vp, NFSDC_BLKNO(ndp), NFS_DIRBLKSIZ, l);
		if (!bp)
		    return (EINTR);
		if ((bp->b_oflags & BO_DONE) == 0) {
		    bp->b_flags |= B_READ;
		    bp->b_dcookie = ndp->dc_blkcookie;
		    error = nfs_doio(bp);
		    if (error) {
			/*
			 * Yuck! The directory has been modified on the
			 * server. Punt and let the userland code
			 * deal with it.
			 */
			nfs_putdircache(np, ndp);
			brelse(bp, 0);
			/*
			 * nfs_request maps NFSERR_BAD_COOKIE to EINVAL.
			 */
			if (error == EINVAL) { /* NFSERR_BAD_COOKIE */
			    nfs_invaldircache(vp, 0);
			    nfs_vinvalbuf(vp, 0, cred, l, 1);
			}
			return (error);
		    }
		}

		/*
		 * Just return if we hit EOF right away with this
		 * block. Always check here, because direofoffset
		 * may have been set by an nfsiod since the last
		 * check.
		 *
		 * also, empty block implies EOF.
		 */

		if (bp->b_bcount == bp->b_resid ||
		    (NFS_EOFVALID(np) &&
		    ndp->dc_blkcookie == np->n_direofoffset)) {
			KASSERT(bp->b_bcount != bp->b_resid ||
			    ndp->dc_blkcookie == bp->b_dcookie);
			nfs_putdircache(np, ndp);
			brelse(bp, BC_NOCACHE);
			return 0;
		}

		/*
		 * Find the entry we were looking for in the block.
		 */

		en = ndp->dc_entry;

		pdp = dp = (struct dirent *)bp->b_data;
		edp = (struct dirent *)(void *)((char *)bp->b_data + bp->b_bcount -
		    bp->b_resid);
		enn = 0;
		while (enn < en && dp < edp) {
			pdp = dp;
			dp = _DIRENT_NEXT(dp);
			enn++;
		}

		/*
		 * If the entry number was bigger than the number of
		 * entries in the block, or the cookie of the previous
		 * entry doesn't match, the directory cache is
		 * stale. Flush it and try again (i.e. go to
		 * the server).
		 */
		if (dp >= edp || (struct dirent *)_DIRENT_NEXT(dp) > edp ||
		    (en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
#ifdef DEBUG
			printf("invalid cache: %p %p %p off %jx %jx\n",
				pdp, dp, edp,
				(uintmax_t)uio->uio_offset,
				(uintmax_t)NFS_GETCOOKIE(pdp));
#endif
			nfs_putdircache(np, ndp);
			brelse(bp, 0);
			nfs_invaldircache(vp, 0);
			nfs_vinvalbuf(vp, 0, cred, l, 0);
			goto diragain;
		}

		on = (char *)dp - (char *)bp->b_data;

		/*
		 * Cache all entries that may be exported to the
		 * user, as they may be thrown back at us. The
		 * NFSBIO_CACHECOOKIES flag indicates that all
		 * entries are being 'exported', so cache them all.
		 */

		if (en == 0 && pdp == dp) {
			dp = _DIRENT_NEXT(dp);
			enn++;
		}

		if (uio->uio_resid < (bp->b_bcount - bp->b_resid - on)) {
			n = uio->uio_resid;
			enough = 1;
		} else
			n = bp->b_bcount - bp->b_resid - on;

		ep = (struct dirent *)(void *)((char *)bp->b_data + on + n);

		/*
		 * Find last complete entry to copy, caching entries
		 * (if requested) as we go.
		 */

		while (dp < ep && (struct dirent *)_DIRENT_NEXT(dp) <= ep) {
			if (cflag & NFSBIO_CACHECOOKIES) {
				nndp = nfs_enterdircache(vp, NFS_GETCOOKIE(pdp),
				    ndp->dc_blkcookie, enn, bp->b_lblkno);
				if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
					NFS_STASHCOOKIE32(pdp,
					    nndp->dc_cookie32);
				}
				nfs_putdircache(np, nndp);
			}
			pdp = dp;
			dp = _DIRENT_NEXT(dp);
			enn++;
		}
		nfs_putdircache(np, ndp);

		/*
		 * If the last requested entry was not the last in the
		 * buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
		 * cache the cookie of the last requested one, and
		 * set the offset to it.
		 */

		if ((on + n) < bp->b_bcount - bp->b_resid) {
			curoff = NFS_GETCOOKIE(pdp);
			nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
			    enn, bp->b_lblkno);
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
				curoff = nndp->dc_cookie32;
			}
			nfs_putdircache(np, nndp);
		} else
			curoff = bp->b_dcookie;

		/*
		 * Always cache the entry for the next block,
		 * so that readaheads can use it.
		 */
		nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie, 0, 0);
		if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
			if (curoff == bp->b_dcookie) {
				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
				curoff = nndp->dc_cookie32;
			}
		}

		n = (char *)_DIRENT_NEXT(pdp) - ((char *)bp->b_data + on);

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
		    !NFS_EOFVALID(np)) {
			rabp = nfs_getcacheblk(vp, NFSDC_BLKNO(nndp),
						NFS_DIRBLKSIZ, l);
			if (rabp) {
			    if ((rabp->b_oflags & (BO_DONE | BO_DELWRI)) == 0) {
				rabp->b_dcookie = nndp->dc_cookie;
				rabp->b_flags |= (B_READ | B_ASYNC);
				if (nfs_asyncio(rabp)) {
				    brelse(rabp, BC_INVAL);
				}
			    } else
				brelse(rabp, 0);
			}
		}
		nfs_putdircache(np, nndp);
		got_buf = 1;
		break;
	    default:
		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
		break;
	    }

	    if (n > 0) {
		if (!baddr)
			baddr = bp->b_data;
		error = uiomove((char *)baddr + on, (int)n, uio);
	    }
	    switch (vp->v_type) {
	    case VREG:
		break;
	    case VLNK:
		n = 0;
		break;
	    case VDIR:
		uio->uio_offset = curoff;
		if (enough)
			n = 0;
		break;
	    default:
		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
	    }
	    if (got_buf)
		brelse(bp, 0);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}
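
/*
 * For context, the VOP_READ entry point (nfs_read() in nfs_vnops.c) is
 * essentially a thin wrapper around nfs_bioread().  A minimal sketch,
 * abbreviated rather than quoted from that file:
 *
 *	int
 *	nfs_read(void *v)
 *	{
 *		struct vop_read_args *ap = v;
 *
 *		return (nfs_bioread(ap->a_vp, ap->a_uio, ap->a_ioflag,
 *		    ap->a_cred, 0));
 *	}
 */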

/*
 * Vnode op for write using bio
 */
int
nfs_write(void *v)
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct uio *uio = ap->a_uio;
	struct lwp *l = curlwp;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	kauth_cred_t cred = ap->a_cred;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	voff_t oldoff, origoff;
	vsize_t bytelen;
	int error = 0;
	int ioflag = ap->a_ioflag;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, l);
#endif
	if (ioflag & IO_APPEND) {
		NFS_INVALIDATE_ATTRCACHE(np);
		error = nfs_flushstalebuf(vp, cred, l,
		    NFS_FLUSHSTALEBUF_MYWRITE);
		if (error)
			return (error);
		uio->uio_offset = np->n_size;

		/*
		 * This is already checked above VOP_WRITE, but recheck
		 * the append case here to make sure our idea of the
		 * file size is as fresh as possible.
		 */
		if (uio->uio_offset + uio->uio_resid >
		      l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
			mutex_enter(&proc_lock);
			psignal(l->l_proc, SIGXFSZ);
			mutex_exit(&proc_lock);
			return (EFBIG);
		}
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	origoff = uio->uio_offset;
	do {
		bool overwrite; /* if we are overwriting whole pages */
		u_quad_t oldsize;
		oldoff = uio->uio_offset;
		bytelen = uio->uio_resid;

		nfsstats.biocache_writes++;

		oldsize = np->n_size;
		np->n_flag |= NMODIFIED;
		if (np->n_size < uio->uio_offset + bytelen) {
			np->n_size = uio->uio_offset + bytelen;
		}
		overwrite = false;
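		/*
		 * If the write is page-aligned and covers whole pages of a
		 * file that isn't mapped, or lies entirely at or past EOF,
		 * every byte of those pages will be overwritten, so hint
		 * UBC (UBC_FAULTBUSY below) to create the pages busy
		 * rather than faulting their old contents in from the
		 * server first.
		 */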
		if ((uio->uio_offset & PAGE_MASK) == 0) {
			if ((vp->v_vflag & VV_MAPPED) == 0 &&
			    bytelen > PAGE_SIZE) {
				bytelen = trunc_page(bytelen);
				overwrite = true;
			} else if ((bytelen & PAGE_MASK) == 0 &&
			    uio->uio_offset >= vp->v_size) {
				overwrite = true;
			}
		}
		if (vp->v_size < uio->uio_offset + bytelen) {
			uvm_vnp_setwritesize(vp, uio->uio_offset + bytelen);
		}
		error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
		    UVM_ADV_RANDOM, UBC_WRITE | UBC_PARTIALOK |
		    (overwrite ? UBC_FAULTBUSY : 0) |
		    UBC_VNODE_FLAGS(vp));
		if (error) {
			uvm_vnp_setwritesize(vp, vp->v_size);
			if (overwrite && np->n_size != oldsize) {
				/*
				 * backout size and free pages past eof.
				 */
				np->n_size = oldsize;
				rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
				(void)VOP_PUTPAGES(vp, round_page(vp->v_size),
				    0, PGO_SYNCIO | PGO_FREE);
			}
			break;
		}

		/*
		 * update UVM's notion of the size now that we've
		 * copied the data into the vnode's pages.
		 */

		if (vp->v_size < uio->uio_offset) {
			uvm_vnp_setsize(vp, uio->uio_offset);
		}

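		/*
		 * If this iteration crossed a write-block (nm_wsize)
		 * boundary, push the completed block(s) to the server.
		 * Worked example, assuming nm_wsize == 32768 (0x8000):
		 * oldoff == 0x12345 and uio_offset == 0x18300 fall in
		 * different blocks (0x10000 vs 0x18000), so we clean the
		 * page range [0x10000, 0x20000), i.e. everything written
		 * so far in those blocks.
		 */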
		if ((oldoff & ~(nmp->nm_wsize - 1)) !=
		    (uio->uio_offset & ~(nmp->nm_wsize - 1))) {
			rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
			error = VOP_PUTPAGES(vp,
			    trunc_page(oldoff & ~(nmp->nm_wsize - 1)),
			    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
				       ~(nmp->nm_wsize - 1)), PGO_CLEANIT);
		}
	} while (uio->uio_resid > 0);
	if (error == 0 && (ioflag & IO_SYNC) != 0) {
		rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
		error = VOP_PUTPAGES(vp,
		    trunc_page(origoff & ~(nmp->nm_wsize - 1)),
		    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
			       ~(nmp->nm_wsize - 1)),
		    PGO_CLEANIT | PGO_SYNCIO);
	}
	return error;
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct lwp *l)
{
	struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, l))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}
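
/*
 * Typical use, as in nfs_bioread() above:
 *
 *	bp = nfs_getcacheblk(vp, (daddr_t)0, MAXPATHLEN, l);
 *	if (bp == NULL)
 *		return (EINTR);
 *
 * A NULL return is possible only on interruptible (NFSMNT_INT) mounts,
 * where nfs_sigintr() sees a pending signal while we wait for the buffer.
 */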

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(struct vnode *vp, int flags, kauth_cred_t cred,
		struct lwp *l, int intrflg)
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, allerror = 0, slptimeo;
	bool catch_p;

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		catch_p = true;
		slptimeo = 2 * hz;
	} else {
		catch_p = false;
		if (nmp->nm_flag & NFSMNT_SOFT)
			slptimeo = nmp->nm_retry * nmp->nm_timeo;
		else
			slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	mutex_enter(vp->v_interlock);
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = mtsleep(&np->n_flag, PRIBIO + 2, "nfsvinval",
			slptimeo, vp->v_interlock);
		if (error && intrflg && nfs_sigintr(nmp, NULL, l)) {
			mutex_exit(vp->v_interlock);
			return EINTR;
		}
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	mutex_exit(vp->v_interlock);
	error = vinvalbuf(vp, flags, cred, l, catch_p, 0);
	while (error) {
		if (allerror == 0)
			allerror = error;
		if (intrflg && nfs_sigintr(nmp, NULL, l)) {
			error = EINTR;
			break;
		}
		error = vinvalbuf(vp, flags, cred, l, 0, slptimeo);
	}
	mutex_enter(vp->v_interlock);
	if (allerror != 0) {
		/*
		 * Keep error from vinvalbuf so fsync/close will know.
		 */
		np->n_error = allerror;
		np->n_flag |= NWRITEERR;
	}
	if (error == 0)
		np->n_flag &= ~NMODIFIED;
	np->n_flag &= ~NFLUSHINPROG;
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	mutex_exit(vp->v_interlock);
	return error;
}
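
/*
 * Note the sleep/wakeup protocol above: the active flusher holds
 * NFLUSHINPROG; any other caller sets NFLUSHWANT and mtsleep()s on
 * &np->n_flag; when done, the flusher clears NFLUSHINPROG and, if
 * NFLUSHWANT was set, wakeup()s the waiters.
 */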

/*
 * nfs_flushstalebuf: flush cache if it's stale.
 *
 * => caller shouldn't own any pages or buffers which belong to the vnode.
 */

int
nfs_flushstalebuf(struct vnode *vp, kauth_cred_t cred, struct lwp *l,
    int flags)
{
	struct nfsnode *np = VTONFS(vp);
	struct vattr vattr;
	int error;

	if (np->n_flag & NMODIFIED) {
		if ((flags & NFS_FLUSHSTALEBUF_MYWRITE) == 0
		    || vp->v_type != VREG) {
			error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
			if (error)
				return error;
			if (vp->v_type == VDIR) {
				nfs_invaldircache(vp, 0);
			}
		} else {
			/*
			 * XXX assuming writes are ours.
			 */
		}
		NFS_INVALIDATE_ATTRCACHE(np);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime = vattr.va_mtime;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) {
			if (vp->v_type == VDIR) {
				nfs_invaldircache(vp, 0);
			}
			error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
			if (error)
				return error;
			np->n_mtime = vattr.va_mtime;
		}
	}

	return error;
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */

int
nfs_asyncio(struct buf *bp)
{
	struct nfs_iod *iod;
	struct nfsmount *nmp;
	int slptimeo = 0, error;
	bool catch_p = false;

	if (nfs_numasync == 0)
		return (EIO);

	nmp = VFSTONFS(bp->b_vp->v_mount);

	if (nmp->nm_flag & NFSMNT_SOFT)
		slptimeo = nmp->nm_retry * nmp->nm_timeo;

	if (nmp->nm_iflag & NFSMNT_DISMNTFORCE)
		slptimeo = hz;

again:
	if (nmp->nm_flag & NFSMNT_INT)
		catch_p = true;

	/*
	 * Find a free iod to process this request.
	 */

	mutex_enter(&nfs_iodlist_lock);
	iod = LIST_FIRST(&nfs_iodlist_idle);
	if (iod) {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		LIST_REMOVE(iod, nid_idle);
		mutex_enter(&iod->nid_lock);
		mutex_exit(&nfs_iodlist_lock);
		KASSERT(iod->nid_mount == NULL);
		iod->nid_mount = nmp;
		cv_signal(&iod->nid_cv);
		mutex_enter(&nmp->nm_lock);
		mutex_exit(&iod->nid_lock);
		nmp->nm_bufqiods++;
		if (nmp->nm_bufqlen < 2 * nmp->nm_bufqiods) {
			cv_broadcast(&nmp->nm_aiocv);
		}
	} else {
		mutex_exit(&nfs_iodlist_lock);
		mutex_enter(&nmp->nm_lock);
	}

	KASSERT(mutex_owned(&nmp->nm_lock));

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.  However, even if we have an iod, do not initiate
	 * queue cleaning if curproc is the pageout daemon.  If the NFS
	 * mount is via local loopback, we may put curproc (the pagedaemon)
	 * to sleep waiting for the writes to complete.  But the server
	 * (ourself) may block the write, waiting for its (i.e., our)
	 * pagedaemon to produce clean pages to handle the write: deadlock.
	 * XXX: start non-loopback mounts straight away?  If "lots free",
	 * let pagedaemon start loopback writes anyway?
	 */
	if (nmp->nm_bufqiods > 0) {

		/*
		 * Ensure that the queue never grows too large.
		 */
		if (curlwp == uvm.pagedaemon_lwp) {
			/* Enqueue for later, to avoid free-page deadlock */
		} else while (nmp->nm_bufqlen >= 2 * nmp->nm_bufqiods) {
			if (catch_p) {
				error = cv_timedwait_sig(&nmp->nm_aiocv,
				    &nmp->nm_lock, slptimeo);
			} else {
				error = cv_timedwait(&nmp->nm_aiocv,
				    &nmp->nm_lock, slptimeo);
			}
			if (error) {
				if (error == EWOULDBLOCK &&
				    nmp->nm_flag & NFSMNT_SOFT) {
					mutex_exit(&nmp->nm_lock);
					bp->b_error = EIO;
					return (EIO);
				}

				if (nfs_sigintr(nmp, NULL, curlwp)) {
					mutex_exit(&nmp->nm_lock);
					return (EINTR);
				}
				if (catch_p) {
					catch_p = false;
					slptimeo = 2 * hz;
				}
			}

			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */

			if (nmp->nm_bufqiods == 0) {
				mutex_exit(&nmp->nm_lock);
				goto again;
			}
		}
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		mutex_exit(&nmp->nm_lock);
		return (0);
	}
	mutex_exit(&nmp->nm_lock);

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */

	return (EIO);
}
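
/*
 * For orientation, a simplified sketch of the consumer side of nm_bufq.
 * The real loop lives in nfssvc_iod() in nfs_syscalls.c; this is an
 * abbreviated, approximate rendering, not the actual code:
 *
 *	mutex_enter(&nmp->nm_lock);
 *	while ((bp = TAILQ_FIRST(&nmp->nm_bufq)) != NULL) {
 *		TAILQ_REMOVE(&nmp->nm_bufq, bp, b_freelist);
 *		nmp->nm_bufqlen--;
 *		mutex_exit(&nmp->nm_lock);
 *		nfs_doio(bp);
 *		mutex_enter(&nmp->nm_lock);
 *	}
 *	mutex_exit(&nmp->nm_lock);
 */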

/*
 * nfs_doio for read.
 */
static int
nfs_doio_read(struct buf *bp, struct uio *uiop)
{
	struct vnode *vp = bp->b_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0;

	uiop->uio_rw = UIO_READ;
	switch (vp->v_type) {
	case VREG:
		nfsstats.read_bios++;
		error = nfs_readrpc(vp, uiop);
		if (!error && uiop->uio_resid) {
			int diff, len;

			/*
			 * If uio_resid > 0, there is a hole in the file and
			 * no writes after the hole have been pushed to
			 * the server yet or the file has been truncated
			 * on the server.
			 * Just zero fill the rest of the valid area.
			 */
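			/*
			 * Illustrative numbers: for an 8192-byte buffer
			 * where the READ RPC returned only 5000 bytes,
			 * uio_resid is 3192, so we zero b_data[5000..8191].
			 */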

			diff = bp->b_bcount - uiop->uio_resid;
			len = uiop->uio_resid;
			memset((char *)bp->b_data + diff, 0, len);
			uiop->uio_resid = 0;
		}
#if 0
		if (uiop->uio_lwp && (vp->v_iflag & VI_TEXT) &&
		    timespeccmp(&np->n_mtime, &np->n_vattr->va_mtime, !=)) {
			mutex_enter(&proc_lock);
			killproc(uiop->uio_lwp->l_proc, "process text file was modified");
			mutex_exit(&proc_lock);
#if 0 /* XXX NJWLWP */
			uiop->uio_lwp->l_proc->p_holdcnt++;
#endif
		}
#endif
		break;
	case VLNK:
		KASSERT(uiop->uio_offset == (off_t)0);
		nfsstats.readlink_bios++;
		error = nfs_readlinkrpc(vp, uiop, np->n_rcred);
		break;
	case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = bp->b_dcookie;
#ifndef NFS_V2_ONLY
		if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
			error = nfs_readdirplusrpc(vp, uiop,
			    curlwp->l_cred);
			/*
			 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
			 */
			if (error == ENOTSUP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
#else
		nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
#endif
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = nfs_readdirrpc(vp, uiop,
			    curlwp->l_cred);
		if (!error) {
			bp->b_dcookie = uiop->uio_offset;
		}
		break;
	default:
		printf("nfs_doio:  type %x unexpected\n", vp->v_type);
		break;
	}
	bp->b_error = error;
	return error;
}

/*
 * nfs_doio for write.
 */
static int
nfs_doio_write(struct buf *bp, struct uio *uiop)
{
	struct vnode *vp = bp->b_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int iomode;
	bool stalewriteverf = false;
	int i, npages = (bp->b_bcount + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct vm_page **pgs, *spgs[UBC_MAX_PAGES];
#ifndef NFS_V2_ONLY
	bool needcommit = true; /* need only COMMIT RPC */
#else
	bool needcommit = false; /* need only COMMIT RPC */
#endif
	bool pageprotected;
	struct uvm_object *uobj = &vp->v_uobj;
	int error;
	off_t off, cnt;

	if (npages < __arraycount(spgs))
		pgs = spgs;
	else {
		if ((pgs = kmem_alloc(sizeof(*pgs) * npages, KM_NOSLEEP)) ==
		    NULL)
			return ENOMEM;
	}

	if ((bp->b_flags & B_ASYNC) != 0 && NFS_ISV3(vp)) {
		iomode = NFSV3WRITE_UNSTABLE;
	} else {
		iomode = NFSV3WRITE_FILESYNC;
	}

#ifndef NFS_V2_ONLY
again:
#endif
	rw_enter(&nmp->nm_writeverflock, RW_READER);

	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		if (pgs[i]->uobject == uobj &&
		    pgs[i]->offset == uiop->uio_offset + (i << PAGE_SHIFT)) {
			KASSERT(pgs[i]->flags & PG_BUSY);
			/*
			 * this page belongs to our object.
			 */
			rw_enter(uobj->vmobjlock, RW_WRITER);
			/*
			 * Write the page out stably if it's about to be
			 * released, because we can't resend it if the
			 * server crashes.
			 *
			 * XXX assuming PG_RELEASED|PG_PAGEOUT won't be
			 * changed until we unbusy the page.
			 */
			if (pgs[i]->flags & (PG_RELEASED|PG_PAGEOUT))
				iomode = NFSV3WRITE_FILESYNC;
			/*
			 * If we meet a page which hasn't been sent yet,
			 * we need to do a WRITE RPC.
			 */
			if ((pgs[i]->flags & PG_NEEDCOMMIT) == 0)
				needcommit = false;
			rw_exit(uobj->vmobjlock);
		} else {
			iomode = NFSV3WRITE_FILESYNC;
			needcommit = false;
		}
	}
	if (!needcommit && iomode == NFSV3WRITE_UNSTABLE) {
		rw_enter(uobj->vmobjlock, RW_WRITER);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags |= PG_NEEDCOMMIT | PG_RDONLY;
			pmap_page_protect(pgs[i], VM_PROT_READ);
		}
		rw_exit(uobj->vmobjlock);
		pageprotected = true; /* pages can't be modified during i/o. */
	} else
		pageprotected = false;

	/*
	 * Send the data to the server if necessary,
	 * otherwise just send a commit rpc.
	 */
#ifndef NFS_V2_ONLY
	if (needcommit) {

		/*
		 * If the buffer is in the range that we already committed,
		 * there's nothing to do.
		 *
		 * If it's in the range that we need to commit, push the
		 * whole range at once, otherwise only push the buffer.
		 * In both these cases, acquire the commit lock to avoid
		 * other processes modifying the range.
		 */

		off = uiop->uio_offset;
		cnt = bp->b_bcount;
		mutex_enter(&np->n_commitlock);
		if (!nfs_in_committed_range(vp, off, bp->b_bcount)) {
			bool pushedrange;
			if (nfs_in_tobecommitted_range(vp, off, bp->b_bcount)) {
				pushedrange = true;
				off = np->n_pushlo;
				cnt = np->n_pushhi - np->n_pushlo;
			} else {
				pushedrange = false;
			}
			error = nfs_commit(vp, off, cnt, curlwp);
			if (error == 0) {
				if (pushedrange) {
					nfs_merge_commit_ranges(vp);
				} else {
					nfs_add_committed_range(vp, off, cnt);
				}
			}
		} else {
			error = 0;
		}
		mutex_exit(&np->n_commitlock);
		rw_exit(&nmp->nm_writeverflock);
		if (!error) {
			/*
			 * pages are now on stable storage.
			 */
			uiop->uio_resid = 0;
			rw_enter(uobj->vmobjlock, RW_WRITER);
			for (i = 0; i < npages; i++) {
				pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
			}
			rw_exit(uobj->vmobjlock);
			goto out;
		} else if (error == NFSERR_STALEWRITEVERF) {
			nfs_clearcommit(vp->v_mount);
			goto again;
		}
		if (error) {
			bp->b_error = np->n_error = error;
			np->n_flag |= NWRITEERR;
		}
		goto out;
	}
#endif
	off = uiop->uio_offset;
	cnt = bp->b_bcount;
	uiop->uio_rw = UIO_WRITE;
	nfsstats.write_bios++;
	error = nfs_writerpc(vp, uiop, &iomode, pageprotected, &stalewriteverf);
#ifndef NFS_V2_ONLY
	if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		/*
		 * we need to commit pages later.
		 */
		mutex_enter(&np->n_commitlock);
		nfs_add_tobecommitted_range(vp, off, cnt);
		/*
		 * if there are too many uncommitted pages, commit them now.
		 */
		if (np->n_pushhi - np->n_pushlo > nfs_commitsize) {
			off = np->n_pushlo;
			cnt = nfs_commitsize >> 1;
			error = nfs_commit(vp, off, cnt, curlwp);
			if (!error) {
				nfs_add_committed_range(vp, off, cnt);
				nfs_del_tobecommitted_range(vp, off, cnt);
			}
			if (error == NFSERR_STALEWRITEVERF) {
				stalewriteverf = true;
				error = 0; /* it isn't a real error */
			}
		} else {
			/*
			 * re-dirty pages so that they will be passed
			 * to us later again.
			 */
			rw_enter(uobj->vmobjlock, RW_WRITER);
			for (i = 0; i < npages; i++) {
				uvm_pagemarkdirty(pgs[i],
				    UVM_PAGE_STATUS_DIRTY);
			}
			rw_exit(uobj->vmobjlock);
		}
		mutex_exit(&np->n_commitlock);
	} else
#endif
	if (!error) {
		/*
		 * pages are now on stable storage.
		 */
		mutex_enter(&np->n_commitlock);
		nfs_del_committed_range(vp, off, cnt);
		mutex_exit(&np->n_commitlock);
		rw_enter(uobj->vmobjlock, RW_WRITER);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
		}
		rw_exit(uobj->vmobjlock);
	} else {
		/*
		 * we got an error.
		 */
		bp->b_error = np->n_error = error;
		np->n_flag |= NWRITEERR;
	}

	rw_exit(&nmp->nm_writeverflock);

	if (stalewriteverf) {
		nfs_clearcommit(vp->v_mount);
	}
#ifndef NFS_V2_ONLY
out:
#endif
	if (pgs != spgs)
		kmem_free(pgs, sizeof(*pgs) * npages);
	return error;
}

/*
 * nfs_doio for B_PHYS.
 */
static int
nfs_doio_phys(struct buf *bp, struct uio *uiop)
{
	struct vnode *vp = bp->b_vp;
	int error;

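	/*
	 * b_blkno counts DEV_BSIZE (512-byte) units; shifting by
	 * DEV_BSHIFT (9) yields a byte offset, e.g. b_blkno 16 -> 8192.
	 */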
	uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
	if (bp->b_flags & B_READ) {
		uiop->uio_rw = UIO_READ;
		nfsstats.read_physios++;
		error = nfs_readrpc(vp, uiop);
	} else {
		int iomode = NFSV3WRITE_DATASYNC;
		bool stalewriteverf;
		struct nfsmount *nmp = VFSTONFS(vp->v_mount);

		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_physios++;
		rw_enter(&nmp->nm_writeverflock, RW_READER);
		error = nfs_writerpc(vp, uiop, &iomode, false, &stalewriteverf);
		rw_exit(&nmp->nm_writeverflock);
		if (stalewriteverf) {
			nfs_clearcommit(bp->b_vp->v_mount);
		}
	}
	bp->b_error = error;
	return error;
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(struct buf *bp)
{
	int error;
	struct uio uio;
	struct uio *uiop = &uio;
	struct iovec io;
	UVMHIST_FUNC("nfs_doio"); UVMHIST_CALLED(ubchist);

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_offset = (((off_t)bp->b_blkno) << DEV_BSHIFT);
	UIO_SETUP_SYSSPACE(uiop);
	io.iov_base = bp->b_data;
	io.iov_len = uiop->uio_resid = bp->b_bcount;

	/*
	 * Historically, paging was done with physio, but no more...
	 */
	if (bp->b_flags & B_PHYS) {
		/*
		 * ...though reading /dev/drum still gets us here.
		 */
		error = nfs_doio_phys(bp, uiop);
	} else if (bp->b_flags & B_READ) {
		error = nfs_doio_read(bp, uiop);
	} else {
		error = nfs_doio_write(bp, uiop);
	}
	bp->b_resid = uiop->uio_resid;
	biodone(bp);
	return (error);
}

/*
 * Vnode op for VM getpages.
 */

int
nfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct nfsnode *np = VTONFS(vp);
	const int npages = *ap->a_count;
	struct vm_page *pg, **pgs, **opgs, *spgs[UBC_MAX_PAGES];
	off_t origoffset, len;
	int i, error;
	bool v3 = NFS_ISV3(vp);
	bool write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	bool locked = (ap->a_flags & PGO_LOCKED) != 0;

	/*
	 * XXX NFS wants to modify the pages below and that can't be done
	 * with a read lock.  We can't upgrade the lock here because it
	 * would screw up UVM fault processing.  Have NFS take the I/O
	 * path.
	 */
	if (locked && rw_lock_op(uobj->vmobjlock) == RW_READER) {
		*ap->a_count = 0;
		ap->a_m[ap->a_centeridx] = NULL;
		return EBUSY;
	}

	/*
	 * If we are not locked we are not really using opgs,
	 * so just initialize it.
	 */
	if (!locked || npages < __arraycount(spgs))
		opgs = spgs;
	else {
		if ((opgs = kmem_alloc(npages * sizeof(*opgs), KM_NOSLEEP)) ==
		    NULL)
			return ENOMEM;
	}

	/*
	 * call the genfs code to get the pages.  `pgs' may be NULL
	 * when doing read-ahead.
	 */
	pgs = ap->a_m;
	if (write && locked && v3) {
		KASSERT(pgs != NULL);
#ifdef DEBUG

		/*
		 * If PGO_LOCKED is set, real pages shouldn't exist
		 * in the array.
		 */

		for (i = 0; i < npages; i++)
			KDASSERT(pgs[i] == NULL || pgs[i] == PGO_DONTCARE);
#endif
		memcpy(opgs, pgs, npages * sizeof(struct vm_page *));
	}
	error = genfs_getpages(v);
	if (error)
		goto out;

	/*
	 * for read faults where the nfs node is not yet marked NMODIFIED,
	 * set PG_RDONLY on the pages so that we come back here if someone
	 * tries to modify later via the mapping that will be entered for
	 * this fault.
	 */

	if (!write && (np->n_flag & NMODIFIED) == 0 && pgs != NULL) {
		if (!locked) {
			rw_enter(uobj->vmobjlock, RW_WRITER);
		}
		for (i = 0; i < npages; i++) {
			pg = pgs[i];
			if (pg == NULL || pg == PGO_DONTCARE) {
				continue;
			}
			pg->flags |= PG_RDONLY;
		}
		if (!locked) {
			rw_exit(uobj->vmobjlock);
		}
	}
	if (!write)
		goto out;

	/*
	 * this is a write fault, update the commit info.
	 */

	origoffset = ap->a_offset;
	len = npages << PAGE_SHIFT;

	if (v3) {
		if (!locked) {
			mutex_enter(&np->n_commitlock);
		} else {
			if (!mutex_tryenter(&np->n_commitlock)) {

				/*
				 * tell the caller that there are no pages
				 * available and put back original pgs array.
				 */

				*ap->a_count = 0;
				memcpy(pgs, opgs,
				    npages * sizeof(struct vm_page *));
				error = EBUSY;
				goto out;
			}
		}
		nfs_del_committed_range(vp, origoffset, len);
		nfs_del_tobecommitted_range(vp, origoffset, len);
	}
	np->n_flag |= NMODIFIED;
	if (!locked) {
		rw_enter(uobj->vmobjlock, RW_WRITER);
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		pg->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
	}
	if (!locked) {
		rw_exit(uobj->vmobjlock);
	}
	if (v3) {
		mutex_exit(&np->n_commitlock);
	}
out:
	if (opgs != spgs)
		kmem_free(opgs, sizeof(*opgs) * npages);
	return error;
}
   1379