/*	$NetBSD: nfs_bio.c,v 1.171.4.1 2008/01/02 21:57:40 bouyer Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_bio.c,v 1.171.4.1 2008/01/02 21:57:40 bouyer Exp $");

#include "opt_nfs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern int nfs_commitsize;
extern struct nfsstats nfsstats;

static int nfs_doio_read __P((struct buf *, struct uio *));
static int nfs_doio_write __P((struct buf *, struct uio *));
static int nfs_doio_phys __P((struct buf *, struct uio *));

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(vp, uio, ioflag, cred, cflag)
	struct vnode *vp;
	struct uio *uio;
	int ioflag, cflag;
	kauth_cred_t cred;
{
	struct nfsnode *np = VTONFS(vp);
	struct buf *bp = NULL, *rabp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsdircache *ndp = NULL, *nndp = NULL;
	void *baddr;
	int got_buf = 0, error = 0, n = 0, on = 0, en, enn;
	int enough = 0;
	struct dirent *dp, *pdp, *edp, *ep;
	off_t curoff = 0;
	int advice;
	struct lwp *l = curlwp;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (vp->v_type != VDIR && uio->uio_offset < 0)
		return (EINVAL);
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, l);
#endif
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);

	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 *
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * nfs_attrtimeo seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */

	if (vp->v_type != VLNK) {
		error = nfs_flushstalebuf(vp, cred, l,
		    NFS_FLUSHSTALEBUF_MYWRITE);
		if (error)
			return error;
	}

	do {
	    /*
	     * Don't cache symlinks.
	     */
	    if ((vp->v_vflag & VV_ROOT) && vp->v_type == VLNK) {
		return (nfs_readlinkrpc(vp, uio, cred));
	    }
	    baddr = (void *)0;
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;

		advice = IO_ADV_DECODE(ioflag);
		error = 0;
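		/*
		 * Copy data to the user through UBC windows onto the
		 * vnode's pages, one chunk at a time.  bytelen is
		 * clamped to the current file size so we never map
		 * past EOF; any truncation deferred by an nfsiod is
		 * applied first (nfs_delayedtruncate) before
		 * np->n_size is trusted.
		 */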
		while (uio->uio_resid > 0) {
			vsize_t bytelen;

			nfs_delayedtruncate(vp);
			if (np->n_size <= uio->uio_offset) {
				break;
			}
			bytelen =
			    MIN(np->n_size - uio->uio_offset, uio->uio_resid);
			error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
			    advice, UBC_READ | UBC_PARTIALOK |
			    (UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0));
			if (error) {
				/*
				 * XXXkludge
				 * the file has been truncated on the server.
				 * there isn't much we can do.
				 */
				if (uio->uio_offset >= np->n_size) {
					/* end of file */
					error = 0;
				} else {
					break;
				}
			}
		}
		break;

	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, l);
		if (!bp)
			return (EINTR);
		if ((bp->b_oflags & BO_DONE) == 0) {
			bp->b_flags |= B_READ;
			error = nfs_doio(bp);
			if (error) {
				brelse(bp, 0);
				return (error);
			}
		}
		n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		got_buf = 1;
		on = 0;
		break;
	    case VDIR:
diragain:
		nfsstats.biocache_readdirs++;
		ndp = nfs_searchdircache(vp, uio->uio_offset,
			(nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
		if (!ndp) {
			/*
			 * We've been handed a cookie that is not
			 * in the cache. If we're not translating
			 * 32 <-> 64, it may be a value that was
			 * flushed out of the cache because it grew
			 * too big. Let the server judge if it's
			 * valid or not. In the translation case,
			 * we have no way of validating this value,
			 * so punt.
			 */
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
				return (EINVAL);
			ndp = nfs_enterdircache(vp, uio->uio_offset,
				uio->uio_offset, 0, 0);
		}

		if (NFS_EOFVALID(np) &&
		    ndp->dc_cookie == np->n_direofoffset) {
			nfs_putdircache(np, ndp);
			nfsstats.direofcache_hits++;
			return (0);
		}

		bp = nfs_getcacheblk(vp, NFSDC_BLKNO(ndp), NFS_DIRBLKSIZ, l);
		if (!bp)
		    return (EINTR);
		if ((bp->b_oflags & BO_DONE) == 0) {
		    bp->b_flags |= B_READ;
		    bp->b_dcookie = ndp->dc_blkcookie;
		    error = nfs_doio(bp);
		    if (error) {
			/*
			 * Yuck! The directory has been modified on the
			 * server. Punt and let the userland code
			 * deal with it.
			 */
			nfs_putdircache(np, ndp);
			brelse(bp, 0);
			/*
			 * nfs_request maps NFSERR_BAD_COOKIE to EINVAL.
			 */
			if (error == EINVAL) { /* NFSERR_BAD_COOKIE */
			    nfs_invaldircache(vp, 0);
			    nfs_vinvalbuf(vp, 0, cred, l, 1);
			}
			return (error);
		    }
		}

		/*
		 * Just return if we hit EOF right away with this
		 * block. Always check here, because direofoffset
		 * may have been set by an nfsiod since the last
		 * check.
		 *
		 * also, empty block implies EOF.
		 */

		if (bp->b_bcount == bp->b_resid ||
		    (NFS_EOFVALID(np) &&
		    ndp->dc_blkcookie == np->n_direofoffset)) {
			KASSERT(bp->b_bcount != bp->b_resid ||
			    ndp->dc_blkcookie == bp->b_dcookie);
			nfs_putdircache(np, ndp);
			brelse(bp, BC_NOCACHE);
			return 0;
		}

		/*
		 * Find the entry we were looking for in the block.
		 */

		en = ndp->dc_entry;

		pdp = dp = (struct dirent *)bp->b_data;
		edp = (struct dirent *)(void *)((char *)bp->b_data + bp->b_bcount -
		    bp->b_resid);
		enn = 0;
		while (enn < en && dp < edp) {
			pdp = dp;
			dp = _DIRENT_NEXT(dp);
			enn++;
		}

		/*
		 * If the entry number was bigger than the number of
		 * entries in the block, or the cookie of the previous
		 * entry doesn't match, the directory cache is
		 * stale. Flush it and try again (i.e. go to
		 * the server).
		 */
		if (dp >= edp || (struct dirent *)_DIRENT_NEXT(dp) > edp ||
		    (en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
#ifdef DEBUG
			printf("invalid cache: %p %p %p off %lx %lx\n",
				pdp, dp, edp,
				(unsigned long)uio->uio_offset,
				(unsigned long)NFS_GETCOOKIE(pdp));
#endif
			nfs_putdircache(np, ndp);
			brelse(bp, 0);
			nfs_invaldircache(vp, 0);
			nfs_vinvalbuf(vp, 0, cred, l, 0);
			goto diragain;
		}

		on = (char *)dp - (char *)bp->b_data;

		/*
		 * Cache all entries that may be exported to the
		 * user, as they may be thrown back at us. The
		 * NFSBIO_CACHECOOKIES flag indicates that all
		 * entries are being 'exported', so cache them all.
		 */

		if (en == 0 && pdp == dp) {
			dp = _DIRENT_NEXT(dp);
			enn++;
		}

		if (uio->uio_resid < (bp->b_bcount - bp->b_resid - on)) {
			n = uio->uio_resid;
			enough = 1;
		} else
			n = bp->b_bcount - bp->b_resid - on;

		ep = (struct dirent *)(void *)((char *)bp->b_data + on + n);

		/*
		 * Find last complete entry to copy, caching entries
		 * (if requested) as we go.
		 */

		while (dp < ep && (struct dirent *)_DIRENT_NEXT(dp) <= ep) {
			if (cflag & NFSBIO_CACHECOOKIES) {
				nndp = nfs_enterdircache(vp, NFS_GETCOOKIE(pdp),
				    ndp->dc_blkcookie, enn, bp->b_lblkno);
				if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
					NFS_STASHCOOKIE32(pdp,
					    nndp->dc_cookie32);
				}
				nfs_putdircache(np, nndp);
			}
			pdp = dp;
			dp = _DIRENT_NEXT(dp);
			enn++;
		}
		nfs_putdircache(np, ndp);

		/*
		 * If the last requested entry was not the last in the
		 * buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
		 * cache the cookie of the last requested one, and
		 * set the offset to it.
		 */

		if ((on + n) < bp->b_bcount - bp->b_resid) {
			curoff = NFS_GETCOOKIE(pdp);
			nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
			    enn, bp->b_lblkno);
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
				curoff = nndp->dc_cookie32;
			}
			nfs_putdircache(np, nndp);
		} else
			curoff = bp->b_dcookie;

		/*
		 * Always cache the entry for the next block,
		 * so that readaheads can use it.
		 */
		nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie, 0,0);
		if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
			if (curoff == bp->b_dcookie) {
				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
				curoff = nndp->dc_cookie32;
			}
		}

		n = (char *)_DIRENT_NEXT(pdp) - ((char *)bp->b_data + on);

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
		    !NFS_EOFVALID(np)) {
			rabp = nfs_getcacheblk(vp, NFSDC_BLKNO(nndp),
						NFS_DIRBLKSIZ, l);
			if (rabp) {
			    if ((rabp->b_oflags & (BO_DONE | BO_DELWRI)) == 0) {
				rabp->b_dcookie = nndp->dc_cookie;
				rabp->b_flags |= (B_READ | B_ASYNC);
				if (nfs_asyncio(rabp)) {
				    brelse(rabp, BC_INVAL);
				}
			    } else
				brelse(rabp, 0);
			}
		}
		nfs_putdircache(np, nndp);
		got_buf = 1;
		break;
	    default:
		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
		break;
	    }

	    if (n > 0) {
		if (!baddr)
			baddr = bp->b_data;
		error = uiomove((char *)baddr + on, (int)n, uio);
	    }
	    switch (vp->v_type) {
	    case VREG:
		break;
	    case VLNK:
		n = 0;
		break;
	    case VDIR:
		uio->uio_offset = curoff;
		if (enough)
			n = 0;
		break;
	    default:
		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
	    }
	    if (got_buf)
		brelse(bp, 0);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(v)
	void *v;
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct uio *uio = ap->a_uio;
	struct lwp *l = curlwp;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	kauth_cred_t cred = ap->a_cred;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	voff_t oldoff, origoff;
	vsize_t bytelen;
	int error = 0;
	int ioflag = ap->a_ioflag;
	int extended = 0, wrotedata = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, l);
#endif
	if (ioflag & IO_APPEND) {
		NFS_INVALIDATE_ATTRCACHE(np);
		error = nfs_flushstalebuf(vp, cred, l,
		    NFS_FLUSHSTALEBUF_MYWRITE);
		if (error)
			return (error);
		uio->uio_offset = np->n_size;
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (l && l->l_proc && uio->uio_offset + uio->uio_resid >
	      l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		mutex_enter(&proclist_mutex);
		psignal(l->l_proc, SIGXFSZ);
		mutex_exit(&proclist_mutex);
		return (EFBIG);
	}

	origoff = uio->uio_offset;
	do {
		bool overwrite; /* if we are overwriting whole pages */
		u_quad_t oldsize;
		oldoff = uio->uio_offset;
		bytelen = uio->uio_resid;

		nfsstats.biocache_writes++;

		oldsize = np->n_size;
		np->n_flag |= NMODIFIED;
		if (np->n_size < uio->uio_offset + bytelen) {
			np->n_size = uio->uio_offset + bytelen;
		}
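		/*
		 * Detect writes that will overwrite whole pages: a
		 * page-aligned write longer than a page on a file
		 * that isn't mapped, or a page-aligned, page-multiple
		 * write entirely beyond the current EOF.  For those,
		 * UBC_FAULTBUSY below lets UBC supply freshly
		 * allocated pages instead of first reading the old
		 * contents from the server, saving a round trip.
		 */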
		overwrite = false;
		if ((uio->uio_offset & PAGE_MASK) == 0) {
			if ((vp->v_vflag & VV_MAPPED) == 0 &&
			    bytelen > PAGE_SIZE) {
				bytelen = trunc_page(bytelen);
				overwrite = true;
			} else if ((bytelen & PAGE_MASK) == 0 &&
			    uio->uio_offset >= vp->v_size) {
				overwrite = true;
			}
		}
		if (vp->v_size < uio->uio_offset + bytelen) {
			uvm_vnp_setwritesize(vp, uio->uio_offset + bytelen);
		}
		error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
		    UVM_ADV_RANDOM, UBC_WRITE | UBC_PARTIALOK |
		    (overwrite ? UBC_FAULTBUSY : 0) |
		    (UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0));
		if (error) {
			uvm_vnp_setwritesize(vp, vp->v_size);
			if (overwrite && np->n_size != oldsize) {
				/*
				 * backout size and free pages past eof.
				 */
				np->n_size = oldsize;
				mutex_enter(&vp->v_interlock);
				(void)VOP_PUTPAGES(vp, round_page(vp->v_size),
				    0, PGO_SYNCIO | PGO_FREE);
			}
			break;
		}
		wrotedata = 1;

		/*
		 * update UVM's notion of the size now that we've
		 * copied the data into the vnode's pages.
		 */

		if (vp->v_size < uio->uio_offset) {
			uvm_vnp_setsize(vp, uio->uio_offset);
			extended = 1;
		}

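		/*
		 * If this iteration crossed into a new nm_wsize-aligned
		 * window, push the pages of the window(s) we just
		 * filled to the server (asynchronously; no PGO_SYNCIO)
		 * so that the amount of dirty cached data stays
		 * bounded while a large write streams through.
		 */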
		if ((oldoff & ~(nmp->nm_wsize - 1)) !=
		    (uio->uio_offset & ~(nmp->nm_wsize - 1))) {
			mutex_enter(&vp->v_interlock);
			error = VOP_PUTPAGES(vp,
			    trunc_page(oldoff & ~(nmp->nm_wsize - 1)),
			    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
				       ~(nmp->nm_wsize - 1)), PGO_CLEANIT);
		}
	} while (uio->uio_resid > 0);
	if (wrotedata)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
	if (error == 0 && (ioflag & IO_SYNC) != 0) {
		mutex_enter(&vp->v_interlock);
		error = VOP_PUTPAGES(vp,
		    trunc_page(origoff & ~(nmp->nm_wsize - 1)),
		    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
			       ~(nmp->nm_wsize - 1)),
		    PGO_CLEANIT | PGO_SYNCIO);
	}
	return error;
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(vp, bn, size, l)
	struct vnode *vp;
	daddr_t bn;
	int size;
	struct lwp *l;
{
	struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

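	/*
	 * For interruptible mounts, ask getblk() to catch signals
	 * (PCATCH); if it returns NULL, poll every 2*hz ticks so a
	 * pending fatal signal is noticed via nfs_sigintr().  Hard
	 * mounts just wait for a buffer indefinitely.
	 */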
	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, l))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(vp, flags, cred, l, intrflg)
	struct vnode *vp;
	int flags;
	kauth_cred_t cred;
	struct lwp *l;
	int intrflg;
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slptimeo;
	bool catch;

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		catch = true;
		slptimeo = 2 * hz;
	} else {
		catch = false;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	mutex_enter(&vp->v_interlock);
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = mtsleep(&np->n_flag, PRIBIO + 2, "nfsvinval",
			slptimeo, &vp->v_interlock);
		if (error && intrflg && nfs_sigintr(nmp, NULL, l)) {
			mutex_exit(&vp->v_interlock);
			return EINTR;
		}
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	mutex_exit(&vp->v_interlock);
	error = vinvalbuf(vp, flags, cred, l, catch, 0);
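	/*
	 * The first vinvalbuf() may fail when "catch" is set and a
	 * signal arrives.  Retry without PCATCH but with a timeout,
	 * checking for a fatal signal each time around, so that a
	 * signal which isn't meant to interrupt the mount doesn't
	 * abort the flush.
	 */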
	while (error) {
		if (intrflg && nfs_sigintr(nmp, NULL, l)) {
			error = EINTR;
			break;
		}
		error = vinvalbuf(vp, flags, cred, l, 0, slptimeo);
	}
	mutex_enter(&vp->v_interlock);
	if (error == 0)
		np->n_flag &= ~NMODIFIED;
	np->n_flag &= ~NFLUSHINPROG;
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	mutex_exit(&vp->v_interlock);
	return error;
}

/*
 * nfs_flushstalebuf: flush cache if it's stale.
 *
 * => caller shouldn't own any pages or buffers which belong to the vnode.
 */

int
nfs_flushstalebuf(struct vnode *vp, kauth_cred_t cred, struct lwp *l,
    int flags)
{
	struct nfsnode *np = VTONFS(vp);
	struct vattr vattr;
	int error;

	if (np->n_flag & NMODIFIED) {
		if ((flags & NFS_FLUSHSTALEBUF_MYWRITE) == 0
		    || vp->v_type != VREG) {
			error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
			if (error)
				return error;
			if (vp->v_type == VDIR) {
				nfs_invaldircache(vp, 0);
			}
		} else {
			/*
			 * XXX assuming writes are ours.
			 */
		}
		NFS_INVALIDATE_ATTRCACHE(np);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
		np->n_mtime = vattr.va_mtime;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return error;
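		/*
		 * Compare the server's mtime with the one cached at
		 * the last flush.  A mismatch means someone else
		 * changed the file, so the cached data is stale and
		 * must be discarded.
		 */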
		if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) {
			if (vp->v_type == VDIR) {
				nfs_invaldircache(vp, 0);
			}
			error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
			if (error)
				return error;
			np->n_mtime = vattr.va_mtime;
		}
	}

	return error;
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */

int
nfs_asyncio(bp)
	struct buf *bp;
{
	struct nfs_iod *iod;
	struct nfsmount *nmp;
	int slptimeo = 0, error;
	bool catch = false;

	if (nfs_numasync == 0)
		return (EIO);

	nmp = VFSTONFS(bp->b_vp->v_mount);
again:
	if (nmp->nm_flag & NFSMNT_INT)
		catch = true;

	/*
	 * Find a free iod to process this request.
	 */

	mutex_enter(&nfs_iodlist_lock);
	iod = LIST_FIRST(&nfs_iodlist_idle);
	if (iod) {
		/*
		 * Found one, so wake it up and tell it which
		 * mount to process.
		 */
		LIST_REMOVE(iod, nid_idle);
		mutex_enter(&iod->nid_lock);
		mutex_exit(&nfs_iodlist_lock);
		KASSERT(iod->nid_mount == NULL);
		iod->nid_mount = nmp;
		cv_signal(&iod->nid_cv);
		mutex_enter(&nmp->nm_lock);
		mutex_exit(&iod->nid_lock);
		nmp->nm_bufqiods++;
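		/*
		 * The queue limit below is two buffers per active
		 * iod; we just added an iod to this mount, so some
		 * thread waiting for queue space may now proceed.
		 */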
		if (nmp->nm_bufqlen < 2 * nmp->nm_bufqiods) {
			cv_broadcast(&nmp->nm_aiocv);
		}
	} else {
		mutex_exit(&nfs_iodlist_lock);
		mutex_enter(&nmp->nm_lock);
	}

	KASSERT(mutex_owned(&nmp->nm_lock));

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.  However, even if we have an iod, do not initiate
	 * queue cleaning if curproc is the pageout daemon. If the NFS mount
	 * is via local loopback, we may put curproc (pagedaemon) to sleep
	 * waiting for the writes to complete. But the server (ourselves)
	 * may block the write, waiting for its (i.e., our) pagedaemon
	 * to produce clean pages to handle the write: deadlock.
	 * XXX: start non-loopback mounts straight away?  If "lots free",
	 * let pagedaemon start loopback writes anyway?
	 */
	if (nmp->nm_bufqiods > 0) {

		/*
		 * Ensure that the queue never grows too large.
		 */
		if (curlwp == uvm.pagedaemon_lwp) {
			/* Enqueue for later, to avoid free-page deadlock */
		} else while (nmp->nm_bufqlen >= 2 * nmp->nm_bufqiods) {
			if (catch) {
				error = cv_timedwait_sig(&nmp->nm_aiocv,
				    &nmp->nm_lock, slptimeo);
			} else {
				error = cv_timedwait(&nmp->nm_aiocv,
				    &nmp->nm_lock, slptimeo);
			}
			if (error) {
				if (nfs_sigintr(nmp, NULL, curlwp)) {
					mutex_exit(&nmp->nm_lock);
					return (EINTR);
				}
				if (catch) {
					catch = false;
					slptimeo = 2 * hz;
				}
			}

			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */

			if (nmp->nm_bufqiods == 0) {
				mutex_exit(&nmp->nm_lock);
				goto again;
			}
		}
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		mutex_exit(&nmp->nm_lock);
		return (0);
	}
	mutex_exit(&nmp->nm_lock);

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */

	return (EIO);
}

/*
 * nfs_doio for read.
 */
static int
nfs_doio_read(bp, uiop)
	struct buf *bp;
	struct uio *uiop;
{
	struct vnode *vp = bp->b_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0;

	uiop->uio_rw = UIO_READ;
	switch (vp->v_type) {
	case VREG:
		nfsstats.read_bios++;
		error = nfs_readrpc(vp, uiop);
		if (!error && uiop->uio_resid) {
			int diff, len;

			/*
			 * If uio_resid > 0, there is a hole in the file and
			 * no writes after the hole have been pushed to
			 * the server yet or the file has been truncated
			 * on the server.
			 * Just zero fill the rest of the valid area.
			 */

			KASSERT(vp->v_size >=
			    uiop->uio_offset + uiop->uio_resid);
			diff = bp->b_bcount - uiop->uio_resid;
			len = uiop->uio_resid;
			memset((char *)bp->b_data + diff, 0, len);
			uiop->uio_resid = 0;
		}
#if 0
		if (uiop->uio_lwp && (vp->v_iflag & VI_TEXT) &&
		    timespeccmp(&np->n_mtime, &np->n_vattr->va_mtime, !=)) {
			killproc(uiop->uio_lwp->l_proc, "process text file was modified");
#if 0 /* XXX NJWLWP */
			uiop->uio_lwp->l_proc->p_holdcnt++;
#endif
		}
#endif
		break;
	case VLNK:
		KASSERT(uiop->uio_offset == (off_t)0);
		nfsstats.readlink_bios++;
		error = nfs_readlinkrpc(vp, uiop, np->n_rcred);
		break;
	case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = bp->b_dcookie;
#ifndef NFS_V2_ONLY
		if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
			error = nfs_readdirplusrpc(vp, uiop,
			    curlwp->l_cred);
			/*
			 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
			 */
			if (error == ENOTSUP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
#else
		nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
#endif
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = nfs_readdirrpc(vp, uiop,
			    curlwp->l_cred);
		if (!error) {
			bp->b_dcookie = uiop->uio_offset;
		}
		break;
	default:
		printf("nfs_doio:  type %x unexpected\n", vp->v_type);
		break;
	}
	bp->b_error = error;
	return error;
}

/*
 * nfs_doio for write.
 */
static int
nfs_doio_write(bp, uiop)
	struct buf *bp;
	struct uio *uiop;
{
	struct vnode *vp = bp->b_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int iomode;
	bool stalewriteverf = false;
	int i, npages = (bp->b_bcount + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct vm_page *pgs[npages];
#ifndef NFS_V2_ONLY
	bool needcommit = true; /* need only COMMIT RPC */
#else
	bool needcommit = false; /* need only COMMIT RPC */
#endif
	bool pageprotected;
	struct uvm_object *uobj = &vp->v_uobj;
	int error;
	off_t off, cnt;

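	/*
	 * For async writes on an NFSv3 mount, write UNSTABLE and
	 * commit later; for sync writes (or v2) the server must put
	 * the data on stable storage before replying (FILESYNC).
	 */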
	if ((bp->b_flags & B_ASYNC) != 0 && NFS_ISV3(vp)) {
		iomode = NFSV3WRITE_UNSTABLE;
	} else {
		iomode = NFSV3WRITE_FILESYNC;
	}

#ifndef NFS_V2_ONLY
again:
#endif
	rw_enter(&nmp->nm_writeverflock, RW_READER);

	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		if (pgs[i]->uobject == uobj &&
		    pgs[i]->offset == uiop->uio_offset + (i << PAGE_SHIFT)) {
			KASSERT(pgs[i]->flags & PG_BUSY);
			/*
			 * this page belongs to our object.
			 */
			mutex_enter(&uobj->vmobjlock);
			/*
			 * write out the page stably if it's about to
			 * be released, because we can't resend it
			 * in case of a server crash.
			 *
			 * XXX assuming PG_RELEASED|PG_PAGEOUT won't be
			 * changed until the page is unbusied.
			 */
			if (pgs[i]->flags & (PG_RELEASED|PG_PAGEOUT))
				iomode = NFSV3WRITE_FILESYNC;
			/*
			 * if we find a page which hasn't been sent yet,
			 * we need to do a WRITE RPC.
			 */
			if ((pgs[i]->flags & PG_NEEDCOMMIT) == 0)
				needcommit = false;
			mutex_exit(&uobj->vmobjlock);
		} else {
			iomode = NFSV3WRITE_FILESYNC;
			needcommit = false;
		}
	}
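	/*
	 * If every page was already sent by an earlier UNSTABLE
	 * write (needcommit), a COMMIT RPC alone suffices.
	 * Otherwise, for an UNSTABLE write, write-protect the pages
	 * first: since they then cannot change during the RPC, the
	 * same data can safely be resent if the server reboots and
	 * the write verifier changes (pageprotected).
	 */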
	if (!needcommit && iomode == NFSV3WRITE_UNSTABLE) {
		mutex_enter(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags |= PG_NEEDCOMMIT | PG_RDONLY;
			pmap_page_protect(pgs[i], VM_PROT_READ);
		}
		mutex_exit(&uobj->vmobjlock);
		pageprotected = true; /* pages can't be modified during i/o. */
	} else
		pageprotected = false;

	/*
	 * Send the data to the server if necessary,
	 * otherwise just send a commit rpc.
	 */
#ifndef NFS_V2_ONLY
	if (needcommit) {

		/*
		 * If the buffer is in the range that we already committed,
		 * there's nothing to do.
		 *
		 * If it's in the range that we need to commit, push the
		 * whole range at once, otherwise only push the buffer.
		 * In both these cases, acquire the commit lock to avoid
		 * other processes modifying the range.
		 */

		off = uiop->uio_offset;
		cnt = bp->b_bcount;
		mutex_enter(&np->n_commitlock);
		if (!nfs_in_committed_range(vp, off, bp->b_bcount)) {
			bool pushedrange;
			if (nfs_in_tobecommitted_range(vp, off, bp->b_bcount)) {
				pushedrange = true;
				off = np->n_pushlo;
				cnt = np->n_pushhi - np->n_pushlo;
			} else {
				pushedrange = false;
			}
			error = nfs_commit(vp, off, cnt, curlwp);
			if (error == 0) {
				if (pushedrange) {
					nfs_merge_commit_ranges(vp);
				} else {
					nfs_add_committed_range(vp, off, cnt);
				}
			}
		} else {
			error = 0;
		}
		mutex_exit(&np->n_commitlock);
		rw_exit(&nmp->nm_writeverflock);
		if (!error) {
			/*
			 * pages are now on stable storage.
			 */
			uiop->uio_resid = 0;
			mutex_enter(&uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
			}
			mutex_exit(&uobj->vmobjlock);
			return 0;
		} else if (error == NFSERR_STALEWRITEVERF) {
			nfs_clearcommit(vp->v_mount);
			goto again;
		}
		if (error) {
			bp->b_error = np->n_error = error;
			np->n_flag |= NWRITEERR;
		}
		return error;
	}
#endif
	off = uiop->uio_offset;
	cnt = bp->b_bcount;
	uiop->uio_rw = UIO_WRITE;
	nfsstats.write_bios++;
	error = nfs_writerpc(vp, uiop, &iomode, pageprotected, &stalewriteverf);
#ifndef NFS_V2_ONLY
	if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		/*
		 * we need to commit pages later.
		 */
		mutex_enter(&np->n_commitlock);
		nfs_add_tobecommitted_range(vp, off, cnt);
		/*
		 * if there are too many uncommitted pages, commit some now.
		 */
		if (np->n_pushhi - np->n_pushlo > nfs_commitsize) {
			off = np->n_pushlo;
			cnt = nfs_commitsize >> 1;
			error = nfs_commit(vp, off, cnt, curlwp);
			if (!error) {
				nfs_add_committed_range(vp, off, cnt);
				nfs_del_tobecommitted_range(vp, off, cnt);
			}
			if (error == NFSERR_STALEWRITEVERF) {
				stalewriteverf = true;
				error = 0; /* it isn't a real error */
			}
		} else {
			/*
			 * re-dirty pages so that they will be passed
			 * to us later again.
			 */
			mutex_enter(&uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				pgs[i]->flags &= ~PG_CLEAN;
			}
			mutex_exit(&uobj->vmobjlock);
		}
		mutex_exit(&np->n_commitlock);
	} else
#endif
	if (!error) {
		/*
		 * pages are now on stable storage.
		 */
		mutex_enter(&np->n_commitlock);
		nfs_del_committed_range(vp, off, cnt);
		mutex_exit(&np->n_commitlock);
		mutex_enter(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
		}
		mutex_exit(&uobj->vmobjlock);
	} else {
		/*
		 * we got an error.
		 */
		bp->b_error = np->n_error = error;
		np->n_flag |= NWRITEERR;
	}

	rw_exit(&nmp->nm_writeverflock);

	if (stalewriteverf) {
		nfs_clearcommit(vp->v_mount);
	}
	return error;
}

/*
 * nfs_doio for B_PHYS.
 */
static int
nfs_doio_phys(bp, uiop)
	struct buf *bp;
	struct uio *uiop;
{
	struct vnode *vp = bp->b_vp;
	int error;

	uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
	if (bp->b_flags & B_READ) {
		uiop->uio_rw = UIO_READ;
		nfsstats.read_physios++;
		error = nfs_readrpc(vp, uiop);
	} else {
		int iomode = NFSV3WRITE_DATASYNC;
		bool stalewriteverf;
		struct nfsmount *nmp = VFSTONFS(vp->v_mount);

		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_physios++;
		rw_enter(&nmp->nm_writeverflock, RW_READER);
		error = nfs_writerpc(vp, uiop, &iomode, false, &stalewriteverf);
		rw_exit(&nmp->nm_writeverflock);
		if (stalewriteverf) {
			nfs_clearcommit(bp->b_vp->v_mount);
		}
	}
	bp->b_error = error;
	return error;
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(bp)
	struct buf *bp;
{
	int error;
	struct uio uio;
	struct uio *uiop = &uio;
	struct iovec io;
	UVMHIST_FUNC("nfs_doio"); UVMHIST_CALLED(ubchist);

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_offset = (((off_t)bp->b_blkno) << DEV_BSHIFT);
	UIO_SETUP_SYSSPACE(uiop);
	io.iov_base = bp->b_data;
	io.iov_len = uiop->uio_resid = bp->b_bcount;

	/*
	 * Historically, paging was done with physio, but no more...
	 */
	if (bp->b_flags & B_PHYS) {
		/*
		 * ...though reading /dev/drum still gets us here.
		 */
		error = nfs_doio_phys(bp, uiop);
	} else if (bp->b_flags & B_READ) {
		error = nfs_doio_read(bp, uiop);
	} else {
		error = nfs_doio_write(bp, uiop);
	}
	bp->b_resid = uiop->uio_resid;
	biodone(bp);
	return (error);
}

/*
 * Vnode op for VM getpages.
 */

int
nfs_getpages(v)
	void *v;
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct nfsnode *np = VTONFS(vp);
	const int npages = *ap->a_count;
	struct vm_page *pg, **pgs, *opgs[npages];
	off_t origoffset, len;
	int i, error;
	bool v3 = NFS_ISV3(vp);
	bool write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	bool locked = (ap->a_flags & PGO_LOCKED) != 0;

	/*
	 * call the genfs code to get the pages.  `pgs' may be NULL
	 * when doing read-ahead.
	 */

	pgs = ap->a_m;
	if (write && locked && v3) {
		KASSERT(pgs != NULL);
#ifdef DEBUG

		/*
		 * If PGO_LOCKED is set, real pages shouldn't exist
		 * in the array.
		 */

		for (i = 0; i < npages; i++)
			KDASSERT(pgs[i] == NULL || pgs[i] == PGO_DONTCARE);
#endif
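		/*
		 * Save a copy of the pages array before
		 * genfs_getpages() fills it in; if we can't take
		 * n_commitlock without sleeping in the PGO_LOCKED
		 * case below, we must undo the allocation and restore
		 * the caller's array.
		 */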
		memcpy(opgs, pgs, npages * sizeof(struct vm_page *));
	}
	error = genfs_getpages(v);
	if (error) {
		return (error);
	}

	/*
	 * for read faults where the nfs node is not yet marked NMODIFIED,
	 * set PG_RDONLY on the pages so that we come back here if someone
	 * tries to modify later via the mapping that will be entered for
	 * this fault.
	 */

	if (!write && (np->n_flag & NMODIFIED) == 0 && pgs != NULL) {
		if (!locked) {
			mutex_enter(&uobj->vmobjlock);
		}
		for (i = 0; i < npages; i++) {
			pg = pgs[i];
			if (pg == NULL || pg == PGO_DONTCARE) {
				continue;
			}
			pg->flags |= PG_RDONLY;
		}
		if (!locked) {
			mutex_exit(&uobj->vmobjlock);
		}
	}
	if (!write) {
		return (0);
	}

	/*
	 * this is a write fault, update the commit info.
	 */

	origoffset = ap->a_offset;
	len = npages << PAGE_SHIFT;

	if (v3) {
		if (!locked) {
			mutex_enter(&np->n_commitlock);
		} else {
			if (!mutex_tryenter(&np->n_commitlock)) {

				/*
				 * Since PGO_LOCKED is set, we need to unbusy
				 * all pages fetched by genfs_getpages() above,
				 * tell the caller that there are no pages
				 * available and put back original pgs array.
				 */

				mutex_enter(&uvm_pageqlock);
				uvm_page_unbusy(pgs, npages);
				mutex_exit(&uvm_pageqlock);
				*ap->a_count = 0;
				memcpy(pgs, opgs,
				    npages * sizeof(struct vm_page *));
				return EBUSY;
			}
		}
		nfs_del_committed_range(vp, origoffset, len);
		nfs_del_tobecommitted_range(vp, origoffset, len);
	}
	np->n_flag |= NMODIFIED;
	if (!locked) {
		mutex_enter(&uobj->vmobjlock);
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		pg->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
	}
	if (!locked) {
		mutex_exit(&uobj->vmobjlock);
	}
	if (v3) {
		mutex_exit(&np->n_commitlock);
	}
	return (0);
}