/*	$NetBSD: nfs_bio.c,v 1.107 2003/08/07 16:33:49 agc Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_bio.c,v 1.107 2003/08/07 16:33:49 agc Exp $");

#include "opt_nfs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nqnfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern int nfs_commitsize;
extern struct nfsstats nfsstats;

static int nfs_doio_read __P((struct buf *, struct uio *));
static int nfs_doio_write __P((struct buf *, struct uio *));
static int nfs_doio_phys __P((struct buf *, struct uio *));

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(vp, uio, ioflag, cred, cflag)
	struct vnode *vp;
	struct uio *uio;
	int ioflag, cflag;
	struct ucred *cred;
{
	struct nfsnode *np = VTONFS(vp);
	struct buf *bp = NULL, *rabp;
	struct vattr vattr;
	struct proc *p;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsdircache *ndp = NULL, *nndp = NULL;
	caddr_t baddr, ep, edp;
	int got_buf = 0, error = 0, n = 0, on = 0, en, enn;
	int enough = 0;
	struct dirent *dp, *pdp;
	off_t curoff = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (vp->v_type != VDIR && uio->uio_offset < 0)
		return (EINVAL);
	p = uio->uio_procp;
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, p);
#endif
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);

	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nqnfs, full cache consistency is maintained within the loop.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */

	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0 && vp->v_type != VLNK) {
		if (np->n_flag & NMODIFIED) {
			if (vp->v_type != VREG) {
				if (vp->v_type != VDIR)
					panic("nfs: bioread, not dir");
				nfs_invaldircache(vp, 0);
				np->n_direofoffset = 0;
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
			}
			np->n_attrstamp = 0;
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
		} else {
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			if (np->n_mtime != vattr.va_mtime.tv_sec) {
				if (vp->v_type == VDIR) {
					nfs_invaldircache(vp, 0);
					np->n_direofoffset = 0;
				}
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_mtime = vattr.va_mtime.tv_sec;
			}
		}
	}

	/*
	 * update the cached read creds for this node.
	 */

	if (np->n_rcred) {
		crfree(np->n_rcred);
	}
	np->n_rcred = cred;
	crhold(cred);

	do {
#ifndef NFS_V2_ONLY
	    /*
	     * Get a valid lease. If cached data is stale, flush it.
	     */
	    if (nmp->nm_flag & NFSMNT_NQNFS) {
		if (NQNFS_CKINVALID(vp, np, ND_READ)) {
		    do {
			error = nqnfs_getlease(vp, ND_READ, cred, p);
		    } while (error == NQNFS_EXPIRED);
		    if (error)
			return (error);
		    if (np->n_lrev != np->n_brev ||
			(np->n_flag & NQNFSNONCACHE) ||
			((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
			if (vp->v_type == VDIR) {
				nfs_invaldircache(vp, 0);
				np->n_direofoffset = 0;
			}
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
			if (error)
			    return (error);
			np->n_brev = np->n_lrev;
		    }
		} else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) {
		    nfs_invaldircache(vp, 0);
		    error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
		    np->n_direofoffset = 0;
		    if (error)
			return (error);
		}
	    }
#endif
	    /*
	     * Don't cache symlinks.
	     */
	    if (np->n_flag & NQNFSNONCACHE
		|| ((vp->v_flag & VROOT) && vp->v_type == VLNK)) {
		switch (vp->v_type) {
		case VREG:
			return (nfs_readrpc(vp, uio));
		case VLNK:
			return (nfs_readlinkrpc(vp, uio, cred));
		case VDIR:
			break;
		default:
			printf(" NQNFSNONCACHE: type %x unexpected\n",
			    vp->v_type);
		}
	    }
	    baddr = (caddr_t)0;
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;

		error = 0;
		if (uio->uio_offset >= np->n_size) {
			break;
		}
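		/*
		 * Read through the page cache: map the range with
		 * ubc_alloc() and copy it out with uiomove(); any
		 * read RPCs needed to fill the pages are issued from
		 * the fault path (VOP_GETPAGES()).
		 */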
		while (uio->uio_resid > 0) {
			void *win;
			vsize_t bytelen = MIN(np->n_size - uio->uio_offset,
					      uio->uio_resid);

			if (bytelen == 0)
				break;
			win = ubc_alloc(&vp->v_uobj, uio->uio_offset,
					&bytelen, UBC_READ);
			error = uiomove(win, bytelen, uio);
			ubc_release(win, 0);
			if (error) {
				break;
			}
		}
		n = 0;
		break;

	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_DONE) == 0) {
			bp->b_flags |= B_READ;
			error = nfs_doio(bp, p);
			if (error) {
				brelse(bp);
				return (error);
			}
		}
		n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		got_buf = 1;
		on = 0;
		break;
	    case VDIR:
diragain:
		nfsstats.biocache_readdirs++;
		ndp = nfs_searchdircache(vp, uio->uio_offset,
			(nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
		if (!ndp) {
			/*
			 * We've been handed a cookie that is not
			 * in the cache. If we're not translating
			 * 32 <-> 64, it may be a value that was
			 * flushed out of the cache because it grew
			 * too big. Let the server judge if it's
			 * valid or not. In the translation case,
			 * we have no way of validating this value,
			 * so punt.
			 */
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
				return (EINVAL);
			ndp = nfs_enterdircache(vp, uio->uio_offset,
				uio->uio_offset, 0, 0);
		}

		if (uio->uio_offset != 0 &&
		    ndp->dc_cookie == np->n_direofoffset) {
			nfsstats.direofcache_hits++;
			return (0);
		}

		bp = nfs_getcacheblk(vp, ndp->dc_blkno, NFS_DIRBLKSIZ, p);
		if (!bp)
		    return (EINTR);
		if ((bp->b_flags & B_DONE) == 0) {
		    bp->b_flags |= B_READ;
		    bp->b_dcookie = ndp->dc_blkcookie;
		    error = nfs_doio(bp, p);
		    if (error) {
			/*
			 * Yuck! The directory has been modified on the
			 * server. Punt and let the userland code
			 * deal with it.
			 */
			brelse(bp);
			if (error == NFSERR_BAD_COOKIE) {
			    nfs_invaldircache(vp, 0);
			    nfs_vinvalbuf(vp, 0, cred, p, 1);
			    error = EINVAL;
			}
			return (error);
		    }
		}

		/*
		 * Just return if we hit EOF right away with this
		 * block. Always check here, because direofoffset
		 * may have been set by an nfsiod since the last
		 * check.
		 */
		if (np->n_direofoffset != 0 &&
			ndp->dc_blkcookie == np->n_direofoffset) {
			brelse(bp);
			return (0);
		}

		/*
		 * Find the entry we were looking for in the block.
		 */

		en = ndp->dc_entry;

		pdp = dp = (struct dirent *)bp->b_data;
		edp = bp->b_data + bp->b_bcount - bp->b_resid;
		enn = 0;
		while (enn < en && (caddr_t)dp < edp) {
			pdp = dp;
			dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
			enn++;
		}

		/*
		 * If the entry number was bigger than the number of
		 * entries in the block, or the cookie of the previous
		 * entry doesn't match, the directory cache is
		 * stale. Flush it and try again (i.e. go to
		 * the server).
		 */
		if ((caddr_t)dp >= edp || (caddr_t)dp + dp->d_reclen > edp ||
		    (en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
#ifdef DEBUG
		    	printf("invalid cache: %p %p %p off %lx %lx\n",
				pdp, dp, edp,
				(unsigned long)uio->uio_offset,
				(unsigned long)NFS_GETCOOKIE(pdp));
#endif
			brelse(bp);
			nfs_invaldircache(vp, 0);
			nfs_vinvalbuf(vp, 0, cred, p, 0);
			goto diragain;
		}

		on = (caddr_t)dp - bp->b_data;

		/*
		 * Cache all entries that may be exported to the
		 * user, as they may be thrown back at us. The
		 * NFSBIO_CACHECOOKIES flag indicates that all
		 * entries are being 'exported', so cache them all.
		 */

		if (en == 0 && pdp == dp) {
			dp = (struct dirent *)
			    ((caddr_t)dp + dp->d_reclen);
			enn++;
		}

		if (uio->uio_resid < (bp->b_bcount - bp->b_resid - on)) {
			n = uio->uio_resid;
			enough = 1;
		} else
			n = bp->b_bcount - bp->b_resid - on;

		ep = bp->b_data + on + n;

		/*
		 * Find last complete entry to copy, caching entries
		 * (if requested) as we go.
		 */

		while ((caddr_t)dp < ep && (caddr_t)dp + dp->d_reclen <= ep) {
			if (cflag & NFSBIO_CACHECOOKIES) {
				nndp = nfs_enterdircache(vp, NFS_GETCOOKIE(pdp),
				    ndp->dc_blkcookie, enn, bp->b_lblkno);
				if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
					NFS_STASHCOOKIE32(pdp,
					    nndp->dc_cookie32);
				}
			}
			pdp = dp;
			dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
			enn++;
		}

		/*
		 * If the last requested entry was not the last in the
		 * buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
		 * cache the cookie of the last requested one, and
		 * set the offset to it.
		 */

		if ((on + n) < bp->b_bcount - bp->b_resid) {
			curoff = NFS_GETCOOKIE(pdp);
			nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
			    enn, bp->b_lblkno);
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
				curoff = nndp->dc_cookie32;
			}
		} else
			curoff = bp->b_dcookie;

		/*
		 * Always cache the entry for the next block,
		 * so that readaheads can use it.
		 */
		nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie, 0, 0);
		if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
			if (curoff == bp->b_dcookie) {
				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
				curoff = nndp->dc_cookie32;
			}
		}

		n = ((caddr_t)pdp + pdp->d_reclen) - (bp->b_data + on);

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
		    np->n_direofoffset == 0 && !(np->n_flag & NQNFSNONCACHE)) {
			rabp = nfs_getcacheblk(vp, nndp->dc_blkno,
						NFS_DIRBLKSIZ, p);
			if (rabp) {
			    if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
				rabp->b_dcookie = nndp->dc_cookie;
				rabp->b_flags |= (B_READ | B_ASYNC);
				if (nfs_asyncio(rabp)) {
				    rabp->b_flags |= B_INVAL;
				    brelse(rabp);
				}
			    } else
				brelse(rabp);
			}
		}
		got_buf = 1;
		break;
	    default:
		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
		break;
	    }

	    if (n > 0) {
		if (!baddr)
			baddr = bp->b_data;
		error = uiomove(baddr + on, (int)n, uio);
	    }
	    switch (vp->v_type) {
	    case VREG:
		break;
	    case VLNK:
		n = 0;
		break;
	    case VDIR:
		if (np->n_flag & NQNFSNONCACHE)
			bp->b_flags |= B_INVAL;
		uio->uio_offset = curoff;
		if (enough)
			n = 0;
		break;
	    default:
		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
	    }
	    if (got_buf)
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(v)
	void *v;
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	void *win;
	voff_t oldoff, origoff;
	vsize_t bytelen;
	int error = 0;
	int extended = 0, wrotedta = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, p);
#endif
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			np->n_attrstamp = 0;
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
			if (error)
				return (error);
		}
		if (ioflag & IO_APPEND) {
			np->n_attrstamp = 0;
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, i don't think it matters
	 */
	if (p && uio->uio_offset + uio->uio_resid >
	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * update the cached write creds for this node.
	 */

	if (np->n_wcred) {
		crfree(np->n_wcred);
	}
	np->n_wcred = cred;
	crhold(cred);

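	/*
	 * For uncached writes, bypass the page cache and push the
	 * data directly to the server with a synchronous FILESYNC
	 * write RPC, clearing commit state if the server's write
	 * verifier changed underneath us.
	 */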
	if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) {
		int iomode = NFSV3WRITE_FILESYNC;
		boolean_t stalewriteverf = FALSE;

		lockmgr(&nmp->nm_writeverflock, LK_SHARED, NULL);
		error = nfs_writerpc(vp, uio, &iomode, FALSE, &stalewriteverf);
		lockmgr(&nmp->nm_writeverflock, LK_RELEASE, NULL);
		if (stalewriteverf)
			nfs_clearcommit(vp->v_mount);
		return (error);
	}

	origoff = uio->uio_offset;
	do {
		boolean_t extending; /* if we are extending whole pages */
		u_quad_t oldsize;
		oldoff = uio->uio_offset;
		bytelen = uio->uio_resid;

#ifndef NFS_V2_ONLY
		/*
		 * Check for a valid write lease.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
			do {
				error = nqnfs_getlease(vp, ND_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error)
				return (error);
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_brev = np->n_lrev;
			}
		}
#endif
		nfsstats.biocache_writes++;

		oldsize = np->n_size;
		np->n_flag |= NMODIFIED;
		if (np->n_size < uio->uio_offset + bytelen) {
			np->n_size = uio->uio_offset + bytelen;
		}
		extending = ((uio->uio_offset & PAGE_MASK) == 0 &&
		    (bytelen & PAGE_MASK) == 0 &&
		    uio->uio_offset >= vp->v_size);
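		/*
		 * When appending whole pages past EOF, UBC_FAULTBUSY
		 * has ubc_alloc() supply zero-filled pages up front,
		 * so the fault path need not read from the server
		 * pages that this write is about to overwrite in full.
		 */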
		win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
			    UBC_WRITE | (extending ? UBC_FAULTBUSY : 0));
		error = uiomove(win, bytelen, uio);
		ubc_release(win, 0);
		if (error) {
			if (extending) {
				/*
				 * backout size and free pages past eof.
				 */
				np->n_size = oldsize;
				(void)VOP_PUTPAGES(vp, round_page(vp->v_size),
				    0, PGO_SYNCIO | PGO_FREE);
			}
			break;
		}
		wrotedta = 1;

		/*
		 * update UVM's notion of the size now that we've
		 * copied the data into the vnode's pages.
		 */

		if (vp->v_size < uio->uio_offset) {
			uvm_vnp_setsize(vp, uio->uio_offset);
			extended = 1;
		}

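		/*
		 * Once the write crosses an nm_wsize boundary, push
		 * the completed chunk out so that dirty pages reach
		 * the server in write-size-aligned clusters.
		 */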
		if ((oldoff & ~(nmp->nm_wsize - 1)) !=
		    (uio->uio_offset & ~(nmp->nm_wsize - 1))) {
			simple_lock(&vp->v_interlock);
			error = VOP_PUTPAGES(vp,
			    trunc_page(oldoff & ~(nmp->nm_wsize - 1)),
			    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
				       ~(nmp->nm_wsize - 1)), PGO_CLEANIT);
		}
	} while (uio->uio_resid > 0);
	if (wrotedta)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
	if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
		simple_lock(&vp->v_interlock);
		error = VOP_PUTPAGES(vp,
		    trunc_page(origoff & ~(nmp->nm_wsize - 1)),
		    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
			       ~(nmp->nm_wsize - 1)),
		    PGO_CLEANIT | PGO_SYNCIO);
	}
	return error;
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(vp, bn, size, p)
	struct vnode *vp;
	daddr_t bn;
	int size;
	struct proc *p;
{
	struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

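	/*
	 * On interruptible mounts, wait for the buffer with PCATCH
	 * and then poll with a timeout, so that a pending signal can
	 * abort the wait instead of sleeping forever.
	 */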
	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, p))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(vp, flags, cred, p, intrflg)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int intrflg;
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	simple_lock(&vp->v_interlock);
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = ltsleep(&np->n_flag, PRIBIO + 2, "nfsvinval",
			slptimeo, &vp->v_interlock);
		if (error && intrflg && nfs_sigintr(nmp, NULL, p)) {
			simple_unlock(&vp->v_interlock);
			return EINTR;
		}
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	simple_unlock(&vp->v_interlock);
	error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
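	/*
	 * If the first, possibly interruptible, attempt fails, retry
	 * with plain timed sleeps; give up only when a signal is
	 * delivered on an interruptible mount.
	 */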
	while (error) {
		if (intrflg && nfs_sigintr(nmp, NULL, p)) {
			error = EINTR;
			break;
		}
		error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
	}
	simple_lock(&vp->v_interlock);
	if (error == 0)
		np->n_flag &= ~NMODIFIED;
	np->n_flag &= ~NFLUSHINPROG;
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	simple_unlock(&vp->v_interlock);
	return error;
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */

int
nfs_asyncio(bp)
	struct buf *bp;
{
	int i;
	struct nfsmount *nmp;
	int gotiod, slpflag = 0, slptimeo = 0, error;

	if (nfs_numasync == 0)
		return (EIO);

	nmp = VFSTONFS(bp->b_vp->v_mount);
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */

	for (i = 0; i < NFS_MAXASYNCDAEMON; i++) {
		struct nfs_iod *iod = &nfs_asyncdaemon[i];

		simple_lock(&iod->nid_slock);
		if (iod->nid_want) {
			/*
			 * Found one, so wake it up and tell it which
			 * mount to process.
			 */
			iod->nid_want = NULL;
			iod->nid_mount = nmp;
			wakeup(&iod->nid_want);
			simple_lock(&nmp->nm_slock);
			simple_unlock(&iod->nid_slock);
			nmp->nm_bufqiods++;
			gotiod = TRUE;
			break;
		}
		simple_unlock(&iod->nid_slock);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */

	if (!gotiod) {
		simple_lock(&nmp->nm_slock);
		if (nmp->nm_bufqiods > 0)
			gotiod = TRUE;
	}

	LOCK_ASSERT(simple_lock_held(&nmp->nm_slock));

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */

	if (gotiod) {

		/*
		 * Ensure that the queue never grows too large.
		 */

		while (nmp->nm_bufqlen >= 2*nfs_numasync) {
			nmp->nm_bufqwant = TRUE;
			error = ltsleep(&nmp->nm_bufq,
			    slpflag | PRIBIO | PNORELOCK,
			    "nfsaio", slptimeo, &nmp->nm_slock);
			if (error) {
				if (nfs_sigintr(nmp, NULL, curproc))
					return (EINTR);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}

			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */

			if (nmp->nm_bufqiods == 0)
				goto again;

			simple_lock(&nmp->nm_slock);
		}
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		simple_unlock(&nmp->nm_slock);
		return (0);
	}
	simple_unlock(&nmp->nm_slock);

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */

	return (EIO);
}

/*
 * nfs_doio for read.
 */
static int
nfs_doio_read(bp, uiop)
	struct buf *bp;
	struct uio *uiop;
{
	struct vnode *vp = bp->b_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0;

	uiop->uio_rw = UIO_READ;
	switch (vp->v_type) {
	case VREG:
		nfsstats.read_bios++;
		error = nfs_readrpc(vp, uiop);
		if (!error && uiop->uio_resid) {
			int diff, len;

			/*
			 * If len > 0, there is a hole in the file and
			 * no writes after the hole have been pushed to
			 * the server yet.
			 * Just zero fill the rest of the valid area.
			 */

			diff = bp->b_bcount - uiop->uio_resid;
			len = np->n_size - ((((off_t)bp->b_blkno) << DEV_BSHIFT)
				+ diff);
			if (len > 0) {
				len = MIN(len, uiop->uio_resid);
				memset((char *)bp->b_data + diff, 0, len);
			}
		}
		if (uiop->uio_procp && (vp->v_flag & VTEXT) &&
			(((nmp->nm_flag & NFSMNT_NQNFS) &&
			  NQNFS_CKINVALID(vp, np, ND_READ) &&
			  np->n_lrev != np->n_brev) ||
			 (!(nmp->nm_flag & NFSMNT_NQNFS) &&
			  np->n_mtime != np->n_vattr->va_mtime.tv_sec))) {
			uprintf("Process killed due to "
				"text file modification\n");
			psignal(uiop->uio_procp, SIGKILL);
#if 0 /* XXX NJWLWP */
			uiop->uio_procp->p_holdcnt++;
#endif
		}
		break;
	case VLNK:
		KASSERT(uiop->uio_offset == (off_t)0);
		nfsstats.readlink_bios++;
		error = nfs_readlinkrpc(vp, uiop, curproc->p_ucred);
		break;
	case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = bp->b_dcookie;
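		/*
		 * Prefer READDIRPLUS when enabled; if the server
		 * doesn't support it, clear the flag for this mount
		 * and fall back to plain READDIR below.
		 */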
		if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
			error = nfs_readdirplusrpc(vp, uiop, curproc->p_ucred);
			if (error == NFSERR_NOTSUPP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = nfs_readdirrpc(vp, uiop, curproc->p_ucred);
		if (!error) {
			bp->b_dcookie = uiop->uio_offset;
		}
		break;
	default:
		printf("nfs_doio:  type %x unexpected\n", vp->v_type);
		break;
	}
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	}
	return error;
}

/*
 * nfs_doio for write.
 */
static int
nfs_doio_write(bp, uiop)
	struct buf *bp;
	struct uio *uiop;
{
	struct vnode *vp = bp->b_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int iomode;
	boolean_t stalewriteverf = FALSE;
	int i, npages = (bp->b_bcount + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct vm_page *pgs[npages];
	boolean_t needcommit = TRUE;
	boolean_t pageprotected;
	struct uvm_object *uobj = &vp->v_uobj;
	int error;
	off_t off, cnt;

	if ((bp->b_flags & B_ASYNC) != 0 && NFS_ISV3(vp)) {
		iomode = NFSV3WRITE_UNSTABLE;
	} else {
		iomode = NFSV3WRITE_FILESYNC;
	}

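	/*
	 * Hold the write verifier lock shared across the write and
	 * any commit, so that a change of the server's write verifier
	 * (e.g. after a server reboot) is handled consistently via
	 * nfs_clearcommit().
	 */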
again:
	lockmgr(&nmp->nm_writeverflock, LK_SHARED, NULL);

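	/*
	 * Decide whether an UNSTABLE write with a later commit is
	 * safe: pages that no longer belong to this vnode or that
	 * are being released or paged out force FILESYNC, and only
	 * pages already marked PG_NEEDCOMMIT can skip the write RPC
	 * entirely in favor of a commit.
	 */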
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		if (pgs[i]->uobject == uobj &&
		    pgs[i]->offset == uiop->uio_offset + (i << PAGE_SHIFT)) {
			KASSERT(pgs[i]->flags & PG_BUSY);
			/*
			 * this page belongs to our object.
			 */
			simple_lock(&uobj->vmobjlock);
			if (pgs[i]->flags & (PG_RELEASED|PG_PAGEOUT))
				iomode = NFSV3WRITE_FILESYNC;
			if ((pgs[i]->flags & PG_NEEDCOMMIT) == 0)
				needcommit = FALSE;
			simple_unlock(&uobj->vmobjlock);
		} else {
			iomode = NFSV3WRITE_FILESYNC;
			needcommit = FALSE;
		}
	}
	if (!needcommit && iomode == NFSV3WRITE_UNSTABLE) {
		simple_lock(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags |= PG_NEEDCOMMIT | PG_RDONLY;
			pmap_page_protect(pgs[i], VM_PROT_READ);
		}
		simple_unlock(&uobj->vmobjlock);
		pageprotected = TRUE; /* pages can't be modified during i/o. */
	} else
		pageprotected = FALSE;

	/*
	 * Send the data to the server if necessary,
	 * otherwise just send a commit rpc.
	 */

	if (needcommit) {

		/*
		 * If the buffer is in the range that we already committed,
		 * there's nothing to do.
		 *
		 * If it's in the range that we need to commit, push the
		 * whole range at once, otherwise only push the buffer.
		 * In both these cases, acquire the commit lock to avoid
		 * other processes modifying the range.
		 */

		off = uiop->uio_offset;
		cnt = bp->b_bcount;
		lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
		if (!nfs_in_committed_range(vp, off, bp->b_bcount)) {
			boolean_t pushedrange;
			if (nfs_in_tobecommitted_range(vp, off, bp->b_bcount)) {
				pushedrange = TRUE;
				off = np->n_pushlo;
				cnt = np->n_pushhi - np->n_pushlo;
			} else {
				pushedrange = FALSE;
			}
			error = nfs_commit(vp, off, cnt, curproc);
			if (error == 0) {
				if (pushedrange) {
					nfs_merge_commit_ranges(vp);
				} else {
					nfs_add_committed_range(vp, off, cnt);
				}
			}
		} else {
			error = 0;
		}
		lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
		lockmgr(&nmp->nm_writeverflock, LK_RELEASE, NULL);
		if (!error) {
			/*
			 * pages are now on stable storage.
			 */
			uiop->uio_resid = 0;
			simple_lock(&uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
			}
			simple_unlock(&uobj->vmobjlock);
			return 0;
		} else if (error == NFSERR_STALEWRITEVERF) {
			nfs_clearcommit(vp->v_mount);
			goto again;
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = np->n_error = error;
			np->n_flag |= NWRITEERR;
		}
		return error;
	}
	off = uiop->uio_offset;
	cnt = bp->b_bcount;
	uiop->uio_rw = UIO_WRITE;
	nfsstats.write_bios++;
	error = nfs_writerpc(vp, uiop, &iomode, pageprotected, &stalewriteverf);
	if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		/*
		 * we need to commit pages later.
		 */
		lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
		nfs_add_tobecommitted_range(vp, off, cnt);
		/*
		 * if there can be too many uncommitted pages, commit them now.
		 */
		if (np->n_pushhi - np->n_pushlo > nfs_commitsize) {
			off = np->n_pushlo;
			cnt = nfs_commitsize >> 1;
			error = nfs_commit(vp, off, cnt, curproc);
			if (!error) {
				nfs_add_committed_range(vp, off, cnt);
				nfs_del_tobecommitted_range(vp, off, cnt);
			}
			if (error == NFSERR_STALEWRITEVERF) {
				stalewriteverf = TRUE;
				error = 0; /* it isn't a real error */
			}
		} else {
			/*
			 * re-dirty pages so that they will be passed
			 * to us later again.
			 */
			simple_lock(&uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				pgs[i]->flags &= ~PG_CLEAN;
			}
			simple_unlock(&uobj->vmobjlock);
		}
		lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
	} else if (!error) {
		/*
		 * pages are now on stable storage.
		 */
		lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
		nfs_del_committed_range(vp, off, cnt);
		lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
		simple_lock(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
		}
		simple_unlock(&uobj->vmobjlock);
	} else {
		/*
		 * we got an error.
		 */
		bp->b_flags |= B_ERROR;
		bp->b_error = np->n_error = error;
		np->n_flag |= NWRITEERR;
	}

	lockmgr(&nmp->nm_writeverflock, LK_RELEASE, NULL);

	if (stalewriteverf) {
		nfs_clearcommit(vp->v_mount);
	}
	return error;
}

/*
 * nfs_doio for B_PHYS.
 */
static int
nfs_doio_phys(bp, uiop)
	struct buf *bp;
	struct uio *uiop;
{
	struct vnode *vp = bp->b_vp;
	int error;

	uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
	if (bp->b_flags & B_READ) {
		uiop->uio_rw = UIO_READ;
		nfsstats.read_physios++;
		error = nfs_readrpc(vp, uiop);
	} else {
		int iomode = NFSV3WRITE_DATASYNC;
		boolean_t stalewriteverf = FALSE;
		struct nfsmount *nmp = VFSTONFS(vp->v_mount);

		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_physios++;
		lockmgr(&nmp->nm_writeverflock, LK_SHARED, NULL);
		error = nfs_writerpc(vp, uiop, &iomode, FALSE, &stalewriteverf);
		lockmgr(&nmp->nm_writeverflock, LK_RELEASE, NULL);
		if (stalewriteverf) {
			nfs_clearcommit(bp->b_vp->v_mount);
		}
	}
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(bp, p)
	struct buf *bp;
	struct proc *p;
{
	int error;
	struct uio uio;
	struct uio *uiop = &uio;
	struct iovec io;
	UVMHIST_FUNC("nfs_doio"); UVMHIST_CALLED(ubchist);

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_procp = p;
	uiop->uio_offset = (((off_t)bp->b_blkno) << DEV_BSHIFT);
	io.iov_base = bp->b_data;
	io.iov_len = uiop->uio_resid = bp->b_bcount;

	/*
	 * Historically, paging was done with physio, but no more...
	 */
	if (bp->b_flags & B_PHYS) {
		/*
		 * ...though reading /dev/drum still gets us here.
		 */
		error = nfs_doio_phys(bp, uiop);
	} else if (bp->b_flags & B_READ) {
		error = nfs_doio_read(bp, uiop);
	} else {
		error = nfs_doio_write(bp, uiop);
	}
	bp->b_resid = uiop->uio_resid;
	biodone(bp);
	return (error);
}

/*
 * Vnode op for VM getpages.
 */

int
nfs_getpages(v)
	void *v;
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct nfsnode *np = VTONFS(vp);
	const int npages = *ap->a_count;
	struct vm_page *pg, **pgs, *opgs[npages];
	off_t origoffset, len;
	int i, error;
	boolean_t v3 = NFS_ISV3(vp);
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	boolean_t locked = (ap->a_flags & PGO_LOCKED) != 0;

	/*
	 * update the cached read creds for this node.
	 */

	if (np->n_rcred) {
		crfree(np->n_rcred);
	}
	np->n_rcred = curproc->p_ucred;
	crhold(np->n_rcred);

	/*
	 * if we have delayed truncation and it's safe, do it now.
	 */

	if (ap->a_flags & PGO_SYNCIO) {
		nfs_delayedtruncate(vp);
	}

	/*
	 * call the genfs code to get the pages.  `pgs' may be NULL
	 * when doing read-ahead.
	 */

	pgs = ap->a_m;
	if (write && locked && v3) {
		KASSERT(pgs != NULL);
#ifdef DEBUG

		/*
		 * If PGO_LOCKED is set, real pages shouldn't exist
		 * in the array.
		 */

		for (i = 0; i < npages; i++)
			KDASSERT(pgs[i] == NULL || pgs[i] == PGO_DONTCARE);
#endif
		memcpy(opgs, pgs, npages * sizeof(struct vm_page *));
	}
	error = genfs_getpages(v);
	if (error) {
		return (error);
	}

	/*
	 * for read faults where the nfs node is not yet marked NMODIFIED,
	 * set PG_RDONLY on the pages so that we come back here if someone
	 * tries to modify later via the mapping that will be entered for
	 * this fault.
	 */

	if (!write && (np->n_flag & NMODIFIED) == 0 && pgs != NULL) {
		if (!locked) {
			simple_lock(&uobj->vmobjlock);
		}
		for (i = 0; i < npages; i++) {
			pg = pgs[i];
			if (pg == NULL || pg == PGO_DONTCARE) {
				continue;
			}
			pg->flags |= PG_RDONLY;
		}
		if (!locked) {
			simple_unlock(&uobj->vmobjlock);
		}
	}
	if (!write) {
		return (0);
	}

	/*
	 * this is a write fault, update the commit info.
	 */

	origoffset = ap->a_offset;
	len = npages << PAGE_SHIFT;

	if (v3) {
		error = lockmgr(&np->n_commitlock,
		    LK_EXCLUSIVE | (locked ? LK_NOWAIT : 0), NULL);
		if (error) {
			KASSERT(locked != 0);

			/*
			 * Since PGO_LOCKED is set, we need to unbusy
			 * all pages fetched by genfs_getpages() above,
			 * tell the caller that there are no pages
			 * available and put back original pgs array.
			 */

			uvm_lock_pageq();
			uvm_page_unbusy(pgs, npages);
			uvm_unlock_pageq();
			*ap->a_count = 0;
			memcpy(pgs, opgs,
			    npages * sizeof(struct vm_page *));
			return (error);
		}
		nfs_del_committed_range(vp, origoffset, len);
		nfs_del_tobecommitted_range(vp, origoffset, len);
	}
	np->n_flag |= NMODIFIED;
	if (!locked) {
		simple_lock(&uobj->vmobjlock);
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		pg->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
	}
	if (!locked) {
		simple_unlock(&uobj->vmobjlock);
	}
	if (v3) {
		lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
	}
	return (0);
}