nfs_bio.c revision 1.53
      1 /*	$NetBSD: nfs_bio.c,v 1.53 2000/09/19 23:26:26 bjh21 Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1989, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * This code is derived from software contributed to Berkeley by
      8  * Rick Macklem at The University of Guelph.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the University of
     21  *	California, Berkeley and its contributors.
     22  * 4. Neither the name of the University nor the names of its contributors
     23  *    may be used to endorse or promote products derived from this software
     24  *    without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     36  * SUCH DAMAGE.
     37  *
     38  *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
     39  */
     40 
     41 #include "opt_nfs.h"
     42 
     43 #include <sys/param.h>
     44 #include <sys/systm.h>
     45 #include <sys/resourcevar.h>
     46 #include <sys/signalvar.h>
     47 #include <sys/proc.h>
     48 #include <sys/buf.h>
     49 #include <sys/vnode.h>
     50 #include <sys/trace.h>
     51 #include <sys/mount.h>
     52 #include <sys/kernel.h>
     53 #include <sys/namei.h>
     54 #include <sys/dirent.h>
     55 
     56 #include <uvm/uvm_extern.h>
     57 
     58 #include <nfs/rpcv2.h>
     59 #include <nfs/nfsproto.h>
     60 #include <nfs/nfs.h>
     61 #include <nfs/nfsmount.h>
     62 #include <nfs/nqnfs.h>
     63 #include <nfs/nfsnode.h>
     64 #include <nfs/nfs_var.h>
     65 
     66 extern int nfs_numasync;
     67 extern struct nfsstats nfsstats;
     68 
     69 /*
     70  * Vnode op for read using bio
     71  * Any similarity to readip() is purely coincidental
     72  */
     73 int
     74 nfs_bioread(vp, uio, ioflag, cred, cflag)
     75 	struct vnode *vp;
     76 	struct uio *uio;
     77 	int ioflag, cflag;
     78 	struct ucred *cred;
     79 {
     80 	struct nfsnode *np = VTONFS(vp);
     81 	int biosize, diff;
     82 	struct buf *bp = NULL, *rabp;
     83 	struct vattr vattr;
     84 	struct proc *p;
     85 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
     86 	struct nfsdircache *ndp = NULL, *nndp = NULL;
     87 	daddr_t lbn, bn, rabn;
     88 	caddr_t baddr, ep, edp;
     89 	int got_buf = 0, nra, error = 0, n = 0, on = 0, not_readin, en, enn;
     90 	int enough = 0;
     91 	struct dirent *dp, *pdp;
     92 	off_t curoff = 0, offdiff;
     93 
     94 #ifdef DIAGNOSTIC
     95 	if (uio->uio_rw != UIO_READ)
     96 		panic("nfs_read mode");
     97 #endif
     98 	if (uio->uio_resid == 0)
     99 		return (0);
    100 	if (vp->v_type != VDIR && uio->uio_offset < 0)
    101 		return (EINVAL);
    102 	p = uio->uio_procp;
    103 #ifndef NFS_V2_ONLY
    104 	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
    105 	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
    106 		(void)nfs_fsinfo(nmp, vp, cred, p);
    107 #endif
    108 	if (vp->v_type != VDIR &&
    109 	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
    110 		return (EFBIG);
    111 	biosize = nmp->nm_rsize;
    112 	/*
    113 	 * For nfs, cache consistency can only be maintained approximately.
    114 	 * Although RFC1094 does not specify the criteria, the following is
    115 	 * believed to be compatible with the reference port.
    116 	 * For nqnfs, full cache consistency is maintained within the loop.
    117 	 * For nfs:
    118 	 * If the file's modify time on the server has changed since the
    119 	 * last read rpc or you have written to the file,
    120 	 * you may have lost data cache consistency with the
    121 	 * server, so flush all of the file's data out of the cache.
    122 	 * Then force a getattr rpc to ensure that you have up to date
    123 	 * attributes.
    124 	 * NB: This implies that cache data can be read when up to
    125 	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
    126 	 * attributes this could be forced by setting n_attrstamp to 0 before
    127 	 * the VOP_GETATTR() call.
    128 	 */
    129 	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0 && vp->v_type != VLNK) {
    130 		if (np->n_flag & NMODIFIED) {
    131 			if (vp->v_type != VREG) {
    132 				if (vp->v_type != VDIR)
    133 					panic("nfs: bioread, not dir");
    134 				nfs_invaldircache(vp, 0);
    135 				np->n_direofoffset = 0;
    136 				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
    137 				if (error)
    138 					return (error);
    139 			}
    140 			np->n_attrstamp = 0;
    141 			error = VOP_GETATTR(vp, &vattr, cred, p);
    142 			if (error)
    143 				return (error);
    144 			np->n_mtime = vattr.va_mtime.tv_sec;
    145 		} else {
    146 			error = VOP_GETATTR(vp, &vattr, cred, p);
    147 			if (error)
    148 				return (error);
    149 			if (np->n_mtime != vattr.va_mtime.tv_sec) {
    150 				if (vp->v_type == VDIR) {
    151 					nfs_invaldircache(vp, 0);
    152 					np->n_direofoffset = 0;
    153 				}
    154 				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
    155 				if (error)
    156 					return (error);
    157 				np->n_mtime = vattr.va_mtime.tv_sec;
    158 			}
    159 		}
    160 	}
    161 	do {
    162 
    163 #ifndef NFS_V2_ONLY
    164 	    /*
    165 	     * Get a valid lease. If cached data is stale, flush it.
    166 	     */
    167 	    if (nmp->nm_flag & NFSMNT_NQNFS) {
    168 		if (NQNFS_CKINVALID(vp, np, ND_READ)) {
    169 		    do {
    170 			error = nqnfs_getlease(vp, ND_READ, cred, p);
    171 		    } while (error == NQNFS_EXPIRED);
    172 		    if (error)
    173 			return (error);
    174 		    if (np->n_lrev != np->n_brev ||
    175 			(np->n_flag & NQNFSNONCACHE) ||
    176 			((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
    177 			if (vp->v_type == VDIR) {
    178 				nfs_invaldircache(vp, 0);
    179 				np->n_direofoffset = 0;
    180 			}
    181 			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
    182 			if (error)
    183 			    return (error);
    184 			np->n_brev = np->n_lrev;
    185 		    }
    186 		} else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) {
    187 		    nfs_invaldircache(vp, 0);
    188 		    error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
    189 		    np->n_direofoffset = 0;
    190 		    if (error)
    191 			return (error);
    192 		}
    193 	    }
    194 #endif
    195 	    /*
     196 	     * Don't go through the cache for the root symlink of a
         	     * mount, or for nodes marked non-cacheable (NQNFSNONCACHE).
    197 	     */
    198 	    if (np->n_flag & NQNFSNONCACHE
    199 		|| ((vp->v_flag & VROOT) && vp->v_type == VLNK)) {
    200 		switch (vp->v_type) {
    201 		case VREG:
    202 			return (nfs_readrpc(vp, uio, cred));
    203 		case VLNK:
    204 			return (nfs_readlinkrpc(vp, uio, cred));
    205 		case VDIR:
    206 			break;
    207 		default:
    208 			printf(" NQNFSNONCACHE: type %x unexpected\n",
    209 			    vp->v_type);
     210 		}
    211 	    }
    212 	    baddr = (caddr_t)0;
    213 	    switch (vp->v_type) {
    214 	    case VREG:
    215 		nfsstats.biocache_reads++;
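         		/*
         		 * Map the file offset onto biosize logical blocks: lbn is
         		 * the logical block number, on the offset within it, and
         		 * bn the same block in DEV_BSIZE units for the buffer
         		 * cache. E.g. with an 8K rsize and 512-byte DEV_BSIZE,
         		 * offset 20480 gives lbn 2, on 4096 and bn 32.
         		 */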
    216 		lbn = uio->uio_offset / biosize;
    217 		on = uio->uio_offset & (biosize - 1);
    218 		bn = lbn * (biosize / DEV_BSIZE);
    219 		not_readin = 1;
    220 
     221 		/*
     222 		 * Start the read ahead(s), as required. Read ahead only
         		 * pays off for sequential access, so it is started only
         		 * when the block just read (v_lastr) immediately precedes
         		 * this one, while read ahead is enabled on the mount and
         		 * nfsiods are available.
     223 		 */
    224 		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
    225 		    lbn - 1 == vp->v_lastr) {
    226 		    for (nra = 0; nra < nmp->nm_readahead &&
    227 			(lbn + 1 + nra) * biosize < np->n_size; nra++) {
    228 			rabn = (lbn + 1 + nra) * (biosize / DEV_BSIZE);
    229 			if (!incore(vp, rabn)) {
    230 			    rabp = nfs_getcacheblk(vp, rabn, biosize, p);
    231 			    if (!rabp)
    232 				return (EINTR);
    233 			    if ((rabp->b_flags & (B_DELWRI | B_DONE)) == 0) {
    234 				rabp->b_flags |= (B_READ | B_ASYNC);
    235 				if (nfs_asyncio(rabp, cred)) {
    236 				    rabp->b_flags |= B_INVAL;
    237 				    brelse(rabp);
    238 				}
    239 			    } else
    240 				brelse(rabp);
    241 			}
    242 		    }
    243 		}
    244 
     245 		/*
     246 		 * If the block is in the cache and has the required data
     247 		 * in a valid region, just copy it out.
     248 		 * Otherwise, get the block and write back/read in,
     249 		 * as required. got_buf records whether we took the buffer
         		 * ourselves (and so must brelse() it below) or are only
         		 * peeking at a busy buffer with a write in progress.
     250 		 */
    251 		if ((bp = incore(vp, bn)) &&
    252 		    (bp->b_flags & (B_BUSY | B_WRITEINPROG)) ==
    253 		    (B_BUSY | B_WRITEINPROG))
    254 			got_buf = 0;
    255 		else {
    256 again:
    257 			bp = nfs_getcacheblk(vp, bn, biosize, p);
    258 			if (!bp)
    259 				return (EINTR);
    260 			got_buf = 1;
    261 			if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
    262 				bp->b_flags |= B_READ;
    263 				not_readin = 0;
    264 				error = nfs_doio(bp, cred, p);
    265 				if (error) {
    266 				    brelse(bp);
    267 				    return (error);
    268 				}
    269 			}
    270 		}
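         		/*
         		 * Clamp the copy count to whichever is smallest: the rest
         		 * of this block, the caller's residual count, or the
         		 * distance to end of file.
         		 */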
    271 		n = min((unsigned)(biosize - on), uio->uio_resid);
    272 		offdiff = np->n_size - uio->uio_offset;
    273 		if (offdiff < (off_t)n)
    274 			n = (int)offdiff;
    275 		if (not_readin && n > 0) {
    276 			if (on < bp->b_validoff || (on + n) > bp->b_validend) {
    277 				if (!got_buf) {
    278 				    bp = nfs_getcacheblk(vp, bn, biosize, p);
    279 				    if (!bp)
    280 					return (EINTR);
    281 				    got_buf = 1;
    282 				}
    283 				bp->b_flags |= B_INVAFTERWRITE;
    284 				if (bp->b_dirtyend > 0) {
    285 				    if ((bp->b_flags & B_DELWRI) == 0)
    286 					panic("nfsbioread");
    287 				    if (VOP_BWRITE(bp) == EINTR)
    288 					return (EINTR);
    289 				} else
    290 				    brelse(bp);
    291 				goto again;
    292 			}
    293 		}
    294 		vp->v_lastr = lbn;
    295 		diff = (on >= bp->b_validend) ? 0 : (bp->b_validend - on);
    296 		if (diff < n)
    297 			n = diff;
    298 		break;
    299 	    case VLNK:
    300 		nfsstats.biocache_readlinks++;
    301 		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
    302 		if (!bp)
    303 			return (EINTR);
    304 		if ((bp->b_flags & B_DONE) == 0) {
    305 			bp->b_flags |= B_READ;
    306 			error = nfs_doio(bp, cred, p);
    307 			if (error) {
    308 				brelse(bp);
    309 				return (error);
    310 			}
    311 		}
    312 		n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
    313 		got_buf = 1;
    314 		on = 0;
    315 		break;
    316 	    case VDIR:
    317 diragain:
    318 		nfsstats.biocache_readdirs++;
    319 		ndp = nfs_searchdircache(vp, uio->uio_offset,
    320 			(nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
    321 		if (!ndp) {
    322 			/*
    323 			 * We've been handed a cookie that is not
    324 			 * in the cache. If we're not translating
    325 			 * 32 <-> 64, it may be a value that was
    326 			 * flushed out of the cache because it grew
    327 			 * too big. Let the server judge if it's
    328 			 * valid or not. In the translation case,
    329 			 * we have no way of validating this value,
    330 			 * so punt.
    331 			 */
    332 			if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
    333 				return (EINVAL);
    334 			ndp = nfs_enterdircache(vp, uio->uio_offset,
    335 				uio->uio_offset, 0, 0);
    336 		}
    337 
    338 		if (uio->uio_offset != 0 &&
    339 		    ndp->dc_cookie == np->n_direofoffset) {
    340 			nfsstats.direofcache_hits++;
    341 			return (0);
    342 		}
    343 
    344 		bp = nfs_getcacheblk(vp, ndp->dc_blkno, NFS_DIRBLKSIZ, p);
    345 		if (!bp)
    346 		    return (EINTR);
    347 		if ((bp->b_flags & B_DONE) == 0) {
    348 		    bp->b_flags |= B_READ;
    349 		    bp->b_dcookie = ndp->dc_blkcookie;
    350 		    error = nfs_doio(bp, cred, p);
    351 		    if (error) {
    352 			/*
    353 			 * Yuck! The directory has been modified on the
    354 			 * server. Punt and let the userland code
    355 			 * deal with it.
    356 			 */
    357 			brelse(bp);
    358 			if (error == NFSERR_BAD_COOKIE) {
    359 			    nfs_invaldircache(vp, 0);
    360 			    nfs_vinvalbuf(vp, 0, cred, p, 1);
    361 			    error = EINVAL;
    362 			}
    363 			return (error);
    364 		    }
    365 		}
    366 
    367 		/*
    368 		 * Just return if we hit EOF right away with this
    369 		 * block. Always check here, because direofoffset
    370 		 * may have been set by an nfsiod since the last
    371 		 * check.
    372 		 */
    373 		if (np->n_direofoffset != 0 &&
    374 			ndp->dc_blkcookie == np->n_direofoffset) {
    375 			brelse(bp);
    376 			return (0);
    377 		}
    378 
    379 		/*
    380 		 * Find the entry we were looking for in the block.
    381 		 */
    382 
    383 		en = ndp->dc_entry;
    384 
    385 		pdp = dp = (struct dirent *)bp->b_data;
    386 		edp = bp->b_data + bp->b_validend;
    387 		enn = 0;
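         		/*
         		 * Walk the dirent records from the start of the block up
         		 * to entry number en, remembering the previous entry (pdp)
         		 * so its cookie can be validated against the cache below.
         		 */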
    388 		while (enn < en && (caddr_t)dp < edp) {
    389 			pdp = dp;
    390 			dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
    391 			enn++;
    392 		}
    393 
    394 		/*
    395 		 * If the entry number was bigger than the number of
    396 		 * entries in the block, or the cookie of the previous
    397 		 * entry doesn't match, the directory cache is
    398 		 * stale. Flush it and try again (i.e. go to
    399 		 * the server).
    400 		 */
    401 		if ((caddr_t)dp >= edp || (caddr_t)dp + dp->d_reclen > edp ||
    402 		    (en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
    403 #ifdef DEBUG
    404 		    	printf("invalid cache: %p %p %p off %lx %lx\n",
    405 				pdp, dp, edp,
    406 				(unsigned long)uio->uio_offset,
    407 				(unsigned long)NFS_GETCOOKIE(pdp));
    408 #endif
    409 			brelse(bp);
    410 			nfs_invaldircache(vp, 0);
    411 			nfs_vinvalbuf(vp, 0, cred, p, 0);
    412 			goto diragain;
    413 		}
    414 
    415 		on = (caddr_t)dp - bp->b_data;
    416 
    417 		/*
    418 		 * Cache all entries that may be exported to the
    419 		 * user, as they may be thrown back at us. The
    420 		 * NFSBIO_CACHECOOKIES flag indicates that all
    421 		 * entries are being 'exported', so cache them all.
    422 		 */
    423 
    424 		if (en == 0 && pdp == dp) {
    425 			dp = (struct dirent *)
    426 			    ((caddr_t)dp + dp->d_reclen);
    427 			enn++;
    428 		}
    429 
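         		/*
         		 * If the caller's buffer cannot take all the valid data
         		 * in this block, copy just what fits and finish after
         		 * this block (enough).
         		 */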
    430 		if (uio->uio_resid < (bp->b_validend - on)) {
    431 			n = uio->uio_resid;
    432 			enough = 1;
    433 		} else
    434 			n = bp->b_validend - on;
    435 
    436 		ep = bp->b_data + on + n;
    437 
    438 		/*
    439 		 * Find last complete entry to copy, caching entries
    440 		 * (if requested) as we go.
    441 		 */
    442 
    443 		while ((caddr_t)dp < ep && (caddr_t)dp + dp->d_reclen <= ep) {
    444 			if (cflag & NFSBIO_CACHECOOKIES) {
    445 				nndp = nfs_enterdircache(vp, NFS_GETCOOKIE(pdp),
    446 				    ndp->dc_blkcookie, enn, bp->b_lblkno);
    447 				if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
    448 					NFS_STASHCOOKIE32(pdp,
    449 					    nndp->dc_cookie32);
    450 				}
    451 			}
    452 			pdp = dp;
    453 			dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
    454 			enn++;
    455 		}
    456 
    457 		/*
    458 		 * If the last requested entry was not the last in the
    459 		 * buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
    460 		 * cache the cookie of the last requested one, and
     461 		 * set the offset to it.
    462 		 */
    463 
    464 		if ((on + n) < bp->b_validend) {
    465 			curoff = NFS_GETCOOKIE(pdp);
    466 			nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
    467 			    enn, bp->b_lblkno);
    468 			if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
    469 				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
    470 				curoff = nndp->dc_cookie32;
    471 			}
    472 		} else
    473 			curoff = bp->b_dcookie;
    474 
    475 		/*
    476 		 * Always cache the entry for the next block,
    477 		 * so that readaheads can use it.
    478 		 */
    479 		nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie, 0,0);
    480 		if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
    481 			if (curoff == bp->b_dcookie) {
    482 				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
    483 				curoff = nndp->dc_cookie32;
    484 			}
    485 		}
    486 
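         		/*
         		 * Trim n so that the copy ends at the last complete entry
         		 * (pdp); partial dirent records are never copied out.
         		 */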
    487 		n = ((caddr_t)pdp + pdp->d_reclen) - (bp->b_data + on);
    488 
    489 		/*
    490 		 * If not eof and read aheads are enabled, start one.
    491 		 * (You need the current block first, so that you have the
    492 		 *  directory offset cookie of the next block.)
    493 		 */
    494 		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
    495 		    np->n_direofoffset == 0 && !(np->n_flag & NQNFSNONCACHE)) {
    496 			rabp = nfs_getcacheblk(vp, nndp->dc_blkno,
    497 						NFS_DIRBLKSIZ, p);
    498 			if (rabp) {
    499 			    if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
    500 				rabp->b_dcookie = nndp->dc_cookie;
    501 				rabp->b_flags |= (B_READ | B_ASYNC);
    502 				if (nfs_asyncio(rabp, cred)) {
    503 				    rabp->b_flags |= B_INVAL;
    504 				    brelse(rabp);
    505 				}
    506 			    } else
    507 				brelse(rabp);
    508 			}
    509 		}
    510 		got_buf = 1;
    511 		break;
    512 	    default:
     513 		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
     514 		break;
     515 	    }
    516 
    517 	    if (n > 0) {
    518 		if (!baddr)
    519 			baddr = bp->b_data;
    520 		error = uiomove(baddr + on, (int)n, uio);
    521 	    }
    522 	    switch (vp->v_type) {
    523 	    case VREG:
    524 		break;
    525 	    case VLNK:
    526 		n = 0;
    527 		break;
    528 	    case VDIR:
    529 		if (np->n_flag & NQNFSNONCACHE)
    530 			bp->b_flags |= B_INVAL;
    531 		uio->uio_offset = curoff;
    532 		if (enough)
    533 			n = 0;
    534 		break;
    535 	    default:
     536 		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
    537 	    }
    538 	    if (got_buf)
    539 		brelse(bp);
    540 	} while (error == 0 && uio->uio_resid > 0 && n > 0);
    541 	return (error);
    542 }
    543 
    544 /*
    545  * Vnode op for write using bio
    546  */
    547 int
    548 nfs_write(v)
    549 	void *v;
    550 {
    551 	struct vop_write_args /* {
    552 		struct vnode *a_vp;
    553 		struct uio *a_uio;
    554 		int  a_ioflag;
    555 		struct ucred *a_cred;
    556 	} */ *ap = v;
    557 	int biosize;
    558 	struct uio *uio = ap->a_uio;
    559 	struct proc *p = uio->uio_procp;
    560 	struct vnode *vp = ap->a_vp;
    561 	struct nfsnode *np = VTONFS(vp);
    562 	struct ucred *cred = ap->a_cred;
    563 	int ioflag = ap->a_ioflag;
    564 	struct buf *bp;
    565 	struct vattr vattr;
    566 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
    567 	daddr_t lbn, bn;
    568 	int n, on, error = 0, iomode, must_commit;
    569 
    570 #ifdef DIAGNOSTIC
    571 	if (uio->uio_rw != UIO_WRITE)
    572 		panic("nfs_write mode");
    573 	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
    574 		panic("nfs_write proc");
    575 #endif
    576 	if (vp->v_type != VREG)
    577 		return (EIO);
    578 	if (np->n_flag & NWRITEERR) {
    579 		np->n_flag &= ~NWRITEERR;
    580 		return (np->n_error);
    581 	}
    582 #ifndef NFS_V2_ONLY
    583 	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
    584 	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
    585 		(void)nfs_fsinfo(nmp, vp, cred, p);
    586 #endif
    587 	if (ioflag & (IO_APPEND | IO_SYNC)) {
    588 		if (np->n_flag & NMODIFIED) {
    589 			np->n_attrstamp = 0;
    590 			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
    591 			if (error)
    592 				return (error);
    593 		}
    594 		if (ioflag & IO_APPEND) {
    595 			np->n_attrstamp = 0;
    596 			error = VOP_GETATTR(vp, &vattr, cred, p);
    597 			if (error)
    598 				return (error);
    599 			uio->uio_offset = np->n_size;
    600 		}
    601 	}
    602 	if (uio->uio_offset < 0)
    603 		return (EINVAL);
    604 	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
    605 		return (EFBIG);
    606 	if (uio->uio_resid == 0)
    607 		return (0);
    608 	/*
    609 	 * Maybe this should be above the vnode op call, but so long as
     610 	 * file servers have no limits, I don't think it matters
    611 	 */
    612 	if (p && uio->uio_offset + uio->uio_resid >
    613 	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
    614 		psignal(p, SIGXFSZ);
    615 		return (EFBIG);
    616 	}
    617 	/*
    618 	 * I use nm_rsize, not nm_wsize so that all buffer cache blocks
    619 	 * will be the same size within a filesystem. nfs_writerpc will
    620 	 * still use nm_wsize when sizing the rpc's.
    621 	 */
    622 	biosize = nmp->nm_rsize;
    623 	do {
    624 
    625 		/*
    626 		 * XXX make sure we aren't cached in the VM page cache
    627 		 */
    628 		(void)uvm_vnp_uncache(vp);
    629 
    630 #ifndef NFS_V2_ONLY
    631 		/*
    632 		 * Check for a valid write lease.
    633 		 */
    634 		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
    635 		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
    636 			do {
    637 				error = nqnfs_getlease(vp, ND_WRITE, cred, p);
    638 			} while (error == NQNFS_EXPIRED);
    639 			if (error)
    640 				return (error);
    641 			if (np->n_lrev != np->n_brev ||
    642 			    (np->n_flag & NQNFSNONCACHE)) {
    643 				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
    644 				if (error)
    645 					return (error);
    646 				np->n_brev = np->n_lrev;
    647 			}
    648 		}
    649 #endif
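         		/*
         		 * For a non-cacheable node, bypass the buffer cache and
         		 * push the data to the server with a single synchronous
         		 * FILESYNC write RPC when the request is one iovec.
         		 */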
    650 		if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) {
    651 		    iomode = NFSV3WRITE_FILESYNC;
    652 		    error = nfs_writerpc(vp, uio, cred, &iomode, &must_commit);
    653 		    if (must_commit)
    654 			nfs_clearcommit(vp->v_mount);
    655 		    return (error);
    656 		}
    657 		nfsstats.biocache_writes++;
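         		/*
         		 * Same logical block arithmetic as in nfs_bioread():
         		 * biosize blocks, with bn in DEV_BSIZE units.
         		 */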
    658 		lbn = uio->uio_offset / biosize;
    659 		on = uio->uio_offset & (biosize-1);
    660 		n = min((unsigned)(biosize - on), uio->uio_resid);
    661 		bn = lbn * (biosize / DEV_BSIZE);
    662 again:
    663 		bp = nfs_getcacheblk(vp, bn, biosize, p);
    664 		if (!bp)
    665 			return (EINTR);
    666 		if (bp->b_wcred == NOCRED) {
    667 			crhold(cred);
    668 			bp->b_wcred = cred;
    669 		}
    670 		np->n_flag |= NMODIFIED;
    671 		if (uio->uio_offset + n > np->n_size) {
    672 			np->n_size = uio->uio_offset + n;
    673 			uvm_vnp_setsize(vp, np->n_size);
    674 		}
    675 
     676 		/*
     677 		 * If the new write will leave a contiguous dirty
     678 		 * area, just update the b_dirtyoff and b_dirtyend,
     679 		 * otherwise force a write rpc of the old dirty area.
         		 * E.g. a write of [512, 1024) into a buffer already dirty
         		 * at [0, 256) would leave a hole at [256, 512), so the old
         		 * dirty region is pushed to the server first.
     680 		 */
    681 		if (bp->b_dirtyend > 0 &&
    682 		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
    683 			bp->b_proc = p;
    684 			if (VOP_BWRITE(bp) == EINTR)
    685 				return (EINTR);
    686 			goto again;
    687 		}
    688 
    689 #ifndef NFS_V2_ONLY
    690 		/*
    691 		 * Check for valid write lease and get one as required.
    692 		 * In case getblk() and/or bwrite() delayed us.
    693 		 */
    694 		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
    695 		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
    696 			do {
    697 				error = nqnfs_getlease(vp, ND_WRITE, cred, p);
    698 			} while (error == NQNFS_EXPIRED);
    699 			if (error) {
    700 				brelse(bp);
    701 				return (error);
    702 			}
    703 			if (np->n_lrev != np->n_brev ||
    704 			    (np->n_flag & NQNFSNONCACHE)) {
    705 				brelse(bp);
    706 				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
    707 				if (error)
    708 					return (error);
    709 				np->n_brev = np->n_lrev;
    710 				goto again;
    711 			}
    712 		}
    713 #endif
    714 		error = uiomove((char *)bp->b_data + on, n, uio);
    715 		if (error) {
    716 			bp->b_flags |= B_ERROR;
    717 			brelse(bp);
    718 			return (error);
    719 		}
    720 		if (bp->b_dirtyend > 0) {
    721 			bp->b_dirtyoff = min(on, bp->b_dirtyoff);
    722 			bp->b_dirtyend = max((on + n), bp->b_dirtyend);
    723 		} else {
    724 			bp->b_dirtyoff = on;
    725 			bp->b_dirtyend = on + n;
    726 		}
    727 		if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
    728 		    bp->b_validoff > bp->b_dirtyend) {
    729 			bp->b_validoff = bp->b_dirtyoff;
    730 			bp->b_validend = bp->b_dirtyend;
    731 		} else {
    732 			bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
    733 			bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
    734 		}
    735 
    736 		/*
    737 		 * Since this block is being modified, it must be written
    738 		 * again and not just committed.
    739 		 */
    740 		if (NFS_ISV3(vp)) {
    741 			lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
    742 			if (bp->b_flags & B_NEEDCOMMIT) {
    743 				bp->b_flags &= ~B_NEEDCOMMIT;
    744 				nfs_del_tobecommitted_range(vp, bp);
    745 			}
    746 			nfs_del_committed_range(vp, bp);
    747 			lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
    748 		}
    749 
    750 		/*
    751 		 * If the lease is non-cachable or IO_SYNC do bwrite().
    752 		 */
    753 		if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
    754 			bp->b_proc = p;
    755 			error = VOP_BWRITE(bp);
    756 			if (error)
    757 				return (error);
    758 			if (np->n_flag & NQNFSNONCACHE) {
    759 				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
    760 				if (error)
    761 					return (error);
    762 			}
    763 		} else if ((n + on) == biosize &&
    764 			(nmp->nm_flag & NFSMNT_NQNFS) == 0) {
    765 			bp->b_proc = (struct proc *)0;
    766 			bawrite(bp);
    767 		} else {
    768 			bdwrite(bp);
    769 		}
    770 	} while (uio->uio_resid > 0 && n > 0);
    771 	return (0);
    772 }
    773 
    774 /*
    775  * Get an nfs cache block.
    776  * Allocate a new one if the block isn't currently in the cache
    777  * and return the block marked busy. If the calling process is
    778  * interrupted by a signal for an interruptible mount point, return
    779  * NULL.
    780  */
    781 struct buf *
    782 nfs_getcacheblk(vp, bn, size, p)
    783 	struct vnode *vp;
    784 	daddr_t bn;
    785 	int size;
    786 	struct proc *p;
    787 {
    788 	struct buf *bp;
    789 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
    790 
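         	/*
         	 * On an interruptible mount, sleep with PCATCH so a signal can
         	 * break the wait; when getblk() returns NULL, give up if a
         	 * signal is pending, otherwise poll again every 2*hz ticks.
         	 */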
    791 	if (nmp->nm_flag & NFSMNT_INT) {
    792 		bp = getblk(vp, bn, size, PCATCH, 0);
    793 		while (bp == (struct buf *)0) {
    794 			if (nfs_sigintr(nmp, (struct nfsreq *)0, p))
    795 				return ((struct buf *)0);
    796 			bp = getblk(vp, bn, size, 0, 2 * hz);
    797 		}
    798 	} else
    799 		bp = getblk(vp, bn, size, 0, 0);
    800 	return (bp);
    801 }
    802 
    803 /*
    804  * Flush and invalidate all dirty buffers. If another process is already
    805  * doing the flush, just wait for completion.
    806  */
    807 int
    808 nfs_vinvalbuf(vp, flags, cred, p, intrflg)
    809 	struct vnode *vp;
    810 	int flags;
    811 	struct ucred *cred;
    812 	struct proc *p;
    813 	int intrflg;
    814 {
    815 	struct nfsnode *np = VTONFS(vp);
    816 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
    817 	int error = 0, slpflag, slptimeo;
    818 
    819 	if ((nmp->nm_flag & NFSMNT_INT) == 0)
    820 		intrflg = 0;
    821 	if (intrflg) {
    822 		slpflag = PCATCH;
    823 		slptimeo = 2 * hz;
    824 	} else {
    825 		slpflag = 0;
    826 		slptimeo = 0;
    827 	}
    828 	/*
    829 	 * First wait for any other process doing a flush to complete.
    830 	 */
    831 	while (np->n_flag & NFLUSHINPROG) {
    832 		np->n_flag |= NFLUSHWANT;
    833 		error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
    834 			slptimeo);
    835 		if (error && intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p))
    836 			return (EINTR);
    837 	}
    838 
    839 	/*
    840 	 * Now, flush as required.
    841 	 */
    842 	np->n_flag |= NFLUSHINPROG;
    843 	error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
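         	/*
         	 * If the flush was interrupted, give up when a pending signal
         	 * says so on an interruptible mount; otherwise retry without
         	 * PCATCH, sleeping with a timeout instead.
         	 */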
    844 	while (error) {
    845 		if (intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
    846 			np->n_flag &= ~NFLUSHINPROG;
    847 			if (np->n_flag & NFLUSHWANT) {
    848 				np->n_flag &= ~NFLUSHWANT;
    849 				wakeup((caddr_t)&np->n_flag);
    850 			}
    851 			return (EINTR);
    852 		}
    853 		error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
    854 	}
    855 	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
    856 	if (np->n_flag & NFLUSHWANT) {
    857 		np->n_flag &= ~NFLUSHWANT;
    858 		wakeup((caddr_t)&np->n_flag);
    859 	}
    860 	return (0);
    861 }
    862 
    863 /*
    864  * Initiate asynchronous I/O. Return an error if no nfsiods are available.
    865  * This is mainly to avoid queueing async I/O requests when the nfsiods
    866  * are all hung on a dead server.
    867  */
    868 int
    869 nfs_asyncio(bp, cred)
    870 	struct buf *bp;
    871 	struct ucred *cred;
    872 {
    873 	int i;
    874 	struct nfsmount *nmp;
    875 	int gotiod, slpflag = 0, slptimeo = 0, error;
    876 
    877 	if (nfs_numasync == 0)
    878 		return (EIO);
     879 
    881 	nmp = VFSTONFS(bp->b_vp->v_mount);
    882 again:
    883 	if (nmp->nm_flag & NFSMNT_INT)
    884 		slpflag = PCATCH;
    885 	gotiod = FALSE;
    886 
    887 	/*
    888 	 * Find a free iod to process this request.
    889 	 */
    890 
    891 	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
    892 		if (nfs_iodwant[i]) {
    893 			/*
    894 			 * Found one, so wake it up and tell it which
    895 			 * mount to process.
    896 			 */
    897 			nfs_iodwant[i] = (struct proc *)0;
    898 			nfs_iodmount[i] = nmp;
    899 			nmp->nm_bufqiods++;
    900 			wakeup((caddr_t)&nfs_iodwant[i]);
    901 			gotiod = TRUE;
    902 			break;
    903 		}
    904 	/*
    905 	 * If none are free, we may already have an iod working on this mount
    906 	 * point.  If so, it will process our request.
    907 	 */
    908 	if (!gotiod && nmp->nm_bufqiods > 0)
    909 		gotiod = TRUE;
    910 
    911 	/*
    912 	 * If we have an iod which can process the request, then queue
    913 	 * the buffer.
    914 	 */
    915 	if (gotiod) {
     916 		/*
     917 		 * Ensure that the queue never grows too large; it is capped
         		 * at twice the number of iods so that a slow or dead server
         		 * cannot soak up an unbounded number of buffers.
     918 		 */
    919 		while (nmp->nm_bufqlen >= 2*nfs_numasync) {
    920 			nmp->nm_bufqwant = TRUE;
    921 			error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO,
    922 				"nfsaio", slptimeo);
    923 			if (error) {
    924 				if (nfs_sigintr(nmp, NULL, bp->b_proc))
    925 					return (EINTR);
    926 				if (slpflag == PCATCH) {
    927 					slpflag = 0;
    928 					slptimeo = 2 * hz;
    929 				}
    930 			}
    931 			/*
    932 			 * We might have lost our iod while sleeping,
     933 			 * so check and loop if necessary.
    934 			 */
    935 			if (nmp->nm_bufqiods == 0)
    936 				goto again;
    937 		}
    938 
    939 		if (bp->b_flags & B_READ) {
    940 			if (bp->b_rcred == NOCRED && cred != NOCRED) {
    941 				crhold(cred);
    942 				bp->b_rcred = cred;
    943 			}
    944 		} else {
    945 			bp->b_flags |= B_WRITEINPROG;
    946 			if (bp->b_wcred == NOCRED && cred != NOCRED) {
    947 				crhold(cred);
    948 				bp->b_wcred = cred;
    949 			}
    950 		}
    951 
    952 		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
    953 		nmp->nm_bufqlen++;
    954 		return (0);
     955 	}
    956 
    957 	/*
    958 	 * All the iods are busy on other mounts, so return EIO to
    959 	 * force the caller to process the i/o synchronously.
    960 	 */
    961 	return (EIO);
    962 }
    963 
    964 /*
    965  * Do an I/O operation to/from a cache block. This may be called
    966  * synchronously or from an nfsiod.
    967  */
    968 int
    969 nfs_doio(bp, cr, p)
    970 	struct buf *bp;
    971 	struct ucred *cr;
    972 	struct proc *p;
    973 {
    974 	struct uio *uiop;
    975 	struct vnode *vp;
    976 	struct nfsnode *np;
    977 	struct nfsmount *nmp;
    978 	int error = 0, diff, len, iomode, must_commit = 0, s, retv = 0;
    979 	int pushedrange;
    980 	unsigned cnt;
    981 	struct uio uio;
    982 	struct iovec io;
    983 	off_t off;
    984 
    985 	vp = bp->b_vp;
    986 	np = VTONFS(vp);
    987 	nmp = VFSTONFS(vp->v_mount);
    988 	uiop = &uio;
    989 	uiop->uio_iov = &io;
    990 	uiop->uio_iovcnt = 1;
    991 	uiop->uio_segflg = UIO_SYSSPACE;
    992 	uiop->uio_procp = p;
    993 
    994 	/*
    995 	 * Historically, paging was done with physio, but no more...
    996 	 */
    997 	if (bp->b_flags & B_PHYS) {
    998 	    /*
    999 	     * ...though reading /dev/drum still gets us here.
   1000 	     */
   1001 	    io.iov_len = uiop->uio_resid = bp->b_bcount;
   1002 	    /* mapping was done by vmapbuf() */
   1003 	    io.iov_base = bp->b_data;
   1004 	    uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
   1005 	    if (bp->b_flags & B_READ) {
   1006 		uiop->uio_rw = UIO_READ;
   1007 		nfsstats.read_physios++;
   1008 		error = nfs_readrpc(vp, uiop, cr);
   1009 	    } else {
   1010 		iomode = NFSV3WRITE_DATASYNC;
   1011 		uiop->uio_rw = UIO_WRITE;
   1012 		nfsstats.write_physios++;
   1013 		error = nfs_writerpc(vp, uiop, cr, &iomode, &must_commit);
   1014 	    }
   1015 	    if (error) {
   1016 		bp->b_flags |= B_ERROR;
   1017 		bp->b_error = error;
   1018 	    }
   1019 	} else if (bp->b_flags & B_READ) {
   1020 	    io.iov_len = uiop->uio_resid = bp->b_bcount;
   1021 	    io.iov_base = bp->b_data;
   1022 	    uiop->uio_rw = UIO_READ;
   1023 	    switch (vp->v_type) {
   1024 	    case VREG:
   1025 		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
   1026 		nfsstats.read_bios++;
   1027 		error = nfs_readrpc(vp, uiop, cr);
   1028 		if (!error) {
   1029 		    bp->b_validoff = 0;
   1030 		    if (uiop->uio_resid) {
   1031 			/*
   1032 			 * If len > 0, there is a hole in the file and
   1033 			 * no writes after the hole have been pushed to
   1034 			 * the server yet.
   1035 			 * Just zero fill the rest of the valid area.
   1036 			 */
   1037 			diff = bp->b_bcount - uiop->uio_resid;
   1038 			len = np->n_size - (((u_quad_t)bp->b_blkno) * DEV_BSIZE
   1039 				+ diff);
   1040 			if (len > 0) {
   1041 			    len = min(len, uiop->uio_resid);
   1042 			    memset((char *)bp->b_data + diff, 0, len);
   1043 			    bp->b_validend = diff + len;
   1044 			} else
   1045 			    bp->b_validend = diff;
   1046 		    } else
   1047 			bp->b_validend = bp->b_bcount;
   1048 		}
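         		/*
         		 * If an executable changed on the server while a process
         		 * was running it, the resident text no longer matches the
         		 * backing file, so the process is killed.
         		 */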
   1049 		if (p && (vp->v_flag & VTEXT) &&
   1050 			(((nmp->nm_flag & NFSMNT_NQNFS) &&
   1051 			  NQNFS_CKINVALID(vp, np, ND_READ) &&
   1052 			  np->n_lrev != np->n_brev) ||
   1053 			 (!(nmp->nm_flag & NFSMNT_NQNFS) &&
   1054 			  np->n_mtime != np->n_vattr->va_mtime.tv_sec))) {
   1055 			uprintf("Process killed due to text file modification\n");
   1056 			psignal(p, SIGKILL);
   1057 			p->p_holdcnt++;
   1058 		}
   1059 		break;
   1060 	    case VLNK:
   1061 		uiop->uio_offset = (off_t)0;
   1062 		nfsstats.readlink_bios++;
   1063 		error = nfs_readlinkrpc(vp, uiop, cr);
   1064 		break;
   1065 	    case VDIR:
   1066 		nfsstats.readdir_bios++;
   1067 		uiop->uio_offset = bp->b_dcookie;
   1068 		if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
   1069 			error = nfs_readdirplusrpc(vp, uiop, cr);
   1070 			if (error == NFSERR_NOTSUPP)
   1071 				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
   1072 		}
   1073 		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
   1074 			error = nfs_readdirrpc(vp, uiop, cr);
   1075 		if (!error) {
   1076 			bp->b_dcookie = uiop->uio_offset;
   1077 			bp->b_validoff = 0;
   1078 			bp->b_validend = bp->b_bcount - uiop->uio_resid;
   1079 		}
   1080 		break;
   1081 	    default:
    1082 		printf("nfs_doio: type %x unexpected\n", vp->v_type);
    1083 		break;
    1084 	    }
   1085 	    if (error) {
   1086 		bp->b_flags |= B_ERROR;
   1087 		bp->b_error = error;
   1088 	    }
   1089 	} else {
   1090 	    /*
    1091 	     * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not,
   1092 	     * an actual write will have to be scheduled.
   1093 	     */
   1094 	    if (bp->b_flags & B_NEEDCOMMIT) {
   1095 		/*
   1096 		 * If the buffer is in the range that we already committed,
   1097 		 * there's nothing to do.
   1098 		 *
   1099 		 * If it's in the range that we need to commit, push the
   1100 		 * whole range at once. Else only push the buffer. In
   1101 		 * both these cases, acquire the commit lock to avoid
   1102 		 * other processes modifying the range. Normally the
   1103 		 * vnode lock should have handled this, but there are
   1104 		 * no proper vnode locks for NFS yet (XXX).
   1105 		 */
   1106 		lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
   1107 		if (!(bp->b_flags & B_NEEDCOMMIT)) {
   1108 			lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
   1109 			goto dowrite;
   1110 		}
   1111 		if (!nfs_in_committed_range(vp, bp)) {
   1112 			if (nfs_in_tobecommitted_range(vp, bp)) {
   1113 				pushedrange = 1;
   1114 				off = np->n_pushlo;
   1115 				/* XXX will be too big if > 2G buffer cache */
   1116 				cnt = np->n_pushhi - np->n_pushlo;
   1117 			} else {
   1118 				pushedrange = 0;
   1119 				off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE;
   1120 				cnt = bp->b_dirtyend;
   1121 			}
   1122 			bp->b_flags |= B_WRITEINPROG;
   1123 			retv = nfs_commit(bp->b_vp, off, cnt,
   1124 			    bp->b_wcred, bp->b_proc);
   1125 
   1126 			bp->b_flags &= ~B_WRITEINPROG;
   1127 			if (retv == 0) {
    1128 				if (pushedrange)
    1129 					nfs_merge_commit_ranges(vp);
    1130 				else
    1131 					nfs_add_committed_range(vp, bp);
   1133 			}
   1134 		}
   1135 		lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
   1136 		if (!retv) {
   1137 			bp->b_resid = bp->b_dirtyoff = bp->b_dirtyend = 0;
   1138 			bp->b_flags &= ~B_NEEDCOMMIT;
   1139 			biodone(bp);
   1140 			return (0);
   1141 		} else if (retv == NFSERR_STALEWRITEVERF)
   1142 			nfs_clearcommit(bp->b_vp->v_mount);
   1143 	    }
   1144 dowrite:
   1145 	    io.iov_len = uiop->uio_resid = bp->b_dirtyend
   1146 		- bp->b_dirtyoff;
   1147 	    uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE
   1148 		+ bp->b_dirtyoff;
   1149 	    io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
   1150 	    uiop->uio_rw = UIO_WRITE;
   1151 	    nfsstats.write_bios++;
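         	    /*
         	     * An async write with no commit pending may go out
         	     * UNSTABLE, letting the server cache it until a later
         	     * commit RPC; everything else is written FILESYNC.
         	     */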
   1152 	    if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE)) == B_ASYNC)
   1153 		iomode = NFSV3WRITE_UNSTABLE;
   1154 	    else
   1155 		iomode = NFSV3WRITE_FILESYNC;
   1156 
   1157 	    bp->b_flags |= B_WRITEINPROG;
   1158 	    error = nfs_writerpc(vp, uiop, cr, &iomode, &must_commit);
   1159 	    s = splbio();
   1160 	    if (!error && iomode == NFSV3WRITE_UNSTABLE) {
   1161 		bp->b_flags |= B_NEEDCOMMIT;
   1162 		lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
   1163 		nfs_add_tobecommitted_range(vp, bp);
   1164 		lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
   1165 	    } else if (!error && bp->b_flags & B_NEEDCOMMIT) {
   1166 		bp->b_flags &= ~B_NEEDCOMMIT;
   1167 		lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
   1168 		nfs_del_committed_range(vp, bp);
   1169 		lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
   1170 	    }
   1171 	    /* XXX the use of NOCACHE is a hack */
   1172 	    bp->b_flags &= ~(B_WRITEINPROG|B_NOCACHE);
   1173 
   1174 	    /*
   1175 	     * For an interrupted write, the buffer is still valid and the
   1176 	     * write hasn't been pushed to the server yet, so we can't set
   1177 	     * B_ERROR and report the interruption by setting B_EINTR. For
   1178 	     * the B_ASYNC case, B_EINTR is not relevant, so the rpc attempt
   1179 	     * is essentially a noop.
   1180 	     * For the case of a V3 write rpc not being committed to stable
   1181 	     * storage, the block is still dirty and requires either a commit
   1182 	     * rpc or another write rpc with iomode == NFSV3WRITE_FILESYNC
   1183 	     * before the block is reused. This is indicated by setting the
   1184 	     * B_DELWRI and B_NEEDCOMMIT flags.
   1185 	     */
   1186 	    if (error == EINTR || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
   1187 		bp->b_flags |= B_DELWRI;
   1188 		/*
   1189 		 * A B_ASYNC block still needs to be committed, so put
   1190 		 * it back on the dirty list.
   1191 		 */
   1192 		if (bp->b_flags & B_ASYNC)
   1193 			reassignbuf(bp, vp);
   1194 		else if (error)
   1195 		    bp->b_flags |= B_EINTR;
   1196 	    } else {
   1197 		if (error) {
   1198 		    bp->b_flags |= B_ERROR;
   1199 		    bp->b_error = np->n_error = error;
   1200 		    np->n_flag |= NWRITEERR;
   1201 		}
   1202 		bp->b_dirtyoff = bp->b_dirtyend = 0;
   1203 	    }
   1204 	    splx(s);
   1205 	}
   1206 	bp->b_resid = uiop->uio_resid;
   1207 	if (must_commit)
   1208 		nfs_clearcommit(vp->v_mount);
   1209 	biodone(bp);
   1210 	return (error);
   1211 }
   1212