      1 /*	$NetBSD: nfs_bio.c,v 1.69 2001/09/15 20:36:39 chs Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1989, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * This code is derived from software contributed to Berkeley by
      8  * Rick Macklem at The University of Guelph.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the University of
     21  *	California, Berkeley and its contributors.
     22  * 4. Neither the name of the University nor the names of its contributors
     23  *    may be used to endorse or promote products derived from this software
     24  *    without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     36  * SUCH DAMAGE.
     37  *
     38  *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
     39  */
     40 
     41 #include "opt_nfs.h"
     42 #include "opt_ddb.h"
     43 
     44 #include <sys/param.h>
     45 #include <sys/systm.h>
     46 #include <sys/resourcevar.h>
     47 #include <sys/signalvar.h>
     48 #include <sys/proc.h>
     49 #include <sys/buf.h>
     50 #include <sys/vnode.h>
     51 #include <sys/mount.h>
     52 #include <sys/kernel.h>
     53 #include <sys/namei.h>
     54 #include <sys/dirent.h>
     55 #include <sys/malloc.h>
     56 
     57 #include <uvm/uvm_extern.h>
     58 #include <uvm/uvm.h>
     59 
     60 #include <nfs/rpcv2.h>
     61 #include <nfs/nfsproto.h>
     62 #include <nfs/nfs.h>
     63 #include <nfs/nfsmount.h>
     64 #include <nfs/nqnfs.h>
     65 #include <nfs/nfsnode.h>
     66 #include <nfs/nfs_var.h>
     67 
     68 extern int nfs_numasync;
     69 extern struct nfsstats nfsstats;
     70 
     71 /*
     72  * Vnode op for read using bio
     73  * Any similarity to readip() is purely coincidental
     74  */
     75 int
     76 nfs_bioread(vp, uio, ioflag, cred, cflag)
     77 	struct vnode *vp;
     78 	struct uio *uio;
     79 	int ioflag, cflag;
     80 	struct ucred *cred;
     81 {
     82 	struct nfsnode *np = VTONFS(vp);
     83 	int biosize;
     84 	struct buf *bp = NULL, *rabp;
     85 	struct vattr vattr;
     86 	struct proc *p;
     87 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
     88 	struct nfsdircache *ndp = NULL, *nndp = NULL;
     89 	caddr_t baddr, ep, edp;
     90 	int got_buf = 0, error = 0, n = 0, on = 0, en, enn;
     91 	int enough = 0;
     92 	struct dirent *dp, *pdp;
     93 	off_t curoff = 0;
     94 
     95 #ifdef DIAGNOSTIC
     96 	if (uio->uio_rw != UIO_READ)
     97 		panic("nfs_read mode");
     98 #endif
     99 	if (uio->uio_resid == 0)
    100 		return (0);
    101 	if (vp->v_type != VDIR && uio->uio_offset < 0)
    102 		return (EINVAL);
    103 	p = uio->uio_procp;
    104 #ifndef NFS_V2_ONLY
    105 	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
    106 	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
    107 		(void)nfs_fsinfo(nmp, vp, cred, p);
    108 #endif
    109 	if (vp->v_type != VDIR &&
    110 	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
    111 		return (EFBIG);
    112 	biosize = nmp->nm_rsize;
    113 
    114 	/*
    115 	 * For nfs, cache consistency can only be maintained approximately.
    116 	 * Although RFC1094 does not specify the criteria, the following is
    117 	 * believed to be compatible with the reference port.
    118 	 * For nqnfs, full cache consistency is maintained within the loop.
    119 	 * For nfs:
    120 	 * If the file's modify time on the server has changed since the
    121 	 * last read rpc or you have written to the file,
    122 	 * you may have lost data cache consistency with the
    123 	 * server, so flush all of the file's data out of the cache.
    124 	 * Then force a getattr rpc to ensure that you have up to date
    125 	 * attributes.
    126 	 * NB: This implies that cache data can be read when up to
    127 	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
    128 	 * attributes this could be forced by setting n_attrstamp to 0 before
    129 	 * the VOP_GETATTR() call.
    130 	 */
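
/*
 * Illustrative sketch only: per the note above, a caller that cannot
 * tolerate NFS_ATTRTIMEO-stale attributes could force a fresh getattr
 * RPC like so, which is exactly what the NMODIFIED branch below does:
 *
 *	np->n_attrstamp = 0;
 *	error = VOP_GETATTR(vp, &vattr, cred, p);
 */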
    131 
    132 	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0 && vp->v_type != VLNK) {
    133 		if (np->n_flag & NMODIFIED) {
    134 			if (vp->v_type != VREG) {
    135 				if (vp->v_type != VDIR)
    136 					panic("nfs: bioread, not dir");
    137 				nfs_invaldircache(vp, 0);
    138 				np->n_direofoffset = 0;
    139 				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
    140 				if (error)
    141 					return (error);
    142 			}
    143 			np->n_attrstamp = 0;
    144 			error = VOP_GETATTR(vp, &vattr, cred, p);
    145 			if (error)
    146 				return (error);
    147 			np->n_mtime = vattr.va_mtime.tv_sec;
    148 		} else {
    149 			error = VOP_GETATTR(vp, &vattr, cred, p);
    150 			if (error)
    151 				return (error);
    152 			if (np->n_mtime != vattr.va_mtime.tv_sec) {
    153 				if (vp->v_type == VDIR) {
    154 					nfs_invaldircache(vp, 0);
    155 					np->n_direofoffset = 0;
    156 				}
    157 				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
    158 				if (error)
    159 					return (error);
    160 				np->n_mtime = vattr.va_mtime.tv_sec;
    161 			}
    162 		}
    163 	}
    164 
    165 	/*
    166 	 * update the cached read creds for this node.
    167 	 */
    168 
    169 	if (np->n_rcred) {
    170 		crfree(np->n_rcred);
    171 	}
    172 	np->n_rcred = cred;
    173 	crhold(cred);
    174 
    175 	do {
    176 #ifndef NFS_V2_ONLY
    177 	    /*
    178 	     * Get a valid lease. If cached data is stale, flush it.
    179 	     */
    180 	    if (nmp->nm_flag & NFSMNT_NQNFS) {
    181 		if (NQNFS_CKINVALID(vp, np, ND_READ)) {
    182 		    do {
    183 			error = nqnfs_getlease(vp, ND_READ, cred, p);
    184 		    } while (error == NQNFS_EXPIRED);
    185 		    if (error)
    186 			return (error);
    187 		    if (np->n_lrev != np->n_brev ||
    188 			(np->n_flag & NQNFSNONCACHE) ||
    189 			((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
    190 			if (vp->v_type == VDIR) {
    191 				nfs_invaldircache(vp, 0);
    192 				np->n_direofoffset = 0;
    193 			}
    194 			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
    195 			if (error)
    196 			    return (error);
    197 			np->n_brev = np->n_lrev;
    198 		    }
    199 		} else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) {
    200 		    nfs_invaldircache(vp, 0);
    201 		    error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
    202 		    np->n_direofoffset = 0;
    203 		    if (error)
    204 			return (error);
    205 		}
    206 	    }
    207 #endif
    208 	    /*
    209 	     * Bypass the cache for NQNFSNONCACHE nodes and root symlinks.
    210 	     */
    211 	    if (np->n_flag & NQNFSNONCACHE
    212 		|| ((vp->v_flag & VROOT) && vp->v_type == VLNK)) {
    213 		switch (vp->v_type) {
    214 		case VREG:
    215 			return (nfs_readrpc(vp, uio));
    216 		case VLNK:
    217 			return (nfs_readlinkrpc(vp, uio, cred));
    218 		case VDIR:
    219 			break;
    220 		default:
    221 			printf(" NQNFSNONCACHE: type %x unexpected\n",
    222 			    vp->v_type);
    223 		}
    224 	    }
    225 	    baddr = (caddr_t)0;
    226 	    switch (vp->v_type) {
    227 	    case VREG:
    228 		nfsstats.biocache_reads++;
    229 
    230 		error = 0;
    231 		if (uio->uio_offset >= np->n_size) {
    232 			break;
    233 		}
    234 		while (uio->uio_resid > 0) {
    235 			void *win;
    236 			vsize_t bytelen = MIN(np->n_size - uio->uio_offset,
    237 					      uio->uio_resid);
    238 
    239 			if (bytelen == 0)
    240 				break;
    241 			win = ubc_alloc(&vp->v_uobj, uio->uio_offset,
    242 					&bytelen, UBC_READ);
    243 			error = uiomove(win, bytelen, uio);
    244 			ubc_release(win, 0);
    245 			if (error) {
    246 				break;
    247 			}
    248 		}
    249 		n = 0;
    250 		break;
    251 
    252 	    case VLNK:
    253 		nfsstats.biocache_readlinks++;
    254 		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
    255 		if (!bp)
    256 			return (EINTR);
    257 		if ((bp->b_flags & B_DONE) == 0) {
    258 			bp->b_flags |= B_READ;
    259 			error = nfs_doio(bp, p);
    260 			if (error) {
    261 				brelse(bp);
    262 				return (error);
    263 			}
    264 		}
    265 		n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
    266 		got_buf = 1;
    267 		on = 0;
    268 		break;
    269 	    case VDIR:
    270 diragain:
    271 		nfsstats.biocache_readdirs++;
    272 		ndp = nfs_searchdircache(vp, uio->uio_offset,
    273 			(nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
    274 		if (!ndp) {
    275 			/*
    276 			 * We've been handed a cookie that is not
    277 			 * in the cache. If we're not translating
    278 			 * 32 <-> 64, it may be a value that was
    279 			 * flushed out of the cache because it grew
    280 			 * too big. Let the server judge if it's
    281 			 * valid or not. In the translation case,
    282 			 * we have no way of validating this value,
    283 			 * so punt.
    284 			 */
    285 			if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
    286 				return (EINVAL);
    287 			ndp = nfs_enterdircache(vp, uio->uio_offset,
    288 				uio->uio_offset, 0, 0);
    289 		}
    290 
    291 		if (uio->uio_offset != 0 &&
    292 		    ndp->dc_cookie == np->n_direofoffset) {
    293 			nfsstats.direofcache_hits++;
    294 			return (0);
    295 		}
    296 
    297 		bp = nfs_getcacheblk(vp, ndp->dc_blkno, NFS_DIRBLKSIZ, p);
    298 		if (!bp)
    299 		    return (EINTR);
    300 		if ((bp->b_flags & B_DONE) == 0) {
    301 		    bp->b_flags |= B_READ;
    302 		    bp->b_dcookie = ndp->dc_blkcookie;
    303 		    error = nfs_doio(bp, p);
    304 		    if (error) {
    305 			/*
    306 			 * Yuck! The directory has been modified on the
    307 			 * server. Punt and let the userland code
    308 			 * deal with it.
    309 			 */
    310 			brelse(bp);
    311 			if (error == NFSERR_BAD_COOKIE) {
    312 			    nfs_invaldircache(vp, 0);
    313 			    nfs_vinvalbuf(vp, 0, cred, p, 1);
    314 			    error = EINVAL;
    315 			}
    316 			return (error);
    317 		    }
    318 		}
    319 
    320 		/*
    321 		 * Just return if we hit EOF right away with this
    322 		 * block. Always check here, because direofoffset
    323 		 * may have been set by an nfsiod since the last
    324 		 * check.
    325 		 */
    326 		if (np->n_direofoffset != 0 &&
    327 			ndp->dc_blkcookie == np->n_direofoffset) {
    328 			brelse(bp);
    329 			return (0);
    330 		}
    331 
    332 		/*
    333 		 * Find the entry we were looking for in the block.
    334 		 */
    335 
    336 		en = ndp->dc_entry;
    337 
    338 		pdp = dp = (struct dirent *)bp->b_data;
    339 		edp = bp->b_data + bp->b_bcount - bp->b_resid;
    340 		enn = 0;
    341 		while (enn < en && (caddr_t)dp < edp) {
    342 			pdp = dp;
    343 			dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
    344 			enn++;
    345 		}
    346 
    347 		/*
    348 		 * If the entry number was bigger than the number of
    349 		 * entries in the block, or the cookie of the previous
    350 		 * entry doesn't match, the directory cache is
    351 		 * stale. Flush it and try again (i.e. go to
    352 		 * the server).
    353 		 */
    354 		if ((caddr_t)dp >= edp || (caddr_t)dp + dp->d_reclen > edp ||
    355 		    (en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
    356 #ifdef DEBUG
    357 		    	printf("invalid cache: %p %p %p off %lx %lx\n",
    358 				pdp, dp, edp,
    359 				(unsigned long)uio->uio_offset,
    360 				(unsigned long)NFS_GETCOOKIE(pdp));
    361 #endif
    362 			brelse(bp);
    363 			nfs_invaldircache(vp, 0);
    364 			nfs_vinvalbuf(vp, 0, cred, p, 0);
    365 			goto diragain;
    366 		}
    367 
    368 		on = (caddr_t)dp - bp->b_data;
    369 
    370 		/*
    371 		 * Cache all entries that may be exported to the
    372 		 * user, as they may be thrown back at us. The
    373 		 * NFSBIO_CACHECOOKIES flag indicates that all
    374 		 * entries are being 'exported', so cache them all.
    375 		 */
    376 
    377 		if (en == 0 && pdp == dp) {
    378 			dp = (struct dirent *)
    379 			    ((caddr_t)dp + dp->d_reclen);
    380 			enn++;
    381 		}
    382 
    383 		if (uio->uio_resid < (bp->b_bcount - bp->b_resid - on)) {
    384 			n = uio->uio_resid;
    385 			enough = 1;
    386 		} else
    387 			n = bp->b_bcount - bp->b_resid - on;
    388 
    389 		ep = bp->b_data + on + n;
    390 
    391 		/*
    392 		 * Find last complete entry to copy, caching entries
    393 		 * (if requested) as we go.
    394 		 */
    395 
    396 		while ((caddr_t)dp < ep && (caddr_t)dp + dp->d_reclen <= ep) {
    397 			if (cflag & NFSBIO_CACHECOOKIES) {
    398 				nndp = nfs_enterdircache(vp, NFS_GETCOOKIE(pdp),
    399 				    ndp->dc_blkcookie, enn, bp->b_lblkno);
    400 				if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
    401 					NFS_STASHCOOKIE32(pdp,
    402 					    nndp->dc_cookie32);
    403 				}
    404 			}
    405 			pdp = dp;
    406 			dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
    407 			enn++;
    408 		}
    409 
    410 		/*
    411 		 * If the last requested entry was not the last in the
    412 		 * buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
    413 		 * cache the cookie of the last requested one, and
    414 		 * set the offset to it.
    415 		 */
    416 
    417 		if ((on + n) < bp->b_bcount - bp->b_resid) {
    418 			curoff = NFS_GETCOOKIE(pdp);
    419 			nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
    420 			    enn, bp->b_lblkno);
    421 			if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
    422 				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
    423 				curoff = nndp->dc_cookie32;
    424 			}
    425 		} else
    426 			curoff = bp->b_dcookie;
    427 
    428 		/*
    429 		 * Always cache the entry for the next block,
    430 		 * so that readaheads can use it.
    431 		 */
    432 		nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie, 0,0);
    433 		if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
    434 			if (curoff == bp->b_dcookie) {
    435 				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
    436 				curoff = nndp->dc_cookie32;
    437 			}
    438 		}
    439 
    440 		n = ((caddr_t)pdp + pdp->d_reclen) - (bp->b_data + on);
    441 
    442 		/*
    443 		 * If not eof and read aheads are enabled, start one.
    444 		 * (You need the current block first, so that you have the
    445 		 *  directory offset cookie of the next block.)
    446 		 */
    447 		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
    448 		    np->n_direofoffset == 0 && !(np->n_flag & NQNFSNONCACHE)) {
    449 			rabp = nfs_getcacheblk(vp, nndp->dc_blkno,
    450 						NFS_DIRBLKSIZ, p);
    451 			if (rabp) {
    452 			    if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
    453 				rabp->b_dcookie = nndp->dc_cookie;
    454 				rabp->b_flags |= (B_READ | B_ASYNC);
    455 				if (nfs_asyncio(rabp)) {
    456 				    rabp->b_flags |= B_INVAL;
    457 				    brelse(rabp);
    458 				}
    459 			    } else
    460 				brelse(rabp);
    461 			}
    462 		}
    463 		got_buf = 1;
    464 		break;
    465 	    default:
    466 		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
    467 		break;
    468 	    }
    469 
    470 	    if (n > 0) {
    471 		if (!baddr)
    472 			baddr = bp->b_data;
    473 		error = uiomove(baddr + on, (int)n, uio);
    474 	    }
    475 	    switch (vp->v_type) {
    476 	    case VREG:
    477 		break;
    478 	    case VLNK:
    479 		n = 0;
    480 		break;
    481 	    case VDIR:
    482 		if (np->n_flag & NQNFSNONCACHE)
    483 			bp->b_flags |= B_INVAL;
    484 		uio->uio_offset = curoff;
    485 		if (enough)
    486 			n = 0;
    487 		break;
    488 	    default:
    489 		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
    490 	    }
    491 	    if (got_buf)
    492 		brelse(bp);
    493 	} while (error == 0 && uio->uio_resid > 0 && n > 0);
    494 	return (error);
    495 }
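
/*
 * Minimal usage sketch (not the actual vnode op, which lives in
 * nfs_vnops.c): a VOP_READ entry point is expected to unpack its
 * arguments and hand off to nfs_bioread() roughly as follows; the
 * argument layout is assumed by analogy with vop_write_args below.
 *
 *	int
 *	nfs_read(void *v)
 *	{
 *		struct vop_read_args *ap = v;
 *
 *		return (nfs_bioread(ap->a_vp, ap->a_uio, ap->a_ioflag,
 *		    ap->a_cred, 0));
 *	}
 */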
    496 
    497 /*
    498  * Vnode op for write using bio
    499  */
    500 int
    501 nfs_write(v)
    502 	void *v;
    503 {
    504 	struct vop_write_args /* {
    505 		struct vnode *a_vp;
    506 		struct uio *a_uio;
    507 		int  a_ioflag;
    508 		struct ucred *a_cred;
    509 	} */ *ap = v;
    510 	struct uio *uio = ap->a_uio;
    511 	struct proc *p = uio->uio_procp;
    512 	struct vnode *vp = ap->a_vp;
    513 	struct nfsnode *np = VTONFS(vp);
    514 	struct ucred *cred = ap->a_cred;
    515 	int ioflag = ap->a_ioflag;
    516 	struct vattr vattr;
    517 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
    518 	void *win;
    519 	voff_t oldoff, origoff;
    520 	vsize_t bytelen;
    521 	int error = 0, iomode, must_commit;
    522 
    523 #ifdef DIAGNOSTIC
    524 	if (uio->uio_rw != UIO_WRITE)
    525 		panic("nfs_write mode");
    526 	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
    527 		panic("nfs_write proc");
    528 #endif
    529 	if (vp->v_type != VREG)
    530 		return (EIO);
    531 	if (np->n_flag & NWRITEERR) {
    532 		np->n_flag &= ~NWRITEERR;
    533 		return (np->n_error);
    534 	}
    535 #ifndef NFS_V2_ONLY
    536 	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
    537 	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
    538 		(void)nfs_fsinfo(nmp, vp, cred, p);
    539 #endif
    540 	if (ioflag & (IO_APPEND | IO_SYNC)) {
    541 		if (np->n_flag & NMODIFIED) {
    542 			np->n_attrstamp = 0;
    543 			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
    544 			if (error)
    545 				return (error);
    546 		}
    547 		if (ioflag & IO_APPEND) {
    548 			np->n_attrstamp = 0;
    549 			error = VOP_GETATTR(vp, &vattr, cred, p);
    550 			if (error)
    551 				return (error);
    552 			uio->uio_offset = np->n_size;
    553 		}
    554 	}
    555 	if (uio->uio_offset < 0)
    556 		return (EINVAL);
    557 	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
    558 		return (EFBIG);
    559 	if (uio->uio_resid == 0)
    560 		return (0);
    561 	/*
    562 	 * Maybe this should be above the vnode op call, but so long as
    563 	 * file servers have no limits, I don't think it matters
    564 	 */
    565 	if (p && uio->uio_offset + uio->uio_resid >
    566 	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
    567 		psignal(p, SIGXFSZ);
    568 		return (EFBIG);
    569 	}
    570 
    571 	/*
    572 	 * update the cached write creds for this node.
    573 	 */
    574 
    575 	if (np->n_wcred) {
    576 		crfree(np->n_wcred);
    577 	}
    578 	np->n_wcred = cred;
    579 	crhold(cred);
    580 
    581 	if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) {
    582 		iomode = NFSV3WRITE_FILESYNC;
    583 		error = nfs_writerpc(vp, uio, &iomode, &must_commit);
    584 		if (must_commit)
    585 			nfs_clearcommit(vp->v_mount);
    586 		return (error);
    587 	}
    588 
    589 	origoff = uio->uio_offset;
    590 	do {
    591 		oldoff = uio->uio_offset;
    592 		bytelen = uio->uio_resid;
    593 
    594 #ifndef NFS_V2_ONLY
    595 		/*
    596 		 * Check for a valid write lease.
    597 		 */
    598 		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
    599 		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
    600 			do {
    601 				error = nqnfs_getlease(vp, ND_WRITE, cred, p);
    602 			} while (error == NQNFS_EXPIRED);
    603 			if (error)
    604 				return (error);
    605 			if (np->n_lrev != np->n_brev ||
    606 			    (np->n_flag & NQNFSNONCACHE)) {
    607 				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
    608 				if (error)
    609 					return (error);
    610 				np->n_brev = np->n_lrev;
    611 			}
    612 		}
    613 #endif
    614 		nfsstats.biocache_writes++;
    615 
    616 		np->n_flag |= NMODIFIED;
    617 		if (np->n_size < uio->uio_offset + bytelen) {
    618 			np->n_size = uio->uio_offset + bytelen;
    619 		}
    620 		if ((uio->uio_offset & PAGE_MASK) == 0 &&
    621 		    ((uio->uio_offset + bytelen) & PAGE_MASK) == 0) {
    622 			win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
    623 			    UBC_WRITE | UBC_FAULTBUSY);
    624 		} else {
    625 			win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
    626 			    UBC_WRITE);
    627 		}
    628 		error = uiomove(win, bytelen, uio);
    629 		ubc_release(win, 0);
    630 		if (error) {
    631 			break;
    632 		}
    633 
    634 		/*
    635 		 * update UVM's notion of the size now that we've
    636 		 * copied the data into the vnode's pages.
    637 		 */
    638 
    639 		if (vp->v_size < uio->uio_offset) {
    640 			uvm_vnp_setsize(vp, uio->uio_offset);
    641 		}
    642 
    643 		if ((oldoff & ~(nmp->nm_wsize - 1)) !=
    644 		    (uio->uio_offset & ~(nmp->nm_wsize - 1))) {
    645 			simple_lock(&vp->v_uobj.vmobjlock);
    646 			error = (vp->v_uobj.pgops->pgo_put)(&vp->v_uobj,
    647 			    trunc_page(oldoff & ~(nmp->nm_wsize - 1)),
    648 			    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
    649 				       ~(nmp->nm_wsize - 1)),
    650 			    PGO_CLEANIT|PGO_WEAK);
    651 		}
    652 	} while (uio->uio_resid > 0);
    653 	if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
    654 		simple_lock(&vp->v_uobj.vmobjlock);
    655 		error = (vp->v_uobj.pgops->pgo_put)(&vp->v_uobj,
    656 		    trunc_page(origoff & ~(nmp->nm_wsize - 1)),
    657 		    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
    658 			       ~(nmp->nm_wsize - 1)),
    659 		    PGO_CLEANIT|PGO_SYNCIO);
    660 	}
    661 	return error;
    662 }
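
/*
 * Worked example of the flush-range arithmetic above, assuming
 * nm_wsize is 8192 (a power of two, as the masks require):
 *
 *	oldoff          = 0x2345  ->  0x2345 & ~0x1fff            = 0x2000
 *	uio->uio_offset = 0x4345  ->  (0x4345 + 0x1fff) & ~0x1fff = 0x6000
 *
 * so the intermediate pgo_put cleans the write-size-aligned range
 * [0x2000, 0x6000) while the loop is still filling later offsets.
 */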
    663 
    664 /*
    665  * Get an nfs cache block.
    666  * Allocate a new one if the block isn't currently in the cache
    667  * and return the block marked busy. If the calling process is
    668  * interrupted by a signal for an interruptible mount point, return
    669  * NULL.
    670  */
    671 struct buf *
    672 nfs_getcacheblk(vp, bn, size, p)
    673 	struct vnode *vp;
    674 	daddr_t bn;
    675 	int size;
    676 	struct proc *p;
    677 {
    678 	struct buf *bp;
    679 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
    680 
    681 	if (nmp->nm_flag & NFSMNT_INT) {
    682 		bp = getblk(vp, bn, size, PCATCH, 0);
    683 		while (bp == NULL) {
    684 			if (nfs_sigintr(nmp, NULL, p))
    685 				return (NULL);
    686 			bp = getblk(vp, bn, size, 0, 2 * hz);
    687 		}
    688 	} else
    689 		bp = getblk(vp, bn, size, 0, 0);
    690 	return (bp);
    691 }
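
/*
 * Typical usage, as in the VLNK path of nfs_bioread() above: the
 * caller must check for a NULL return (signal on an interruptible
 * mount) and map it to EINTR itself.
 *
 *	bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
 *	if (!bp)
 *		return (EINTR);
 */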
    692 
    693 /*
    694  * Flush and invalidate all dirty buffers. If another process is already
    695  * doing the flush, just wait for completion.
    696  */
    697 int
    698 nfs_vinvalbuf(vp, flags, cred, p, intrflg)
    699 	struct vnode *vp;
    700 	int flags;
    701 	struct ucred *cred;
    702 	struct proc *p;
    703 	int intrflg;
    704 {
    705 	struct nfsnode *np = VTONFS(vp);
    706 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
    707 	int error = 0, slpflag, slptimeo;
    708 
    709 	if ((nmp->nm_flag & NFSMNT_INT) == 0)
    710 		intrflg = 0;
    711 	if (intrflg) {
    712 		slpflag = PCATCH;
    713 		slptimeo = 2 * hz;
    714 	} else {
    715 		slpflag = 0;
    716 		slptimeo = 0;
    717 	}
    718 	/*
    719 	 * First wait for any other process doing a flush to complete.
    720 	 */
    721 	while (np->n_flag & NFLUSHINPROG) {
    722 		np->n_flag |= NFLUSHWANT;
    723 		error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
    724 			slptimeo);
    725 		if (error && intrflg && nfs_sigintr(nmp, NULL, p))
    726 			return (EINTR);
    727 	}
    728 
    729 	/*
    730 	 * Now, flush as required.
    731 	 */
    732 	np->n_flag |= NFLUSHINPROG;
    733 	error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
    734 	while (error) {
    735 		if (intrflg && nfs_sigintr(nmp, NULL, p)) {
    736 			np->n_flag &= ~NFLUSHINPROG;
    737 			if (np->n_flag & NFLUSHWANT) {
    738 				np->n_flag &= ~NFLUSHWANT;
    739 				wakeup((caddr_t)&np->n_flag);
    740 			}
    741 			return (EINTR);
    742 		}
    743 		error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
    744 	}
    745 	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
    746 	if (np->n_flag & NFLUSHWANT) {
    747 		np->n_flag &= ~NFLUSHWANT;
    748 		wakeup((caddr_t)&np->n_flag);
    749 	}
    750 	return (0);
    751 }
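
/*
 * Callers that must not lose dirty data use the V_SAVE form, as the
 * consistency checks in nfs_bioread() and nfs_write() do:
 *
 *	error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
 *	if (error)
 *		return (error);
 */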
    752 
    753 /*
    754  * Initiate asynchronous I/O. Return an error if no nfsiods are available.
    755  * This is mainly to avoid queueing async I/O requests when the nfsiods
    756  * are all hung on a dead server.
    757  */
    758 
    759 int
    760 nfs_asyncio(bp)
    761 	struct buf *bp;
    762 {
    763 	int i;
    764 	struct nfsmount *nmp;
    765 	int gotiod, slpflag = 0, slptimeo = 0, error;
    766 
    767 	if (nfs_numasync == 0)
    768 		return (EIO);
    769 
    770 
    771 	nmp = VFSTONFS(bp->b_vp->v_mount);
    772 again:
    773 	if (nmp->nm_flag & NFSMNT_INT)
    774 		slpflag = PCATCH;
    775 	gotiod = FALSE;
    776 
    777 	/*
    778 	 * Find a free iod to process this request.
    779 	 */
    780 
    781 	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
    782 		if (nfs_iodwant[i]) {
    783 			/*
    784 			 * Found one, so wake it up and tell it which
    785 			 * mount to process.
    786 			 */
    787 			nfs_iodwant[i] = NULL;
    788 			nfs_iodmount[i] = nmp;
    789 			nmp->nm_bufqiods++;
    790 			wakeup((caddr_t)&nfs_iodwant[i]);
    791 			gotiod = TRUE;
    792 			break;
    793 		}
    794 	/*
    795 	 * If none are free, we may already have an iod working on this mount
    796 	 * point.  If so, it will process our request.
    797 	 */
    798 	if (!gotiod && nmp->nm_bufqiods > 0)
    799 		gotiod = TRUE;
    800 
    801 	/*
    802 	 * If we have an iod which can process the request, then queue
    803 	 * the buffer.
    804 	 */
    805 	if (gotiod) {
    806 		/*
    807 		 * Ensure that the queue never grows too large.
    808 		 */
    809 		while (nmp->nm_bufqlen >= 2*nfs_numasync) {
    810 			nmp->nm_bufqwant = TRUE;
    811 			error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO,
    812 				"nfsaio", slptimeo);
    813 			if (error) {
    814 				if (nfs_sigintr(nmp, NULL, bp->b_proc))
    815 					return (EINTR);
    816 				if (slpflag == PCATCH) {
    817 					slpflag = 0;
    818 					slptimeo = 2 * hz;
    819 				}
    820 			}
    821 			/*
    822 			 * We might have lost our iod while sleeping,
    823 			 * so check and loop if necessary.
    824 			 */
    825 			if (nmp->nm_bufqiods == 0)
    826 				goto again;
    827 		}
    828 		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
    829 		nmp->nm_bufqlen++;
    830 		return (0);
    831 	}
    832 
    833 	/*
    834 	 * All the iods are busy on other mounts, so return EIO to
    835 	 * force the caller to process the i/o synchronously.
    836 	 */
    837 	return (EIO);
    838 }
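
/*
 * Since EIO here only means "no nfsiod available", callers either
 * fall back to synchronous I/O or, for purely speculative requests,
 * discard the buffer, as the readahead path in nfs_bioread() does:
 *
 *	if (nfs_asyncio(rabp)) {
 *		rabp->b_flags |= B_INVAL;
 *		brelse(rabp);
 *	}
 */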
    839 
    840 /*
    841  * Do an I/O operation to/from a cache block. This may be called
    842  * synchronously or from an nfsiod.
    843  */
    844 int
    845 nfs_doio(bp, p)
    846 	struct buf *bp;
    847 	struct proc *p;
    848 {
    849 	struct uio *uiop;
    850 	struct vnode *vp;
    851 	struct nfsnode *np;
    852 	struct nfsmount *nmp;
    853 	int error = 0, diff, len, iomode, must_commit = 0;
    854 	struct uio uio;
    855 	struct iovec io;
    856 
    857 	vp = bp->b_vp;
    858 	np = VTONFS(vp);
    859 	nmp = VFSTONFS(vp->v_mount);
    860 	uiop = &uio;
    861 	uiop->uio_iov = &io;
    862 	uiop->uio_iovcnt = 1;
    863 	uiop->uio_segflg = UIO_SYSSPACE;
    864 	uiop->uio_procp = p;
    865 
    866 	/*
    867 	 * Historically, paging was done with physio, but no more...
    868 	 */
    869 	if (bp->b_flags & B_PHYS) {
    870 	    /*
    871 	     * ...though reading /dev/drum still gets us here.
    872 	     */
    873 	    io.iov_len = uiop->uio_resid = bp->b_bcount;
    874 	    /* mapping was done by vmapbuf() */
    875 	    io.iov_base = bp->b_data;
    876 	    uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
    877 	    if (bp->b_flags & B_READ) {
    878 		uiop->uio_rw = UIO_READ;
    879 		nfsstats.read_physios++;
    880 		error = nfs_readrpc(vp, uiop);
    881 	    } else {
    882 		iomode = NFSV3WRITE_DATASYNC;
    883 		uiop->uio_rw = UIO_WRITE;
    884 		nfsstats.write_physios++;
    885 		error = nfs_writerpc(vp, uiop, &iomode, &must_commit);
    886 	    }
    887 	    if (error) {
    888 		bp->b_flags |= B_ERROR;
    889 		bp->b_error = error;
    890 	    }
    891 	} else if (bp->b_flags & B_READ) {
    892 	    io.iov_len = uiop->uio_resid = bp->b_bcount;
    893 	    io.iov_base = bp->b_data;
    894 	    uiop->uio_rw = UIO_READ;
    895 	    switch (vp->v_type) {
    896 	    case VREG:
    897 		uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
    898 		nfsstats.read_bios++;
    899 		error = nfs_readrpc(vp, uiop);
    900 		if (!error && uiop->uio_resid) {
    901 
    902 			/*
    903 			 * If len > 0, there is a hole in the file and
    904 			 * no writes after the hole have been pushed to
    905 			 * the server yet.
    906 			 * Just zero fill the rest of the valid area.
    907 			 */
    908 
    909 			diff = bp->b_bcount - uiop->uio_resid;
    910 			len = np->n_size - ((((off_t)bp->b_blkno) << DEV_BSHIFT)
    911 				+ diff);
    912 			if (len > 0) {
    913 				len = MIN(len, uiop->uio_resid);
    914 				memset((char *)bp->b_data + diff, 0, len);
    915 			}
    916 		}
    917 		if (p && (vp->v_flag & VTEXT) &&
    918 			(((nmp->nm_flag & NFSMNT_NQNFS) &&
    919 			  NQNFS_CKINVALID(vp, np, ND_READ) &&
    920 			  np->n_lrev != np->n_brev) ||
    921 			 (!(nmp->nm_flag & NFSMNT_NQNFS) &&
    922 			  np->n_mtime != np->n_vattr->va_mtime.tv_sec))) {
    923 			uprintf("Process killed due to "
    924 				"text file modification\n");
    925 			psignal(p, SIGKILL);
    926 			p->p_holdcnt++;
    927 		}
    928 		break;
    929 	    case VLNK:
    930 		uiop->uio_offset = (off_t)0;
    931 		nfsstats.readlink_bios++;
    932 		error = nfs_readlinkrpc(vp, uiop, curproc->p_ucred);
    933 		break;
    934 	    case VDIR:
    935 		nfsstats.readdir_bios++;
    936 		uiop->uio_offset = bp->b_dcookie;
    937 		if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
    938 			error = nfs_readdirplusrpc(vp, uiop, curproc->p_ucred);
    939 			if (error == NFSERR_NOTSUPP)
    940 				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
    941 		}
    942 		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
    943 			error = nfs_readdirrpc(vp, uiop, curproc->p_ucred);
    944 		if (!error) {
    945 			bp->b_dcookie = uiop->uio_offset;
    946 		}
    947 		break;
    948 	    default:
    949 		printf("nfs_doio:  type %x unexpected\n",vp->v_type);
    950 		break;
    951 	    }
    952 	    if (error) {
    953 		bp->b_flags |= B_ERROR;
    954 		bp->b_error = error;
    955 	    }
    956 	} else {
    957 	    /*
    958 	     * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not
    959 	     * an actual write will have to be scheduled.
    960 	     */
    961 
    962 	    io.iov_base = bp->b_data;
    963 	    io.iov_len = uiop->uio_resid = bp->b_bcount;
    964 	    uiop->uio_offset = (((off_t)bp->b_blkno) << DEV_BSHIFT);
    965 	    uiop->uio_rw = UIO_WRITE;
    966 	    nfsstats.write_bios++;
    967 	    iomode = NFSV3WRITE_UNSTABLE;
    968 	    error = nfs_writerpc(vp, uiop, &iomode, &must_commit);
    969 	}
    970 	bp->b_resid = uiop->uio_resid;
    971 	if (must_commit)
    972 		nfs_clearcommit(vp->v_mount);
    973 	biodone(bp);
    974 	return (error);
    975 }
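
/*
 * A synchronous caller drives nfs_doio() by marking the buffer for
 * read and invoking it directly, as the VLNK path of nfs_bioread()
 * does:
 *
 *	if ((bp->b_flags & B_DONE) == 0) {
 *		bp->b_flags |= B_READ;
 *		error = nfs_doio(bp, p);
 *		if (error) {
 *			brelse(bp);
 *			return (error);
 *		}
 *	}
 */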
    976 
    977 /*
    978  * Vnode op for VM getpages.
    979  */
    980 
    981 int
    982 nfs_getpages(v)
    983 	void *v;
    984 {
    985 	struct vop_getpages_args /* {
    986 		struct vnode *a_vp;
    987 		voff_t a_offset;
    988 		struct vm_page **a_m;
    989 		int *a_count;
    990 		int a_centeridx;
    991 		vm_prot_t a_access_type;
    992 		int a_advice;
    993 		int a_flags;
    994 	} */ *ap = v;
    995 
    996 	struct vnode *vp = ap->a_vp;
    997 	struct uvm_object *uobj = &vp->v_uobj;
    998 	struct nfsnode *np = VTONFS(vp);
    999 	struct vm_page *pg, **pgs;
   1000 	off_t origoffset;
   1001 	int i, error, npages;
   1002 	boolean_t v3 = NFS_ISV3(vp);
   1003 	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1004 	UVMHIST_FUNC("nfs_getpages"); UVMHIST_CALLED(ubchist);
   1005 
   1006 	/*
   1007 	 * update the cached read creds for this node.
   1008 	 */
   1009 
   1010 	if (np->n_rcred) {
   1011 		crfree(np->n_rcred);
   1012 	}
   1013 	np->n_rcred = curproc->p_ucred;
   1014 	crhold(np->n_rcred);
   1015 
   1016 	/*
   1017 	 * call the genfs code to get the pages.
   1018 	 */
   1019 
   1020 	npages = *ap->a_count;
   1021 	error = genfs_getpages(v);
   1022 	if (error || !write || !v3) {
   1023 		return error;
   1024 	}
   1025 
   1026 	/*
   1027 	 * this is a write fault, update the commit info.
   1028 	 */
   1029 
   1030 	origoffset = ap->a_offset;
   1031 	pgs = ap->a_m;
   1032 
   1033 	lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
   1034 	nfs_del_committed_range(vp, origoffset, npages);
   1035 	nfs_del_tobecommitted_range(vp, origoffset, npages);
   1036 	simple_lock(&uobj->vmobjlock);
   1037 	for (i = 0; i < npages; i++) {
   1038 		pg = pgs[i];
   1039 		if (pg == NULL || pg == PGO_DONTCARE) {
   1040 			continue;
   1041 		}
   1042 		pg->flags &= ~(PG_NEEDCOMMIT|PG_RDONLY);
   1043 	}
   1044 	simple_unlock(&uobj->vmobjlock);
   1045 	lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
   1046 	return 0;
   1047 }
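
/*
 * Sketch of the intended v3 write-fault life cycle (descriptive
 * only): clearing PG_NEEDCOMMIT and PG_RDONLY above marks the pages
 * as modified relative to the server, so a later flush through
 * nfs_gop_write() below must treat the range as uncommitted again
 * and either re-commit it or push it with a stable write.
 */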
   1048 
   1049 int
   1050 nfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1051 {
   1052 	struct uvm_object *uobj = &vp->v_uobj;
   1053 	struct nfsnode *np = VTONFS(vp);
   1054 	off_t origoffset, commitoff;
   1055 	uint32_t commitbytes;
   1056 	int error, i;
   1057 	int bytes;
   1058 	boolean_t v3 = NFS_ISV3(vp);
   1059 	boolean_t weak = flags & PGO_WEAK;
   1060 	UVMHIST_FUNC("nfs_gop_write"); UVMHIST_CALLED(ubchist);
   1061 
   1062 	/* XXX for now, skip the v3 stuff. */
   1063 	v3 = FALSE;
   1064 
   1065 	/*
   1066 	 * for NFSv2, just write normally.
   1067 	 */
   1068 
   1069 	if (!v3) {
   1070 		return genfs_gop_write(vp, pgs, npages, flags);
   1071 	}
   1072 
   1073 	/*
   1074 	 * for NFSv3, use delayed writes and the "commit" operation
   1075 	 * to avoid sync writes.
   1076 	 */
   1077 
   1078 	origoffset = pgs[0]->offset;
   1079 	bytes = npages << PAGE_SHIFT;
   1080 	lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
   1081 	if (nfs_in_committed_range(vp, origoffset, bytes)) {
   1082 		goto committed;
   1083 	}
   1084 	if (nfs_in_tobecommitted_range(vp, origoffset, bytes)) {
   1085 		if (weak) {
   1086 			lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
   1087 			return 0;
   1088 		} else {
   1089 			commitoff = np->n_pushlo;
   1090 			commitbytes = (uint32_t)(np->n_pushhi - np->n_pushlo);
   1091 			goto commit;
   1092 		}
   1093 	} else {
   1094 		commitoff = origoffset;
   1095 		commitbytes = npages << PAGE_SHIFT;
   1096 	}
   1097 	simple_lock(&uobj->vmobjlock);
   1098 	for (i = 0; i < npages; i++) {
   1099 		pgs[i]->flags |= PG_NEEDCOMMIT|PG_RDONLY;
   1100 		pgs[i]->flags &= ~PG_CLEAN;
   1101 	}
   1102 	simple_unlock(&uobj->vmobjlock);
   1103 	lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
   1104 	error = genfs_gop_write(vp, pgs, npages, flags);
   1105 	if (error) {
   1106 		return error;
   1107 	}
   1108 	lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
   1109 	if (weak) {
   1110 		nfs_add_tobecommitted_range(vp, origoffset,
   1111 		    npages << PAGE_SHIFT);
   1112 	} else {
   1113 commit:
   1114 		error = nfs_commit(vp, commitoff, commitbytes, curproc);
   1115 		nfs_del_tobecommitted_range(vp, commitoff, commitbytes);
   1116 committed:
   1117 		simple_lock(&uobj->vmobjlock);
   1118 		for (i = 0; i < npages; i++) {
   1119 			pgs[i]->flags &= ~(PG_NEEDCOMMIT|PG_RDONLY);
   1120 		}
   1121 		simple_unlock(&uobj->vmobjlock);
   1122 	}
   1123 	lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
   1124 	return error;
   1125 }
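
/*
 * Worked example of the commit bookkeeping above (currently dormant,
 * since the function forces v3 = FALSE), assuming 4KB pages, i.e.
 * PAGE_SHIFT == 12: flushing a 4-page cluster at offset 0x8000 gives
 *
 *	origoffset = 0x8000
 *	bytes      = 4 << 12 = 0x4000
 *
 * so a weak (PGO_WEAK) flush records [0x8000, 0xc000) as "to be
 * committed", and a later strong flush commits the accumulated
 * [n_pushlo, n_pushhi) range with a single nfs_commit() RPC.
 */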
   1126