/*	$NetBSD: nfs_bio.c,v 1.93 2003/04/12 14:41:28 yamt Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_bio.c,v 1.93 2003/04/12 14:41:28 yamt Exp $");

#include "opt_nfs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nqnfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern int nfs_commitsize;
extern struct nfsstats nfsstats;

static int nfs_doio_read __P((struct buf *, struct uio *));
static int nfs_doio_write __P((struct buf *, struct uio *));
static int nfs_doio_phys __P((struct buf *, struct uio *));

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(vp, uio, ioflag, cred, cflag)
	struct vnode *vp;
	struct uio *uio;
	int ioflag, cflag;
	struct ucred *cred;
{
	struct nfsnode *np = VTONFS(vp);
	struct buf *bp = NULL, *rabp;
	struct vattr vattr;
	struct proc *p;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsdircache *ndp = NULL, *nndp = NULL;
	caddr_t baddr, ep, edp;
	int got_buf = 0, error = 0, n = 0, on = 0, en, enn;
	int enough = 0;
	struct dirent *dp, *pdp;
	off_t curoff = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (vp->v_type != VDIR && uio->uio_offset < 0)
		return (EINVAL);
	p = uio->uio_procp;
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, p);
#endif
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);

	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nqnfs, full cache consistency is maintained within the loop.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */

	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0 && vp->v_type != VLNK) {
		if (np->n_flag & NMODIFIED) {
			if (vp->v_type != VREG) {
				if (vp->v_type != VDIR)
					panic("nfs: bioread, not dir");
				nfs_invaldircache(vp, 0);
				np->n_direofoffset = 0;
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
			}
			np->n_attrstamp = 0;
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
		} else {
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			if (np->n_mtime != vattr.va_mtime.tv_sec) {
				if (vp->v_type == VDIR) {
					nfs_invaldircache(vp, 0);
					np->n_direofoffset = 0;
				}
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_mtime = vattr.va_mtime.tv_sec;
			}
		}
	}

	/*
	 * update the cached read creds for this node.
	 */

	if (np->n_rcred) {
		crfree(np->n_rcred);
	}
	np->n_rcred = cred;
	crhold(cred);

	do {
#ifndef NFS_V2_ONLY
	    /*
	     * Get a valid lease. If cached data is stale, flush it.
	     */
	    if (nmp->nm_flag & NFSMNT_NQNFS) {
		if (NQNFS_CKINVALID(vp, np, ND_READ)) {
		    do {
			error = nqnfs_getlease(vp, ND_READ, cred, p);
		    } while (error == NQNFS_EXPIRED);
		    if (error)
			return (error);
		    if (np->n_lrev != np->n_brev ||
			(np->n_flag & NQNFSNONCACHE) ||
			((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
			if (vp->v_type == VDIR) {
				nfs_invaldircache(vp, 0);
				np->n_direofoffset = 0;
			}
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
			if (error)
			    return (error);
			np->n_brev = np->n_lrev;
		    }
		} else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) {
		    nfs_invaldircache(vp, 0);
		    error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
		    np->n_direofoffset = 0;
		    if (error)
			return (error);
		}
	    }
#endif
	    /*
	     * Don't cache symlinks.
	     */
	    if (np->n_flag & NQNFSNONCACHE
		|| ((vp->v_flag & VROOT) && vp->v_type == VLNK)) {
		switch (vp->v_type) {
		case VREG:
			return (nfs_readrpc(vp, uio));
		case VLNK:
			return (nfs_readlinkrpc(vp, uio, cred));
		case VDIR:
			break;
		default:
			printf(" NQNFSNONCACHE: type %x unexpected\n",
			    vp->v_type);
		}
	    }
	    baddr = (caddr_t)0;
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;

		error = 0;
		if (uio->uio_offset >= np->n_size) {
			break;
		}
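		/*
		 * Copy the data out in window-sized chunks via UBC
		 * mappings of the file; the underlying read RPCs are
		 * issued by nfs_getpages() as the mapping is touched.
		 */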
		while (uio->uio_resid > 0) {
			void *win;
			vsize_t bytelen = MIN(np->n_size - uio->uio_offset,
					      uio->uio_resid);

			if (bytelen == 0)
				break;
			win = ubc_alloc(&vp->v_uobj, uio->uio_offset,
					&bytelen, UBC_READ);
			error = uiomove(win, bytelen, uio);
			ubc_release(win, 0);
			if (error) {
				break;
			}
		}
		n = 0;
		break;

	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_DONE) == 0) {
			bp->b_flags |= B_READ;
			error = nfs_doio(bp, p);
			if (error) {
				brelse(bp);
				return (error);
			}
		}
		n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		got_buf = 1;
		on = 0;
		break;
	    case VDIR:
diragain:
		nfsstats.biocache_readdirs++;
		ndp = nfs_searchdircache(vp, uio->uio_offset,
			(nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
		if (!ndp) {
			/*
			 * We've been handed a cookie that is not
			 * in the cache. If we're not translating
			 * 32 <-> 64, it may be a value that was
			 * flushed out of the cache because it grew
			 * too big. Let the server judge if it's
			 * valid or not. In the translation case,
			 * we have no way of validating this value,
			 * so punt.
			 */
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
				return (EINVAL);
			ndp = nfs_enterdircache(vp, uio->uio_offset,
				uio->uio_offset, 0, 0);
		}

		if (uio->uio_offset != 0 &&
		    ndp->dc_cookie == np->n_direofoffset) {
			nfsstats.direofcache_hits++;
			return (0);
		}

		bp = nfs_getcacheblk(vp, ndp->dc_blkno, NFS_DIRBLKSIZ, p);
		if (!bp)
		    return (EINTR);
		if ((bp->b_flags & B_DONE) == 0) {
		    bp->b_flags |= B_READ;
		    bp->b_dcookie = ndp->dc_blkcookie;
		    error = nfs_doio(bp, p);
		    if (error) {
			/*
			 * Yuck! The directory has been modified on the
			 * server. Punt and let the userland code
			 * deal with it.
			 */
			brelse(bp);
			if (error == NFSERR_BAD_COOKIE) {
			    nfs_invaldircache(vp, 0);
			    nfs_vinvalbuf(vp, 0, cred, p, 1);
			    error = EINVAL;
			}
			return (error);
		    }
		}

		/*
		 * Just return if we hit EOF right away with this
		 * block. Always check here, because direofoffset
		 * may have been set by an nfsiod since the last
		 * check.
		 */
		if (np->n_direofoffset != 0 &&
			ndp->dc_blkcookie == np->n_direofoffset) {
			brelse(bp);
			return (0);
		}

		/*
		 * Find the entry we were looking for in the block.
		 */

		en = ndp->dc_entry;

		pdp = dp = (struct dirent *)bp->b_data;
		edp = bp->b_data + bp->b_bcount - bp->b_resid;
		enn = 0;
		while (enn < en && (caddr_t)dp < edp) {
			pdp = dp;
			dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
			enn++;
		}

		/*
		 * If the entry number was bigger than the number of
		 * entries in the block, or the cookie of the previous
		 * entry doesn't match, the directory cache is
		 * stale. Flush it and try again (i.e. go to
		 * the server).
		 */
		if ((caddr_t)dp >= edp || (caddr_t)dp + dp->d_reclen > edp ||
		    (en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
#ifdef DEBUG
			printf("invalid cache: %p %p %p off %lx %lx\n",
				pdp, dp, edp,
				(unsigned long)uio->uio_offset,
				(unsigned long)NFS_GETCOOKIE(pdp));
#endif
			brelse(bp);
			nfs_invaldircache(vp, 0);
			nfs_vinvalbuf(vp, 0, cred, p, 0);
			goto diragain;
		}

		on = (caddr_t)dp - bp->b_data;

		/*
		 * Cache all entries that may be exported to the
		 * user, as they may be thrown back at us. The
		 * NFSBIO_CACHECOOKIES flag indicates that all
		 * entries are being 'exported', so cache them all.
		 */

		if (en == 0 && pdp == dp) {
			dp = (struct dirent *)
			    ((caddr_t)dp + dp->d_reclen);
			enn++;
		}

		if (uio->uio_resid < (bp->b_bcount - bp->b_resid - on)) {
			n = uio->uio_resid;
			enough = 1;
		} else
			n = bp->b_bcount - bp->b_resid - on;

		ep = bp->b_data + on + n;

		/*
		 * Find last complete entry to copy, caching entries
		 * (if requested) as we go.
		 */

		while ((caddr_t)dp < ep && (caddr_t)dp + dp->d_reclen <= ep) {
			if (cflag & NFSBIO_CACHECOOKIES) {
				nndp = nfs_enterdircache(vp, NFS_GETCOOKIE(pdp),
				    ndp->dc_blkcookie, enn, bp->b_lblkno);
				if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
					NFS_STASHCOOKIE32(pdp,
					    nndp->dc_cookie32);
				}
			}
			pdp = dp;
			dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
			enn++;
		}

		/*
		 * If the last requested entry was not the last in the
		 * buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
		 * cache the cookie of the last requested one, and
		 * set the offset to it.
		 */

		if ((on + n) < bp->b_bcount - bp->b_resid) {
			curoff = NFS_GETCOOKIE(pdp);
			nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
			    enn, bp->b_lblkno);
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
				curoff = nndp->dc_cookie32;
			}
		} else
			curoff = bp->b_dcookie;

		/*
		 * Always cache the entry for the next block,
		 * so that readaheads can use it.
		 */
		nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie, 0, 0);
		if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
			if (curoff == bp->b_dcookie) {
				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
				curoff = nndp->dc_cookie32;
			}
		}

		n = ((caddr_t)pdp + pdp->d_reclen) - (bp->b_data + on);

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
		    np->n_direofoffset == 0 && !(np->n_flag & NQNFSNONCACHE)) {
			rabp = nfs_getcacheblk(vp, nndp->dc_blkno,
						NFS_DIRBLKSIZ, p);
			if (rabp) {
			    if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
				rabp->b_dcookie = nndp->dc_cookie;
				rabp->b_flags |= (B_READ | B_ASYNC);
				if (nfs_asyncio(rabp)) {
				    rabp->b_flags |= B_INVAL;
				    brelse(rabp);
				}
			    } else
				brelse(rabp);
			}
		}
		got_buf = 1;
		break;
	    default:
		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
		break;
	    }

	    if (n > 0) {
		if (!baddr)
			baddr = bp->b_data;
		error = uiomove(baddr + on, (int)n, uio);
	    }
	    switch (vp->v_type) {
	    case VREG:
		break;
	    case VLNK:
		n = 0;
		break;
	    case VDIR:
		if (np->n_flag & NQNFSNONCACHE)
			bp->b_flags |= B_INVAL;
		uio->uio_offset = curoff;
		if (enough)
			n = 0;
		break;
	    default:
		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
	    }
	    if (got_buf)
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(v)
	void *v;
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	void *win;
	voff_t oldoff, origoff;
	vsize_t bytelen;
	int error = 0, iomode, stalewriteverf;
	int extended = 0, wrotedta = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, p);
#endif
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			np->n_attrstamp = 0;
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
			if (error)
				return (error);
		}
		if (ioflag & IO_APPEND) {
			np->n_attrstamp = 0;
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (p && uio->uio_offset + uio->uio_resid >
	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * update the cached write creds for this node.
	 */

	if (np->n_wcred) {
		crfree(np->n_wcred);
	}
	np->n_wcred = cred;
	crhold(cred);

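	/*
	 * For uncached nodes, bypass the page cache and do a single
	 * synchronous FILESYNC write RPC directly to the server.
	 */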
	if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) {
		iomode = NFSV3WRITE_FILESYNC;
		error = nfs_writerpc(vp, uio, &iomode, &stalewriteverf);
		if (stalewriteverf)
			nfs_clearcommit(vp->v_mount);
		return (error);
	}

	origoff = uio->uio_offset;
	do {
		boolean_t extending; /* if we are extending whole pages */
		u_quad_t oldsize;
		oldoff = uio->uio_offset;
		bytelen = uio->uio_resid;

#ifndef NFS_V2_ONLY
		/*
		 * Check for a valid write lease.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
			do {
				error = nqnfs_getlease(vp, ND_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error)
				return (error);
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_brev = np->n_lrev;
			}
		}
#endif
		nfsstats.biocache_writes++;

		oldsize = np->n_size;
		np->n_flag |= NMODIFIED;
		if (np->n_size < uio->uio_offset + bytelen) {
			np->n_size = uio->uio_offset + bytelen;
		}
		extending = ((uio->uio_offset & PAGE_MASK) == 0 &&
		    (bytelen & PAGE_MASK) == 0 &&
		    uio->uio_offset >= vp->v_size);
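		/*
		 * When appending page-aligned data entirely beyond the
		 * old EOF, UBC_FAULTBUSY makes ubc_alloc() fault in the
		 * (freshly allocated, zeroed) pages up front, so we don't
		 * read them from the server just to overwrite them.
		 */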
		if (extending) {
			win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
			    UBC_WRITE | UBC_FAULTBUSY);
		} else {
			win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
			    UBC_WRITE);
		}
		error = uiomove(win, bytelen, uio);
		ubc_release(win, 0);
		if (error) {
			if (extending) {
				/*
				 * back out the size change and free
				 * pages past eof.
				 */
				np->n_size = oldsize;
				(void)VOP_PUTPAGES(vp, round_page(vp->v_size),
				    0, PGO_SYNCIO | PGO_FREE);
			}
			break;
		}
		wrotedta = 1;

		/*
		 * update UVM's notion of the size now that we've
		 * copied the data into the vnode's pages.
		 */

		if (vp->v_size < uio->uio_offset) {
			uvm_vnp_setsize(vp, uio->uio_offset);
			extended = 1;
		}

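		/*
		 * If this iteration carried us across an nm_wsize
		 * boundary, push the chunk we just completed out to the
		 * server asynchronously so the write RPCs can overlap
		 * with copying in the next chunk.
		 */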
		if ((oldoff & ~(nmp->nm_wsize - 1)) !=
		    (uio->uio_offset & ~(nmp->nm_wsize - 1))) {
			simple_lock(&vp->v_interlock);
			error = VOP_PUTPAGES(vp,
			    trunc_page(oldoff & ~(nmp->nm_wsize - 1)),
			    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
				       ~(nmp->nm_wsize - 1)), PGO_CLEANIT);
		}
	} while (uio->uio_resid > 0);
	if (wrotedta)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
	if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
		simple_lock(&vp->v_interlock);
		error = VOP_PUTPAGES(vp,
		    trunc_page(origoff & ~(nmp->nm_wsize - 1)),
		    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
			       ~(nmp->nm_wsize - 1)),
		    PGO_CLEANIT | PGO_SYNCIO);
	}
	return error;
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(vp, bn, size, p)
	struct vnode *vp;
	daddr_t bn;
	int size;
	struct proc *p;
{
	struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, p))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(vp, flags, cred, p, intrflg)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int intrflg;
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
			slptimeo);
		if (error && intrflg && nfs_sigintr(nmp, NULL, p))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
	while (error) {
		if (intrflg && nfs_sigintr(nmp, NULL, p)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */

int
nfs_asyncio(bp)
	struct buf *bp;
{
	int i;
	struct nfsmount *nmp;
	int gotiod, slpflag = 0, slptimeo = 0, error;

	if (nfs_numasync == 0)
		return (EIO);

	nmp = VFSTONFS(bp->b_vp->v_mount);
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */

	for (i = 0; i < NFS_MAXASYNCDAEMON; i++) {
		struct nfs_iod *iod = &nfs_asyncdaemon[i];

		if (iod->nid_want) {
			/*
			 * Found one, so wake it up and tell it which
			 * mount to process.
			 */
			iod->nid_want = NULL;
			iod->nid_mount = nmp;
			nmp->nm_bufqiods++;
			wakeup((caddr_t)&iod->nid_want);
			gotiod = TRUE;
			break;
		}
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */

	if (!gotiod && nmp->nm_bufqiods > 0)
		gotiod = TRUE;

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */

	if (gotiod) {

		/*
		 * Ensure that the queue never grows too large.
		 */

		while (nmp->nm_bufqlen >= 2*nfs_numasync) {
			nmp->nm_bufqwant = TRUE;
			error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO,
				"nfsaio", slptimeo);
			if (error) {
				if (nfs_sigintr(nmp, NULL, curproc))
					return (EINTR);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}

			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */

			if (nmp->nm_bufqiods == 0)
				goto again;
		}
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		return (0);
	}

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */

	return (EIO);
}

/*
 * nfs_doio for read.
 */
static int
nfs_doio_read(bp, uiop)
	struct buf *bp;
	struct uio *uiop;
{
	struct vnode *vp = bp->b_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0;

	uiop->uio_rw = UIO_READ;
	switch (vp->v_type) {
	case VREG:
		nfsstats.read_bios++;
		error = nfs_readrpc(vp, uiop);
		if (!error && uiop->uio_resid) {
			int diff, len;

			/*
			 * If len > 0, there is a hole in the file and
			 * no writes after the hole have been pushed to
			 * the server yet.
			 * Just zero fill the rest of the valid area.
			 */

			diff = bp->b_bcount - uiop->uio_resid;
			len = np->n_size - ((((off_t)bp->b_blkno) << DEV_BSHIFT)
				+ diff);
			if (len > 0) {
				len = MIN(len, uiop->uio_resid);
				memset((char *)bp->b_data + diff, 0, len);
			}
		}
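		/*
		 * If the file is a currently executing text image and it
		 * changed on the server while it was busy, the cached
		 * text no longer matches what the process is running,
		 * so kill the process.
		 */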
		if (uiop->uio_procp && (vp->v_flag & VTEXT) &&
			(((nmp->nm_flag & NFSMNT_NQNFS) &&
			  NQNFS_CKINVALID(vp, np, ND_READ) &&
			  np->n_lrev != np->n_brev) ||
			 (!(nmp->nm_flag & NFSMNT_NQNFS) &&
			  np->n_mtime != np->n_vattr->va_mtime.tv_sec))) {
			uprintf("Process killed due to "
				"text file modification\n");
			psignal(uiop->uio_procp, SIGKILL);
#if 0 /* XXX NJWLWP */
			uiop->uio_procp->p_holdcnt++;
#endif
		}
		break;
	case VLNK:
		KASSERT(uiop->uio_offset == (off_t)0);
		nfsstats.readlink_bios++;
		error = nfs_readlinkrpc(vp, uiop, curproc->p_ucred);
		break;
	case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = bp->b_dcookie;
		if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
			error = nfs_readdirplusrpc(vp, uiop, curproc->p_ucred);
			if (error == NFSERR_NOTSUPP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = nfs_readdirrpc(vp, uiop, curproc->p_ucred);
		if (!error) {
			bp->b_dcookie = uiop->uio_offset;
		}
		break;
	default:
		printf("nfs_doio:  type %x unexpected\n", vp->v_type);
		break;
	}
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	}
	return error;
}

/*
 * nfs_doio for write.
 */
static int
nfs_doio_write(bp, uiop)
	struct buf *bp;
	struct uio *uiop;
{
	struct vnode *vp = bp->b_vp;
	struct nfsnode *np = VTONFS(vp);
	int iomode;
	int stalewriteverf = 0;
	int i, npages = (bp->b_bcount + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct vm_page *pgs[npages];
	boolean_t needcommit = TRUE;
	struct uvm_object *uobj = &vp->v_uobj;
	int error;
	off_t off, cnt;

	if ((bp->b_flags & B_ASYNC) != 0 && NFS_ISV3(vp)) {
		iomode = NFSV3WRITE_UNSTABLE;
	} else {
		iomode = NFSV3WRITE_FILESYNC;
	}

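	/*
	 * Check the pages backing this buffer.  If every page is still
	 * marked PG_NEEDCOMMIT, the data has already reached the server
	 * via an earlier unstable write and only a commit RPC is needed.
	 * Pages being released or paged out force a stable FILESYNC write.
	 */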
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data +
				       (i << PAGE_SHIFT));
		KASSERT((pgs[i]->flags & PG_BUSY) ||
		    pgs[i]->uobject != uobj);
		if ((pgs[i]->flags & PG_NEEDCOMMIT) == 0) {
			needcommit = FALSE;
		}
		if ((pgs[i]->flags & (PG_RELEASED|PG_PAGEOUT)) ||
		    pgs[i]->uobject != uobj) {
			KASSERT(i == 0 || iomode == NFSV3WRITE_FILESYNC);
			iomode = NFSV3WRITE_FILESYNC;
		}
	}
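	/*
	 * If we are about to write the pages unstably, mark them
	 * PG_NEEDCOMMIT and write-protect them so that a later
	 * modification will fault and force the data to be written
	 * (and committed) again.
	 */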
	if (!needcommit && iomode == NFSV3WRITE_UNSTABLE) {
		for (i = 0; i < npages; i++) {
			pgs[i]->flags |= PG_NEEDCOMMIT | PG_RDONLY;
			pmap_page_protect(pgs[i], VM_PROT_READ);
		}
	}

	/*
	 * Send the data to the server if necessary,
	 * otherwise just send a commit rpc.
	 */

	if (needcommit) {

		/*
		 * If the buffer is in the range that we already committed,
		 * there's nothing to do.
		 *
		 * If it's in the range that we need to commit, push the
		 * whole range at once, otherwise only push the buffer.
		 * In both these cases, acquire the commit lock to avoid
		 * other processes modifying the range.
		 */

		off = uiop->uio_offset;
		cnt = bp->b_bcount;
		lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
		if (!nfs_in_committed_range(vp, off, bp->b_bcount)) {
			boolean_t pushedrange;
			if (nfs_in_tobecommitted_range(vp, off, bp->b_bcount)) {
				pushedrange = TRUE;
				off = np->n_pushlo;
				cnt = np->n_pushhi - np->n_pushlo;
			} else {
				pushedrange = FALSE;
			}
			error = nfs_commit(vp, off, cnt, curproc);
			if (error == 0) {
				if (pushedrange) {
					nfs_merge_commit_ranges(vp);
				} else {
					nfs_add_committed_range(vp, off, cnt);
				}
			}
		}
		lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
		if (!error) {
			uiop->uio_resid = 0;
			simple_lock(&uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
			}
			simple_unlock(&uobj->vmobjlock);
			return 0;
		} else if (error == NFSERR_STALEWRITEVERF) {
			nfs_clearcommit(bp->b_vp->v_mount);
		}
	}
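	/*
	 * Send the data.  If the server only acknowledges an unstable
	 * write, remember the range as "to be committed", and once
	 * enough such data accumulates (nfs_commitsize), commit some
	 * of it here instead of letting it pile up.
	 */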
	off = uiop->uio_offset;
	cnt = bp->b_bcount;
	uiop->uio_rw = UIO_WRITE;
	nfsstats.write_bios++;
	error = nfs_writerpc(vp, uiop, &iomode, &stalewriteverf);
	if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
		nfs_add_tobecommitted_range(vp, off, cnt);
		simple_lock(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags &= ~PG_CLEAN;
		}
		simple_unlock(&uobj->vmobjlock);
		if (np->n_pushhi - np->n_pushlo > nfs_commitsize) {
			off = np->n_pushlo;
			cnt = nfs_commitsize >> 1;
			error = nfs_commit(vp, off, cnt, curproc);
			if (!error) {
				nfs_add_committed_range(vp, off, cnt);
				nfs_del_tobecommitted_range(vp, off, cnt);
			}
		}
		lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
	} else if (!error && needcommit) {
		lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
		nfs_del_committed_range(vp, off, cnt);
		lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
		simple_lock(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
		}
		simple_unlock(&uobj->vmobjlock);
	} else {
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = np->n_error = error;
			np->n_flag |= NWRITEERR;
		}
	}
	if (stalewriteverf || (error == NFSERR_STALEWRITEVERF)) {
		nfs_clearcommit(vp->v_mount);
	}
	return error;
}

/*
 * nfs_doio for B_PHYS.
 */
static int
nfs_doio_phys(bp, uiop)
	struct buf *bp;
	struct uio *uiop;
{
	struct vnode *vp = bp->b_vp;
	int error;

	uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
	if (bp->b_flags & B_READ) {
		uiop->uio_rw = UIO_READ;
		nfsstats.read_physios++;
		error = nfs_readrpc(vp, uiop);
	} else {
		int iomode = NFSV3WRITE_DATASYNC;
		int stalewriteverf;

		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_physios++;
		error = nfs_writerpc(vp, uiop, &iomode, &stalewriteverf);
		if (stalewriteverf) {
			nfs_clearcommit(bp->b_vp->v_mount);
		}
	}
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(bp, p)
	struct buf *bp;
	struct proc *p;
{
	int error;
	struct uio uio;
	struct uio *uiop = &uio;
	struct iovec io;
	UVMHIST_FUNC("nfs_doio"); UVMHIST_CALLED(ubchist);

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_procp = p;
	uiop->uio_offset = (((off_t)bp->b_blkno) << DEV_BSHIFT);
	io.iov_base = bp->b_data;
	io.iov_len = uiop->uio_resid = bp->b_bcount;

	/*
	 * Historically, paging was done with physio, but no more...
	 */
	if (bp->b_flags & B_PHYS) {
		/*
		 * ...though reading /dev/drum still gets us here.
		 */
		error = nfs_doio_phys(bp, uiop);
	} else if (bp->b_flags & B_READ) {
		error = nfs_doio_read(bp, uiop);
	} else {
		error = nfs_doio_write(bp, uiop);
	}
	bp->b_resid = uiop->uio_resid;
	biodone(bp);
	return (error);
}

/*
 * Vnode op for VM getpages.
 */

int
nfs_getpages(v)
	void *v;
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct nfsnode *np = VTONFS(vp);
	const int npages = *ap->a_count;
	struct vm_page *pg, **pgs, *opgs[npages];
	off_t origoffset, len;
	int i, error;
	boolean_t v3 = NFS_ISV3(vp);
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	boolean_t locked = (ap->a_flags & PGO_LOCKED) != 0;

	/*
	 * update the cached read creds for this node.
	 */

	if (np->n_rcred) {
		crfree(np->n_rcred);
	}
	np->n_rcred = curproc->p_ucred;
	crhold(np->n_rcred);

	/*
	 * if we have delayed truncation and it's safe, do it now.
	 */

	if (ap->a_flags & PGO_SYNCIO) {
		nfs_delayedtruncate(vp);
	}

	/*
	 * call the genfs code to get the pages.  `pgs' may be NULL
	 * when doing read-ahead.
	 */

	pgs = ap->a_m;
	if (write && locked && v3) {
		KASSERT(pgs != NULL);
#ifdef DEBUG

		/*
		 * If PGO_LOCKED is set, real pages shouldn't exist
		 * in the array.
		 */

		for (i = 0; i < npages; i++)
			KDASSERT(pgs[i] == NULL || pgs[i] == PGO_DONTCARE);
#endif
		memcpy(opgs, pgs, npages * sizeof(struct vm_page *));
	}
	error = genfs_getpages(v);
	if (error) {
		return (error);
	}

	/*
	 * for read faults where the nfs node is not yet marked NMODIFIED,
	 * set PG_RDONLY on the pages so that we come back here if someone
	 * tries to modify them later via the mapping that will be entered
	 * for this fault.
	 */

	if (!write && (np->n_flag & NMODIFIED) == 0 && pgs != NULL) {
		if (!locked) {
			simple_lock(&uobj->vmobjlock);
		}
		for (i = 0; i < npages; i++) {
			pg = pgs[i];
			if (pg == NULL || pg == PGO_DONTCARE) {
				continue;
			}
			pg->flags |= PG_RDONLY;
		}
		if (!locked) {
			simple_unlock(&uobj->vmobjlock);
		}
	}
	if (!write) {
		return (0);
	}

	/*
	 * this is a write fault, update the commit info.
	 */

	origoffset = ap->a_offset;
	len = npages << PAGE_SHIFT;

	if (v3) {
		error = lockmgr(&np->n_commitlock,
		    LK_EXCLUSIVE | (locked ? LK_NOWAIT : 0), NULL);
		if (error) {
			KASSERT(locked != 0);

			/*
			 * Since PGO_LOCKED is set, we need to unbusy all
			 * pages fetched by genfs_getpages() above, tell
			 * the caller that there are no pages available,
			 * and put back the original pgs array.
			 */

			uvm_lock_pageq();
			uvm_page_unbusy(pgs, npages);
			uvm_unlock_pageq();
			*ap->a_count = 0;
			memcpy(pgs, opgs,
			    npages * sizeof(struct vm_page *));
			return (error);
		}
		nfs_del_committed_range(vp, origoffset, len);
		nfs_del_tobecommitted_range(vp, origoffset, len);
	}
	np->n_flag |= NMODIFIED;
	if (!locked) {
		simple_lock(&uobj->vmobjlock);
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		pg->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
	}
	if (!locked) {
		simple_unlock(&uobj->vmobjlock);
	}
	if (v3) {
		lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
	}
	return (0);
}