/*	$NetBSD: nfs_bio.c,v 1.63.2.6 2001/10/22 20:42:09 nathanw Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include "opt_nfs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/lwp.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nqnfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern struct nfsstats nfsstats;

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(vp, uio, ioflag, cred, cflag)
	struct vnode *vp;
	struct uio *uio;
	int ioflag, cflag;
	struct ucred *cred;
{
	struct nfsnode *np = VTONFS(vp);
	struct buf *bp = NULL, *rabp;
	struct vattr vattr;
	struct proc *p;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsdircache *ndp = NULL, *nndp = NULL;
	caddr_t baddr, ep, edp;
	int got_buf = 0, error = 0, n = 0, on = 0, en, enn;
	int enough = 0;
	struct dirent *dp, *pdp;
	off_t curoff = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (vp->v_type != VDIR && uio->uio_offset < 0)
		return (EINVAL);
	p = uio->uio_procp;
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, p);
#endif
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);

	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nqnfs, full cache consistency is maintained within the loop.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */
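
	/*
	 * Illustrative sketch (comment only, not compiled): forcing
	 * current attributes as the note above describes is exactly
	 * what the NMODIFIED branch below does, i.e.
	 *
	 *	np->n_attrstamp = 0;
	 *	error = VOP_GETATTR(vp, &vattr, cred, p);
	 *
	 * which discards the cached attributes and so turns the
	 * VOP_GETATTR() into a fresh getattr RPC.
	 */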

	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0 && vp->v_type != VLNK) {
		if (np->n_flag & NMODIFIED) {
			if (vp->v_type != VREG) {
				if (vp->v_type != VDIR)
					panic("nfs: bioread, not dir");
				nfs_invaldircache(vp, 0);
				np->n_direofoffset = 0;
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
			}
			np->n_attrstamp = 0;
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
		} else {
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			if (np->n_mtime != vattr.va_mtime.tv_sec) {
				if (vp->v_type == VDIR) {
					nfs_invaldircache(vp, 0);
					np->n_direofoffset = 0;
				}
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_mtime = vattr.va_mtime.tv_sec;
			}
		}
	}

	/*
	 * update the cached read creds for this node.
	 */

	if (np->n_rcred) {
		crfree(np->n_rcred);
	}
	np->n_rcred = cred;
	crhold(cred);

	do {
#ifndef NFS_V2_ONLY
	    /*
	     * Get a valid lease. If cached data is stale, flush it.
	     */
	    if (nmp->nm_flag & NFSMNT_NQNFS) {
		if (NQNFS_CKINVALID(vp, np, ND_READ)) {
		    do {
			error = nqnfs_getlease(vp, ND_READ, cred, p);
		    } while (error == NQNFS_EXPIRED);
		    if (error)
			return (error);
		    if (np->n_lrev != np->n_brev ||
			(np->n_flag & NQNFSNONCACHE) ||
			((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
			if (vp->v_type == VDIR) {
				nfs_invaldircache(vp, 0);
				np->n_direofoffset = 0;
			}
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
			if (error)
			    return (error);
			np->n_brev = np->n_lrev;
		    }
		} else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) {
		    nfs_invaldircache(vp, 0);
		    error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
		    np->n_direofoffset = 0;
		    if (error)
			return (error);
		}
	    }
#endif
	    /*
	     * Don't cache the root symlink, and don't cache anything
	     * at all while NQNFSNONCACHE is set: go straight to the
	     * server.
	     */
	    if (np->n_flag & NQNFSNONCACHE
		|| ((vp->v_flag & VROOT) && vp->v_type == VLNK)) {
		switch (vp->v_type) {
		case VREG:
			return (nfs_readrpc(vp, uio));
		case VLNK:
			return (nfs_readlinkrpc(vp, uio, cred));
		case VDIR:
			break;
		default:
			printf(" NQNFSNONCACHE: type %x unexpected\n",
			    vp->v_type);
		}
	    }
	    baddr = (caddr_t)0;
	    switch (vp->v_type) {
	    case VREG:
		nfsstats.biocache_reads++;

		error = 0;
		if (uio->uio_offset >= np->n_size) {
			break;
		}
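		/*
		 * Read through UBC: map a window over the vnode's pages
		 * at the current offset, copy to the user with uiomove()
		 * and let page faults on the window pull any missing
		 * pages from the server via VOP_GETPAGES().
		 */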
		while (uio->uio_resid > 0) {
			void *win;
			vsize_t bytelen = MIN(np->n_size - uio->uio_offset,
					      uio->uio_resid);

			if (bytelen == 0)
				break;
			win = ubc_alloc(&vp->v_uobj, uio->uio_offset,
					&bytelen, UBC_READ);
			error = uiomove(win, bytelen, uio);
			ubc_release(win, 0);
			if (error) {
				break;
			}
		}
		n = 0;
		break;

	    case VLNK:
		nfsstats.biocache_readlinks++;
		bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
		if (!bp)
			return (EINTR);
		if ((bp->b_flags & B_DONE) == 0) {
			bp->b_flags |= B_READ;
			error = nfs_doio(bp, p);
			if (error) {
				brelse(bp);
				return (error);
			}
		}
		n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
		got_buf = 1;
		on = 0;
		break;
	    case VDIR:
diragain:
		nfsstats.biocache_readdirs++;
		ndp = nfs_searchdircache(vp, uio->uio_offset,
			(nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
		if (!ndp) {
			/*
			 * We've been handed a cookie that is not
			 * in the cache. If we're not translating
			 * 32 <-> 64, it may be a value that was
			 * flushed out of the cache because it grew
			 * too big. Let the server judge if it's
			 * valid or not. In the translation case,
			 * we have no way of validating this value,
			 * so punt.
			 */
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
				return (EINVAL);
			ndp = nfs_enterdircache(vp, uio->uio_offset,
				uio->uio_offset, 0, 0);
		}

		if (uio->uio_offset != 0 &&
		    ndp->dc_cookie == np->n_direofoffset) {
			nfsstats.direofcache_hits++;
			return (0);
		}

		bp = nfs_getcacheblk(vp, ndp->dc_blkno, NFS_DIRBLKSIZ, p);
		if (!bp)
		    return (EINTR);
		if ((bp->b_flags & B_DONE) == 0) {
		    bp->b_flags |= B_READ;
		    bp->b_dcookie = ndp->dc_blkcookie;
		    error = nfs_doio(bp, p);
		    if (error) {
			/*
			 * Yuck! The directory has been modified on the
			 * server. Punt and let the userland code
			 * deal with it.
			 */
			brelse(bp);
			if (error == NFSERR_BAD_COOKIE) {
			    nfs_invaldircache(vp, 0);
			    nfs_vinvalbuf(vp, 0, cred, p, 1);
			    error = EINVAL;
			}
			return (error);
		    }
		}

		/*
		 * Just return if we hit EOF right away with this
		 * block. Always check here, because direofoffset
		 * may have been set by an nfsiod since the last
		 * check.
		 */
		if (np->n_direofoffset != 0 &&
			ndp->dc_blkcookie == np->n_direofoffset) {
			brelse(bp);
			return (0);
		}

		/*
		 * Find the entry we were looking for in the block.
		 */

		en = ndp->dc_entry;

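		/*
		 * Walk forward to entry number "en".  On exit dp points
		 * at entry "enn" (== en if the block really contains
		 * that many entries) and pdp at the entry before it.
		 */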
		pdp = dp = (struct dirent *)bp->b_data;
		edp = bp->b_data + bp->b_bcount - bp->b_resid;
		enn = 0;
		while (enn < en && (caddr_t)dp < edp) {
			pdp = dp;
			dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
			enn++;
		}

		/*
		 * If the entry number was bigger than the number of
		 * entries in the block, or the cookie of the previous
		 * entry doesn't match, the directory cache is
		 * stale. Flush it and try again (i.e. go to
		 * the server).
		 */
		if ((caddr_t)dp >= edp || (caddr_t)dp + dp->d_reclen > edp ||
		    (en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
#ifdef DEBUG
			printf("invalid cache: %p %p %p off %lx %lx\n",
				pdp, dp, edp,
				(unsigned long)uio->uio_offset,
				(unsigned long)NFS_GETCOOKIE(pdp));
#endif
			brelse(bp);
			nfs_invaldircache(vp, 0);
			nfs_vinvalbuf(vp, 0, cred, p, 0);
			goto diragain;
		}

		on = (caddr_t)dp - bp->b_data;

		/*
		 * Cache all entries that may be exported to the
		 * user, as they may be thrown back at us. The
		 * NFSBIO_CACHECOOKIES flag indicates that all
		 * entries are being 'exported', so cache them all.
		 */

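		/*
		 * The cookie stored in an entry addresses the entry that
		 * follows it, so the loop below caches (cookie of pdp,
		 * entry enn) pairs.  When we start at entry 0 there is
		 * no previous entry to take a cookie from (its position
		 * is the block cookie we already cache), so pre-advance
		 * dp and enn to keep that pairing consistent.
		 */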
		if (en == 0 && pdp == dp) {
			dp = (struct dirent *)
			    ((caddr_t)dp + dp->d_reclen);
			enn++;
		}

		if (uio->uio_resid < (bp->b_bcount - bp->b_resid - on)) {
			n = uio->uio_resid;
			enough = 1;
		} else
			n = bp->b_bcount - bp->b_resid - on;

		ep = bp->b_data + on + n;

		/*
		 * Find last complete entry to copy, caching entries
		 * (if requested) as we go.
		 */

		while ((caddr_t)dp < ep && (caddr_t)dp + dp->d_reclen <= ep) {
			if (cflag & NFSBIO_CACHECOOKIES) {
				nndp = nfs_enterdircache(vp, NFS_GETCOOKIE(pdp),
				    ndp->dc_blkcookie, enn, bp->b_lblkno);
				if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
					NFS_STASHCOOKIE32(pdp,
					    nndp->dc_cookie32);
				}
			}
			pdp = dp;
			dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
			enn++;
		}

		/*
		 * If the last requested entry was not the last in the
		 * buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
		 * cache the cookie of the last requested one, and
		 * set the offset to it.
		 */

		if ((on + n) < bp->b_bcount - bp->b_resid) {
			curoff = NFS_GETCOOKIE(pdp);
			nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
			    enn, bp->b_lblkno);
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
				curoff = nndp->dc_cookie32;
			}
		} else
			curoff = bp->b_dcookie;

		/*
		 * Always cache the entry for the next block,
		 * so that readaheads can use it.
		 */
		nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie, 0, 0);
		if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
			if (curoff == bp->b_dcookie) {
				NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
				curoff = nndp->dc_cookie32;
			}
		}

		n = ((caddr_t)pdp + pdp->d_reclen) - (bp->b_data + on);

		/*
		 * If not eof and read aheads are enabled, start one.
		 * (You need the current block first, so that you have the
		 *  directory offset cookie of the next block.)
		 */
		if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
		    np->n_direofoffset == 0 && !(np->n_flag & NQNFSNONCACHE)) {
			rabp = nfs_getcacheblk(vp, nndp->dc_blkno,
						NFS_DIRBLKSIZ, p);
			if (rabp) {
			    if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
				rabp->b_dcookie = nndp->dc_cookie;
				rabp->b_flags |= (B_READ | B_ASYNC);
				if (nfs_asyncio(rabp)) {
				    rabp->b_flags |= B_INVAL;
				    brelse(rabp);
				}
			    } else
				brelse(rabp);
			}
		}
		got_buf = 1;
		break;
	    default:
		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
		break;
	    }

	    if (n > 0) {
		if (!baddr)
			baddr = bp->b_data;
		error = uiomove(baddr + on, (int)n, uio);
	    }
	    switch (vp->v_type) {
	    case VREG:
		break;
	    case VLNK:
		n = 0;
		break;
	    case VDIR:
		if (np->n_flag & NQNFSNONCACHE)
			bp->b_flags |= B_INVAL;
		uio->uio_offset = curoff;
		if (enough)
			n = 0;
		break;
	    default:
		printf(" nfsbioread: type %x unexpected\n", vp->v_type);
	    }
	    if (got_buf)
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(v)
	void *v;
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int  a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	void *win;
	voff_t oldoff, origoff;
	vsize_t bytelen;
	int error = 0, iomode, must_commit;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE &&
	    uio->uio_procp != curproc->l_proc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, p);
#endif
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			np->n_attrstamp = 0;
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
			if (error)
				return (error);
		}
		if (ioflag & IO_APPEND) {
			np->n_attrstamp = 0;
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (p && uio->uio_offset + uio->uio_resid >
	      p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}

	/*
	 * update the cached write creds for this node.
	 */

	if (np->n_wcred) {
		crfree(np->n_wcred);
	}
	np->n_wcred = cred;
	crhold(cred);

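	/*
	 * In NQNFS noncache mode, bypass the page cache entirely and
	 * push the data to the server in one synchronous (FILESYNC)
	 * write RPC.
	 */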
	if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) {
		iomode = NFSV3WRITE_FILESYNC;
		error = nfs_writerpc(vp, uio, &iomode, &must_commit);
		if (must_commit)
			nfs_clearcommit(vp->v_mount);
		return (error);
	}

	origoff = uio->uio_offset;
	do {
		oldoff = uio->uio_offset;
		bytelen = uio->uio_resid;

#ifndef NFS_V2_ONLY
		/*
		 * Check for a valid write lease.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
			do {
				error = nqnfs_getlease(vp, ND_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error)
				return (error);
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_brev = np->n_lrev;
			}
		}
#endif
		nfsstats.biocache_writes++;

		np->n_flag |= NMODIFIED;
		if (np->n_size < uio->uio_offset + bytelen) {
			np->n_size = uio->uio_offset + bytelen;
		}
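		/*
		 * If the write covers whole pages at both ends,
		 * UBC_FAULTBUSY gives us freshly allocated, busied
		 * pages and thereby avoids the read-before-write that
		 * would otherwise be needed to fill pages we are about
		 * to overwrite completely.
		 */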
		if ((uio->uio_offset & PAGE_MASK) == 0 &&
		    ((uio->uio_offset + bytelen) & PAGE_MASK) == 0) {
			win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
			    UBC_WRITE | UBC_FAULTBUSY);
		} else {
			win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
			    UBC_WRITE);
		}
		error = uiomove(win, bytelen, uio);
		ubc_release(win, 0);
		if (error) {
			break;
		}

		/*
		 * update UVM's notion of the size now that we've
		 * copied the data into the vnode's pages.
		 */

		if (vp->v_size < uio->uio_offset) {
			uvm_vnp_setsize(vp, uio->uio_offset);
		}

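		/*
		 * If this iteration carried the offset into a new
		 * nm_wsize-aligned block, push the previous block to
		 * the server with a weak flush (PGO_WEAK defers the
		 * commit; see nfs_gop_write() below), so that dirty
		 * pages don't pile up while a large write proceeds.
		 */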
		if ((oldoff & ~(nmp->nm_wsize - 1)) !=
		    (uio->uio_offset & ~(nmp->nm_wsize - 1))) {
			simple_lock(&vp->v_uobj.vmobjlock);
			error = (vp->v_uobj.pgops->pgo_put)(&vp->v_uobj,
			    trunc_page(oldoff & ~(nmp->nm_wsize - 1)),
			    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
				       ~(nmp->nm_wsize - 1)),
			    PGO_CLEANIT|PGO_WEAK);
		}
	} while (uio->uio_resid > 0);
	if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
		simple_lock(&vp->v_uobj.vmobjlock);
		error = (vp->v_uobj.pgops->pgo_put)(&vp->v_uobj,
		    trunc_page(origoff & ~(nmp->nm_wsize - 1)),
		    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
			       ~(nmp->nm_wsize - 1)),
		    PGO_CLEANIT|PGO_SYNCIO);
	}
	return error;
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(vp, bn, size, p)
	struct vnode *vp;
	daddr_t bn;
	int size;
	struct proc *p;
{
	struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (nmp->nm_flag & NFSMNT_INT) {
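		/*
		 * On an interruptible mount, sleep with PCATCH so that
		 * a signal can break the wait for a busy buffer; when
		 * getblk() gives up, check for the signal ourselves and
		 * otherwise poll again every 2*hz ticks.
		 */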
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, p))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(vp, flags, cred, p, intrflg)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int intrflg;
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
			slptimeo);
		if (error && intrflg && nfs_sigintr(nmp, NULL, p))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
	while (error) {
		if (intrflg && nfs_sigintr(nmp, NULL, p)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */

int
nfs_asyncio(bp)
	struct buf *bp;
{
	int i;
	struct nfsmount *nmp;
	int gotiod, slpflag = 0, slptimeo = 0, error;

	if (nfs_numasync == 0)
		return (EIO);

	nmp = VFSTONFS(bp->b_vp->v_mount);
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */

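	/*
	 * An idle nfsiod sleeps with nfs_iodwant[i] pointing at itself;
	 * claiming the slot (clearing the pointer, recording the mount
	 * in nfs_iodmount) and doing the wakeup hands that iod this
	 * mount's buffer queue.
	 */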
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
		if (nfs_iodwant[i]) {
			/*
			 * Found one, so wake it up and tell it which
			 * mount to process.
			 */
			nfs_iodwant[i] = NULL;
			nfs_iodmount[i] = nmp;
			nmp->nm_bufqiods++;
			wakeup((caddr_t)&nfs_iodwant[i]);
			gotiod = TRUE;
			break;
		}
	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */
	if (!gotiod && nmp->nm_bufqiods > 0)
		gotiod = TRUE;

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large:
		 * allow at most two queued buffers per active nfsiod.
		 */
		while (nmp->nm_bufqlen >= 2*nfs_numasync) {
			nmp->nm_bufqwant = TRUE;
			error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO,
				"nfsaio", slptimeo);
			if (error) {
				if (nfs_sigintr(nmp, NULL, bp->b_proc))
					return (EINTR);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			if (nmp->nm_bufqiods == 0)
				goto again;
		}
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		return (0);
	}

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	return (EIO);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(bp, p)
	struct buf *bp;
	struct proc *p;
{
	struct uio *uiop;
	struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, diff, len, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;

	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_procp = p;

	/*
	 * Historically, paging was done with physio, but no more...
	 */
	if (bp->b_flags & B_PHYS) {
	    /*
	     * ...though reading /dev/drum still gets us here.
	     */
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    /* mapping was done by vmapbuf() */
	    io.iov_base = bp->b_data;
	    uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
	    if (bp->b_flags & B_READ) {
		uiop->uio_rw = UIO_READ;
		nfsstats.read_physios++;
		error = nfs_readrpc(vp, uiop);
	    } else {
		iomode = NFSV3WRITE_DATASYNC;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_physios++;
		error = nfs_writerpc(vp, uiop, &iomode, &must_commit);
	    }
	    if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	    }
	} else if (bp->b_flags & B_READ) {
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    io.iov_base = bp->b_data;
	    uiop->uio_rw = UIO_READ;
	    switch (vp->v_type) {
	    case VREG:
		uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
		nfsstats.read_bios++;
		error = nfs_readrpc(vp, uiop);
		if (!error && uiop->uio_resid) {

			/*
			 * If len > 0 below, the server returned a short
			 * read, i.e. there is a hole in the file and no
			 * writes after the hole have been pushed to
			 * the server yet.
			 * Just zero fill the rest of the valid area.
			 */

			diff = bp->b_bcount - uiop->uio_resid;
			len = np->n_size - ((((off_t)bp->b_blkno) << DEV_BSHIFT)
				+ diff);
			if (len > 0) {
				len = MIN(len, uiop->uio_resid);
				memset((char *)bp->b_data + diff, 0, len);
			}
		}
		if (p && (vp->v_flag & VTEXT) &&
			(((nmp->nm_flag & NFSMNT_NQNFS) &&
			  NQNFS_CKINVALID(vp, np, ND_READ) &&
			  np->n_lrev != np->n_brev) ||
			 (!(nmp->nm_flag & NFSMNT_NQNFS) &&
			  np->n_mtime != np->n_vattr->va_mtime.tv_sec))) {
			uprintf("Process killed due to "
				"text file modification\n");
			psignal(p, SIGKILL);
#if 0 /* XXX NJWLWP */
			p->p_holdcnt++;
#endif
		}
		break;
	    case VLNK:
		uiop->uio_offset = (off_t)0;
		nfsstats.readlink_bios++;
		error = nfs_readlinkrpc(vp, uiop, curproc->l_proc->p_ucred);
		break;
	    case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = bp->b_dcookie;
		if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
			error = nfs_readdirplusrpc(vp, uiop,
			    curproc->l_proc->p_ucred);
			if (error == NFSERR_NOTSUPP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = nfs_readdirrpc(vp, uiop,
			    curproc->l_proc->p_ucred);
		if (!error) {
			bp->b_dcookie = uiop->uio_offset;
		}
		break;
	    default:
		printf("nfs_doio: type %x unexpected\n", vp->v_type);
		break;
	    }
	    if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	    }
	} else {
	    /*
	     * If B_NEEDCOMMIT is set, a commit rpc may do the trick. If not
	     * an actual write will have to be scheduled.
	     */

	    io.iov_base = bp->b_data;
	    io.iov_len = uiop->uio_resid = bp->b_bcount;
	    uiop->uio_offset = (((off_t)bp->b_blkno) << DEV_BSHIFT);
	    uiop->uio_rw = UIO_WRITE;
	    nfsstats.write_bios++;
	    iomode = NFSV3WRITE_UNSTABLE;
	    error = nfs_writerpc(vp, uiop, &iomode, &must_commit);
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		nfs_clearcommit(vp->v_mount);
	biodone(bp);
	return (error);
}

/*
 * Vnode op for VM getpages.
 */

int
nfs_getpages(v)
	void *v;
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct nfsnode *np = VTONFS(vp);
	struct vm_page *pg, **pgs;
	off_t origoffset;
	int i, error, npages;
	boolean_t v3 = NFS_ISV3(vp);
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	UVMHIST_FUNC("nfs_getpages"); UVMHIST_CALLED(ubchist);

	/*
	 * update the cached read creds for this node.
	 */

	if (np->n_rcred) {
		crfree(np->n_rcred);
	}
	np->n_rcred = curproc->l_proc->p_ucred;
	crhold(np->n_rcred);

	/*
	 * call the genfs code to get the pages.
	 */

	npages = *ap->a_count;
	error = genfs_getpages(v);
	if (error || !write || !v3) {
		return error;
	}

	/*
	 * this is a write fault, update the commit info: pages that
	 * are about to be modified can no longer be treated as
	 * committed (or as scheduled for commit), so drop them from
	 * both ranges and clear their commit flags.
	 */

	origoffset = ap->a_offset;
	pgs = ap->a_m;

	lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
	nfs_del_committed_range(vp, origoffset, npages << PAGE_SHIFT);
	nfs_del_tobecommitted_range(vp, origoffset, npages << PAGE_SHIFT);
	simple_lock(&uobj->vmobjlock);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		pg->flags &= ~(PG_NEEDCOMMIT|PG_RDONLY);
	}
	simple_unlock(&uobj->vmobjlock);
	lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
	return 0;
}

int
nfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	struct uvm_object *uobj = &vp->v_uobj;
	struct nfsnode *np = VTONFS(vp);
	off_t origoffset, commitoff;
	uint32_t commitbytes;
	int error, i;
	int bytes;
	boolean_t v3 = NFS_ISV3(vp);
	boolean_t weak = flags & PGO_WEAK;
	UVMHIST_FUNC("nfs_gop_write"); UVMHIST_CALLED(ubchist);

	/* XXX for now, skip the v3 stuff. */
	v3 = FALSE;

	/*
	 * for NFSv2, just write normally.
	 */

	if (!v3) {
		return genfs_gop_write(vp, pgs, npages, flags);
	}

	/*
	 * for NFSv3, use delayed writes and the "commit" operation
	 * to avoid sync writes.
	 */

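	/*
	 * Outline of the v3 scheme: pages are written to the server
	 * UNSTABLE and marked PG_NEEDCOMMIT|PG_RDONLY.  A weak flush
	 * (PGO_WEAK) merely records the range as "to be committed"
	 * later; a strong flush sends the commit RPC right away and
	 * clears the flags.  Ranges already committed need no I/O at
	 * all.
	 */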
	origoffset = pgs[0]->offset;
	bytes = npages << PAGE_SHIFT;
	lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
	if (nfs_in_committed_range(vp, origoffset, bytes)) {
		goto committed;
	}
	if (nfs_in_tobecommitted_range(vp, origoffset, bytes)) {
		if (weak) {
			lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
			return 0;
		} else {
			commitoff = np->n_pushlo;
			commitbytes = (uint32_t)(np->n_pushhi - np->n_pushlo);
			goto commit;
		}
	} else {
		commitoff = origoffset;
		commitbytes = npages << PAGE_SHIFT;
	}
	simple_lock(&uobj->vmobjlock);
	for (i = 0; i < npages; i++) {
		pgs[i]->flags |= PG_NEEDCOMMIT|PG_RDONLY;
		pgs[i]->flags &= ~PG_CLEAN;
	}
	simple_unlock(&uobj->vmobjlock);
	lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
	error = genfs_gop_write(vp, pgs, npages, flags);
	if (error) {
		return error;
	}
	lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
	if (weak) {
		nfs_add_tobecommitted_range(vp, origoffset,
		    npages << PAGE_SHIFT);
	} else {
commit:
		error = nfs_commit(vp, commitoff, commitbytes, curproc->l_proc);
		nfs_del_tobecommitted_range(vp, commitoff, commitbytes);
committed:
		simple_lock(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags &= ~(PG_NEEDCOMMIT|PG_RDONLY);
		}
		simple_unlock(&uobj->vmobjlock);
	}
	lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
	return error;
}