1 /*	$NetBSD: nfs_bio.c,v 1.42 1998/02/10 14:10:08 mrg Exp $	*/
2
3 /*
4 * Copyright (c) 1989, 1993
5 * The Regents of the University of California. All rights reserved.
6 *
7 * This code is derived from software contributed to Berkeley by
8 * Rick Macklem at The University of Guelph.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the University of
21 * California, Berkeley and its contributors.
22 * 4. Neither the name of the University nor the names of its contributors
23 * may be used to endorse or promote products derived from this software
24 * without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36 * SUCH DAMAGE.
37 *
38 * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
39 */
40
41 #include "opt_uvm.h"
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/resourcevar.h>
46 #include <sys/signalvar.h>
47 #include <sys/proc.h>
48 #include <sys/buf.h>
49 #include <sys/vnode.h>
50 #include <sys/trace.h>
51 #include <sys/mount.h>
52 #include <sys/kernel.h>
53 #include <sys/namei.h>
54 #include <sys/dirent.h>
55
56 #include <vm/vm.h>
57
58 #if defined(UVM)
59 #include <uvm/uvm_extern.h>
60 #endif
61
62 #include <nfs/rpcv2.h>
63 #include <nfs/nfsproto.h>
64 #include <nfs/nfs.h>
65 #include <nfs/nfsmount.h>
66 #include <nfs/nqnfs.h>
67 #include <nfs/nfsnode.h>
68 #include <nfs/nfs_var.h>
69
70 extern int nfs_numasync;
71 extern struct nfsstats nfsstats;
72
73 /*
74 * Vnode op for read using bio
75 * Any similarity to readip() is purely coincidental
76 */
77 int
78 nfs_bioread(vp, uio, ioflag, cred, cflag)
79 register struct vnode *vp;
80 register struct uio *uio;
81 int ioflag, cflag;
82 struct ucred *cred;
83 {
84 register struct nfsnode *np = VTONFS(vp);
85 register int biosize, diff;
86 struct buf *bp = NULL, *rabp;
87 struct vattr vattr;
88 struct proc *p;
89 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
90 struct nfsdircache *ndp = NULL, *nndp = NULL;
91 daddr_t lbn, bn, rabn;
92 caddr_t baddr, ep, edp;
93 int got_buf = 0, nra, error = 0, n = 0, on = 0, not_readin, en, enn;
94 int enough = 0;
95 struct dirent *dp, *pdp;
96 off_t curoff = 0;
97
98 #ifdef DIAGNOSTIC
99 if (uio->uio_rw != UIO_READ)
100 panic("nfs_read mode");
101 #endif
102 if (uio->uio_resid == 0)
103 return (0);
104 if (vp->v_type != VDIR && uio->uio_offset < 0)
105 return (EINVAL);
106 p = uio->uio_procp;
107 if ((nmp->nm_flag & NFSMNT_NFSV3) &&
108 !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
109 (void)nfs_fsinfo(nmp, vp, cred, p);
110 if (vp->v_type != VDIR &&
111 (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
112 return (EFBIG);
113 biosize = nmp->nm_rsize;
114 /*
115 * For nfs, cache consistency can only be maintained approximately.
116 * Although RFC1094 does not specify the criteria, the following is
117 * believed to be compatible with the reference port.
118 * For nqnfs, full cache consistency is maintained within the loop.
119 * For nfs:
120 * If the file's modify time on the server has changed since the
121 * last read rpc or you have written to the file,
122 * you may have lost data cache consistency with the
123 * server, so flush all of the file's data out of the cache.
124 * Then force a getattr rpc to ensure that you have up to date
125 * attributes.
126 * NB: This implies that cache data can be read when up to
127 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
128 * attributes this could be forced by setting n_attrstamp to 0 before
129 * the VOP_GETATTR() call.
130 */
131 if ((nmp->nm_flag & NFSMNT_NQNFS) == 0 && vp->v_type != VLNK) {
132 if (np->n_flag & NMODIFIED) {
133 if (vp->v_type != VREG) {
134 if (vp->v_type != VDIR)
135 panic("nfs: bioread, not dir");
136 nfs_invaldircache(vp, 0);
137 np->n_direofoffset = 0;
138 error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
139 if (error)
140 return (error);
141 }
142 np->n_attrstamp = 0;
143 error = VOP_GETATTR(vp, &vattr, cred, p);
144 if (error)
145 return (error);
146 np->n_mtime = vattr.va_mtime.tv_sec;
147 } else {
148 error = VOP_GETATTR(vp, &vattr, cred, p);
149 if (error)
150 return (error);
151 if (np->n_mtime != vattr.va_mtime.tv_sec) {
152 if (vp->v_type == VDIR) {
153 nfs_invaldircache(vp, 0);
154 np->n_direofoffset = 0;
155 }
156 error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
157 if (error)
158 return (error);
159 np->n_mtime = vattr.va_mtime.tv_sec;
160 }
161 }
162 }
163 do {
164
165 /*
166 * Get a valid lease. If cached data is stale, flush it.
167 */
168 if (nmp->nm_flag & NFSMNT_NQNFS) {
169 if (NQNFS_CKINVALID(vp, np, ND_READ)) {
170 do {
171 error = nqnfs_getlease(vp, ND_READ, cred, p);
172 } while (error == NQNFS_EXPIRED);
173 if (error)
174 return (error);
175 if (np->n_lrev != np->n_brev ||
176 (np->n_flag & NQNFSNONCACHE) ||
177 ((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
178 if (vp->v_type == VDIR) {
179 nfs_invaldircache(vp, 0);
180 np->n_direofoffset = 0;
181 }
182 error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
183 if (error)
184 return (error);
185 np->n_brev = np->n_lrev;
186 }
187 } else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) {
188 nfs_invaldircache(vp, 0);
189 error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
190 np->n_direofoffset = 0;
191 if (error)
192 return (error);
193 }
194 }
195 /*
196 	 * Bypass the cache when the lease is marked non-cacheable, or when
	 * this is the root vnode and a symlink.
197 */
198 if (np->n_flag & NQNFSNONCACHE
199 || ((vp->v_flag & VROOT) && vp->v_type == VLNK)) {
200 switch (vp->v_type) {
201 case VREG:
202 return (nfs_readrpc(vp, uio, cred));
203 case VLNK:
204 return (nfs_readlinkrpc(vp, uio, cred));
205 case VDIR:
206 break;
207 default:
208 printf(" NQNFSNONCACHE: type %x unexpected\n",
209 vp->v_type);
210 			}
211 }
212 baddr = (caddr_t)0;
213 switch (vp->v_type) {
214 case VREG:
215 nfsstats.biocache_reads++;
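		/*
		 * lbn is the logical block of the file being read, on is the
		 * offset within that block, and bn is the corresponding
		 * buffer cache block number in DEV_BSIZE units.
		 */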
216 lbn = uio->uio_offset / biosize;
217 on = uio->uio_offset & (biosize - 1);
218 bn = lbn * (biosize / DEV_BSIZE);
219 not_readin = 1;
220
221 /*
222 * Start the read ahead(s), as required.
223 */
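		/*
		 * Only read ahead when the previous block read was the one
		 * just before this request, i.e. access looks sequential.
		 */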
224 if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
225 lbn - 1 == vp->v_lastr) {
226 for (nra = 0; nra < nmp->nm_readahead &&
227 (lbn + 1 + nra) * biosize < np->n_size; nra++) {
228 rabn = (lbn + 1 + nra) * (biosize / DEV_BSIZE);
229 if (!incore(vp, rabn)) {
230 rabp = nfs_getcacheblk(vp, rabn, biosize, p);
231 if (!rabp)
232 return (EINTR);
233 if ((rabp->b_flags & (B_DELWRI | B_DONE)) == 0) {
234 rabp->b_flags |= (B_READ | B_ASYNC);
235 if (nfs_asyncio(rabp, cred)) {
236 rabp->b_flags |= B_INVAL;
237 brelse(rabp);
238 }
239 } else
240 brelse(rabp);
241 }
242 }
243 }
244
245 /*
246 * If the block is in the cache and has the required data
247 * in a valid region, just copy it out.
248 * Otherwise, get the block and write back/read in,
249 * as required.
250 */
251 if ((bp = incore(vp, bn)) &&
252 (bp->b_flags & (B_BUSY | B_WRITEINPROG)) ==
253 (B_BUSY | B_WRITEINPROG))
254 got_buf = 0;
255 else {
256 again:
257 bp = nfs_getcacheblk(vp, bn, biosize, p);
258 if (!bp)
259 return (EINTR);
260 got_buf = 1;
261 if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
262 bp->b_flags |= B_READ;
263 not_readin = 0;
264 error = nfs_doio(bp, cred, p);
265 if (error) {
266 brelse(bp);
267 return (error);
268 }
269 }
270 }
271 n = min((unsigned)(biosize - on), uio->uio_resid);
272 diff = np->n_size - uio->uio_offset;
273 if (diff < n)
274 n = diff;
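		/*
		 * If the block came from the cache (we did not read it in
		 * ourselves), make sure the range we want is actually valid.
		 * If it is not, push any dirty data back to the server and
		 * refetch the whole block.
		 */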
275 if (not_readin && n > 0) {
276 if (on < bp->b_validoff || (on + n) > bp->b_validend) {
277 if (!got_buf) {
278 bp = nfs_getcacheblk(vp, bn, biosize, p);
279 if (!bp)
280 return (EINTR);
281 got_buf = 1;
282 }
283 bp->b_flags |= B_INVAFTERWRITE;
284 if (bp->b_dirtyend > 0) {
285 if ((bp->b_flags & B_DELWRI) == 0)
286 panic("nfsbioread");
287 if (VOP_BWRITE(bp) == EINTR)
288 return (EINTR);
289 } else
290 brelse(bp);
291 goto again;
292 }
293 }
294 vp->v_lastr = lbn;
295 diff = (on >= bp->b_validend) ? 0 : (bp->b_validend - on);
296 if (diff < n)
297 n = diff;
298 break;
299 case VLNK:
300 nfsstats.biocache_readlinks++;
301 bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
302 if (!bp)
303 return (EINTR);
304 if ((bp->b_flags & B_DONE) == 0) {
305 bp->b_flags |= B_READ;
306 error = nfs_doio(bp, cred, p);
307 if (error) {
308 brelse(bp);
309 return (error);
310 }
311 }
312 n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
313 got_buf = 1;
314 on = 0;
315 break;
316 case VDIR:
317 diragain:
318 nfsstats.biocache_readdirs++;
319 ndp = nfs_searchdircache(vp, uio->uio_offset,
320 (nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
321 if (!ndp) {
322 /*
323 * We've been handed a cookie that is not
324 * in the cache. If we're not translating
325 * 32 <-> 64, it may be a value that was
326 * flushed out of the cache because it grew
327 * too big. Let the server judge if it's
328 * valid or not. In the translation case,
329 * we have no way of validating this value,
330 * so punt.
331 */
332 if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
333 return (EINVAL);
334 ndp = nfs_enterdircache(vp, uio->uio_offset,
335 uio->uio_offset, 0, 0);
336 }
337
338 if (uio->uio_offset != 0 &&
339 ndp->dc_cookie == np->n_direofoffset) {
340 nfsstats.direofcache_hits++;
341 return (0);
342 }
343
344 bp = nfs_getcacheblk(vp, ndp->dc_blkno, NFS_DIRBLKSIZ, p);
345 if (!bp)
346 return (EINTR);
347 if ((bp->b_flags & B_DONE) == 0) {
348 bp->b_flags |= B_READ;
349 bp->b_dcookie = ndp->dc_blkcookie;
350 error = nfs_doio(bp, cred, p);
351 if (error) {
352 /*
353 * Yuck! The directory has been modified on the
354 * server. Punt and let the userland code
355 * deal with it.
356 */
357 brelse(bp);
358 if (error == NFSERR_BAD_COOKIE) {
359 nfs_invaldircache(vp, 0);
360 nfs_vinvalbuf(vp, 0, cred, p, 1);
361 error = EINVAL;
362 }
363 return (error);
364 }
365 }
366
367 /*
368 * Just return if we hit EOF right away with this
369 * block. Always check here, because direofoffset
370 * may have been set by an nfsiod since the last
371 * check.
372 */
373 if (np->n_direofoffset != 0 &&
374 ndp->dc_blkcookie == np->n_direofoffset) {
375 brelse(bp);
376 return (0);
377 }
378
379 /*
380 * Find the entry we were looking for in the block.
381 */
382
383 en = ndp->dc_entry;
384
385 pdp = dp = (struct dirent *)bp->b_data;
386 edp = bp->b_data + bp->b_validend;
387 enn = 0;
388 while (enn < en && (caddr_t)dp < edp) {
389 pdp = dp;
390 dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
391 enn++;
392 }
393
394 /*
395 * If the entry number was bigger than the number of
396 * entries in the block, or the cookie of the previous
397 * entry doesn't match, the directory cache is
398 * stale. Flush it and try again (i.e. go to
399 * the server).
400 */
401 if ((caddr_t)dp >= edp || (caddr_t)dp + dp->d_reclen > edp ||
402 (en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
403 #ifdef DEBUG
404 printf("invalid cache: %p %p %p off %lx %lx\n",
405 pdp, dp, edp,
406 (unsigned long)uio->uio_offset,
407 (unsigned long)NFS_GETCOOKIE(pdp));
408 #endif
409 brelse(bp);
410 nfs_invaldircache(vp, 0);
411 nfs_vinvalbuf(vp, 0, cred, p, 0);
412 goto diragain;
413 }
414
415 on = (caddr_t)dp - bp->b_data;
416
417 /*
418 * Cache all entries that may be exported to the
419 * user, as they may be thrown back at us. The
420 * NFSBIO_CACHECOOKIES flag indicates that all
421 * entries are being 'exported', so cache them all.
422 */
423
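		/*
		 * The cookie that leads to the first entry of this block is
		 * the block cookie itself, which is already cached (ndp).
		 * So when starting at entry 0, step dp ahead one entry; in
		 * the caching loop below, pdp is always the entry whose
		 * cookie leads to dp.
		 */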
424 if (en == 0 && pdp == dp) {
425 dp = (struct dirent *)
426 ((caddr_t)dp + dp->d_reclen);
427 enn++;
428 }
429
430 if (uio->uio_resid < (bp->b_validend - on)) {
431 n = uio->uio_resid;
432 enough = 1;
433 } else
434 n = bp->b_validend - on;
435
436 ep = bp->b_data + on + n;
437
438 /*
439 * Find last complete entry to copy, caching entries
440 * (if requested) as we go.
441 */
442
443 while ((caddr_t)dp < ep && (caddr_t)dp + dp->d_reclen <= ep) {
444 if (cflag & NFSBIO_CACHECOOKIES) {
445 nndp = nfs_enterdircache(vp, NFS_GETCOOKIE(pdp),
446 ndp->dc_blkcookie, enn, bp->b_lblkno);
447 if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
448 NFS_STASHCOOKIE32(pdp,
449 nndp->dc_cookie32);
450 }
451 }
452 pdp = dp;
453 dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
454 enn++;
455 }
456
457 /*
458 * If the last requested entry was not the last in the
459 * buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
460 * cache the cookie of the last requested one, and
461 		 * set the offset to it.
462 */
463
464 if ((on + n) < bp->b_validend) {
465 curoff = NFS_GETCOOKIE(pdp);
466 nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
467 enn, bp->b_lblkno);
468 if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
469 NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
470 curoff = nndp->dc_cookie32;
471 }
472 } else
473 curoff = bp->b_dcookie;
474
475 /*
476 * Always cache the entry for the next block,
477 * so that readaheads can use it.
478 */
479 nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie, 0,0);
480 if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
481 if (curoff == bp->b_dcookie) {
482 NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
483 curoff = nndp->dc_cookie32;
484 }
485 }
486
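		/*
		 * Copy only up to the end of the last complete entry scanned
		 * above; pdp is the last entry that fits entirely within the
		 * requested range.
		 */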
487 n = ((caddr_t)pdp + pdp->d_reclen) - (bp->b_data + on);
488
489 /*
490 * If not eof and read aheads are enabled, start one.
491 * (You need the current block first, so that you have the
492 * directory offset cookie of the next block.)
493 */
494 if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
495 np->n_direofoffset == 0 && !(np->n_flag & NQNFSNONCACHE)) {
496 rabp = nfs_getcacheblk(vp, nndp->dc_blkno,
497 NFS_DIRBLKSIZ, p);
498 if (rabp) {
499 if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
500 rabp->b_dcookie = nndp->dc_cookie;
501 rabp->b_flags |= (B_READ | B_ASYNC);
502 if (nfs_asyncio(rabp, cred)) {
503 rabp->b_flags |= B_INVAL;
504 brelse(rabp);
505 }
506 } else
507 brelse(rabp);
508 }
509 }
510 got_buf = 1;
511 break;
512 default:
513 printf(" nfsbioread: type %x unexpected\n",vp->v_type);
514 break;
515 		}
516
517 if (n > 0) {
518 if (!baddr)
519 baddr = bp->b_data;
520 error = uiomove(baddr + on, (int)n, uio);
521 }
522 switch (vp->v_type) {
523 case VREG:
524 break;
525 case VLNK:
526 n = 0;
527 break;
528 case VDIR:
529 if (np->n_flag & NQNFSNONCACHE)
530 bp->b_flags |= B_INVAL;
531 uio->uio_offset = curoff;
532 if (enough)
533 n = 0;
534 break;
535 default:
536 printf(" nfsbioread: type %x unexpected\n",vp->v_type);
537 }
538 if (got_buf)
539 brelse(bp);
540 } while (error == 0 && uio->uio_resid > 0 && n > 0);
541 return (error);
542 }
543
544 /*
545 * Vnode op for write using bio
546 */
547 int
548 nfs_write(v)
549 void *v;
550 {
551 struct vop_write_args /* {
552 struct vnode *a_vp;
553 struct uio *a_uio;
554 int a_ioflag;
555 struct ucred *a_cred;
556 } */ *ap = v;
557 register int biosize;
558 register struct uio *uio = ap->a_uio;
559 struct proc *p = uio->uio_procp;
560 register struct vnode *vp = ap->a_vp;
561 struct nfsnode *np = VTONFS(vp);
562 register struct ucred *cred = ap->a_cred;
563 int ioflag = ap->a_ioflag;
564 struct buf *bp;
565 struct vattr vattr;
566 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
567 daddr_t lbn, bn;
568 int n, on, error = 0, iomode, must_commit;
569
570 #ifdef DIAGNOSTIC
571 if (uio->uio_rw != UIO_WRITE)
572 panic("nfs_write mode");
573 if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
574 panic("nfs_write proc");
575 #endif
576 if (vp->v_type != VREG)
577 return (EIO);
578 if (np->n_flag & NWRITEERR) {
579 np->n_flag &= ~NWRITEERR;
580 return (np->n_error);
581 }
582 if ((nmp->nm_flag & NFSMNT_NFSV3) &&
583 !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
584 (void)nfs_fsinfo(nmp, vp, cred, p);
585 if (ioflag & (IO_APPEND | IO_SYNC)) {
586 if (np->n_flag & NMODIFIED) {
587 np->n_attrstamp = 0;
588 error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
589 if (error)
590 return (error);
591 }
592 if (ioflag & IO_APPEND) {
593 np->n_attrstamp = 0;
594 error = VOP_GETATTR(vp, &vattr, cred, p);
595 if (error)
596 return (error);
597 uio->uio_offset = np->n_size;
598 }
599 }
600 if (uio->uio_offset < 0)
601 return (EINVAL);
602 if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
603 return (EFBIG);
604 if (uio->uio_resid == 0)
605 return (0);
606 /*
607 * Maybe this should be above the vnode op call, but so long as
608 	 * file servers have no limits, I don't think it matters.
609 */
610 if (p && uio->uio_offset + uio->uio_resid >
611 p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
612 psignal(p, SIGXFSZ);
613 return (EFBIG);
614 }
615 /*
616 * I use nm_rsize, not nm_wsize so that all buffer cache blocks
617 * will be the same size within a filesystem. nfs_writerpc will
618 * still use nm_wsize when sizing the rpc's.
619 */
620 biosize = nmp->nm_rsize;
621 do {
622
623 /*
624 * XXX make sure we aren't cached in the VM page cache
625 */
626 #if defined(UVM)
627 (void)uvm_vnp_uncache(vp);
628 #else
629 (void)vnode_pager_uncache(vp);
630 #endif
631
632 /*
633 * Check for a valid write lease.
634 */
635 if ((nmp->nm_flag & NFSMNT_NQNFS) &&
636 NQNFS_CKINVALID(vp, np, ND_WRITE)) {
637 do {
638 error = nqnfs_getlease(vp, ND_WRITE, cred, p);
639 } while (error == NQNFS_EXPIRED);
640 if (error)
641 return (error);
642 if (np->n_lrev != np->n_brev ||
643 (np->n_flag & NQNFSNONCACHE)) {
644 error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
645 if (error)
646 return (error);
647 np->n_brev = np->n_lrev;
648 }
649 }
650 if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) {
651 iomode = NFSV3WRITE_FILESYNC;
652 error = nfs_writerpc(vp, uio, cred, &iomode, &must_commit);
653 if (must_commit)
654 nfs_clearcommit(vp->v_mount);
655 return (error);
656 }
657 nfsstats.biocache_writes++;
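		/*
		 * As in nfs_bioread(), lbn is the logical block being
		 * written, on the offset within it, and bn the buffer cache
		 * block number in DEV_BSIZE units.
		 */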
658 lbn = uio->uio_offset / biosize;
659 on = uio->uio_offset & (biosize-1);
660 n = min((unsigned)(biosize - on), uio->uio_resid);
661 bn = lbn * (biosize / DEV_BSIZE);
662 again:
663 bp = nfs_getcacheblk(vp, bn, biosize, p);
664 if (!bp)
665 return (EINTR);
666 if (bp->b_wcred == NOCRED) {
667 crhold(cred);
668 bp->b_wcred = cred;
669 }
670 np->n_flag |= NMODIFIED;
671 if (uio->uio_offset + n > np->n_size) {
672 np->n_size = uio->uio_offset + n;
673 #if defined(UVM)
674 uvm_vnp_setsize(vp, np->n_size);
675 #else
676 vnode_pager_setsize(vp, np->n_size);
677 #endif
678 }
679
680 /*
681 * If the new write will leave a contiguous dirty
682 * area, just update the b_dirtyoff and b_dirtyend,
683 * otherwise force a write rpc of the old dirty area.
684 */
685 if (bp->b_dirtyend > 0 &&
686 (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
687 bp->b_proc = p;
688 if (VOP_BWRITE(bp) == EINTR)
689 return (EINTR);
690 goto again;
691 }
692
693 /*
694 * Check for valid write lease and get one as required.
695 * In case getblk() and/or bwrite() delayed us.
696 */
697 if ((nmp->nm_flag & NFSMNT_NQNFS) &&
698 NQNFS_CKINVALID(vp, np, ND_WRITE)) {
699 do {
700 error = nqnfs_getlease(vp, ND_WRITE, cred, p);
701 } while (error == NQNFS_EXPIRED);
702 if (error) {
703 brelse(bp);
704 return (error);
705 }
706 if (np->n_lrev != np->n_brev ||
707 (np->n_flag & NQNFSNONCACHE)) {
708 brelse(bp);
709 error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
710 if (error)
711 return (error);
712 np->n_brev = np->n_lrev;
713 goto again;
714 }
715 }
716 error = uiomove((char *)bp->b_data + on, n, uio);
717 if (error) {
718 bp->b_flags |= B_ERROR;
719 brelse(bp);
720 return (error);
721 }
722 if (bp->b_dirtyend > 0) {
723 bp->b_dirtyoff = min(on, bp->b_dirtyoff);
724 bp->b_dirtyend = max((on + n), bp->b_dirtyend);
725 } else {
726 bp->b_dirtyoff = on;
727 bp->b_dirtyend = on + n;
728 }
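		/*
		 * Update the valid range: if the old valid region neither
		 * overlaps nor abuts the new dirty region, the valid range
		 * becomes just the dirty range; otherwise merge the two.
		 */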
729 if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
730 bp->b_validoff > bp->b_dirtyend) {
731 bp->b_validoff = bp->b_dirtyoff;
732 bp->b_validend = bp->b_dirtyend;
733 } else {
734 bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
735 bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
736 }
737
738 /*
739 * Since this block is being modified, it must be written
740 * again and not just committed.
741 */
742 bp->b_flags &= ~B_NEEDCOMMIT;
743
744 /*
745 		 * If the lease is non-cacheable, or IO_SYNC is set, do bwrite().
746 */
747 if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
748 bp->b_proc = p;
749 error = VOP_BWRITE(bp);
750 if (error)
751 return (error);
752 if (np->n_flag & NQNFSNONCACHE) {
753 error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
754 if (error)
755 return (error);
756 }
757 } else if ((n + on) == biosize &&
758 (nmp->nm_flag & NFSMNT_NQNFS) == 0) {
759 bp->b_proc = (struct proc *)0;
760 bp->b_flags |= B_ASYNC;
761 (void)nfs_writebp(bp, 0);
762 } else {
763 bdwrite(bp);
764 }
765 } while (uio->uio_resid > 0 && n > 0);
766 return (0);
767 }
768
769 /*
770 * Get an nfs cache block.
771 * Allocate a new one if the block isn't currently in the cache
772 * and return the block marked busy. If the calling process is
773 * interrupted by a signal for an interruptible mount point, return
774 * NULL.
775 */
776 struct buf *
777 nfs_getcacheblk(vp, bn, size, p)
778 struct vnode *vp;
779 daddr_t bn;
780 int size;
781 struct proc *p;
782 {
783 register struct buf *bp;
784 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
785
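	/*
	 * For interruptible mounts, catch signals while waiting for the
	 * block; if a signal arrives, give up and return NULL, otherwise
	 * keep retrying with a bounded sleep.
	 */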
786 if (nmp->nm_flag & NFSMNT_INT) {
787 bp = getblk(vp, bn, size, PCATCH, 0);
788 while (bp == (struct buf *)0) {
789 if (nfs_sigintr(nmp, (struct nfsreq *)0, p))
790 return ((struct buf *)0);
791 bp = getblk(vp, bn, size, 0, 2 * hz);
792 }
793 } else
794 bp = getblk(vp, bn, size, 0, 0);
795 return (bp);
796 }
797
798 /*
799 * Flush and invalidate all dirty buffers. If another process is already
800 * doing the flush, just wait for completion.
801 */
802 int
803 nfs_vinvalbuf(vp, flags, cred, p, intrflg)
804 struct vnode *vp;
805 int flags;
806 struct ucred *cred;
807 struct proc *p;
808 int intrflg;
809 {
810 register struct nfsnode *np = VTONFS(vp);
811 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
812 int error = 0, slpflag, slptimeo;
813
814 if ((nmp->nm_flag & NFSMNT_INT) == 0)
815 intrflg = 0;
816 if (intrflg) {
817 slpflag = PCATCH;
818 slptimeo = 2 * hz;
819 } else {
820 slpflag = 0;
821 slptimeo = 0;
822 }
823 /*
824 * First wait for any other process doing a flush to complete.
825 */
826 while (np->n_flag & NFLUSHINPROG) {
827 np->n_flag |= NFLUSHWANT;
828 error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
829 slptimeo);
830 if (error && intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p))
831 return (EINTR);
832 }
833
834 /*
835 * Now, flush as required.
836 */
837 np->n_flag |= NFLUSHINPROG;
838 error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
839 while (error) {
840 if (intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
841 np->n_flag &= ~NFLUSHINPROG;
842 if (np->n_flag & NFLUSHWANT) {
843 np->n_flag &= ~NFLUSHWANT;
844 wakeup((caddr_t)&np->n_flag);
845 }
846 return (EINTR);
847 }
848 error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
849 }
850 np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
851 if (np->n_flag & NFLUSHWANT) {
852 np->n_flag &= ~NFLUSHWANT;
853 wakeup((caddr_t)&np->n_flag);
854 }
855 return (0);
856 }
857
858 /*
859 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
860 * This is mainly to avoid queueing async I/O requests when the nfsiods
861 * are all hung on a dead server.
862 */
863 int
864 nfs_asyncio(bp, cred)
865 register struct buf *bp;
866 struct ucred *cred;
867 {
868 register int i;
869 register struct nfsmount *nmp;
870 int gotiod, slpflag = 0, slptimeo = 0, error;
871
872 if (nfs_numasync == 0)
873 return (EIO);
874
875
876 nmp = VFSTONFS(bp->b_vp->v_mount);
877 again:
878 if (nmp->nm_flag & NFSMNT_INT)
879 slpflag = PCATCH;
880 gotiod = FALSE;
881
882 /*
883 * Find a free iod to process this request.
884 */
885
886 for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
887 if (nfs_iodwant[i]) {
888 /*
889 * Found one, so wake it up and tell it which
890 * mount to process.
891 */
892 nfs_iodwant[i] = (struct proc *)0;
893 nfs_iodmount[i] = nmp;
894 nmp->nm_bufqiods++;
895 wakeup((caddr_t)&nfs_iodwant[i]);
896 gotiod = TRUE;
897 break;
898 }
899 /*
900 * If none are free, we may already have an iod working on this mount
901 * point. If so, it will process our request.
902 */
903 if (!gotiod && nmp->nm_bufqiods > 0)
904 gotiod = TRUE;
905
906 /*
907 * If we have an iod which can process the request, then queue
908 * the buffer.
909 */
910 if (gotiod) {
911 /*
912 * Ensure that the queue never grows too large.
913 */
914 while (nmp->nm_bufqlen >= 2*nfs_numasync) {
915 nmp->nm_bufqwant = TRUE;
916 error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO,
917 "nfsaio", slptimeo);
918 if (error) {
919 if (nfs_sigintr(nmp, NULL, bp->b_proc))
920 return (EINTR);
921 if (slpflag == PCATCH) {
922 slpflag = 0;
923 slptimeo = 2 * hz;
924 }
925 }
926 /*
927 * We might have lost our iod while sleeping,
928 			 * so check and loop if necessary.
929 */
930 if (nmp->nm_bufqiods == 0)
931 goto again;
932 }
933
934 if (bp->b_flags & B_READ) {
935 if (bp->b_rcred == NOCRED && cred != NOCRED) {
936 crhold(cred);
937 bp->b_rcred = cred;
938 }
939 } else {
940 bp->b_flags |= B_WRITEINPROG;
941 if (bp->b_wcred == NOCRED && cred != NOCRED) {
942 crhold(cred);
943 bp->b_wcred = cred;
944 }
945 }
946
947 TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
948 nmp->nm_bufqlen++;
949 return (0);
950 }
951
952 /*
953 * All the iods are busy on other mounts, so return EIO to
954 * force the caller to process the i/o synchronously.
955 */
956 return (EIO);
957 }
958
959 /*
960 * Do an I/O operation to/from a cache block. This may be called
961 * synchronously or from an nfsiod.
962 */
963 int
964 nfs_doio(bp, cr, p)
965 register struct buf *bp;
966 struct ucred *cr;
967 struct proc *p;
968 {
969 register struct uio *uiop;
970 register struct vnode *vp;
971 struct nfsnode *np;
972 struct nfsmount *nmp;
973 int error = 0, diff, len, iomode, must_commit = 0;
974 struct uio uio;
975 struct iovec io;
976
977 vp = bp->b_vp;
978 np = VTONFS(vp);
979 nmp = VFSTONFS(vp->v_mount);
980 uiop = &uio;
981 uiop->uio_iov = &io;
982 uiop->uio_iovcnt = 1;
983 uiop->uio_segflg = UIO_SYSSPACE;
984 uiop->uio_procp = p;
985
986 /*
987 * Historically, paging was done with physio, but no more...
988 */
989 if (bp->b_flags & B_PHYS) {
990 /*
991 * ...though reading /dev/drum still gets us here.
992 */
993 io.iov_len = uiop->uio_resid = bp->b_bcount;
994 /* mapping was done by vmapbuf() */
995 io.iov_base = bp->b_data;
996 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
997 if (bp->b_flags & B_READ) {
998 uiop->uio_rw = UIO_READ;
999 nfsstats.read_physios++;
1000 error = nfs_readrpc(vp, uiop, cr);
1001 } else {
1002 iomode = NFSV3WRITE_DATASYNC;
1003 uiop->uio_rw = UIO_WRITE;
1004 nfsstats.write_physios++;
1005 error = nfs_writerpc(vp, uiop, cr, &iomode, &must_commit);
1006 }
1007 if (error) {
1008 bp->b_flags |= B_ERROR;
1009 bp->b_error = error;
1010 }
1011 } else if (bp->b_flags & B_READ) {
1012 io.iov_len = uiop->uio_resid = bp->b_bcount;
1013 io.iov_base = bp->b_data;
1014 uiop->uio_rw = UIO_READ;
1015 switch (vp->v_type) {
1016 case VREG:
1017 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
1018 nfsstats.read_bios++;
1019 error = nfs_readrpc(vp, uiop, cr);
1020 if (!error) {
1021 bp->b_validoff = 0;
1022 if (uiop->uio_resid) {
1023 /*
1024 * If len > 0, there is a hole in the file and
1025 * no writes after the hole have been pushed to
1026 * the server yet.
1027 * Just zero fill the rest of the valid area.
1028 */
1029 diff = bp->b_bcount - uiop->uio_resid;
1030 len = np->n_size - (((u_quad_t)bp->b_blkno) * DEV_BSIZE
1031 + diff);
1032 if (len > 0) {
1033 len = min(len, uiop->uio_resid);
1034 bzero((char *)bp->b_data + diff, len);
1035 bp->b_validend = diff + len;
1036 } else
1037 bp->b_validend = diff;
1038 } else
1039 bp->b_validend = bp->b_bcount;
1040 }
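			/*
			 * If the vnode backs a running executable (VTEXT) and
			 * the file has changed on the server -- the lease
			 * revision or the modify time no longer matches --
			 * the cached text is stale, so kill the process.
			 */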
1041 if (p && (vp->v_flag & VTEXT) &&
1042 (((nmp->nm_flag & NFSMNT_NQNFS) &&
1043 NQNFS_CKINVALID(vp, np, ND_READ) &&
1044 np->n_lrev != np->n_brev) ||
1045 (!(nmp->nm_flag & NFSMNT_NQNFS) &&
1046 np->n_mtime != np->n_vattr->va_mtime.tv_sec))) {
1047 uprintf("Process killed due to text file modification\n");
1048 psignal(p, SIGKILL);
1049 p->p_holdcnt++;
1050 }
1051 break;
1052 case VLNK:
1053 uiop->uio_offset = (off_t)0;
1054 nfsstats.readlink_bios++;
1055 error = nfs_readlinkrpc(vp, uiop, cr);
1056 break;
1057 case VDIR:
1058 nfsstats.readdir_bios++;
1059 uiop->uio_offset = bp->b_dcookie;
1060 if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
1061 error = nfs_readdirplusrpc(vp, uiop, cr);
1062 if (error == NFSERR_NOTSUPP)
1063 nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
1064 }
1065 if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
1066 error = nfs_readdirrpc(vp, uiop, cr);
1067 if (!error) {
1068 bp->b_dcookie = uiop->uio_offset;
1069 bp->b_validoff = 0;
1070 bp->b_validend = bp->b_bcount - uiop->uio_resid;
1071 }
1072 break;
1073 default:
1074 printf("nfs_doio: type %x unexpected\n",vp->v_type);
1075 break;
1076 		}
1077 if (error) {
1078 bp->b_flags |= B_ERROR;
1079 bp->b_error = error;
1080 }
1081 } else {
1082 io.iov_len = uiop->uio_resid = bp->b_dirtyend
1083 - bp->b_dirtyoff;
1084 uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE
1085 + bp->b_dirtyoff;
1086 io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1087 uiop->uio_rw = UIO_WRITE;
1088 nfsstats.write_bios++;
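		/*
		 * Async writes that need neither a commit nor no-cache
		 * treatment may go to the server as unstable writes;
		 * everything else asks for stable (file sync) storage.
		 */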
1089 if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE)) == B_ASYNC)
1090 iomode = NFSV3WRITE_UNSTABLE;
1091 else
1092 iomode = NFSV3WRITE_FILESYNC;
1093 bp->b_flags |= B_WRITEINPROG;
1094 #ifdef fvdl_debug
1095 printf("nfs_doio(%x): bp %x doff %d dend %d\n",
1096 vp, bp, bp->b_dirtyoff, bp->b_dirtyend);
1097 #endif
1098 error = nfs_writerpc(vp, uiop, cr, &iomode, &must_commit);
1099 if (!error && iomode == NFSV3WRITE_UNSTABLE)
1100 bp->b_flags |= B_NEEDCOMMIT;
1101 else
1102 bp->b_flags &= ~B_NEEDCOMMIT;
1103 bp->b_flags &= ~B_WRITEINPROG;
1104
1105 /*
1106 * For an interrupted write, the buffer is still valid and the
1107 * write hasn't been pushed to the server yet, so we can't set
1108 * B_ERROR and report the interruption by setting B_EINTR. For
1109 * the B_ASYNC case, B_EINTR is not relevant, so the rpc attempt
1110 * is essentially a noop.
1111 * For the case of a V3 write rpc not being committed to stable
1112 * storage, the block is still dirty and requires either a commit
1113 * rpc or another write rpc with iomode == NFSV3WRITE_FILESYNC
1114 * before the block is reused. This is indicated by setting the
1115 * B_DELWRI and B_NEEDCOMMIT flags.
1116 */
1117 if (error == EINTR || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
1118 bp->b_flags |= B_DELWRI;
1119
1120 /*
1121 * Since for the B_ASYNC case, nfs_bwrite() has reassigned the
1122 * buffer to the clean list, we have to reassign it back to the
1123 * dirty one. Ugh.
1124 */
1125 if (bp->b_flags & B_ASYNC)
1126 reassignbuf(bp, vp);
1127 else if (error)
1128 bp->b_flags |= B_EINTR;
1129 } else {
1130 if (error) {
1131 bp->b_flags |= B_ERROR;
1132 bp->b_error = np->n_error = error;
1133 np->n_flag |= NWRITEERR;
1134 }
1135 bp->b_dirtyoff = bp->b_dirtyend = 0;
1136 }
1137 }
1138 bp->b_resid = uiop->uio_resid;
1139 if (must_commit)
1140 nfs_clearcommit(vp->v_mount);
1141 biodone(bp);
1142 return (error);
1143 }
1144