/*	$NetBSD: nfs_bio.c,v 1.41 1998/02/05 08:00:19 mrg Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/trace.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/dirent.h>

#include <vm/vm.h>

#if defined(UVM)
#include <uvm/uvm_extern.h>
#endif

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nqnfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern struct nfsstats nfsstats;

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(vp, uio, ioflag, cred, cflag)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag, cflag;
	struct ucred *cred;
{
	register struct nfsnode *np = VTONFS(vp);
	register int biosize, diff;
	struct buf *bp = NULL, *rabp;
	struct vattr vattr;
	struct proc *p;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsdircache *ndp = NULL, *nndp = NULL;
	daddr_t lbn, bn, rabn;
	caddr_t baddr, ep, edp;
	int got_buf = 0, nra, error = 0, n = 0, on = 0, not_readin, en, enn;
	int enough = 0;
	struct dirent *dp, *pdp;
	off_t curoff = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (vp->v_type != VDIR && uio->uio_offset < 0)
		return (EINVAL);
	p = uio->uio_procp;
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, p);
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
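	/*
	 * Note that nm_rsize, not nm_wsize, sizes the buffer cache blocks
	 * here, so that the read and write paths agree on the block size;
	 * see the corresponding comment in nfs_write() below.
	 */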
	biosize = nmp->nm_rsize;
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nqnfs, full cache consistency is maintained within the loop.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date.  If you find that you need
	 * current attributes, this can be forced by setting n_attrstamp
	 * to 0 before the VOP_GETATTR() call.
	 */
	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0 && vp->v_type != VLNK) {
		if (np->n_flag & NMODIFIED) {
			if (vp->v_type != VREG) {
				if (vp->v_type != VDIR)
					panic("nfs: bioread, not dir");
				nfs_invaldircache(vp, 0);
				np->n_direofoffset = 0;
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
			}
			np->n_attrstamp = 0;
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
		} else {
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			if (np->n_mtime != vattr.va_mtime.tv_sec) {
				if (vp->v_type == VDIR) {
					nfs_invaldircache(vp, 0);
					np->n_direofoffset = 0;
				}
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_mtime = vattr.va_mtime.tv_sec;
			}
		}
	}
	do {

		/*
		 * Get a valid lease. If cached data is stale, flush it.
		 */
		if (nmp->nm_flag & NFSMNT_NQNFS) {
			if (NQNFS_CKINVALID(vp, np, ND_READ)) {
				do {
					error = nqnfs_getlease(vp, ND_READ, cred, p);
				} while (error == NQNFS_EXPIRED);
				if (error)
					return (error);
				if (np->n_lrev != np->n_brev ||
				    (np->n_flag & NQNFSNONCACHE) ||
				    ((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
					if (vp->v_type == VDIR) {
						nfs_invaldircache(vp, 0);
						np->n_direofoffset = 0;
					}
					error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
					if (error)
						return (error);
					np->n_brev = np->n_lrev;
				}
			} else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) {
				nfs_invaldircache(vp, 0);
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				np->n_direofoffset = 0;
				if (error)
					return (error);
			}
		}
		/*
		 * Don't cache symlinks.
		 */
		if (np->n_flag & NQNFSNONCACHE
		    || ((vp->v_flag & VROOT) && vp->v_type == VLNK)) {
			switch (vp->v_type) {
			case VREG:
				return (nfs_readrpc(vp, uio, cred));
			case VLNK:
				return (nfs_readlinkrpc(vp, uio, cred));
			case VDIR:
				break;
			default:
				printf(" NQNFSNONCACHE: type %x unexpected\n",
				    vp->v_type);
			}
		}
		baddr = (caddr_t)0;
		switch (vp->v_type) {
		case VREG:
			nfsstats.biocache_reads++;
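			/*
			 * Split the offset into a logical block number
			 * (lbn), an offset within that block (on), and the
			 * corresponding buffer cache block number in
			 * DEV_BSIZE units (bn).
			 */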
			lbn = uio->uio_offset / biosize;
			on = uio->uio_offset & (biosize - 1);
			bn = lbn * (biosize / DEV_BSIZE);
			not_readin = 1;

			/*
			 * Start the read ahead(s), as required: if the last
			 * block read was the previous logical block (i.e.
			 * access looks sequential), fire off up to
			 * nm_readahead asynchronous reads for following
			 * blocks that are not already in the cache.
			 */
			if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
			    lbn - 1 == vp->v_lastr) {
				for (nra = 0; nra < nmp->nm_readahead &&
				    (lbn + 1 + nra) * biosize < np->n_size; nra++) {
					rabn = (lbn + 1 + nra) * (biosize / DEV_BSIZE);
					if (!incore(vp, rabn)) {
						rabp = nfs_getcacheblk(vp, rabn, biosize, p);
						if (!rabp)
							return (EINTR);
						if ((rabp->b_flags & (B_DELWRI | B_DONE)) == 0) {
							rabp->b_flags |= (B_READ | B_ASYNC);
							if (nfs_asyncio(rabp, cred)) {
								rabp->b_flags |= B_INVAL;
								brelse(rabp);
							}
						} else
							brelse(rabp);
					}
				}
			}

			/*
			 * If the block is in the cache and has the required data
			 * in a valid region, just copy it out.
			 * Otherwise, get the block and write back/read in,
			 * as required.
			 */
			if ((bp = incore(vp, bn)) &&
			    (bp->b_flags & (B_BUSY | B_WRITEINPROG)) ==
			    (B_BUSY | B_WRITEINPROG))
				got_buf = 0;
			else {
again:
				bp = nfs_getcacheblk(vp, bn, biosize, p);
				if (!bp)
					return (EINTR);
				got_buf = 1;
				if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
					bp->b_flags |= B_READ;
					not_readin = 0;
					error = nfs_doio(bp, cred, p);
					if (error) {
						brelse(bp);
						return (error);
					}
				}
			}
			n = min((unsigned)(biosize - on), uio->uio_resid);
			diff = np->n_size - uio->uio_offset;
			if (diff < n)
				n = diff;
			if (not_readin && n > 0) {
				if (on < bp->b_validoff || (on + n) > bp->b_validend) {
					if (!got_buf) {
						bp = nfs_getcacheblk(vp, bn, biosize, p);
						if (!bp)
							return (EINTR);
						got_buf = 1;
					}
					bp->b_flags |= B_INVAFTERWRITE;
					if (bp->b_dirtyend > 0) {
						if ((bp->b_flags & B_DELWRI) == 0)
							panic("nfsbioread");
						if (VOP_BWRITE(bp) == EINTR)
							return (EINTR);
					} else
						brelse(bp);
					goto again;
				}
			}
			vp->v_lastr = lbn;
			diff = (on >= bp->b_validend) ? 0 : (bp->b_validend - on);
			if (diff < n)
				n = diff;
			break;
		case VLNK:
			nfsstats.biocache_readlinks++;
			bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
			if (!bp)
				return (EINTR);
			if ((bp->b_flags & B_DONE) == 0) {
				bp->b_flags |= B_READ;
				error = nfs_doio(bp, cred, p);
				if (error) {
					brelse(bp);
					return (error);
				}
			}
			n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			got_buf = 1;
			on = 0;
			break;
		case VDIR:
diragain:
			nfsstats.biocache_readdirs++;
			ndp = nfs_searchdircache(vp, uio->uio_offset,
			    (nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
			if (!ndp) {
				/*
				 * We've been handed a cookie that is not
				 * in the cache. If we're not translating
				 * 32 <-> 64, it may be a value that was
				 * flushed out of the cache because it grew
				 * too big. Let the server judge if it's
				 * valid or not. In the translation case,
				 * we have no way of validating this value,
				 * so punt.
				 */
				if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
					return (EINVAL);
				ndp = nfs_enterdircache(vp, uio->uio_offset,
				    uio->uio_offset, 0, 0);
			}

			if (uio->uio_offset != 0 &&
			    ndp->dc_cookie == np->n_direofoffset) {
				nfsstats.direofcache_hits++;
				return (0);
			}

			bp = nfs_getcacheblk(vp, ndp->dc_blkno, NFS_DIRBLKSIZ, p);
			if (!bp)
				return (EINTR);
			if ((bp->b_flags & B_DONE) == 0) {
				bp->b_flags |= B_READ;
				bp->b_dcookie = ndp->dc_blkcookie;
				error = nfs_doio(bp, cred, p);
				if (error) {
					/*
					 * Yuck! The directory has been modified on the
					 * server. Punt and let the userland code
					 * deal with it.
					 */
					brelse(bp);
					if (error == NFSERR_BAD_COOKIE) {
						nfs_invaldircache(vp, 0);
						nfs_vinvalbuf(vp, 0, cred, p, 1);
						error = EINVAL;
					}
					return (error);
				}
			}

			/*
			 * Just return if we hit EOF right away with this
			 * block. Always check here, because direofoffset
			 * may have been set by an nfsiod since the last
			 * check.
			 */
			if (np->n_direofoffset != 0 &&
			    ndp->dc_blkcookie == np->n_direofoffset) {
				brelse(bp);
				return (0);
			}

			/*
			 * Find the entry we were looking for in the block.
			 */

			en = ndp->dc_entry;

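			/*
			 * A directory block is a packed sequence of struct
			 * dirent records; walk it via d_reclen, remembering
			 * the previous entry in pdp so its cookie can be
			 * checked against the cached one below.
			 */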
			pdp = dp = (struct dirent *)bp->b_data;
			edp = bp->b_data + bp->b_validend;
			enn = 0;
			while (enn < en && (caddr_t)dp < edp) {
				pdp = dp;
				dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
				enn++;
			}

			/*
			 * If the entry number was bigger than the number of
			 * entries in the block, or the cookie of the previous
			 * entry doesn't match, the directory cache is
			 * stale. Flush it and try again (i.e. go to
			 * the server).
			 */
			if ((caddr_t)dp >= edp || (caddr_t)dp + dp->d_reclen > edp ||
			    (en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
#ifdef DEBUG
				printf("invalid cache: %p %p %p off %lx %lx\n",
				    pdp, dp, edp,
				    (unsigned long)uio->uio_offset,
				    (unsigned long)NFS_GETCOOKIE(pdp));
#endif
				brelse(bp);
				nfs_invaldircache(vp, 0);
				nfs_vinvalbuf(vp, 0, cred, p, 0);
				goto diragain;
			}

			on = (caddr_t)dp - bp->b_data;

			/*
			 * Cache all entries that may be exported to the
			 * user, as they may be thrown back at us. The
			 * NFSBIO_CACHECOOKIES flag indicates that all
			 * entries are being 'exported', so cache them all.
			 */

			if (en == 0 && pdp == dp) {
				dp = (struct dirent *)
				    ((caddr_t)dp + dp->d_reclen);
				enn++;
			}

			if (uio->uio_resid < (bp->b_validend - on)) {
				n = uio->uio_resid;
				enough = 1;
			} else
				n = bp->b_validend - on;

			ep = bp->b_data + on + n;

			/*
			 * Find last complete entry to copy, caching entries
			 * (if requested) as we go.
			 */

			while ((caddr_t)dp < ep && (caddr_t)dp + dp->d_reclen <= ep) {
				if (cflag & NFSBIO_CACHECOOKIES) {
					nndp = nfs_enterdircache(vp, NFS_GETCOOKIE(pdp),
					    ndp->dc_blkcookie, enn, bp->b_lblkno);
					if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
						NFS_STASHCOOKIE32(pdp,
						    nndp->dc_cookie32);
					}
				}
				pdp = dp;
				dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
				enn++;
			}

			/*
			 * If the last requested entry was not the last in
			 * the buffer (which happens if NFS_DIRFRAGSIZ <
			 * NFS_DIRBLKSIZ), cache the cookie of the last
			 * requested one, and set the offset to it.
			 */

			if ((on + n) < bp->b_validend) {
				curoff = NFS_GETCOOKIE(pdp);
				nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
				    enn, bp->b_lblkno);
				if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
					NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
					curoff = nndp->dc_cookie32;
				}
			} else
				curoff = bp->b_dcookie;

			/*
			 * Always cache the entry for the next block,
			 * so that readaheads can use it.
			 */
			nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie, 0, 0);
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
				if (curoff == bp->b_dcookie) {
					NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
					curoff = nndp->dc_cookie32;
				}
			}

			n = ((caddr_t)pdp + pdp->d_reclen) - (bp->b_data + on);

			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have the
			 * directory offset cookie of the next block.)
			 */
			if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
			    np->n_direofoffset == 0 && !(np->n_flag & NQNFSNONCACHE)) {
				rabp = nfs_getcacheblk(vp, nndp->dc_blkno,
				    NFS_DIRBLKSIZ, p);
				if (rabp) {
					if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
						rabp->b_dcookie = nndp->dc_cookie;
						rabp->b_flags |= (B_READ | B_ASYNC);
						if (nfs_asyncio(rabp, cred)) {
							rabp->b_flags |= B_INVAL;
							brelse(rabp);
						}
					} else
						brelse(rabp);
				}
			}
			got_buf = 1;
			break;
		default:
			printf(" nfsbioread: type %x unexpected\n", vp->v_type);
			break;
		}

		if (n > 0) {
			if (!baddr)
				baddr = bp->b_data;
			error = uiomove(baddr + on, (int)n, uio);
		}
		switch (vp->v_type) {
		case VREG:
			break;
		case VLNK:
			n = 0;
			break;
		case VDIR:
			if (np->n_flag & NQNFSNONCACHE)
				bp->b_flags |= B_INVAL;
			uio->uio_offset = curoff;
			if (enough)
				n = 0;
			break;
		default:
			printf(" nfsbioread: type %x unexpected\n", vp->v_type);
		}
		if (got_buf)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(v)
	void *v;
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	register int biosize;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	register struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	register struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, bn;
	int n, on, error = 0, iomode, must_commit;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, p);
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			np->n_attrstamp = 0;
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
			if (error)
				return (error);
		}
		if (ioflag & IO_APPEND) {
			np->n_attrstamp = 0;
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (p && uio->uio_offset + uio->uio_resid >
	    p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}
	/*
	 * I use nm_rsize, not nm_wsize so that all buffer cache blocks
	 * will be the same size within a filesystem. nfs_writerpc will
	 * still use nm_wsize when sizing the rpc's.
	 */
	biosize = nmp->nm_rsize;
	do {

		/*
		 * XXX make sure we aren't cached in the VM page cache
		 */
#if defined(UVM)
		(void)uvm_vnp_uncache(vp);
#else
		(void)vnode_pager_uncache(vp);
#endif

		/*
		 * Check for a valid write lease.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
			do {
				error = nqnfs_getlease(vp, ND_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error)
				return (error);
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_brev = np->n_lrev;
			}
		}
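		/*
		 * With a non-cachable lease, bypass the buffer cache
		 * entirely and push the data with a synchronous write
		 * RPC (single-iovec case only).
		 */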
		if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) {
			iomode = NFSV3WRITE_FILESYNC;
			error = nfs_writerpc(vp, uio, cred, &iomode, &must_commit);
			if (must_commit)
				nfs_clearcommit(vp->v_mount);
			return (error);
		}
		nfsstats.biocache_writes++;
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
		bn = lbn * (biosize / DEV_BSIZE);
again:
		bp = nfs_getcacheblk(vp, bn, biosize, p);
		if (!bp)
			return (EINTR);
		if (bp->b_wcred == NOCRED) {
			crhold(cred);
			bp->b_wcred = cred;
		}
		np->n_flag |= NMODIFIED;
		if (uio->uio_offset + n > np->n_size) {
			np->n_size = uio->uio_offset + n;
#if defined(UVM)
			uvm_vnp_setsize(vp, np->n_size);
#else
			vnode_pager_setsize(vp, np->n_size);
#endif
		}

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 */
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			bp->b_proc = p;
			if (VOP_BWRITE(bp) == EINTR)
				return (EINTR);
			goto again;
		}

		/*
		 * Check for valid write lease and get one as required.
		 * In case getblk() and/or bwrite() delayed us.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
			do {
				error = nqnfs_getlease(vp, ND_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error) {
				brelse(bp);
				return (error);
			}
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				brelse(bp);
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_brev = np->n_lrev;
				goto again;
			}
		}
		error = uiomove((char *)bp->b_data + on, n, uio);
		if (error) {
			bp->b_flags |= B_ERROR;
			brelse(bp);
			return (error);
		}
		if (bp->b_dirtyend > 0) {
			bp->b_dirtyoff = min(on, bp->b_dirtyoff);
			bp->b_dirtyend = max((on + n), bp->b_dirtyend);
		} else {
			bp->b_dirtyoff = on;
			bp->b_dirtyend = on + n;
		}
		if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
		    bp->b_validoff > bp->b_dirtyend) {
			bp->b_validoff = bp->b_dirtyoff;
			bp->b_validend = bp->b_dirtyend;
		} else {
			bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
			bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
		}

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.
		 */
		bp->b_flags &= ~B_NEEDCOMMIT;

		/*
		 * If the lease is non-cachable or IO_SYNC do bwrite().
		 */
		if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
			bp->b_proc = p;
			error = VOP_BWRITE(bp);
			if (error)
				return (error);
			if (np->n_flag & NQNFSNONCACHE) {
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
			}
		} else if ((n + on) == biosize &&
		    (nmp->nm_flag & NFSMNT_NQNFS) == 0) {
			bp->b_proc = (struct proc *)0;
			bp->b_flags |= B_ASYNC;
			(void)nfs_writebp(bp, 0);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);
	return (0);
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(vp, bn, size, p)
	struct vnode *vp;
	daddr_t bn;
	int size;
	struct proc *p;
{
	register struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

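	/*
	 * On interruptible mounts, sleep in getblk() with PCATCH so a
	 * signal can wake us; if it returns NULL, bail out when a signal
	 * is pending, otherwise retry with a 2 second timeout.
	 */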
	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == (struct buf *)0) {
			if (nfs_sigintr(nmp, (struct nfsreq *)0, p))
				return ((struct buf *)0);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(vp, flags, cred, p, intrflg)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int intrflg;
{
	register struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
		    slptimeo);
		if (error && intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
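	/*
	 * If the interruptible flush was broken out of, retry it without
	 * PCATCH but with a timeout, giving up only when a signal is
	 * actually pending on an interruptible mount.
	 */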
	while (error) {
		if (intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */
int
nfs_asyncio(bp, cred)
	register struct buf *bp;
	struct ucred *cred;
{
	register int i;
	register struct nfsmount *nmp;
	int gotiod, slpflag = 0, slptimeo = 0, error;

	if (nfs_numasync == 0)
		return (EIO);

	nmp = VFSTONFS(bp->b_vp->v_mount);
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */

	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
		if (nfs_iodwant[i]) {
			/*
			 * Found one, so wake it up and tell it which
			 * mount to process.
			 */
			nfs_iodwant[i] = (struct proc *)0;
			nfs_iodmount[i] = nmp;
			nmp->nm_bufqiods++;
			wakeup((caddr_t)&nfs_iodwant[i]);
			gotiod = TRUE;
			break;
		}
	/*
	 * If none are free, we may already have an iod working on this mount
	 * point. If so, it will process our request.
	 */
	if (!gotiod && nmp->nm_bufqiods > 0)
		gotiod = TRUE;

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.
		 */
		while (nmp->nm_bufqlen >= 2 * nfs_numasync) {
			nmp->nm_bufqwant = TRUE;
			error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO,
			    "nfsaio", slptimeo);
			if (error) {
				if (nfs_sigintr(nmp, NULL, bp->b_proc))
					return (EINTR);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			if (nmp->nm_bufqiods == 0)
				goto again;
		}

		if (bp->b_flags & B_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
		} else {
			bp->b_flags |= B_WRITEINPROG;
			if (bp->b_wcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			}
		}

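		/*
		 * Queue the buffer; an nfsiod will take it off nm_bufq
		 * and call nfs_doio() on it.
		 */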
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		return (0);
	}

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	return (EIO);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(bp, cr, p)
	register struct buf *bp;
	struct ucred *cr;
	struct proc *p;
{
	register struct uio *uiop;
	register struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, diff, len, iomode, must_commit = 0;
	struct uio uio;
	struct iovec io;

	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_procp = p;

	/*
	 * Historically, paging was done with physio, but no more...
	 */
	if (bp->b_flags & B_PHYS) {
		/*
		 * ...though reading /dev/drum still gets us here.
		 */
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		/* mapping was done by vmapbuf() */
		io.iov_base = bp->b_data;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		if (bp->b_flags & B_READ) {
			uiop->uio_rw = UIO_READ;
			nfsstats.read_physios++;
			error = nfs_readrpc(vp, uiop, cr);
		} else {
			iomode = NFSV3WRITE_DATASYNC;
			uiop->uio_rw = UIO_WRITE;
			nfsstats.write_physios++;
			error = nfs_writerpc(vp, uiop, cr, &iomode, &must_commit);
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
	} else if (bp->b_flags & B_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;
		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			nfsstats.read_bios++;
			error = nfs_readrpc(vp, uiop, cr);
			if (!error) {
				bp->b_validoff = 0;
				if (uiop->uio_resid) {
					/*
					 * If len > 0, there is a hole in the file and
					 * no writes after the hole have been pushed to
					 * the server yet.
					 * Just zero fill the rest of the valid area.
					 */
					diff = bp->b_bcount - uiop->uio_resid;
					len = np->n_size - (((u_quad_t)bp->b_blkno) * DEV_BSIZE
					    + diff);
					if (len > 0) {
						len = min(len, uiop->uio_resid);
						bzero((char *)bp->b_data + diff, len);
						bp->b_validend = diff + len;
					} else
						bp->b_validend = diff;
				} else
					bp->b_validend = bp->b_bcount;
			}
			if (p && (vp->v_flag & VTEXT) &&
			    (((nmp->nm_flag & NFSMNT_NQNFS) &&
			      NQNFS_CKINVALID(vp, np, ND_READ) &&
			      np->n_lrev != np->n_brev) ||
			     (!(nmp->nm_flag & NFSMNT_NQNFS) &&
			      np->n_mtime != np->n_vattr->va_mtime.tv_sec))) {
				uprintf("Process killed due to text file modification\n");
				psignal(p, SIGKILL);
				p->p_holdcnt++;
			}
			break;
		case VLNK:
			uiop->uio_offset = (off_t)0;
			nfsstats.readlink_bios++;
			error = nfs_readlinkrpc(vp, uiop, cr);
			break;
		case VDIR:
			nfsstats.readdir_bios++;
			uiop->uio_offset = bp->b_dcookie;
			if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
				error = nfs_readdirplusrpc(vp, uiop, cr);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = nfs_readdirrpc(vp, uiop, cr);
			if (!error) {
				bp->b_dcookie = uiop->uio_offset;
				bp->b_validoff = 0;
				bp->b_validend = bp->b_bcount - uiop->uio_resid;
			}
			break;
		default:
			printf("nfs_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
	} else {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend
		    - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE
		    + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_bios++;
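		/*
		 * An async write that doesn't already need a commit and
		 * isn't marked no-cache may go out UNSTABLE; everything
		 * else is pushed FILESYNC.
		 */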
		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE)) == B_ASYNC)
			iomode = NFSV3WRITE_UNSTABLE;
		else
			iomode = NFSV3WRITE_FILESYNC;
		bp->b_flags |= B_WRITEINPROG;
#ifdef fvdl_debug
		printf("nfs_doio(%x): bp %x doff %d dend %d\n",
		    vp, bp, bp->b_dirtyoff, bp->b_dirtyend);
#endif
		error = nfs_writerpc(vp, uiop, cr, &iomode, &must_commit);
		if (!error && iomode == NFSV3WRITE_UNSTABLE)
			bp->b_flags |= B_NEEDCOMMIT;
		else
			bp->b_flags &= ~B_NEEDCOMMIT;
		bp->b_flags &= ~B_WRITEINPROG;

		/*
		 * For an interrupted write, the buffer is still valid and the
		 * write hasn't been pushed to the server yet, so we can't set
		 * B_ERROR; instead we report the interruption by setting
		 * B_EINTR.  For the B_ASYNC case, B_EINTR is not relevant, so
		 * the rpc attempt is essentially a noop.
		 * For the case of a V3 write rpc not being committed to stable
		 * storage, the block is still dirty and requires either a
		 * commit rpc or another write rpc with iomode ==
		 * NFSV3WRITE_FILESYNC before the block is reused.  This is
		 * indicated by setting the B_DELWRI and B_NEEDCOMMIT flags.
		 */
		if (error == EINTR || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			bp->b_flags |= B_DELWRI;

			/*
			 * Since for the B_ASYNC case, nfs_bwrite() has reassigned the
			 * buffer to the clean list, we have to reassign it back to the
			 * dirty one. Ugh.
			 */
			if (bp->b_flags & B_ASYNC)
				reassignbuf(bp, vp);
			else if (error)
				bp->b_flags |= B_EINTR;
		} else {
			if (error) {
				bp->b_flags |= B_ERROR;
				bp->b_error = np->n_error = error;
				np->n_flag |= NWRITEERR;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		nfs_clearcommit(vp->v_mount);
	biodone(bp);
	return (error);
}