/*	$NetBSD: nfs_bio.c,v 1.46 1999/11/15 18:49:11 fvdl Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/trace.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/dirent.h>

#include <vm/vm.h>

#include <uvm/uvm_extern.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nqnfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern struct nfsstats nfsstats;

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(vp, uio, ioflag, cred, cflag)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag, cflag;
	struct ucred *cred;
{
	register struct nfsnode *np = VTONFS(vp);
	register int biosize, diff;
	struct buf *bp = NULL, *rabp;
	struct vattr vattr;
	struct proc *p;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsdircache *ndp = NULL, *nndp = NULL;
	daddr_t lbn, bn, rabn;
	caddr_t baddr, ep, edp;
	int got_buf = 0, nra, error = 0, n = 0, on = 0, not_readin, en, enn;
	int enough = 0;
	struct dirent *dp, *pdp;
	off_t curoff = 0, offdiff;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (vp->v_type != VDIR && uio->uio_offset < 0)
		return (EINVAL);
	p = uio->uio_procp;
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, p);
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
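	/*
	 * Buffer cache blocks for this file are nm_rsize bytes, so a file
	 * offset maps to a logical block by simple division below. E.g.
	 * (hypothetical numbers) with an 8K rsize, offset 10240 falls in
	 * lbn 1 at in-block offset 2048, i.e. DEV_BSIZE block 16.
	 */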
	biosize = nmp->nm_rsize;
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nqnfs, full cache consistency is maintained within the loop.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */
	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0 && vp->v_type != VLNK) {
		if (np->n_flag & NMODIFIED) {
			if (vp->v_type != VREG) {
				if (vp->v_type != VDIR)
					panic("nfs: bioread, not dir");
				nfs_invaldircache(vp, 0);
				np->n_direofoffset = 0;
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
			}
			np->n_attrstamp = 0;
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			np->n_mtime = vattr.va_mtime.tv_sec;
		} else {
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			if (np->n_mtime != vattr.va_mtime.tv_sec) {
				if (vp->v_type == VDIR) {
					nfs_invaldircache(vp, 0);
					np->n_direofoffset = 0;
				}
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_mtime = vattr.va_mtime.tv_sec;
			}
		}
	}
	do {

		/*
		 * Get a valid lease. If cached data is stale, flush it.
		 */
		if (nmp->nm_flag & NFSMNT_NQNFS) {
			if (NQNFS_CKINVALID(vp, np, ND_READ)) {
				do {
					error = nqnfs_getlease(vp, ND_READ, cred, p);
				} while (error == NQNFS_EXPIRED);
				if (error)
					return (error);
				if (np->n_lrev != np->n_brev ||
				    (np->n_flag & NQNFSNONCACHE) ||
				    ((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
					if (vp->v_type == VDIR) {
						nfs_invaldircache(vp, 0);
						np->n_direofoffset = 0;
					}
					error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
					if (error)
						return (error);
					np->n_brev = np->n_lrev;
				}
			} else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) {
				nfs_invaldircache(vp, 0);
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				np->n_direofoffset = 0;
				if (error)
					return (error);
			}
		}
		/*
		 * Don't cache symlinks.
		 */
		if (np->n_flag & NQNFSNONCACHE
		    || ((vp->v_flag & VROOT) && vp->v_type == VLNK)) {
			switch (vp->v_type) {
			case VREG:
				return (nfs_readrpc(vp, uio, cred));
			case VLNK:
				return (nfs_readlinkrpc(vp, uio, cred));
			case VDIR:
				break;
			default:
				printf(" NQNFSNONCACHE: type %x unexpected\n",
				    vp->v_type);
			}
		}
		baddr = (caddr_t)0;
		switch (vp->v_type) {
		case VREG:
			nfsstats.biocache_reads++;
			lbn = uio->uio_offset / biosize;
			on = uio->uio_offset & (biosize - 1);
			bn = lbn * (biosize / DEV_BSIZE);
			not_readin = 1;
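			/*
			 * not_readin stays set while we work from a block
			 * that was already cached; if the bytes we need
			 * fall outside its valid region, the "again" path
			 * below pushes back any dirty data and re-reads
			 * the block.
			 */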

			/*
			 * Start the read ahead(s), as required.
			 */
			if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
			    lbn - 1 == vp->v_lastr) {
				for (nra = 0; nra < nmp->nm_readahead &&
				    (lbn + 1 + nra) * biosize < np->n_size; nra++) {
					rabn = (lbn + 1 + nra) * (biosize / DEV_BSIZE);
					if (!incore(vp, rabn)) {
						rabp = nfs_getcacheblk(vp, rabn, biosize, p);
						if (!rabp)
							return (EINTR);
						if ((rabp->b_flags & (B_DELWRI | B_DONE)) == 0) {
							rabp->b_flags |= (B_READ | B_ASYNC);
							if (nfs_asyncio(rabp, cred)) {
								rabp->b_flags |= B_INVAL;
								brelse(rabp);
							}
						} else
							brelse(rabp);
					}
				}
			}

			/*
			 * If the block is in the cache and has the required data
			 * in a valid region, just copy it out.
			 * Otherwise, get the block and write back/read in,
			 * as required.
			 */
			if ((bp = incore(vp, bn)) &&
			    (bp->b_flags & (B_BUSY | B_WRITEINPROG)) ==
			    (B_BUSY | B_WRITEINPROG))
				got_buf = 0;
			else {
again:
				bp = nfs_getcacheblk(vp, bn, biosize, p);
				if (!bp)
					return (EINTR);
				got_buf = 1;
				if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
					bp->b_flags |= B_READ;
					not_readin = 0;
					error = nfs_doio(bp, cred, p);
					if (error) {
						brelse(bp);
						return (error);
					}
				}
			}
			n = min((unsigned)(biosize - on), uio->uio_resid);
			offdiff = np->n_size - uio->uio_offset;
			if (offdiff < (off_t)n)
				n = (int)offdiff;
			if (not_readin && n > 0) {
				if (on < bp->b_validoff || (on + n) > bp->b_validend) {
					if (!got_buf) {
						bp = nfs_getcacheblk(vp, bn, biosize, p);
						if (!bp)
							return (EINTR);
						got_buf = 1;
					}
					bp->b_flags |= B_INVAFTERWRITE;
					if (bp->b_dirtyend > 0) {
						if ((bp->b_flags & B_DELWRI) == 0)
							panic("nfsbioread");
						if (VOP_BWRITE(bp) == EINTR)
							return (EINTR);
					} else
						brelse(bp);
					goto again;
				}
			}
			vp->v_lastr = lbn;
			diff = (on >= bp->b_validend) ? 0 : (bp->b_validend - on);
			if (diff < n)
				n = diff;
			break;
		case VLNK:
			nfsstats.biocache_readlinks++;
			bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
			if (!bp)
				return (EINTR);
			if ((bp->b_flags & B_DONE) == 0) {
				bp->b_flags |= B_READ;
				error = nfs_doio(bp, cred, p);
				if (error) {
					brelse(bp);
					return (error);
				}
			}
			n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			got_buf = 1;
			on = 0;
			break;
		case VDIR:
diragain:
			nfsstats.biocache_readdirs++;
			ndp = nfs_searchdircache(vp, uio->uio_offset,
			    (nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
			if (!ndp) {
				/*
				 * We've been handed a cookie that is not
				 * in the cache. If we're not translating
				 * 32 <-> 64, it may be a value that was
				 * flushed out of the cache because it grew
				 * too big. Let the server judge if it's
				 * valid or not. In the translation case,
				 * we have no way of validating this value,
				 * so punt.
				 */
				if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
					return (EINVAL);
				ndp = nfs_enterdircache(vp, uio->uio_offset,
				    uio->uio_offset, 0, 0);
			}

			if (uio->uio_offset != 0 &&
			    ndp->dc_cookie == np->n_direofoffset) {
				nfsstats.direofcache_hits++;
				return (0);
			}

			bp = nfs_getcacheblk(vp, ndp->dc_blkno, NFS_DIRBLKSIZ, p);
			if (!bp)
				return (EINTR);
			if ((bp->b_flags & B_DONE) == 0) {
				bp->b_flags |= B_READ;
				bp->b_dcookie = ndp->dc_blkcookie;
				error = nfs_doio(bp, cred, p);
				if (error) {
					/*
					 * Yuck! The directory has been modified on the
					 * server. Punt and let the userland code
					 * deal with it.
					 */
					brelse(bp);
					if (error == NFSERR_BAD_COOKIE) {
						nfs_invaldircache(vp, 0);
						nfs_vinvalbuf(vp, 0, cred, p, 1);
						error = EINVAL;
					}
					return (error);
				}
			}

			/*
			 * Just return if we hit EOF right away with this
			 * block. Always check here, because direofoffset
			 * may have been set by an nfsiod since the last
			 * check.
			 */
			if (np->n_direofoffset != 0 &&
			    ndp->dc_blkcookie == np->n_direofoffset) {
				brelse(bp);
				return (0);
			}

			/*
			 * Find the entry we were looking for in the block.
			 */

			en = ndp->dc_entry;

			pdp = dp = (struct dirent *)bp->b_data;
			edp = bp->b_data + bp->b_validend;
			enn = 0;
			while (enn < en && (caddr_t)dp < edp) {
				pdp = dp;
				dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
				enn++;
			}

			/*
			 * If the entry number was bigger than the number of
			 * entries in the block, or the cookie of the previous
			 * entry doesn't match, the directory cache is
			 * stale. Flush it and try again (i.e. go to
			 * the server).
			 */
			if ((caddr_t)dp >= edp || (caddr_t)dp + dp->d_reclen > edp ||
			    (en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
#ifdef DEBUG
				printf("invalid cache: %p %p %p off %lx %lx\n",
				    pdp, dp, edp,
				    (unsigned long)uio->uio_offset,
				    (unsigned long)NFS_GETCOOKIE(pdp));
#endif
				brelse(bp);
				nfs_invaldircache(vp, 0);
				nfs_vinvalbuf(vp, 0, cred, p, 0);
				goto diragain;
			}

			on = (caddr_t)dp - bp->b_data;

			/*
			 * Cache all entries that may be exported to the
			 * user, as they may be thrown back at us. The
			 * NFSBIO_CACHECOOKIES flag indicates that all
			 * entries are being 'exported', so cache them all.
			 */

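			/*
			 * Prime the caching scan below: each cookie cached
			 * there is taken from pdp, the entry preceding the
			 * one it names, so when starting at the first entry
			 * of the block, step dp forward once so that pdp
			 * trails it by exactly one entry.
			 */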
			if (en == 0 && pdp == dp) {
				dp = (struct dirent *)
				    ((caddr_t)dp + dp->d_reclen);
				enn++;
			}

			if (uio->uio_resid < (bp->b_validend - on)) {
				n = uio->uio_resid;
				enough = 1;
			} else
				n = bp->b_validend - on;

			ep = bp->b_data + on + n;

			/*
			 * Find last complete entry to copy, caching entries
			 * (if requested) as we go.
			 */

			while ((caddr_t)dp < ep && (caddr_t)dp + dp->d_reclen <= ep) {
				if (cflag & NFSBIO_CACHECOOKIES) {
					nndp = nfs_enterdircache(vp, NFS_GETCOOKIE(pdp),
					    ndp->dc_blkcookie, enn, bp->b_lblkno);
					if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
						NFS_STASHCOOKIE32(pdp,
						    nndp->dc_cookie32);
					}
				}
				pdp = dp;
				dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
				enn++;
			}

			/*
			 * If the last requested entry was not the last in the
			 * buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
			 * cache the cookie of the last requested one, and
			 * set the offset to it.
			 */

			if ((on + n) < bp->b_validend) {
				curoff = NFS_GETCOOKIE(pdp);
				nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
				    enn, bp->b_lblkno);
				if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
					NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
					curoff = nndp->dc_cookie32;
				}
			} else
				curoff = bp->b_dcookie;

			/*
			 * Always cache the entry for the next block,
			 * so that readaheads can use it.
			 */
			nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie, 0, 0);
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
				if (curoff == bp->b_dcookie) {
					NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
					curoff = nndp->dc_cookie32;
				}
			}

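			/*
			 * Trim n so the copy ends after the last complete
			 * entry scanned above; a trailing partial entry is
			 * left for a later call, which finds it via its
			 * cached cookie.
			 */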
			n = ((caddr_t)pdp + pdp->d_reclen) - (bp->b_data + on);

			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have the
			 *  directory offset cookie of the next block.)
			 */
			if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
			    np->n_direofoffset == 0 && !(np->n_flag & NQNFSNONCACHE)) {
				rabp = nfs_getcacheblk(vp, nndp->dc_blkno,
				    NFS_DIRBLKSIZ, p);
				if (rabp) {
					if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
						rabp->b_dcookie = nndp->dc_cookie;
						rabp->b_flags |= (B_READ | B_ASYNC);
						if (nfs_asyncio(rabp, cred)) {
							rabp->b_flags |= B_INVAL;
							brelse(rabp);
						}
					} else
						brelse(rabp);
				}
			}
			got_buf = 1;
			break;
		default:
			printf(" nfsbioread: type %x unexpected\n", vp->v_type);
			break;
		}

		if (n > 0) {
			if (!baddr)
				baddr = bp->b_data;
			error = uiomove(baddr + on, (int)n, uio);
		}
		switch (vp->v_type) {
		case VREG:
			break;
		case VLNK:
			n = 0;
			break;
		case VDIR:
			if (np->n_flag & NQNFSNONCACHE)
				bp->b_flags |= B_INVAL;
			uio->uio_offset = curoff;
			if (enough)
				n = 0;
			break;
		default:
			printf(" nfsbioread: type %x unexpected\n", vp->v_type);
		}
		if (got_buf)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(v)
	void *v;
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap = v;
	register int biosize;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	register struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	register struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, bn;
	int n, on, error = 0, iomode, must_commit;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, p);
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			np->n_attrstamp = 0;
			error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
			if (error)
				return (error);
		}
		if (ioflag & IO_APPEND) {
			np->n_attrstamp = 0;
			error = VOP_GETATTR(vp, &vattr, cred, p);
			if (error)
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters
	 */
	if (p && uio->uio_offset + uio->uio_resid >
	    p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}
	/*
	 * I use nm_rsize, not nm_wsize so that all buffer cache blocks
	 * will be the same size within a filesystem. nfs_writerpc will
	 * still use nm_wsize when sizing the rpc's.
	 */
	biosize = nmp->nm_rsize;
	do {

		/*
		 * XXX make sure we aren't cached in the VM page cache
		 */
		(void)uvm_vnp_uncache(vp);

		/*
		 * Check for a valid write lease.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
			do {
				error = nqnfs_getlease(vp, ND_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error)
				return (error);
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_brev = np->n_lrev;
			}
		}
		if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) {
			iomode = NFSV3WRITE_FILESYNC;
			error = nfs_writerpc(vp, uio, cred, &iomode, &must_commit);
			if (must_commit)
				nfs_clearcommit(vp->v_mount);
			return (error);
		}
		nfsstats.biocache_writes++;
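		/*
		 * Same buffer addressing as in nfs_bioread(): lbn counts
		 * biosize-sized logical blocks, on is the offset within
		 * the block, and bn is the buffer cache block number in
		 * DEV_BSIZE units.
		 */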
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
		bn = lbn * (biosize / DEV_BSIZE);
again:
		bp = nfs_getcacheblk(vp, bn, biosize, p);
		if (!bp)
			return (EINTR);
		if (bp->b_wcred == NOCRED) {
			crhold(cred);
			bp->b_wcred = cred;
		}
		np->n_flag |= NMODIFIED;
		if (uio->uio_offset + n > np->n_size) {
			np->n_size = uio->uio_offset + n;
			uvm_vnp_setsize(vp, np->n_size);
		}

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 */
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			bp->b_proc = p;
			if (VOP_BWRITE(bp) == EINTR)
				return (EINTR);
			goto again;
		}

		/*
		 * Check for valid write lease and get one as required.
		 * In case getblk() and/or bwrite() delayed us.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
			do {
				error = nqnfs_getlease(vp, ND_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error) {
				brelse(bp);
				return (error);
			}
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				brelse(bp);
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
				np->n_brev = np->n_lrev;
				goto again;
			}
		}
		error = uiomove((char *)bp->b_data + on, n, uio);
		if (error) {
			bp->b_flags |= B_ERROR;
			brelse(bp);
			return (error);
		}
		if (bp->b_dirtyend > 0) {
			bp->b_dirtyoff = min(on, bp->b_dirtyoff);
			bp->b_dirtyend = max((on + n), bp->b_dirtyend);
		} else {
			bp->b_dirtyoff = on;
			bp->b_dirtyend = on + n;
		}
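		/*
		 * Extend the valid range to cover the new dirty range. If
		 * the old valid range and the new dirty range do not
		 * overlap or abut, the bytes between them are undefined,
		 * so reset the valid range to the dirty range instead of
		 * merging them.
		 */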
		if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
		    bp->b_validoff > bp->b_dirtyend) {
			bp->b_validoff = bp->b_dirtyoff;
			bp->b_validend = bp->b_dirtyend;
		} else {
			bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
			bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
		}

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.
		 */
		bp->b_flags &= ~B_NEEDCOMMIT;

		/*
		 * If the lease is non-cachable or IO_SYNC do bwrite().
		 */
		if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
			bp->b_proc = p;
			error = VOP_BWRITE(bp);
			if (error)
				return (error);
			if (np->n_flag & NQNFSNONCACHE) {
				error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
				if (error)
					return (error);
			}
		} else if ((n + on) == biosize &&
		    (nmp->nm_flag & NFSMNT_NQNFS) == 0) {
			bp->b_proc = (struct proc *)0;
			bp->b_flags |= B_ASYNC;
			(void)nfs_writebp(bp, 0);
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && n > 0);
	return (0);
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(vp, bn, size, p)
	struct vnode *vp;
	daddr_t bn;
	int size;
	struct proc *p;
{
	register struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
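		/*
		 * With PCATCH set, getblk() returns NULL if a signal
		 * arrives while waiting for the buffer. Give up if the
		 * signal is fatal for this mount; otherwise keep retrying,
		 * polling every two seconds.
		 */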
		while (bp == (struct buf *)0) {
			if (nfs_sigintr(nmp, (struct nfsreq *)0, p))
				return ((struct buf *)0);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(vp, flags, cred, p, intrflg)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int intrflg;
{
	register struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
		    slptimeo);
		if (error && intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
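	/*
	 * If the flush is interrupted by a signal, give up with EINTR and
	 * wake any waiters; otherwise retry without PCATCH, sleeping
	 * slptimeo ticks between attempts, until it succeeds.
	 */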
	while (error) {
		if (intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */
int
nfs_asyncio(bp, cred)
	register struct buf *bp;
	struct ucred *cred;
{
	register int i;
	register struct nfsmount *nmp;
	int gotiod, slpflag = 0, slptimeo = 0, error;

	if (nfs_numasync == 0)
		return (EIO);

	nmp = VFSTONFS(bp->b_vp->v_mount);
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = FALSE;

	/*
	 * Find a free iod to process this request.
	 */

	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
		if (nfs_iodwant[i]) {
			/*
			 * Found one, so wake it up and tell it which
			 * mount to process.
			 */
			nfs_iodwant[i] = (struct proc *)0;
			nfs_iodmount[i] = nmp;
			nmp->nm_bufqiods++;
			wakeup((caddr_t)&nfs_iodwant[i]);
			gotiod = TRUE;
			break;
		}
	/*
	 * If none are free, we may already have an iod working on this mount
	 * point. If so, it will process our request.
	 */
	if (!gotiod && nmp->nm_bufqiods > 0)
		gotiod = TRUE;

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.
	 */
	if (gotiod) {
		/*
		 * Ensure that the queue never grows too large.
		 */
		while (nmp->nm_bufqlen >= 2 * nfs_numasync) {
			nmp->nm_bufqwant = TRUE;
			error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO,
			    "nfsaio", slptimeo);
			if (error) {
				if (nfs_sigintr(nmp, NULL, bp->b_proc))
					return (EINTR);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}
			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */
			if (nmp->nm_bufqiods == 0)
				goto again;
		}

		if (bp->b_flags & B_READ) {
			if (bp->b_rcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_rcred = cred;
			}
		} else {
			bp->b_flags |= B_WRITEINPROG;
			if (bp->b_wcred == NOCRED && cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			}
		}

		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		return (0);
	}

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */
	return (EIO);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(bp, cr, p)
	register struct buf *bp;
	struct ucred *cr;
	struct proc *p;
{
	register struct uio *uiop;
	register struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error = 0, diff, len, iomode, must_commit = 0, s;
	struct uio uio;
	struct iovec io;

	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_procp = p;

	/*
	 * Historically, paging was done with physio, but no more...
	 */
	if (bp->b_flags & B_PHYS) {
		/*
		 * ...though reading /dev/drum still gets us here.
		 */
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		/* mapping was done by vmapbuf() */
		io.iov_base = bp->b_data;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
		if (bp->b_flags & B_READ) {
			uiop->uio_rw = UIO_READ;
			nfsstats.read_physios++;
			error = nfs_readrpc(vp, uiop, cr);
		} else {
			iomode = NFSV3WRITE_DATASYNC;
			uiop->uio_rw = UIO_WRITE;
			nfsstats.write_physios++;
			error = nfs_writerpc(vp, uiop, cr, &iomode, &must_commit);
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
	} else if (bp->b_flags & B_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;
		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
			nfsstats.read_bios++;
			error = nfs_readrpc(vp, uiop, cr);
			if (!error) {
				bp->b_validoff = 0;
				if (uiop->uio_resid) {
					/*
					 * If len > 0, there is a hole in the file and
					 * no writes after the hole have been pushed to
					 * the server yet.
					 * Just zero fill the rest of the valid area.
					 */
					diff = bp->b_bcount - uiop->uio_resid;
					len = np->n_size - (((u_quad_t)bp->b_blkno) * DEV_BSIZE
					    + diff);
					if (len > 0) {
						len = min(len, uiop->uio_resid);
						memset((char *)bp->b_data + diff, 0, len);
						bp->b_validend = diff + len;
					} else
						bp->b_validend = diff;
				} else
					bp->b_validend = bp->b_bcount;
			}
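			/*
			 * If this vnode backs a running executable and the
			 * file changed on the server while in use, the
			 * in-core image no longer matches its backing
			 * store, so kill the process.
			 */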
			if (p && (vp->v_flag & VTEXT) &&
			    (((nmp->nm_flag & NFSMNT_NQNFS) &&
			      NQNFS_CKINVALID(vp, np, ND_READ) &&
			      np->n_lrev != np->n_brev) ||
			     (!(nmp->nm_flag & NFSMNT_NQNFS) &&
			      np->n_mtime != np->n_vattr->va_mtime.tv_sec))) {
				uprintf("Process killed due to text file modification\n");
				psignal(p, SIGKILL);
				p->p_holdcnt++;
			}
			break;
		case VLNK:
			uiop->uio_offset = (off_t)0;
			nfsstats.readlink_bios++;
			error = nfs_readlinkrpc(vp, uiop, cr);
			break;
		case VDIR:
			nfsstats.readdir_bios++;
			uiop->uio_offset = bp->b_dcookie;
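			/*
			 * Prefer READDIRPLUS when enabled; if the server
			 * does not support it, clear the flag and fall
			 * back to a plain READDIR RPC.
			 */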
			if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
				error = nfs_readdirplusrpc(vp, uiop, cr);
				if (error == NFSERR_NOTSUPP)
					nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
			}
			if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
				error = nfs_readdirrpc(vp, uiop, cr);
			if (!error) {
				bp->b_dcookie = uiop->uio_offset;
				bp->b_validoff = 0;
				bp->b_validend = bp->b_bcount - uiop->uio_resid;
			}
			break;
		default:
			printf("nfs_doio: type %x unexpected\n", vp->v_type);
			break;
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
	} else {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend
		    - bp->b_dirtyoff;
		uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE
		    + bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_bios++;
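		/*
		 * A plain async write may go out UNSTABLE and be committed
		 * later; writes that are synchronous, already marked
		 * B_NEEDCOMMIT, or B_NOCACHE go out FILESYNC so that no
		 * commit is needed afterwards.
		 */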
		if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE)) == B_ASYNC)
			iomode = NFSV3WRITE_UNSTABLE;
		else
			iomode = NFSV3WRITE_FILESYNC;
		bp->b_flags |= B_WRITEINPROG;
#ifdef fvdl_debug
		printf("nfs_doio(%x): bp %x doff %d dend %d\n",
		    vp, bp, bp->b_dirtyoff, bp->b_dirtyend);
#endif
		error = nfs_writerpc(vp, uiop, cr, &iomode, &must_commit);
		if (!error && iomode == NFSV3WRITE_UNSTABLE)
			bp->b_flags |= B_NEEDCOMMIT;
		else
			bp->b_flags &= ~B_NEEDCOMMIT;
		bp->b_flags &= ~B_WRITEINPROG;

		/*
		 * For an interrupted write, the buffer is still valid and the
		 * write hasn't been pushed to the server yet, so we can't set
		 * B_ERROR; report the interruption by setting B_EINTR instead.
		 * For the B_ASYNC case, B_EINTR is not relevant, so the rpc
		 * attempt is essentially a noop.
		 * For the case of a V3 write rpc not being committed to stable
		 * storage, the block is still dirty and requires either a
		 * commit rpc or another write rpc with iomode ==
		 * NFSV3WRITE_FILESYNC before the block is reused. This is
		 * indicated by setting the B_DELWRI and B_NEEDCOMMIT flags.
		 */
		if (error == EINTR || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
			bp->b_flags |= B_DELWRI;

			/*
			 * Since for the B_ASYNC case, nfs_bwrite() has reassigned the
			 * buffer to the clean list, we have to reassign it back to the
			 * dirty one. Ugh.
			 */
			if (bp->b_flags & B_ASYNC) {
				s = splbio();
				reassignbuf(bp, vp);
				splx(s);
			} else if (error)
				bp->b_flags |= B_EINTR;
		} else {
			if (error) {
				bp->b_flags |= B_ERROR;
				bp->b_error = np->n_error = error;
				np->n_flag |= NWRITEERR;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	}
	bp->b_resid = uiop->uio_resid;
	if (must_commit)
		nfs_clearcommit(vp->v_mount);
	biodone(bp);
	return (error);
}