/*	$NetBSD: nfs_bio.c,v 1.18 1995/01/10 06:50:03 mycroft Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.5 (Berkeley) 1/4/94
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/trace.h>
#include <sys/mount.h>
#include <sys/kernel.h>

#include <vm/vm.h>

#include <nfs/nfsnode.h>
#include <nfs/rpcv2.h>
#include <nfs/nfsv2.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nqnfs.h>

struct buf *incore(), *nfs_getcacheblk();
extern struct proc *nfs_iodwant[NFS_MAXASYNCDAEMON];
extern int nfs_numasync;

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
nfs_bioread(vp, uio, ioflag, cred)
	register struct vnode *vp;
	register struct uio *uio;
	int ioflag;
	struct ucred *cred;
{
	register struct nfsnode *np = VTONFS(vp);
	register int biosize, diff;
	struct buf *bp, *rabp;
	struct vattr vattr;
	struct proc *p;
	struct nfsmount *nmp;
	daddr_t lbn, bn, rabn;
	caddr_t baddr;
	int got_buf, nra, error = 0, n, on, not_readin;

#ifdef lint
	ioflag = ioflag;
#endif /* lint */
#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0 && vp->v_type != VDIR)
		return (EINVAL);
	nmp = VFSTONFS(vp->v_mount);
	biosize = nmp->nm_rsize;
	p = uio->uio_procp;
	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 * For nqnfs, full cache consistency is maintained within the loop.
	 * For nfs:
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * The mount flag NFSMNT_MYWRITE says "Assume that my writes are
	 * the ones changing the modify time."
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */
	if ((nmp->nm_flag & NFSMNT_NQNFS) == 0 && vp->v_type != VLNK) {
		if (np->n_flag & NMODIFIED) {
			if ((nmp->nm_flag & NFSMNT_MYWRITE) == 0 ||
			    vp->v_type != VREG) {
				if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
					return (error);
			}
			np->n_attrstamp = 0;
			np->n_direofoffset = 0;
			if (error = VOP_GETATTR(vp, &vattr, cred, p))
				return (error);
			np->n_mtime = vattr.va_mtime.ts_sec;
		} else {
			if (error = VOP_GETATTR(vp, &vattr, cred, p))
				return (error);
			if (np->n_mtime != vattr.va_mtime.ts_sec) {
				np->n_direofoffset = 0;
				if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
					return (error);
				np->n_mtime = vattr.va_mtime.ts_sec;
			}
		}
	}
	do {

		/*
		 * Get a valid lease. If cached data is stale, flush it.
		 */
		if (nmp->nm_flag & NFSMNT_NQNFS) {
			if (NQNFS_CKINVALID(vp, np, NQL_READ)) {
				do {
					error = nqnfs_getlease(vp, NQL_READ, cred, p);
				} while (error == NQNFS_EXPIRED);
				if (error)
					return (error);
				if (np->n_lrev != np->n_brev ||
				    (np->n_flag & NQNFSNONCACHE) ||
				    ((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
					if (vp->v_type == VDIR) {
						np->n_direofoffset = 0;
						cache_purge(vp);
					}
					if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
						return (error);
					np->n_brev = np->n_lrev;
				}
			} else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) {
				np->n_direofoffset = 0;
				cache_purge(vp);
				if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
					return (error);
			}
		}
		if (np->n_flag & NQNFSNONCACHE) {
			switch (vp->v_type) {
			case VREG:
				error = nfs_readrpc(vp, uio, cred);
				break;
			case VLNK:
				error = nfs_readlinkrpc(vp, uio, cred);
				break;
			case VDIR:
				error = nfs_readdirrpc(vp, uio, cred);
				break;
			};
			return (error);
		}
		baddr = (caddr_t)0;
		switch (vp->v_type) {
		case VREG:
			nfsstats.biocache_reads++;
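			/*
			 * lbn is the logical (biosize) block that contains
			 * the offset, on is the offset within that block,
			 * and bn is the same block expressed in DEV_BSIZE
			 * units for the buffer cache.
			 */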
			lbn = uio->uio_offset / biosize;
			on = uio->uio_offset & (biosize - 1);
			bn = lbn * (biosize / DEV_BSIZE);
			not_readin = 1;

			/*
			 * Start the read ahead(s), as required.
			 */
			if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
			    lbn == vp->v_lastr + 1) {
				for (nra = 0; nra < nmp->nm_readahead &&
				    (lbn + 1 + nra) * biosize < np->n_size; nra++) {
					rabn = (lbn + 1 + nra) * (biosize / DEV_BSIZE);
					if (!incore(vp, rabn)) {
						rabp = nfs_getcacheblk(vp, rabn, biosize, p);
						if (!rabp)
							return (EINTR);
						if ((rabp->b_flags & (B_DELWRI | B_DONE)) == 0) {
							rabp->b_flags |= (B_READ | B_ASYNC);
							if (nfs_asyncio(rabp, cred)) {
								rabp->b_flags |= B_INVAL;
								brelse(rabp);
							}
						}
					}
				}
			}

			/*
			 * If the block is in the cache and has the required data
			 * in a valid region, just copy it out.
			 * Otherwise, get the block and write back/read in,
			 * as required.
			 */
			if ((bp = incore(vp, bn)) &&
			    (bp->b_flags & (B_BUSY | B_WRITEINPROG)) ==
			    (B_BUSY | B_WRITEINPROG))
				got_buf = 0;
			else {
again:
				bp = nfs_getcacheblk(vp, bn, biosize, p);
				if (!bp)
					return (EINTR);
				got_buf = 1;
				if ((bp->b_flags & (B_DONE | B_DELWRI)) == 0) {
					bp->b_flags |= B_READ;
					not_readin = 0;
					if (error = nfs_doio(bp, cred, p)) {
						brelse(bp);
						return (error);
					}
				}
			}
			n = min((unsigned)(biosize - on), uio->uio_resid);
			diff = np->n_size - uio->uio_offset;
			if (diff < n)
				n = diff;
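			/*
			 * If the buffer was found in the cache without doing
			 * a read and the range we need is not entirely within
			 * its valid region, invalidate it (pushing any dirty
			 * data back to the server first) and fetch it again.
			 */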
			if (not_readin && n > 0) {
				if (on < bp->b_validoff || (on + n) > bp->b_validend) {
					if (!got_buf) {
						bp = nfs_getcacheblk(vp, bn, biosize, p);
						if (!bp)
							return (EINTR);
						got_buf = 1;
					}
					bp->b_flags |= B_INVAL;
					if (bp->b_dirtyend > 0) {
						if ((bp->b_flags & B_DELWRI) == 0)
							panic("nfsbioread");
						if (VOP_BWRITE(bp) == EINTR)
							return (EINTR);
					} else
						brelse(bp);
					goto again;
				}
			}
			vp->v_lastr = lbn;
			diff = (on >= bp->b_validend) ? 0 : (bp->b_validend - on);
			if (diff < n)
				n = diff;
			break;
		case VLNK:
			nfsstats.biocache_readlinks++;
			bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
			if (!bp)
				return (EINTR);
			if ((bp->b_flags & B_DONE) == 0) {
				bp->b_flags |= B_READ;
				if (error = nfs_doio(bp, cred, p)) {
					brelse(bp);
					return (error);
				}
			}
			n = min(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			got_buf = 1;
			on = 0;
			break;
		case VDIR:
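			/*
			 * Directory reads are done in NFS_DIRBLKSIZ chunks.
			 * Here uio_offset carries the NFS directory cookie of
			 * the block to read rather than a byte offset; it is
			 * advanced to the next cookie (saved in b_blkno by
			 * nfs_doio) after the block has been copied out.
			 */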
			if (uio->uio_resid < NFS_DIRBLKSIZ)
				return (0);
			nfsstats.biocache_readdirs++;
			bn = (daddr_t)uio->uio_offset;
			bp = nfs_getcacheblk(vp, bn, NFS_DIRBLKSIZ, p);
			if (!bp)
				return (EINTR);
			if ((bp->b_flags & B_DONE) == 0) {
				bp->b_flags |= B_READ;
				if (error = nfs_doio(bp, cred, p)) {
					brelse(bp);
					return (error);
				}
			}

			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have
			 * the directory offset cookie of the next block.)
			 */
			rabn = bp->b_blkno;
			if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
			    rabn != 0 && rabn != np->n_direofoffset &&
			    !incore(vp, rabn)) {
				rabp = nfs_getcacheblk(vp, rabn, NFS_DIRBLKSIZ, p);
				if (rabp) {
					if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
						rabp->b_flags |= (B_READ | B_ASYNC);
						if (nfs_asyncio(rabp, cred)) {
							rabp->b_flags |= B_INVAL;
							brelse(rabp);
						}
					}
				}
			}
			on = 0;
			n = min(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid);
			got_buf = 1;
			break;
		};

		if (n > 0) {
			if (!baddr)
				baddr = bp->b_data;
			error = uiomove(baddr + on, (int)n, uio);
		}
		switch (vp->v_type) {
		case VREG:
			if (n + on == biosize || uio->uio_offset == np->n_size)
				bp->b_flags |= B_AGE;
			break;
		case VLNK:
			n = 0;
			break;
		case VDIR:
			uio->uio_offset = bp->b_blkno;
			break;
		};
		if (got_buf)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
nfs_write(ap)
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		struct ucred *a_cred;
	} */ *ap;
{
	register int biosize;
	register struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	register struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	register struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp;
	daddr_t lbn, bn;
	int n, on, error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("nfs_write proc");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			np->n_attrstamp = 0;
			if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
				return (error);
		}
		if (ioflag & IO_APPEND) {
			np->n_attrstamp = 0;
			if (error = VOP_GETATTR(vp, &vattr, cred, p))
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	nmp = VFSTONFS(vp->v_mount);
	if (uio->uio_offset < 0)
		return (EINVAL);
	if (uio->uio_resid == 0)
		return (0);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, i don't think it matters
	 */
	if (p && uio->uio_offset + uio->uio_resid >
	    p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(p, SIGXFSZ);
		return (EFBIG);
	}
	/*
	 * I use nm_rsize, not nm_wsize so that all buffer cache blocks
	 * will be the same size within a filesystem. nfs_writerpc will
	 * still use nm_wsize when sizing the rpc's.
	 */
	biosize = nmp->nm_rsize;
	do {

		/*
		 * XXX make sure we aren't cached in the VM page cache
		 */
		(void)vnode_pager_uncache(vp);

		/*
		 * Check for a valid write lease.
		 * If non-cachable, just do the rpc
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, NQL_WRITE)) {
			do {
				error = nqnfs_getlease(vp, NQL_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error)
				return (error);
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
					return (error);
				np->n_brev = np->n_lrev;
			}
		}
		if (np->n_flag & NQNFSNONCACHE)
			return (nfs_writerpc(vp, uio, cred, ioflag));
		nfsstats.biocache_writes++;
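		/*
		 * Figure out which biosize block the write starts in (lbn),
		 * the offset within it (on), how many bytes of this block
		 * will be written (n), and the corresponding DEV_BSIZE
		 * block number (bn) used by the buffer cache.
		 */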
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset & (biosize - 1);
		n = min((unsigned)(biosize - on), uio->uio_resid);
		bn = lbn * (biosize / DEV_BSIZE);
again:
		bp = nfs_getcacheblk(vp, bn, biosize, p);
		if (!bp)
			return (EINTR);
		if (bp->b_wcred == NOCRED) {
			crhold(cred);
			bp->b_wcred = cred;
		}
		np->n_flag |= NMODIFIED;
		if (uio->uio_offset + n > np->n_size) {
			np->n_size = uio->uio_offset + n;
			vnode_pager_setsize(vp, (u_long)np->n_size);
		}

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 */
		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			bp->b_proc = p;
			if (VOP_BWRITE(bp) == EINTR)
				return (EINTR);
			goto again;
		}

		/*
		 * Check for valid write lease and get one as required.
		 * In case getblk() and/or bwrite() delayed us.
		 */
		if ((nmp->nm_flag & NFSMNT_NQNFS) &&
		    NQNFS_CKINVALID(vp, np, NQL_WRITE)) {
			do {
				error = nqnfs_getlease(vp, NQL_WRITE, cred, p);
			} while (error == NQNFS_EXPIRED);
			if (error) {
				brelse(bp);
				return (error);
			}
			if (np->n_lrev != np->n_brev ||
			    (np->n_flag & NQNFSNONCACHE)) {
				brelse(bp);
				if (error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1))
					return (error);
				np->n_brev = np->n_lrev;
				goto again;
			}
		}
		if (error = uiomove((char *)bp->b_data + on, n, uio)) {
			bp->b_flags |= B_ERROR;
			brelse(bp);
			return (error);
		}
		if (bp->b_dirtyend > 0) {
			bp->b_dirtyoff = min(on, bp->b_dirtyoff);
			bp->b_dirtyend = max((on + n), bp->b_dirtyend);
		} else {
			bp->b_dirtyoff = on;
			bp->b_dirtyend = on + n;
		}
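		/*
		 * Update the buffer's valid region: if the old valid region
		 * does not touch the newly dirtied bytes it is replaced by
		 * them; otherwise it is extended to include them.
		 */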
#ifndef notdef
		if (bp->b_validend == 0 || bp->b_validend < bp->b_dirtyoff ||
		    bp->b_validoff > bp->b_dirtyend) {
			bp->b_validoff = bp->b_dirtyoff;
			bp->b_validend = bp->b_dirtyend;
		} else {
			bp->b_validoff = min(bp->b_validoff, bp->b_dirtyoff);
			bp->b_validend = max(bp->b_validend, bp->b_dirtyend);
		}
#else
		bp->b_validoff = bp->b_dirtyoff;
		bp->b_validend = bp->b_dirtyend;
#endif
		if (ioflag & IO_APPEND)
			bp->b_flags |= B_APPENDWRITE;

		/*
		 * If the lease is non-cachable or IO_SYNC do bwrite().
		 */
		if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
			bp->b_proc = p;
			if (error = VOP_BWRITE(bp))
				return (error);
		} else if ((n + on) == biosize &&
		    (nmp->nm_flag & NFSMNT_NQNFS) == 0) {
			bp->b_proc = (struct proc *)0;
			bawrite(bp);
		} else
			bdwrite(bp);
	} while (uio->uio_resid > 0 && n > 0);
	return (0);
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(vp, bn, size, p)
	struct vnode *vp;
	daddr_t bn;
	int size;
	struct proc *p;
{
	register struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (nmp->nm_flag & NFSMNT_INT) {
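		/*
		 * On an interruptible mount, ask getblk() to catch signals
		 * (PCATCH); while it keeps failing, check for a pending
		 * signal and retry with a 2*hz timeout so the wait can be
		 * aborted.
		 */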
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == (struct buf *)0) {
			if (nfs_sigintr(nmp, (struct nfsreq *)0, p))
				return ((struct buf *)0);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
nfs_vinvalbuf(vp, flags, cred, p, intrflg)
	struct vnode *vp;
	int flags;
	struct ucred *cred;
	struct proc *p;
	int intrflg;
{
	register struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
			slptimeo);
		if (error && intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p))
			return (EINTR);
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
	while (error) {
		if (intrflg && nfs_sigintr(nmp, (struct nfsreq *)0, p)) {
			np->n_flag &= ~NFLUSHINPROG;
			if (np->n_flag & NFLUSHWANT) {
				np->n_flag &= ~NFLUSHWANT;
				wakeup((caddr_t)&np->n_flag);
			}
			return (EINTR);
		}
		error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
	}
	np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup((caddr_t)&np->n_flag);
	}
	return (0);
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */
nfs_asyncio(bp, cred)
	register struct buf *bp;
	struct ucred *cred;
{
	register int i;

	if (nfs_numasync == 0)
		return (EIO);
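	/*
	 * Look for an idle nfsiod (a slot set in nfs_iodwant[]).  If one is
	 * found, queue the buffer on nfs_bufq, clear the slot and wake the
	 * daemon to perform the I/O; otherwise return EIO to the caller.
	 */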
	for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
		if (nfs_iodwant[i]) {
			if (bp->b_flags & B_READ) {
				if (bp->b_rcred == NOCRED && cred != NOCRED) {
					crhold(cred);
					bp->b_rcred = cred;
				}
			} else {
				if (bp->b_wcred == NOCRED && cred != NOCRED) {
					crhold(cred);
					bp->b_wcred = cred;
				}
			}

			TAILQ_INSERT_TAIL(&nfs_bufq, bp, b_freelist);
			nfs_iodwant[i] = (struct proc *)0;
			wakeup((caddr_t)&nfs_iodwant[i]);
			return (0);
		}
	return (EIO);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(bp, cr, p)
	register struct buf *bp;
	struct ucred *cr;
	struct proc *p;
{
	register struct uio *uiop;
	register struct vnode *vp;
	struct nfsnode *np;
	struct nfsmount *nmp;
	int error, diff, len;
	struct uio uio;
	struct iovec io;

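	/*
	 * Build a single-segment uio describing the buffer's data so the
	 * read/write RPC routines can transfer directly to/from it.
	 */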
	vp = bp->b_vp;
	np = VTONFS(vp);
	nmp = VFSTONFS(vp->v_mount);
	uiop = &uio;
	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_segflg = UIO_SYSSPACE;
	uiop->uio_procp = p;

	/*
	 * Historically, paging was done with physio, but no more...
	 */
	if (bp->b_flags & B_PHYS) {
		/*
		 * ...though reading /dev/drum still gets us here.
		 */
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		/* mapping was done by vmapbuf() */
		io.iov_base = bp->b_data;
		uiop->uio_offset = bp->b_blkno * DEV_BSIZE;
		if (bp->b_flags & B_READ) {
			uiop->uio_rw = UIO_READ;
			nfsstats.read_physios++;
			error = nfs_readrpc(vp, uiop, cr);
		} else {
			uiop->uio_rw = UIO_WRITE;
			nfsstats.write_physios++;
			error = nfs_writerpc(vp, uiop, cr, 0);
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
	} else if (bp->b_flags & B_READ) {
		io.iov_len = uiop->uio_resid = bp->b_bcount;
		io.iov_base = bp->b_data;
		uiop->uio_rw = UIO_READ;
		switch (vp->v_type) {
		case VREG:
			uiop->uio_offset = bp->b_blkno * DEV_BSIZE;
			nfsstats.read_bios++;
			error = nfs_readrpc(vp, uiop, cr);
			if (!error) {
				bp->b_validoff = 0;
				if (uiop->uio_resid) {
					/*
					 * If len > 0, there is a hole in the file and
					 * no writes after the hole have been pushed to
					 * the server yet.
					 * Just zero fill the rest of the valid area.
					 */
					diff = bp->b_bcount - uiop->uio_resid;
					len = np->n_size - (bp->b_blkno * DEV_BSIZE
						+ diff);
					if (len > 0) {
						len = min(len, uiop->uio_resid);
						bzero((char *)bp->b_data + diff, len);
						bp->b_validend = diff + len;
					} else
						bp->b_validend = diff;
				} else
					bp->b_validend = bp->b_bcount;
			}
			if (p && (vp->v_flag & VTEXT) &&
			    (((nmp->nm_flag & NFSMNT_NQNFS) &&
			      NQNFS_CKINVALID(vp, np, NQL_READ) &&
			      np->n_lrev != np->n_brev) ||
			     (!(nmp->nm_flag & NFSMNT_NQNFS) &&
			      np->n_mtime != np->n_vattr.va_mtime.ts_sec))) {
				uprintf("Process killed due to text file modification\n");
				psignal(p, SIGKILL);
				p->p_holdcnt++;
			}
			break;
		case VLNK:
			uiop->uio_offset = 0;
			nfsstats.readlink_bios++;
			error = nfs_readlinkrpc(vp, uiop, cr);
			break;
		case VDIR:
			uiop->uio_offset = bp->b_lblkno;
			nfsstats.readdir_bios++;
			if (VFSTONFS(vp->v_mount)->nm_flag & NFSMNT_NQNFS)
				error = nfs_readdirlookrpc(vp, uiop, cr);
			else
				error = nfs_readdirrpc(vp, uiop, cr);
			/*
			 * Save offset cookie in b_blkno.
			 */
			bp->b_blkno = uiop->uio_offset;
			break;
		};
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = error;
		}
	} else {
		io.iov_len = uiop->uio_resid = bp->b_dirtyend
			- bp->b_dirtyoff;
		uiop->uio_offset = (bp->b_blkno * DEV_BSIZE)
			+ bp->b_dirtyoff;
		io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_bios++;
		if (bp->b_flags & B_APPENDWRITE)
			error = nfs_writerpc(vp, uiop, cr, IO_APPEND);
		else
			error = nfs_writerpc(vp, uiop, cr, 0);
		bp->b_flags &= ~(B_WRITEINPROG | B_APPENDWRITE);

		/*
		 * For an interrupted write, the buffer is still valid and the
		 * write hasn't been pushed to the server yet, so we can't set
		 * B_ERROR and report the interruption by setting B_EINTR. For
		 * the B_ASYNC case, B_EINTR is not relevant, so the rpc attempt
		 * is essentially a noop.
		 */
		if (error == EINTR) {
			bp->b_flags &= ~B_INVAL;
			bp->b_flags |= B_DELWRI;

			/*
			 * Since for the B_ASYNC case, nfs_bwrite() has reassigned the
			 * buffer to the clean list, we have to reassign it back to the
			 * dirty one. Ugh.
			 */
			if (bp->b_flags & B_ASYNC)
				reassignbuf(bp, vp);
			else
				bp->b_flags |= B_EINTR;
		} else {
			if (error) {
				bp->b_flags |= B_ERROR;
				bp->b_error = np->n_error = error;
				np->n_flag |= NWRITEERR;
			}
			bp->b_dirtyoff = bp->b_dirtyend = 0;
		}
	}
	bp->b_resid = uiop->uio_resid;
	biodone(bp);
	return (error);
}