/*	$NetBSD: nfs_bio.c,v 1.151.2.4 2007/05/13 17:36:38 ad Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_bio.c,v 1.151.2.4 2007/05/13 17:36:38 ad Exp $");

#include "opt_nfs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/malloc.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern int nfs_commitsize;
extern struct nfsstats nfsstats;

static int nfs_doio_read __P((struct buf *, struct uio *));
static int nfs_doio_write __P((struct buf *, struct uio *));
static int nfs_doio_phys __P((struct buf *, struct uio *));

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(vp, uio, ioflag, cred, cflag)
	struct vnode *vp;
	struct uio *uio;
	int ioflag, cflag;
	kauth_cred_t cred;
{
	struct nfsnode *np = VTONFS(vp);
	struct buf *bp = NULL, *rabp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsdircache *ndp = NULL, *nndp = NULL;
	void *baddr;
	int got_buf = 0, error = 0, n = 0, on = 0, en, enn;
	int enough = 0;
	struct dirent *dp, *pdp, *edp, *ep;
	off_t curoff = 0;
	int advice;
	struct lwp *l = curlwp;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (vp->v_type != VDIR && uio->uio_offset < 0)
		return (EINVAL);
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, l);
#endif
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);

	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 *
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date.  If you find that you need
	 * current attributes, this could be forced by setting n_attrstamp
	 * to 0 before the VOP_GETATTR() call.
	 */

	if (vp->v_type != VLNK) {
		error = nfs_flushstalebuf(vp, cred, l,
		    NFS_FLUSHSTALEBUF_MYWRITE);
		if (error)
			return error;
	}

	do {
		/*
		 * Don't cache symlinks.
		 */
		if ((vp->v_flag & VROOT) && vp->v_type == VLNK) {
			return (nfs_readlinkrpc(vp, uio, cred));
		}
		baddr = (void *)0;
		switch (vp->v_type) {
		case VREG:
			nfsstats.biocache_reads++;

			advice = IO_ADV_DECODE(ioflag);
			error = 0;
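			/*
			 * Copy the data out of the page cache via UBC
			 * windows; the actual read RPCs are issued from
			 * nfs_getpages() as the window pages fault in.
			 */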
			while (uio->uio_resid > 0) {
				void *win;
				int flags;
				vsize_t bytelen;

				nfs_delayedtruncate(vp);
				if (np->n_size <= uio->uio_offset) {
					break;
				}
				bytelen = MIN(np->n_size - uio->uio_offset,
				    uio->uio_resid);
				win = ubc_alloc(&vp->v_uobj, uio->uio_offset,
				    &bytelen, advice, UBC_READ);
				error = uiomove(win, bytelen, uio);
				flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
				ubc_release(win, flags);
				if (error) {
					/*
					 * XXXkludge
					 * the file has been truncated on the
					 * server.  there isn't much we can do.
					 */
					if (uio->uio_offset >= np->n_size) {
						/* end of file */
						error = 0;
					} else {
						break;
					}
				}
			}
			break;

		case VLNK:
			nfsstats.biocache_readlinks++;
			bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, l);
			if (!bp)
				return (EINTR);
			if ((bp->b_flags & B_DONE) == 0) {
				bp->b_flags |= B_READ;
				error = nfs_doio(bp);
				if (error) {
					brelse(bp, 0);
					return (error);
				}
			}
			n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			got_buf = 1;
			on = 0;
			break;
		case VDIR:
diragain:
			nfsstats.biocache_readdirs++;
			ndp = nfs_searchdircache(vp, uio->uio_offset,
			    (nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
			if (!ndp) {
				/*
				 * We've been handed a cookie that is not
				 * in the cache.  If we're not translating
				 * 32 <-> 64, it may be a value that was
				 * flushed out of the cache because it grew
				 * too big.  Let the server judge if it's
				 * valid or not.  In the translation case,
				 * we have no way of validating this value,
				 * so punt.
				 */
				if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
					return (EINVAL);
				ndp = nfs_enterdircache(vp, uio->uio_offset,
				    uio->uio_offset, 0, 0);
			}

			if (NFS_EOFVALID(np) &&
			    ndp->dc_cookie == np->n_direofoffset) {
				nfs_putdircache(np, ndp);
				nfsstats.direofcache_hits++;
				return (0);
			}

			bp = nfs_getcacheblk(vp, NFSDC_BLKNO(ndp),
			    NFS_DIRBLKSIZ, l);
			if (!bp)
				return (EINTR);
			if ((bp->b_flags & B_DONE) == 0) {
				bp->b_flags |= B_READ;
				bp->b_dcookie = ndp->dc_blkcookie;
				error = nfs_doio(bp);
				if (error) {
					/*
					 * Yuck! The directory has been
					 * modified on the server.  Punt and
					 * let the userland code deal with it.
					 */
					nfs_putdircache(np, ndp);
					brelse(bp, 0);
					/*
					 * nfs_request maps NFSERR_BAD_COOKIE
					 * to EINVAL.
					 */
					if (error == EINVAL) { /* NFSERR_BAD_COOKIE */
						nfs_invaldircache(vp, 0);
						nfs_vinvalbuf(vp, 0, cred, l, 1);
					}
					return (error);
				}
			}

			/*
			 * Just return if we hit EOF right away with this
			 * block.  Always check here, because direofoffset
			 * may have been set by an nfsiod since the last
			 * check.
			 *
			 * also, empty block implies EOF.
			 */

			if (bp->b_bcount == bp->b_resid ||
			    (NFS_EOFVALID(np) &&
			    ndp->dc_blkcookie == np->n_direofoffset)) {
				KASSERT(bp->b_bcount != bp->b_resid ||
				    ndp->dc_blkcookie == bp->b_dcookie);
				nfs_putdircache(np, ndp);
				brelse(bp, B_NOCACHE);
				return 0;
			}

			/*
			 * Find the entry we were looking for in the block.
			 */

			en = ndp->dc_entry;

			pdp = dp = (struct dirent *)bp->b_data;
			edp = (struct dirent *)(void *)((char *)bp->b_data +
			    bp->b_bcount - bp->b_resid);
			enn = 0;
			while (enn < en && dp < edp) {
				pdp = dp;
				dp = _DIRENT_NEXT(dp);
				enn++;
			}

			/*
			 * If the entry number was bigger than the number of
			 * entries in the block, or the cookie of the previous
			 * entry doesn't match, the directory cache is
			 * stale.  Flush it and try again (i.e. go to
			 * the server).
			 */
			if (dp >= edp || (struct dirent *)_DIRENT_NEXT(dp) > edp ||
			    (en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
#ifdef DEBUG
				printf("invalid cache: %p %p %p off %lx %lx\n",
				    pdp, dp, edp,
				    (unsigned long)uio->uio_offset,
				    (unsigned long)NFS_GETCOOKIE(pdp));
#endif
				nfs_putdircache(np, ndp);
				brelse(bp, 0);
				nfs_invaldircache(vp, 0);
				nfs_vinvalbuf(vp, 0, cred, l, 0);
				goto diragain;
			}

			on = (char *)dp - (char *)bp->b_data;

			/*
			 * Cache all entries that may be exported to the
			 * user, as they may be thrown back at us.  The
			 * NFSBIO_CACHECOOKIES flag indicates that all
			 * entries are being 'exported', so cache them all.
			 */

			if (en == 0 && pdp == dp) {
				dp = _DIRENT_NEXT(dp);
				enn++;
			}

			if (uio->uio_resid < (bp->b_bcount - bp->b_resid - on)) {
				n = uio->uio_resid;
				enough = 1;
			} else
				n = bp->b_bcount - bp->b_resid - on;

			ep = (struct dirent *)(void *)((char *)bp->b_data +
			    on + n);

			/*
			 * Find last complete entry to copy, caching entries
			 * (if requested) as we go.
			 */

			while (dp < ep &&
			    (struct dirent *)_DIRENT_NEXT(dp) <= ep) {
				if (cflag & NFSBIO_CACHECOOKIES) {
					nndp = nfs_enterdircache(vp,
					    NFS_GETCOOKIE(pdp),
					    ndp->dc_blkcookie, enn,
					    bp->b_lblkno);
					if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
						NFS_STASHCOOKIE32(pdp,
						    nndp->dc_cookie32);
					}
					nfs_putdircache(np, nndp);
				}
				pdp = dp;
				dp = _DIRENT_NEXT(dp);
				enn++;
			}
			nfs_putdircache(np, ndp);
			/*
			 * If the last requested entry was not the last in the
			 * buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
			 * cache the cookie of the last requested one, and
			 * set the offset to it.
			 */

363
364 if ((on + n) < bp->b_bcount - bp->b_resid) {
365 curoff = NFS_GETCOOKIE(pdp);
366 nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
367 enn, bp->b_lblkno);
368 if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
369 NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
370 curoff = nndp->dc_cookie32;
371 }
372 nfs_putdircache(np, nndp);
373 } else
374 curoff = bp->b_dcookie;
375
376 /*
377 * Always cache the entry for the next block,
378 * so that readaheads can use it.
379 */
380 nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie, 0,0);
381 if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
382 if (curoff == bp->b_dcookie) {
383 NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
384 curoff = nndp->dc_cookie32;
385 }
386 }
387
388 n = (char *)_DIRENT_NEXT(pdp) - ((char *)bp->b_data + on);
389
390 /*
391 * If not eof and read aheads are enabled, start one.
392 * (You need the current block first, so that you have the
393 * directory offset cookie of the next block.)
394 */
395 if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
396 !NFS_EOFVALID(np)) {
397 rabp = nfs_getcacheblk(vp, NFSDC_BLKNO(nndp),
398 NFS_DIRBLKSIZ, l);
399 if (rabp) {
400 if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
401 rabp->b_dcookie = nndp->dc_cookie;
402 rabp->b_flags |= (B_READ | B_ASYNC);
403 if (nfs_asyncio(rabp)) {
404 brelse(rabp, B_INVAL);
405 }
406 } else
407 brelse(rabp, 0);
408 }
409 }
410 nfs_putdircache(np, nndp);
411 got_buf = 1;
412 break;
413 default:
414 printf(" nfsbioread: type %x unexpected\n",vp->v_type);
415 break;
416 }
417
418 if (n > 0) {
419 if (!baddr)
420 baddr = bp->b_data;
421 error = uiomove((char *)baddr + on, (int)n, uio);
422 }
423 switch (vp->v_type) {
424 case VREG:
425 break;
426 case VLNK:
427 n = 0;
428 break;
429 case VDIR:
430 uio->uio_offset = curoff;
431 if (enough)
432 n = 0;
433 break;
434 default:
435 printf(" nfsbioread: type %x unexpected\n",vp->v_type);
436 }
437 if (got_buf)
438 brelse(bp, 0);
439 } while (error == 0 && uio->uio_resid > 0 && n > 0);
440 return (error);
441 }
442
443 /*
444 * Vnode op for write using bio
445 */
446 int
447 nfs_write(v)
448 void *v;
449 {
450 struct vop_write_args /* {
451 struct vnode *a_vp;
452 struct uio *a_uio;
453 int a_ioflag;
454 kauth_cred_t a_cred;
455 } */ *ap = v;
456 struct uio *uio = ap->a_uio;
457 struct lwp *l = curlwp;
458 struct vnode *vp = ap->a_vp;
459 struct nfsnode *np = VTONFS(vp);
460 kauth_cred_t cred = ap->a_cred;
461 struct vattr vattr;
462 struct nfsmount *nmp = VFSTONFS(vp->v_mount);
463 void *win;
464 voff_t oldoff, origoff;
465 vsize_t bytelen;
466 int flags, error = 0;
467 int ioflag = ap->a_ioflag;
468 int extended = 0, wrotedata = 0;
469
470 #ifdef DIAGNOSTIC
471 if (uio->uio_rw != UIO_WRITE)
472 panic("nfs_write mode");
473 #endif
474 if (vp->v_type != VREG)
475 return (EIO);
476 if (np->n_flag & NWRITEERR) {
477 np->n_flag &= ~NWRITEERR;
478 return (np->n_error);
479 }
480 #ifndef NFS_V2_ONLY
481 if ((nmp->nm_flag & NFSMNT_NFSV3) &&
482 !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
483 (void)nfs_fsinfo(nmp, vp, cred, l);
484 #endif
485 if (ioflag & (IO_APPEND | IO_SYNC)) {
486 if (np->n_flag & NMODIFIED) {
487 NFS_INVALIDATE_ATTRCACHE(np);
488 error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
489 if (error)
490 return (error);
491 }
492 if (ioflag & IO_APPEND) {
493 NFS_INVALIDATE_ATTRCACHE(np);
494 error = VOP_GETATTR(vp, &vattr, cred, l);
495 if (error)
496 return (error);
497 uio->uio_offset = np->n_size;
498 }
499 }
500 if (uio->uio_offset < 0)
501 return (EINVAL);
502 if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
503 return (EFBIG);
504 if (uio->uio_resid == 0)
505 return (0);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (l && l->l_proc && uio->uio_offset + uio->uio_resid >
	    l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(l->l_proc, SIGXFSZ);
		return (EFBIG);
	}

	origoff = uio->uio_offset;
	do {
		bool extending; /* if we are extending whole pages */
		u_quad_t oldsize;
		oldoff = uio->uio_offset;
		bytelen = uio->uio_resid;

		nfsstats.biocache_writes++;

		oldsize = np->n_size;
		np->n_flag |= NMODIFIED;
		if (np->n_size < uio->uio_offset + bytelen) {
			np->n_size = uio->uio_offset + bytelen;
		}
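		/*
		 * If the write covers whole, page-aligned pages at or
		 * past the old EOF, use UBC_FAULTBUSY so the pages are
		 * allocated fresh instead of being faulted in from the
		 * server; the write overwrites them entirely anyway.
		 */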
		extending = ((uio->uio_offset & PAGE_MASK) == 0 &&
		    (bytelen & PAGE_MASK) == 0 &&
		    uio->uio_offset >= vp->v_size);
		win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
		    UVM_ADV_NORMAL,
		    UBC_WRITE | (extending ? UBC_FAULTBUSY : 0));
		error = uiomove(win, bytelen, uio);
		flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
		ubc_release(win, flags);
		if (error) {
			if (extending) {
				/*
				 * backout size and free pages past eof.
				 */
				np->n_size = oldsize;
				mutex_enter(&vp->v_interlock);
				(void)VOP_PUTPAGES(vp, round_page(vp->v_size),
				    0, PGO_SYNCIO | PGO_FREE);
			}
			break;
		}
		wrotedata = 1;

		/*
		 * update UVM's notion of the size now that we've
		 * copied the data into the vnode's pages.
		 */

		if (vp->v_size < uio->uio_offset) {
			uvm_vnp_setsize(vp, uio->uio_offset);
			extended = 1;
		}

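		/*
		 * Once the write crosses an nm_wsize boundary, push the
		 * completed write-size chunk behind us out to the server
		 * asynchronously, so dirty data flows out in nm_wsize
		 * units as we go.
		 */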
		if ((oldoff & ~(nmp->nm_wsize - 1)) !=
		    (uio->uio_offset & ~(nmp->nm_wsize - 1))) {
			mutex_enter(&vp->v_interlock);
			error = VOP_PUTPAGES(vp,
			    trunc_page(oldoff & ~(nmp->nm_wsize - 1)),
			    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
			    ~(nmp->nm_wsize - 1)), PGO_CLEANIT);
		}
	} while (uio->uio_resid > 0);
	if (wrotedata)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
	if (ioflag & IO_SYNC) {
		mutex_enter(&vp->v_interlock);
		error = VOP_PUTPAGES(vp,
		    trunc_page(origoff & ~(nmp->nm_wsize - 1)),
		    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
		    ~(nmp->nm_wsize - 1)),
		    PGO_CLEANIT | PGO_SYNCIO);
	}
	return error;
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy.  If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(vp, bn, size, l)
	struct vnode *vp;
	daddr_t bn;
	int size;
	struct lwp *l;
{
	struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == NULL) {
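			/*
			 * getblk() returned NULL (e.g. we were interrupted);
			 * check for a pending signal, then retry with a two
			 * second sleep per attempt.
			 */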
			if (nfs_sigintr(nmp, NULL, l))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers.  If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(vp, flags, cred, l, intrflg)
	struct vnode *vp;
	int flags;
	kauth_cred_t cred;
	struct lwp *l;
	int intrflg;
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slptimeo;
	bool catch;

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		catch = true;
		slptimeo = 2 * hz;
	} else {
		catch = false;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	mutex_enter(&vp->v_interlock);
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = mtsleep(&np->n_flag, PRIBIO + 2, "nfsvinval",
		    slptimeo, &vp->v_interlock);
		if (error && intrflg && nfs_sigintr(nmp, NULL, l)) {
			mutex_exit(&vp->v_interlock);
			return EINTR;
		}
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	mutex_exit(&vp->v_interlock);
	error = vinvalbuf(vp, flags, cred, l, catch, 0);
	while (error) {
		if (intrflg && nfs_sigintr(nmp, NULL, l)) {
			error = EINTR;
			break;
		}
		error = vinvalbuf(vp, flags, cred, l, 0, slptimeo);
	}
	mutex_enter(&vp->v_interlock);
	if (error == 0)
		np->n_flag &= ~NMODIFIED;
	np->n_flag &= ~NFLUSHINPROG;
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	mutex_exit(&vp->v_interlock);
	return error;
}

/*
 * nfs_flushstalebuf: flush cache if it's stale.
 *
 * => caller shouldn't own any pages or buffers which belong to the vnode.
 */

int
nfs_flushstalebuf(struct vnode *vp, kauth_cred_t cred, struct lwp *l,
    int flags)
{
	struct nfsnode *np = VTONFS(vp);
	struct vattr vattr;
	int error;

	if (np->n_flag & NMODIFIED) {
		if ((flags & NFS_FLUSHSTALEBUF_MYWRITE) == 0
		    || vp->v_type != VREG) {
			error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
			if (error)
				return error;
			if (vp->v_type == VDIR) {
				nfs_invaldircache(vp, 0);
			}
		} else {
			/*
			 * XXX assuming writes are ours.
			 */
		}
		NFS_INVALIDATE_ATTRCACHE(np);
		error = VOP_GETATTR(vp, &vattr, cred, l);
		if (error)
			return error;
		np->n_mtime = vattr.va_mtime;
	} else {
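		/*
		 * No local modifications; invalidate the cache only if
		 * the server's modify time differs from the one we have
		 * cached.
		 */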
		error = VOP_GETATTR(vp, &vattr, cred, l);
		if (error)
			return error;
		if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) {
			if (vp->v_type == VDIR) {
				nfs_invaldircache(vp, 0);
			}
			error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
			if (error)
				return error;
			np->n_mtime = vattr.va_mtime;
		}
	}

	return error;
}

/*
 * Initiate asynchronous I/O.  Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */

int
nfs_asyncio(bp)
	struct buf *bp;
{
	int i;
	struct nfsmount *nmp;
	int gotiod, slpflag = 0, slptimeo = 0, error;

	if (nfs_numasync == 0)
		return (EIO);

	nmp = VFSTONFS(bp->b_vp->v_mount);
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = false;

	/*
	 * Find a free iod to process this request.
	 */

	for (i = 0; i < NFS_MAXASYNCDAEMON; i++) {
		struct nfs_iod *iod = &nfs_asyncdaemon[i];

		mutex_enter(&iod->nid_lock);
		if (iod->nid_want) {
			/*
			 * Found one, so wake it up and tell it which
			 * mount to process.
			 */
			iod->nid_want = NULL;
			iod->nid_mount = nmp;
			wakeup(&iod->nid_want);
			mutex_enter(&nmp->nm_lock);
			mutex_exit(&iod->nid_lock);
			nmp->nm_bufqiods++;
			gotiod = true;
			break;
		}
		mutex_exit(&iod->nid_lock);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point.  If so, it will process our request.
	 */

	if (!gotiod) {
		mutex_enter(&nmp->nm_lock);
		if (nmp->nm_bufqiods > 0)
			gotiod = true;
	}

	KASSERT(mutex_owned(&nmp->nm_lock));
	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer.  However, even if we have an iod, do not initiate
	 * queue cleaning if curproc is the pageout daemon: if the NFS mount
	 * is via local loopback, we may put curproc (pagedaemon) to sleep
	 * waiting for the writes to complete.  But the server (ourself)
	 * may block the write, waiting for its (i.e., our) pagedaemon
	 * to produce clean pages to handle the write: deadlock.
	 * XXX: start non-loopback mounts straight away?  If "lots free",
	 * let pagedaemon start loopback writes anyway?
	 */
	if (gotiod) {

		/*
		 * Ensure that the queue never grows too large.
		 */
		if (curlwp == uvm.pagedaemon_lwp) {
			/* Enqueue for later, to avoid free-page deadlock */
			(void) 0;
		} else while (nmp->nm_bufqlen >= 2 * nfs_numasync) {
			nmp->nm_bufqwant = true;
			error = mtsleep(&nmp->nm_bufq,
			    slpflag | PRIBIO | PNORELOCK,
			    "nfsaio", slptimeo, &nmp->nm_lock);
			if (error) {
				if (nfs_sigintr(nmp, NULL, curlwp))
					return (EINTR);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}

			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */

			if (nmp->nm_bufqiods == 0)
				goto again;

			mutex_enter(&nmp->nm_lock);
		}
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		mutex_exit(&nmp->nm_lock);
		return (0);
	}
	mutex_exit(&nmp->nm_lock);

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */

	return (EIO);
}

/*
 * nfs_doio for read.
 */
static int
nfs_doio_read(bp, uiop)
	struct buf *bp;
	struct uio *uiop;
{
	struct vnode *vp = bp->b_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0;

	uiop->uio_rw = UIO_READ;
	switch (vp->v_type) {
	case VREG:
		nfsstats.read_bios++;
		error = nfs_readrpc(vp, uiop);
		if (!error && uiop->uio_resid) {
			int diff, len;

			/*
			 * If uio_resid > 0, there is a hole in the file and
			 * no writes after the hole have been pushed to
			 * the server yet or the file has been truncated
			 * on the server.
			 * Just zero fill the rest of the valid area.
			 */

			KASSERT(vp->v_size >=
			    uiop->uio_offset + uiop->uio_resid);
			diff = bp->b_bcount - uiop->uio_resid;
			len = uiop->uio_resid;
			memset((char *)bp->b_data + diff, 0, len);
			uiop->uio_resid = 0;
		}
#if 0
		if (uiop->uio_lwp && (vp->v_flag & VTEXT) &&
		    timespeccmp(&np->n_mtime, &np->n_vattr->va_mtime, !=)) {
			killproc(uiop->uio_lwp->l_proc,
			    "process text file was modified");
#if 0 /* XXX NJWLWP */
			uiop->uio_lwp->l_proc->p_holdcnt++;
#endif
		}
#endif
		break;
	case VLNK:
		KASSERT(uiop->uio_offset == (off_t)0);
		nfsstats.readlink_bios++;
		error = nfs_readlinkrpc(vp, uiop, np->n_rcred);
		break;
	case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = bp->b_dcookie;
#ifndef NFS_V2_ONLY
		if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
			error = nfs_readdirplusrpc(vp, uiop,
			    curlwp->l_cred);
			/*
			 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
			 */
			if (error == ENOTSUP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
#else
		nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
#endif
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = nfs_readdirrpc(vp, uiop,
			    curlwp->l_cred);
		if (!error) {
			bp->b_dcookie = uiop->uio_offset;
		}
		break;
	default:
		printf("nfs_doio: type %x unexpected\n", vp->v_type);
		break;
	}
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	}
	return error;
}

/*
 * nfs_doio for write.
 */
static int
nfs_doio_write(bp, uiop)
	struct buf *bp;
	struct uio *uiop;
{
	struct vnode *vp = bp->b_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int iomode;
	bool stalewriteverf = false;
	int i, npages = (bp->b_bcount + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct vm_page *pgs[npages];
#ifndef NFS_V2_ONLY
	bool needcommit = true; /* need only COMMIT RPC */
#else
	bool needcommit = false; /* need only COMMIT RPC */
#endif
	bool pageprotected;
	struct uvm_object *uobj = &vp->v_uobj;
	int error;
	off_t off, cnt;

	if ((bp->b_flags & B_ASYNC) != 0 && NFS_ISV3(vp)) {
		iomode = NFSV3WRITE_UNSTABLE;
	} else {
		iomode = NFSV3WRITE_FILESYNC;
	}

#ifndef NFS_V2_ONLY
again:
#endif
	rw_enter(&nmp->nm_writeverflock, RW_READER);

	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data +
		    (i << PAGE_SHIFT));
		if (pgs[i]->uobject == uobj &&
		    pgs[i]->offset == uiop->uio_offset + (i << PAGE_SHIFT)) {
			KASSERT(pgs[i]->flags & PG_BUSY);
			/*
			 * this page belongs to our object.
			 */
			mutex_enter(&uobj->vmobjlock);
			/*
			 * write out the page stably if it's about to be
			 * released, because we can't resend it if the
			 * server crashes.
			 *
			 * XXX assuming PG_RELEASED|PG_PAGEOUT won't be
			 * changed until the page is unbusied.
			 */
			if (pgs[i]->flags & (PG_RELEASED|PG_PAGEOUT))
				iomode = NFSV3WRITE_FILESYNC;
			/*
			 * if we find a page which hasn't been sent to the
			 * server yet, we need to do a WRITE RPC.
			 */
			if ((pgs[i]->flags & PG_NEEDCOMMIT) == 0)
				needcommit = false;
			mutex_exit(&uobj->vmobjlock);
		} else {
			iomode = NFSV3WRITE_FILESYNC;
			needcommit = false;
		}
	}
	if (!needcommit && iomode == NFSV3WRITE_UNSTABLE) {
		mutex_enter(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags |= PG_NEEDCOMMIT | PG_RDONLY;
			pmap_page_protect(pgs[i], VM_PROT_READ);
		}
		mutex_exit(&uobj->vmobjlock);
		pageprotected = true; /* pages can't be modified during i/o. */
	} else
		pageprotected = false;

	/*
	 * Send the data to the server if necessary,
	 * otherwise just send a commit rpc.
	 */
#ifndef NFS_V2_ONLY
	if (needcommit) {

		/*
		 * If the buffer is in the range that we already committed,
		 * there's nothing to do.
		 *
		 * If it's in the range that we need to commit, push the
		 * whole range at once, otherwise only push the buffer.
		 * In both these cases, acquire the commit lock to avoid
		 * other processes modifying the range.
		 */

		off = uiop->uio_offset;
		cnt = bp->b_bcount;
		mutex_enter(&np->n_commitlock);
		if (!nfs_in_committed_range(vp, off, bp->b_bcount)) {
			bool pushedrange;
			if (nfs_in_tobecommitted_range(vp, off, bp->b_bcount)) {
				pushedrange = true;
				off = np->n_pushlo;
				cnt = np->n_pushhi - np->n_pushlo;
			} else {
				pushedrange = false;
			}
			error = nfs_commit(vp, off, cnt, curlwp);
			if (error == 0) {
				if (pushedrange) {
					nfs_merge_commit_ranges(vp);
				} else {
					nfs_add_committed_range(vp, off, cnt);
				}
			}
		} else {
			error = 0;
		}
		mutex_exit(&np->n_commitlock);
		rw_exit(&nmp->nm_writeverflock);
		if (!error) {
			/*
			 * pages are now on stable storage.
			 */
			uiop->uio_resid = 0;
			mutex_enter(&uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
			}
			mutex_exit(&uobj->vmobjlock);
			return 0;
		} else if (error == NFSERR_STALEWRITEVERF) {
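			/*
			 * The server rebooted and may have lost our
			 * uncommitted data, so discard all cached commit
			 * state and redo the whole operation.
			 */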
			nfs_clearcommit(vp->v_mount);
			goto again;
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = np->n_error = error;
			np->n_flag |= NWRITEERR;
		}
		return error;
	}
#endif
	off = uiop->uio_offset;
	cnt = bp->b_bcount;
	uiop->uio_rw = UIO_WRITE;
	nfsstats.write_bios++;
	error = nfs_writerpc(vp, uiop, &iomode, pageprotected,
	    &stalewriteverf);
#ifndef NFS_V2_ONLY
	if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		/*
		 * we need to commit pages later.
		 */
		mutex_enter(&np->n_commitlock);
		nfs_add_tobecommitted_range(vp, off, cnt);
		/*
		 * if there are too many uncommitted pages, commit some
		 * of them now.
		 */
		if (np->n_pushhi - np->n_pushlo > nfs_commitsize) {
			off = np->n_pushlo;
			cnt = nfs_commitsize >> 1;
			error = nfs_commit(vp, off, cnt, curlwp);
			if (!error) {
				nfs_add_committed_range(vp, off, cnt);
				nfs_del_tobecommitted_range(vp, off, cnt);
			}
			if (error == NFSERR_STALEWRITEVERF) {
				stalewriteverf = true;
				error = 0; /* it isn't a real error */
			}
		} else {
			/*
			 * re-dirty pages so that they will be passed
			 * to us later again.
			 */
			mutex_enter(&uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				pgs[i]->flags &= ~PG_CLEAN;
			}
			mutex_exit(&uobj->vmobjlock);
		}
		mutex_exit(&np->n_commitlock);
	} else
#endif
	if (!error) {
		/*
		 * pages are now on stable storage.
		 */
		mutex_enter(&np->n_commitlock);
		nfs_del_committed_range(vp, off, cnt);
		mutex_exit(&np->n_commitlock);
		mutex_enter(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
		}
		mutex_exit(&uobj->vmobjlock);
	} else {
		/*
		 * we got an error.
		 */
		bp->b_flags |= B_ERROR;
		bp->b_error = np->n_error = error;
		np->n_flag |= NWRITEERR;
	}

	rw_exit(&nmp->nm_writeverflock);

	if (stalewriteverf) {
		nfs_clearcommit(vp->v_mount);
	}
	return error;
}

/*
 * nfs_doio for B_PHYS.
 */
static int
nfs_doio_phys(bp, uiop)
	struct buf *bp;
	struct uio *uiop;
{
	struct vnode *vp = bp->b_vp;
	int error;

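	/* b_blkno for B_PHYS i/o is in DEV_BSIZE units. */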
	uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
	if (bp->b_flags & B_READ) {
		uiop->uio_rw = UIO_READ;
		nfsstats.read_physios++;
		error = nfs_readrpc(vp, uiop);
	} else {
		int iomode = NFSV3WRITE_DATASYNC;
		bool stalewriteverf;
		struct nfsmount *nmp = VFSTONFS(vp->v_mount);

		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_physios++;
		rw_enter(&nmp->nm_writeverflock, RW_READER);
		error = nfs_writerpc(vp, uiop, &iomode, false,
		    &stalewriteverf);
		rw_exit(&nmp->nm_writeverflock);
		if (stalewriteverf) {
			nfs_clearcommit(bp->b_vp->v_mount);
		}
	}
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block.  This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(bp)
	struct buf *bp;
{
	int error;
	struct uio uio;
	struct uio *uiop = &uio;
	struct iovec io;
	UVMHIST_FUNC("nfs_doio"); UVMHIST_CALLED(ubchist);

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_offset = (((off_t)bp->b_blkno) << DEV_BSHIFT);
	UIO_SETUP_SYSSPACE(uiop);
	io.iov_base = bp->b_data;
	io.iov_len = uiop->uio_resid = bp->b_bcount;

	/*
	 * Historically, paging was done with physio, but no more...
	 */
	if (bp->b_flags & B_PHYS) {
		/*
		 * ...though reading /dev/drum still gets us here.
		 */
		error = nfs_doio_phys(bp, uiop);
	} else if (bp->b_flags & B_READ) {
		error = nfs_doio_read(bp, uiop);
	} else {
		error = nfs_doio_write(bp, uiop);
	}
	biodone(bp, error, uiop->uio_resid);
	return (error);
}

/*
 * Vnode op for VM getpages.
 */

int
nfs_getpages(v)
	void *v;
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct nfsnode *np = VTONFS(vp);
	const int npages = *ap->a_count;
	struct vm_page *pg, **pgs, *opgs[npages];
	off_t origoffset, len;
	int i, error;
	bool v3 = NFS_ISV3(vp);
	bool write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	bool locked = (ap->a_flags & PGO_LOCKED) != 0;

	/*
	 * call the genfs code to get the pages.  `pgs' may be NULL
	 * when doing read-ahead.
	 */

	pgs = ap->a_m;
	if (write && locked && v3) {
		KASSERT(pgs != NULL);
#ifdef DEBUG

		/*
		 * If PGO_LOCKED is set, real pages shouldn't exist
		 * in the array.
		 */

		for (i = 0; i < npages; i++)
			KDASSERT(pgs[i] == NULL || pgs[i] == PGO_DONTCARE);
#endif
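		/*
		 * Save the original page array so that it can be
		 * restored if we have to bail out with EBUSY below.
		 */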
		memcpy(opgs, pgs, npages * sizeof(struct vm_page *));
	}
	error = genfs_getpages(v);
	if (error) {
		return (error);
	}

	/*
	 * for read faults where the nfs node is not yet marked NMODIFIED,
	 * set PG_RDONLY on the pages so that we come back here if someone
	 * tries to modify later via the mapping that will be entered for
	 * this fault.
	 */

	if (!write && (np->n_flag & NMODIFIED) == 0 && pgs != NULL) {
		if (!locked) {
			mutex_enter(&uobj->vmobjlock);
		}
		for (i = 0; i < npages; i++) {
			pg = pgs[i];
			if (pg == NULL || pg == PGO_DONTCARE) {
				continue;
			}
			pg->flags |= PG_RDONLY;
		}
		if (!locked) {
			mutex_exit(&uobj->vmobjlock);
		}
	}
	if (!write) {
		return (0);
	}

	/*
	 * this is a write fault, update the commit info.
	 */

	origoffset = ap->a_offset;
	len = npages << PAGE_SHIFT;

	if (v3) {
		if (!locked) {
			mutex_enter(&np->n_commitlock);
		} else {
			if (!mutex_tryenter(&np->n_commitlock)) {

				/*
				 * Since PGO_LOCKED is set, we need to unbusy
				 * all pages fetched by genfs_getpages() above,
				 * tell the caller that there are no pages
				 * available and put back original pgs array.
				 */

				mutex_enter(&uvm_pageqlock);
				uvm_page_unbusy(pgs, npages);
				mutex_exit(&uvm_pageqlock);
				*ap->a_count = 0;
				memcpy(pgs, opgs,
				    npages * sizeof(struct vm_page *));
				return EBUSY;
			}
		}
		nfs_del_committed_range(vp, origoffset, len);
		nfs_del_tobecommitted_range(vp, origoffset, len);
	}
	np->n_flag |= NMODIFIED;
	if (!locked) {
		mutex_enter(&uobj->vmobjlock);
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		pg->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
	}
	if (!locked) {
		mutex_exit(&uobj->vmobjlock);
	}
	if (v3) {
		mutex_exit(&np->n_commitlock);
	}
	return (0);
}