/*	$NetBSD: nfs_bio.c,v 1.147.2.1 2007/02/28 09:35:39 yamt Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: nfs_bio.c,v 1.147.2.1 2007/02/28 09:35:39 yamt Exp $");

#include "opt_nfs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/malloc.h>
#include <sys/kauth.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern int nfs_commitsize;
extern struct nfsstats nfsstats;

static int nfs_doio_read __P((struct buf *, struct uio *));
static int nfs_doio_write __P((struct buf *, struct uio *));
static int nfs_doio_phys __P((struct buf *, struct uio *));

/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(vp, uio, ioflag, cred, cflag)
	struct vnode *vp;
	struct uio *uio;
	int ioflag, cflag;
	kauth_cred_t cred;
{
	struct nfsnode *np = VTONFS(vp);
	struct buf *bp = NULL, *rabp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct nfsdircache *ndp = NULL, *nndp = NULL;
	caddr_t baddr;
	int got_buf = 0, error = 0, n = 0, on = 0, en, enn;
	int enough = 0;
	struct dirent *dp, *pdp, *edp, *ep;
	off_t curoff = 0;
	int advice;
	struct lwp *l = curlwp;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("nfs_read mode");
#endif
	if (uio->uio_resid == 0)
		return (0);
	if (vp->v_type != VDIR && uio->uio_offset < 0)
		return (EINVAL);
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, l);
#endif
	if (vp->v_type != VDIR &&
	    (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);

	/*
	 * For nfs, cache consistency can only be maintained approximately.
	 * Although RFC1094 does not specify the criteria, the following is
	 * believed to be compatible with the reference port.
	 *
	 * If the file's modify time on the server has changed since the
	 * last read rpc or you have written to the file,
	 * you may have lost data cache consistency with the
	 * server, so flush all of the file's data out of the cache.
	 * Then force a getattr rpc to ensure that you have up to date
	 * attributes.
	 * NB: This implies that cache data can be read when up to
	 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
	 * attributes this could be forced by setting n_attrstamp to 0 before
	 * the VOP_GETATTR() call.
	 */

	if (vp->v_type != VLNK) {
		error = nfs_flushstalebuf(vp, cred, l,
		    NFS_FLUSHSTALEBUF_MYWRITE);
		if (error)
			return error;
	}

	do {
		/*
		 * Don't cache the root symlink.
		 */
		if ((vp->v_flag & VROOT) && vp->v_type == VLNK) {
			return (nfs_readlinkrpc(vp, uio, cred));
		}
		baddr = (caddr_t)0;
		switch (vp->v_type) {
		case VREG:
			nfsstats.biocache_reads++;

			advice = IO_ADV_DECODE(ioflag);
			error = 0;
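			/*
			 * Transfer straight from the page cache: map each
			 * chunk of the file with UBC and copy it out,
			 * rather than staging the data through buffer
			 * cache blocks.
			 */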
			while (uio->uio_resid > 0) {
				void *win;
				int flags;
				vsize_t bytelen;

				nfs_delayedtruncate(vp);
				if (np->n_size <= uio->uio_offset) {
					break;
				}
				bytelen =
				    MIN(np->n_size - uio->uio_offset, uio->uio_resid);
				win = ubc_alloc(&vp->v_uobj, uio->uio_offset,
				    &bytelen, advice, UBC_READ);
				error = uiomove(win, bytelen, uio);
				flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
				ubc_release(win, flags);
				if (error) {
					/*
					 * XXXkludge
					 * the file has been truncated on the server.
					 * there isn't much we can do.
					 */
					if (uio->uio_offset >= np->n_size) {
						/* end of file */
						error = 0;
					} else {
						break;
					}
				}
			}
			break;

		case VLNK:
			nfsstats.biocache_readlinks++;
			bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, l);
			if (!bp)
				return (EINTR);
			if ((bp->b_flags & B_DONE) == 0) {
				bp->b_flags |= B_READ;
				error = nfs_doio(bp);
				if (error) {
					brelse(bp);
					return (error);
				}
			}
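			/*
			 * b_resid is what the READLINK RPC left unfilled,
			 * so only the first NFS_MAXPATHLEN - b_resid bytes
			 * of the block hold valid link text.
			 */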
			n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			got_buf = 1;
			on = 0;
			break;
		case VDIR:
diragain:
			nfsstats.biocache_readdirs++;
			ndp = nfs_searchdircache(vp, uio->uio_offset,
			    (nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
			if (!ndp) {
				/*
				 * We've been handed a cookie that is not
				 * in the cache. If we're not translating
				 * 32 <-> 64, it may be a value that was
				 * flushed out of the cache because it grew
				 * too big. Let the server judge if it's
				 * valid or not. In the translation case,
				 * we have no way of validating this value,
				 * so punt.
				 */
				if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
					return (EINVAL);
				ndp = nfs_enterdircache(vp, uio->uio_offset,
				    uio->uio_offset, 0, 0);
			}

			if (NFS_EOFVALID(np) &&
			    ndp->dc_cookie == np->n_direofoffset) {
				nfs_putdircache(np, ndp);
				nfsstats.direofcache_hits++;
				return (0);
			}

			bp = nfs_getcacheblk(vp, NFSDC_BLKNO(ndp), NFS_DIRBLKSIZ, l);
			if (!bp)
				return (EINTR);
			if ((bp->b_flags & B_DONE) == 0) {
				bp->b_flags |= B_READ;
				bp->b_dcookie = ndp->dc_blkcookie;
				error = nfs_doio(bp);
				if (error) {
					/*
					 * Yuck! The directory has been modified on the
					 * server. Punt and let the userland code
					 * deal with it.
					 */
					nfs_putdircache(np, ndp);
					brelse(bp);
					/*
					 * nfs_request maps NFSERR_BAD_COOKIE to EINVAL.
					 */
					if (error == EINVAL) { /* NFSERR_BAD_COOKIE */
						nfs_invaldircache(vp, 0);
						nfs_vinvalbuf(vp, 0, cred, l, 1);
					}
					return (error);
				}
			}

			/*
			 * Just return if we hit EOF right away with this
			 * block. Always check here, because direofoffset
			 * may have been set by an nfsiod since the last
			 * check.
			 *
			 * Also, an empty block implies EOF.
			 */

			if (bp->b_bcount == bp->b_resid ||
			    (NFS_EOFVALID(np) &&
			    ndp->dc_blkcookie == np->n_direofoffset)) {
				KASSERT(bp->b_bcount != bp->b_resid ||
				    ndp->dc_blkcookie == bp->b_dcookie);
				nfs_putdircache(np, ndp);
				bp->b_flags |= B_NOCACHE;
				brelse(bp);
				return 0;
			}

			/*
			 * Find the entry we were looking for in the block.
			 */

			en = ndp->dc_entry;

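			/*
			 * dp walks the entries; pdp trails one entry
			 * behind so that the cookie of the entry just
			 * before the target is available for the
			 * validation below.
			 */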
			pdp = dp = (struct dirent *)bp->b_data;
			edp = (struct dirent *)(void *)(bp->b_data + bp->b_bcount -
			    bp->b_resid);
			enn = 0;
			while (enn < en && dp < edp) {
				pdp = dp;
				dp = _DIRENT_NEXT(dp);
				enn++;
			}

			/*
			 * If the entry number was bigger than the number of
			 * entries in the block, or the cookie of the previous
			 * entry doesn't match, the directory cache is
			 * stale. Flush it and try again (i.e. go to
			 * the server).
			 */
			if (dp >= edp || (struct dirent *)_DIRENT_NEXT(dp) > edp ||
			    (en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
#ifdef DEBUG
				printf("invalid cache: %p %p %p off %lx %lx\n",
				    pdp, dp, edp,
				    (unsigned long)uio->uio_offset,
				    (unsigned long)NFS_GETCOOKIE(pdp));
#endif
				nfs_putdircache(np, ndp);
				brelse(bp);
				nfs_invaldircache(vp, 0);
				nfs_vinvalbuf(vp, 0, cred, l, 0);
				goto diragain;
			}

			on = (caddr_t)dp - bp->b_data;

			/*
			 * Cache all entries that may be exported to the
			 * user, as they may be thrown back at us. The
			 * NFSBIO_CACHECOOKIES flag indicates that all
			 * entries are being 'exported', so cache them all.
			 */

			if (en == 0 && pdp == dp) {
				dp = _DIRENT_NEXT(dp);
				enn++;
			}

			if (uio->uio_resid < (bp->b_bcount - bp->b_resid - on)) {
				n = uio->uio_resid;
				enough = 1;
			} else
				n = bp->b_bcount - bp->b_resid - on;

			ep = (struct dirent *)(void *)(bp->b_data + on + n);

			/*
			 * Find last complete entry to copy, caching entries
			 * (if requested) as we go.
			 */

			while (dp < ep && (struct dirent *)_DIRENT_NEXT(dp) <= ep) {
				if (cflag & NFSBIO_CACHECOOKIES) {
					nndp = nfs_enterdircache(vp, NFS_GETCOOKIE(pdp),
					    ndp->dc_blkcookie, enn, bp->b_lblkno);
					if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
						NFS_STASHCOOKIE32(pdp,
						    nndp->dc_cookie32);
					}
					nfs_putdircache(np, nndp);
				}
				pdp = dp;
				dp = _DIRENT_NEXT(dp);
				enn++;
			}
			nfs_putdircache(np, ndp);

			/*
			 * If the last requested entry was not the last in the
			 * buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
			 * cache the cookie of the last requested one, and
			 * set the offset to it.
			 */

			if ((on + n) < bp->b_bcount - bp->b_resid) {
				curoff = NFS_GETCOOKIE(pdp);
				nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
				    enn, bp->b_lblkno);
				if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
					NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
					curoff = nndp->dc_cookie32;
				}
				nfs_putdircache(np, nndp);
			} else
				curoff = bp->b_dcookie;

			/*
			 * Always cache the entry for the next block,
			 * so that readaheads can use it.
			 */
			nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie, 0, 0);
			if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
				if (curoff == bp->b_dcookie) {
					NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
					curoff = nndp->dc_cookie32;
				}
			}

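			/*
			 * Trim the transfer to end exactly at the last
			 * complete entry (pdp); partial entries are never
			 * copied out to the user.
			 */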
			n = (char *)_DIRENT_NEXT(pdp) - (bp->b_data + on);

			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have the
			 * directory offset cookie of the next block.)
			 */
			if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
			    !NFS_EOFVALID(np)) {
				rabp = nfs_getcacheblk(vp, NFSDC_BLKNO(nndp),
				    NFS_DIRBLKSIZ, l);
				if (rabp) {
					if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
						rabp->b_dcookie = nndp->dc_cookie;
						rabp->b_flags |= (B_READ | B_ASYNC);
						if (nfs_asyncio(rabp)) {
							rabp->b_flags |= B_INVAL;
							brelse(rabp);
						}
					} else
						brelse(rabp);
				}
			}
			nfs_putdircache(np, nndp);
			got_buf = 1;
			break;
		default:
			printf(" nfsbioread: type %x unexpected\n", vp->v_type);
			break;
		}

		if (n > 0) {
			if (!baddr)
				baddr = bp->b_data;
			error = uiomove(baddr + on, (int)n, uio);
		}
		switch (vp->v_type) {
		case VREG:
			break;
		case VLNK:
			n = 0;
			break;
		case VDIR:
			uio->uio_offset = curoff;
			if (enough)
				n = 0;
			break;
		default:
			printf(" nfsbioread: type %x unexpected\n", vp->v_type);
		}
		if (got_buf)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(v)
	void *v;
{
	struct vop_write_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		int a_ioflag;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct uio *uio = ap->a_uio;
	struct lwp *l = curlwp;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	kauth_cred_t cred = ap->a_cred;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	void *win;
	voff_t oldoff, origoff;
	vsize_t bytelen;
	int flags, error = 0;
	int ioflag = ap->a_ioflag;
	int extended = 0, wrotedata = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("nfs_write mode");
#endif
	if (vp->v_type != VREG)
		return (EIO);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		return (np->n_error);
	}
#ifndef NFS_V2_ONLY
	if ((nmp->nm_flag & NFSMNT_NFSV3) &&
	    !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
		(void)nfs_fsinfo(nmp, vp, cred, l);
#endif
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		if (np->n_flag & NMODIFIED) {
			NFS_INVALIDATE_ATTRCACHE(np);
			error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
			if (error)
				return (error);
		}
		if (ioflag & IO_APPEND) {
			NFS_INVALIDATE_ATTRCACHE(np);
			error = VOP_GETATTR(vp, &vattr, cred, l);
			if (error)
				return (error);
			uio->uio_offset = np->n_size;
		}
	}
	if (uio->uio_offset < 0)
		return (EINVAL);
	if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);
	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (l && l->l_proc && uio->uio_offset + uio->uio_resid >
	    l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
		psignal(l->l_proc, SIGXFSZ);
		return (EFBIG);
	}

	origoff = uio->uio_offset;
	do {
		bool extending; /* if we are extending whole pages */
		u_quad_t oldsize;
		oldoff = uio->uio_offset;
		bytelen = uio->uio_resid;

		nfsstats.biocache_writes++;

		oldsize = np->n_size;
		np->n_flag |= NMODIFIED;
		if (np->n_size < uio->uio_offset + bytelen) {
			np->n_size = uio->uio_offset + bytelen;
		}
		extending = ((uio->uio_offset & PAGE_MASK) == 0 &&
		    (bytelen & PAGE_MASK) == 0 &&
		    uio->uio_offset >= vp->v_size);
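		/*
		 * When appending whole aligned pages past EOF, pass
		 * UBC_FAULTBUSY so UBC can supply fresh pages without
		 * reading them in first; the uiomove below overwrites
		 * them completely anyway.
		 */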
		win = ubc_alloc(&vp->v_uobj, uio->uio_offset, &bytelen,
		    UVM_ADV_NORMAL,
		    UBC_WRITE | (extending ? UBC_FAULTBUSY : 0));
		error = uiomove(win, bytelen, uio);
		flags = UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0;
		ubc_release(win, flags);
		if (error) {
			if (extending) {
				/*
				 * back out the size change and free
				 * pages past EOF.
				 */
				np->n_size = oldsize;
				simple_lock(&vp->v_interlock);
				(void)VOP_PUTPAGES(vp, round_page(vp->v_size),
				    0, PGO_SYNCIO | PGO_FREE);
			}
			break;
		}
		wrotedata = 1;

		/*
		 * update UVM's notion of the size now that we've
		 * copied the data into the vnode's pages.
		 */

		if (vp->v_size < uio->uio_offset) {
			uvm_vnp_setsize(vp, uio->uio_offset);
			extended = 1;
		}

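		/*
		 * Once this write has crossed a write-block (nm_wsize)
		 * boundary, start an asynchronous flush of the completed
		 * blocks so dirty data doesn't pile up in the cache.
		 */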
		if ((oldoff & ~(nmp->nm_wsize - 1)) !=
		    (uio->uio_offset & ~(nmp->nm_wsize - 1))) {
			simple_lock(&vp->v_interlock);
			error = VOP_PUTPAGES(vp,
			    trunc_page(oldoff & ~(nmp->nm_wsize - 1)),
			    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
			    ~(nmp->nm_wsize - 1)), PGO_CLEANIT);
		}
	} while (uio->uio_resid > 0);
	if (wrotedata)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
	if (ioflag & IO_SYNC) {
		simple_lock(&vp->v_interlock);
		error = VOP_PUTPAGES(vp,
		    trunc_page(origoff & ~(nmp->nm_wsize - 1)),
		    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
		    ~(nmp->nm_wsize - 1)),
		    PGO_CLEANIT | PGO_SYNCIO);
	}
	return error;
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(vp, bn, size, l)
	struct vnode *vp;
	daddr_t bn;
	int size;
	struct lwp *l;
{
	struct buf *bp;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);

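	/*
	 * On interruptible mounts, wake up every couple of seconds
	 * while waiting for the block so a pending signal can abort
	 * the wait; otherwise just sleep until the block is free.
	 */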
	if (nmp->nm_flag & NFSMNT_INT) {
		bp = getblk(vp, bn, size, PCATCH, 0);
		while (bp == NULL) {
			if (nfs_sigintr(nmp, NULL, l))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz);
		}
	} else
		bp = getblk(vp, bn, size, 0, 0);
	return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(vp, flags, cred, l, intrflg)
	struct vnode *vp;
	int flags;
	kauth_cred_t cred;
	struct lwp *l;
	int intrflg;
{
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0, slpflag, slptimeo;

	if ((nmp->nm_flag & NFSMNT_INT) == 0)
		intrflg = 0;
	if (intrflg) {
		slpflag = PCATCH;
		slptimeo = 2 * hz;
	} else {
		slpflag = 0;
		slptimeo = 0;
	}
	/*
	 * First wait for any other process doing a flush to complete.
	 */
	simple_lock(&vp->v_interlock);
	while (np->n_flag & NFLUSHINPROG) {
		np->n_flag |= NFLUSHWANT;
		error = ltsleep(&np->n_flag, PRIBIO + 2, "nfsvinval",
		    slptimeo, &vp->v_interlock);
		if (error && intrflg && nfs_sigintr(nmp, NULL, l)) {
			simple_unlock(&vp->v_interlock);
			return EINTR;
		}
	}

	/*
	 * Now, flush as required.
	 */
	np->n_flag |= NFLUSHINPROG;
	simple_unlock(&vp->v_interlock);
	error = vinvalbuf(vp, flags, cred, l, slpflag, 0);
	while (error) {
		if (intrflg && nfs_sigintr(nmp, NULL, l)) {
			error = EINTR;
			break;
		}
		error = vinvalbuf(vp, flags, cred, l, 0, slptimeo);
	}
	simple_lock(&vp->v_interlock);
	if (error == 0)
		np->n_flag &= ~NMODIFIED;
	np->n_flag &= ~NFLUSHINPROG;
	if (np->n_flag & NFLUSHWANT) {
		np->n_flag &= ~NFLUSHWANT;
		wakeup(&np->n_flag);
	}
	simple_unlock(&vp->v_interlock);
	return error;
}

/*
 * nfs_flushstalebuf: flush cache if it's stale.
 *
 * => caller shouldn't own any pages or buffers which belong to the vnode.
 */

int
nfs_flushstalebuf(struct vnode *vp, kauth_cred_t cred, struct lwp *l,
    int flags)
{
	struct nfsnode *np = VTONFS(vp);
	struct vattr vattr;
	int error;

	if (np->n_flag & NMODIFIED) {
		if ((flags & NFS_FLUSHSTALEBUF_MYWRITE) == 0
		    || vp->v_type != VREG) {
			error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
			if (error)
				return error;
			if (vp->v_type == VDIR) {
				nfs_invaldircache(vp, 0);
			}
		} else {
			/*
			 * XXX assuming writes are ours.
			 */
		}
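		/*
		 * Refetch attributes so the mtime recorded below
		 * reflects the effect of our own writes.
		 */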
		NFS_INVALIDATE_ATTRCACHE(np);
		error = VOP_GETATTR(vp, &vattr, cred, l);
		if (error)
			return error;
		np->n_mtime = vattr.va_mtime;
	} else {
		error = VOP_GETATTR(vp, &vattr, cred, l);
		if (error)
			return error;
		if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) {
			if (vp->v_type == VDIR) {
				nfs_invaldircache(vp, 0);
			}
			error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
			if (error)
				return error;
			np->n_mtime = vattr.va_mtime;
		}
	}

	return error;
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */

int
nfs_asyncio(bp)
	struct buf *bp;
{
	int i;
	struct nfsmount *nmp;
	int gotiod, slpflag = 0, slptimeo = 0, error;

	if (nfs_numasync == 0)
		return (EIO);

	nmp = VFSTONFS(bp->b_vp->v_mount);
again:
	if (nmp->nm_flag & NFSMNT_INT)
		slpflag = PCATCH;
	gotiod = false;

	/*
	 * Find a free iod to process this request.
	 */

	for (i = 0; i < NFS_MAXASYNCDAEMON; i++) {
		struct nfs_iod *iod = &nfs_asyncdaemon[i];

		simple_lock(&iod->nid_slock);
		if (iod->nid_want) {
			/*
			 * Found one, so wake it up and tell it which
			 * mount to process.
			 */
			iod->nid_want = NULL;
			iod->nid_mount = nmp;
			wakeup(&iod->nid_want);
			simple_lock(&nmp->nm_slock);
			simple_unlock(&iod->nid_slock);
			nmp->nm_bufqiods++;
			gotiod = true;
			break;
		}
		simple_unlock(&iod->nid_slock);
	}

	/*
	 * If none are free, we may already have an iod working on this mount
	 * point. If so, it will process our request.
	 */

	if (!gotiod) {
		simple_lock(&nmp->nm_slock);
		if (nmp->nm_bufqiods > 0)
			gotiod = true;
	}

	LOCK_ASSERT(simple_lock_held(&nmp->nm_slock));

	/*
	 * If we have an iod which can process the request, then queue
	 * the buffer. However, even if we have an iod, do not initiate
	 * queue cleaning if curproc is the pageout daemon. If the NFS
	 * mount is via local loopback, we may put curproc (the pagedaemon)
	 * to sleep waiting for the writes to complete. But the server
	 * (ourself) may block the write, waiting for its (i.e., our)
	 * pagedaemon to produce clean pages to handle the write: deadlock.
	 * XXX: start non-loopback mounts straight away? If "lots free",
	 * let pagedaemon start loopback writes anyway?
	 */
	if (gotiod) {

		/*
		 * Ensure that the queue never grows too large.
		 */
		if (curproc == uvm.pagedaemon_proc) {
			/* Enqueue for later, to avoid free-page deadlock */
			(void) 0;
		} else while (nmp->nm_bufqlen >= 2 * nfs_numasync) {
			nmp->nm_bufqwant = true;
			error = ltsleep(&nmp->nm_bufq,
			    slpflag | PRIBIO | PNORELOCK,
			    "nfsaio", slptimeo, &nmp->nm_slock);
			if (error) {
				if (nfs_sigintr(nmp, NULL, curlwp))
					return (EINTR);
				if (slpflag == PCATCH) {
					slpflag = 0;
					slptimeo = 2 * hz;
				}
			}

			/*
			 * We might have lost our iod while sleeping,
			 * so check and loop if necessary.
			 */

			if (nmp->nm_bufqiods == 0)
				goto again;

			simple_lock(&nmp->nm_slock);
		}
		TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
		nmp->nm_bufqlen++;
		simple_unlock(&nmp->nm_slock);
		return (0);
	}
	simple_unlock(&nmp->nm_slock);

	/*
	 * All the iods are busy on other mounts, so return EIO to
	 * force the caller to process the i/o synchronously.
	 */

	return (EIO);
}

/*
 * nfs_doio for read.
 */
static int
nfs_doio_read(bp, uiop)
	struct buf *bp;
	struct uio *uiop;
{
	struct vnode *vp = bp->b_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int error = 0;

	uiop->uio_rw = UIO_READ;
	switch (vp->v_type) {
	case VREG:
		nfsstats.read_bios++;
		error = nfs_readrpc(vp, uiop);
		if (!error && uiop->uio_resid) {
			int diff, len;

			/*
			 * If uio_resid > 0, there is a hole in the file and
			 * no writes after the hole have been pushed to
			 * the server yet or the file has been truncated
			 * on the server.
			 * Just zero fill the rest of the valid area.
			 */

			KASSERT(vp->v_size >=
			    uiop->uio_offset + uiop->uio_resid);
			diff = bp->b_bcount - uiop->uio_resid;
			len = uiop->uio_resid;
			memset((char *)bp->b_data + diff, 0, len);
			uiop->uio_resid = 0;
		}
#if 0
		if (uiop->uio_lwp && (vp->v_flag & VTEXT) &&
		    timespeccmp(&np->n_mtime, &np->n_vattr->va_mtime, !=)) {
			killproc(uiop->uio_lwp->l_proc, "process text file was modified");
#if 0 /* XXX NJWLWP */
			uiop->uio_lwp->l_proc->p_holdcnt++;
#endif
		}
#endif
		break;
	case VLNK:
		KASSERT(uiop->uio_offset == (off_t)0);
		nfsstats.readlink_bios++;
		error = nfs_readlinkrpc(vp, uiop, np->n_rcred);
		break;
	case VDIR:
		nfsstats.readdir_bios++;
		uiop->uio_offset = bp->b_dcookie;
#ifndef NFS_V2_ONLY
		if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
			error = nfs_readdirplusrpc(vp, uiop,
			    curlwp->l_cred);
			/*
			 * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
			 */
			if (error == ENOTSUP)
				nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
		}
#else
		nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
#endif
		if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
			error = nfs_readdirrpc(vp, uiop,
			    curlwp->l_cred);
		if (!error) {
			bp->b_dcookie = uiop->uio_offset;
		}
		break;
	default:
		printf("nfs_doio: type %x unexpected\n", vp->v_type);
		break;
	}
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	}
	return error;
}

/*
 * nfs_doio for write.
 */
static int
nfs_doio_write(bp, uiop)
	struct buf *bp;
	struct uio *uiop;
{
	struct vnode *vp = bp->b_vp;
	struct nfsnode *np = VTONFS(vp);
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	int iomode;
	bool stalewriteverf = false;
	int i, npages = (bp->b_bcount + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct vm_page *pgs[npages];
#ifndef NFS_V2_ONLY
	bool needcommit = true; /* need only COMMIT RPC */
#else
	bool needcommit = false; /* need only COMMIT RPC */
#endif
	bool pageprotected;
	struct uvm_object *uobj = &vp->v_uobj;
	int error;
	off_t off, cnt;

	if ((bp->b_flags & B_ASYNC) != 0 && NFS_ISV3(vp)) {
		iomode = NFSV3WRITE_UNSTABLE;
	} else {
		iomode = NFSV3WRITE_FILESYNC;
	}

#ifndef NFS_V2_ONLY
again:
#endif
	rw_enter(&nmp->nm_writeverflock, RW_READER);

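	/*
	 * Inspect each page backing the buffer.  Only pages that belong
	 * to this vnode and are already flagged PG_NEEDCOMMIT (i.e. their
	 * data was sent by an earlier unstable WRITE) can be satisfied
	 * with a bare COMMIT RPC; anything else forces a full write.
	 */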
	for (i = 0; i < npages; i++) {
		pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
		if (pgs[i]->uobject == uobj &&
		    pgs[i]->offset == uiop->uio_offset + (i << PAGE_SHIFT)) {
			KASSERT(pgs[i]->flags & PG_BUSY);
			/*
			 * this page belongs to our object.
			 */
			simple_lock(&uobj->vmobjlock);
			/*
			 * write the page out stably if it's about to
			 * be released, because we can't resend it
			 * to the server if it crashes.
			 *
			 * XXX assuming PG_RELEASED|PG_PAGEOUT won't be
			 * changed until the page is unbusied.
			 */
			if (pgs[i]->flags & (PG_RELEASED|PG_PAGEOUT))
				iomode = NFSV3WRITE_FILESYNC;
			/*
			 * if we find a page which hasn't been sent yet,
			 * we need to do a WRITE RPC.
			 */
			if ((pgs[i]->flags & PG_NEEDCOMMIT) == 0)
				needcommit = false;
			simple_unlock(&uobj->vmobjlock);
		} else {
			iomode = NFSV3WRITE_FILESYNC;
			needcommit = false;
		}
	}
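	/*
	 * For an unstable write, write-protect the pages before the RPC
	 * so they can't be modified while it is in flight; that makes it
	 * safe to resend the very same data if the server reboots and
	 * loses the write.
	 */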
	if (!needcommit && iomode == NFSV3WRITE_UNSTABLE) {
		simple_lock(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags |= PG_NEEDCOMMIT | PG_RDONLY;
			pmap_page_protect(pgs[i], VM_PROT_READ);
		}
		simple_unlock(&uobj->vmobjlock);
		pageprotected = true; /* pages can't be modified during i/o. */
	} else
		pageprotected = false;

	/*
	 * Send the data to the server if necessary,
	 * otherwise just send a commit rpc.
	 */
#ifndef NFS_V2_ONLY
	if (needcommit) {

		/*
		 * If the buffer is in the range that we already committed,
		 * there's nothing to do.
		 *
		 * If it's in the range that we need to commit, push the
		 * whole range at once, otherwise only push the buffer.
		 * In both these cases, acquire the commit lock to avoid
		 * other processes modifying the range.
		 */

		off = uiop->uio_offset;
		cnt = bp->b_bcount;
		mutex_enter(&np->n_commitlock);
		if (!nfs_in_committed_range(vp, off, bp->b_bcount)) {
			bool pushedrange;
			if (nfs_in_tobecommitted_range(vp, off, bp->b_bcount)) {
				pushedrange = true;
				off = np->n_pushlo;
				cnt = np->n_pushhi - np->n_pushlo;
			} else {
				pushedrange = false;
			}
			error = nfs_commit(vp, off, cnt, curlwp);
			if (error == 0) {
				if (pushedrange) {
					nfs_merge_commit_ranges(vp);
				} else {
					nfs_add_committed_range(vp, off, cnt);
				}
			}
		} else {
			error = 0;
		}
		mutex_exit(&np->n_commitlock);
		rw_exit(&nmp->nm_writeverflock);
		if (!error) {
			/*
			 * pages are now on stable storage.
			 */
			uiop->uio_resid = 0;
			simple_lock(&uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
			}
			simple_unlock(&uobj->vmobjlock);
			return 0;
		} else if (error == NFSERR_STALEWRITEVERF) {
			nfs_clearcommit(vp->v_mount);
			goto again;
		}
		if (error) {
			bp->b_flags |= B_ERROR;
			bp->b_error = np->n_error = error;
			np->n_flag |= NWRITEERR;
		}
		return error;
	}
#endif
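	/*
	 * Remember the range being written before the RPC runs, since
	 * nfs_writerpc() advances the uio past the data it transfers.
	 */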
	off = uiop->uio_offset;
	cnt = bp->b_bcount;
	uiop->uio_rw = UIO_WRITE;
	nfsstats.write_bios++;
	error = nfs_writerpc(vp, uiop, &iomode, pageprotected, &stalewriteverf);
#ifndef NFS_V2_ONLY
	if (!error && iomode == NFSV3WRITE_UNSTABLE) {
		/*
		 * we need to commit pages later.
		 */
		mutex_enter(&np->n_commitlock);
		nfs_add_tobecommitted_range(vp, off, cnt);
		/*
		 * if there are too many uncommitted pages, commit some of
		 * them now.
		 */
		if (np->n_pushhi - np->n_pushlo > nfs_commitsize) {
			off = np->n_pushlo;
			cnt = nfs_commitsize >> 1;
			error = nfs_commit(vp, off, cnt, curlwp);
			if (!error) {
				nfs_add_committed_range(vp, off, cnt);
				nfs_del_tobecommitted_range(vp, off, cnt);
			}
			if (error == NFSERR_STALEWRITEVERF) {
				stalewriteverf = true;
				error = 0; /* it isn't a real error */
			}
		} else {
			/*
			 * re-dirty pages so that they will be passed
			 * to us later again.
			 */
			simple_lock(&uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				pgs[i]->flags &= ~PG_CLEAN;
			}
			simple_unlock(&uobj->vmobjlock);
		}
		mutex_exit(&np->n_commitlock);
	} else
#endif
	if (!error) {
		/*
		 * pages are now on stable storage.
		 */
		mutex_enter(&np->n_commitlock);
		nfs_del_committed_range(vp, off, cnt);
		mutex_exit(&np->n_commitlock);
		simple_lock(&uobj->vmobjlock);
		for (i = 0; i < npages; i++) {
			pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
		}
		simple_unlock(&uobj->vmobjlock);
	} else {
		/*
		 * we got an error.
		 */
		bp->b_flags |= B_ERROR;
		bp->b_error = np->n_error = error;
		np->n_flag |= NWRITEERR;
	}

	rw_exit(&nmp->nm_writeverflock);

	if (stalewriteverf) {
		nfs_clearcommit(vp->v_mount);
	}
	return error;
}

/*
 * nfs_doio for B_PHYS.
 */
static int
nfs_doio_phys(bp, uiop)
	struct buf *bp;
	struct uio *uiop;
{
	struct vnode *vp = bp->b_vp;
	int error;

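	/*
	 * b_blkno is in DEV_BSIZE units; convert it to a byte offset
	 * for the RPC.
	 */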
	uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
	if (bp->b_flags & B_READ) {
		uiop->uio_rw = UIO_READ;
		nfsstats.read_physios++;
		error = nfs_readrpc(vp, uiop);
	} else {
		int iomode = NFSV3WRITE_DATASYNC;
		bool stalewriteverf;
		struct nfsmount *nmp = VFSTONFS(vp->v_mount);

		uiop->uio_rw = UIO_WRITE;
		nfsstats.write_physios++;
		rw_enter(&nmp->nm_writeverflock, RW_READER);
		error = nfs_writerpc(vp, uiop, &iomode, false, &stalewriteverf);
		rw_exit(&nmp->nm_writeverflock);
		if (stalewriteverf) {
			nfs_clearcommit(bp->b_vp->v_mount);
		}
	}
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	}
	return error;
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(bp)
	struct buf *bp;
{
	int error;
	struct uio uio;
	struct uio *uiop = &uio;
	struct iovec io;
	UVMHIST_FUNC("nfs_doio"); UVMHIST_CALLED(ubchist);

	uiop->uio_iov = &io;
	uiop->uio_iovcnt = 1;
	uiop->uio_offset = (((off_t)bp->b_blkno) << DEV_BSHIFT);
	UIO_SETUP_SYSSPACE(uiop);
	io.iov_base = bp->b_data;
	io.iov_len = uiop->uio_resid = bp->b_bcount;

	/*
	 * Historically, paging was done with physio, but no more...
	 */
	if (bp->b_flags & B_PHYS) {
		/*
		 * ...though reading /dev/drum still gets us here.
		 */
		error = nfs_doio_phys(bp, uiop);
	} else if (bp->b_flags & B_READ) {
		error = nfs_doio_read(bp, uiop);
	} else {
		error = nfs_doio_write(bp, uiop);
	}
	bp->b_resid = uiop->uio_resid;
	biodone(bp);
	return (error);
}

/*
 * Vnode op for VM getpages.
 */

int
nfs_getpages(v)
	void *v;
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct nfsnode *np = VTONFS(vp);
	const int npages = *ap->a_count;
	struct vm_page *pg, **pgs, *opgs[npages];
	off_t origoffset, len;
	int i, error;
	bool v3 = NFS_ISV3(vp);
	bool write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	bool locked = (ap->a_flags & PGO_LOCKED) != 0;

	/*
	 * call the genfs code to get the pages. `pgs' may be NULL
	 * when doing read-ahead.
	 */

	pgs = ap->a_m;
	if (write && locked && v3) {
		KASSERT(pgs != NULL);
#ifdef DEBUG

		/*
		 * If PGO_LOCKED is set, real pages shouldn't exist
		 * in the array.
		 */

		for (i = 0; i < npages; i++)
			KDASSERT(pgs[i] == NULL || pgs[i] == PGO_DONTCARE);
#endif
		memcpy(opgs, pgs, npages * sizeof(struct vm_page *));
	}
	error = genfs_getpages(v);
	if (error) {
		return (error);
	}

	/*
	 * for read faults where the nfs node is not yet marked NMODIFIED,
	 * set PG_RDONLY on the pages so that we come back here if someone
	 * tries to modify them later via the mapping that will be entered
	 * for this fault.
	 */

	if (!write && (np->n_flag & NMODIFIED) == 0 && pgs != NULL) {
		if (!locked) {
			simple_lock(&uobj->vmobjlock);
		}
		for (i = 0; i < npages; i++) {
			pg = pgs[i];
			if (pg == NULL || pg == PGO_DONTCARE) {
				continue;
			}
			pg->flags |= PG_RDONLY;
		}
		if (!locked) {
			simple_unlock(&uobj->vmobjlock);
		}
	}
	if (!write) {
		return (0);
	}

	/*
	 * this is a write fault, update the commit info.
	 */

	origoffset = ap->a_offset;
	len = npages << PAGE_SHIFT;

	if (v3) {
		if (!locked) {
			mutex_enter(&np->n_commitlock);
		} else {
			if (!mutex_tryenter(&np->n_commitlock)) {

				/*
				 * Since PGO_LOCKED is set, we need to unbusy
				 * all pages fetched by genfs_getpages() above,
				 * tell the caller that there are no pages
				 * available, and put back the original pgs
				 * array.
				 */

				uvm_lock_pageq();
				uvm_page_unbusy(pgs, npages);
				uvm_unlock_pageq();
				*ap->a_count = 0;
				memcpy(pgs, opgs,
				    npages * sizeof(struct vm_page *));
				return EBUSY;
			}
		}
		nfs_del_committed_range(vp, origoffset, len);
		nfs_del_tobecommitted_range(vp, origoffset, len);
	}
	np->n_flag |= NMODIFIED;
	if (!locked) {
		simple_lock(&uobj->vmobjlock);
	}
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (pg == NULL || pg == PGO_DONTCARE) {
			continue;
		}
		pg->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
	}
	if (!locked) {
		simple_unlock(&uobj->vmobjlock);
	}
	if (v3) {
		mutex_exit(&np->n_commitlock);
	}
	return (0);
}