/*	$NetBSD: nfs_bio.c,v 1.70 2001/10/13 23:25:58 simonb Exp $	*/

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include "opt_nfs.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/resourcevar.h>
#include <sys/signalvar.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/namei.h>
#include <sys/dirent.h>
#include <sys/malloc.h>

#include <uvm/uvm_extern.h>
#include <uvm/uvm.h>

#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nfsmount.h>
#include <nfs/nqnfs.h>
#include <nfs/nfsnode.h>
#include <nfs/nfs_var.h>

extern int nfs_numasync;
extern struct nfsstats nfsstats;
/*
 * Vnode op for read using bio
 * Any similarity to readip() is purely coincidental
 */
int
nfs_bioread(vp, uio, ioflag, cred, cflag)
        struct vnode *vp;
        struct uio *uio;
        int ioflag, cflag;
        struct ucred *cred;
{
        struct nfsnode *np = VTONFS(vp);
        struct buf *bp = NULL, *rabp;
        struct vattr vattr;
        struct proc *p;
        struct nfsmount *nmp = VFSTONFS(vp->v_mount);
        struct nfsdircache *ndp = NULL, *nndp = NULL;
        caddr_t baddr, ep, edp;
        int got_buf = 0, error = 0, n = 0, on = 0, en, enn;
        int enough = 0;
        struct dirent *dp, *pdp;
        off_t curoff = 0;

#ifdef DIAGNOSTIC
        if (uio->uio_rw != UIO_READ)
                panic("nfs_read mode");
#endif
        if (uio->uio_resid == 0)
                return (0);
        if (vp->v_type != VDIR && uio->uio_offset < 0)
                return (EINVAL);
        p = uio->uio_procp;
#ifndef NFS_V2_ONLY
        if ((nmp->nm_flag & NFSMNT_NFSV3) &&
            !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
                (void)nfs_fsinfo(nmp, vp, cred, p);
#endif
        if (vp->v_type != VDIR &&
            (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
                return (EFBIG);

        /*
         * For nfs, cache consistency can only be maintained approximately.
         * Although RFC1094 does not specify the criteria, the following is
         * believed to be compatible with the reference port.
         * For nqnfs, full cache consistency is maintained within the loop.
         * For nfs:
         * If the file's modify time on the server has changed since the
         * last read rpc or you have written to the file,
         * you may have lost data cache consistency with the
         * server, so flush all of the file's data out of the cache.
         * Then force a getattr rpc to ensure that you have up to date
         * attributes.
         * NB: This implies that cache data can be read when up to
         * NFS_ATTRTIMEO seconds out of date.  If you find that you need
         * current attributes, this could be forced by setting n_attrstamp
         * to 0 before the VOP_GETATTR() call.
         */

        if ((nmp->nm_flag & NFSMNT_NQNFS) == 0 && vp->v_type != VLNK) {
                if (np->n_flag & NMODIFIED) {
                        if (vp->v_type != VREG) {
                                if (vp->v_type != VDIR)
                                        panic("nfs: bioread, not dir");
                                nfs_invaldircache(vp, 0);
                                np->n_direofoffset = 0;
                                error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
                                if (error)
                                        return (error);
                        }
                        np->n_attrstamp = 0;
                        error = VOP_GETATTR(vp, &vattr, cred, p);
                        if (error)
                                return (error);
                        np->n_mtime = vattr.va_mtime.tv_sec;
                } else {
                        error = VOP_GETATTR(vp, &vattr, cred, p);
                        if (error)
                                return (error);
                        if (np->n_mtime != vattr.va_mtime.tv_sec) {
                                if (vp->v_type == VDIR) {
                                        nfs_invaldircache(vp, 0);
                                        np->n_direofoffset = 0;
                                }
                                error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
                                if (error)
                                        return (error);
                                np->n_mtime = vattr.va_mtime.tv_sec;
                        }
                }
        }

        /*
         * update the cached read creds for this node.
         */

        if (np->n_rcred) {
                crfree(np->n_rcred);
        }
        np->n_rcred = cred;
        crhold(cred);

        do {
#ifndef NFS_V2_ONLY
            /*
             * Get a valid lease. If cached data is stale, flush it.
             */
            if (nmp->nm_flag & NFSMNT_NQNFS) {
                if (NQNFS_CKINVALID(vp, np, ND_READ)) {
                    do {
                        error = nqnfs_getlease(vp, ND_READ, cred, p);
                    } while (error == NQNFS_EXPIRED);
                    if (error)
                        return (error);
                    if (np->n_lrev != np->n_brev ||
                        (np->n_flag & NQNFSNONCACHE) ||
                        ((np->n_flag & NMODIFIED) && vp->v_type == VDIR)) {
                        if (vp->v_type == VDIR) {
                                nfs_invaldircache(vp, 0);
                                np->n_direofoffset = 0;
                        }
                        error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
                        if (error)
                            return (error);
                        np->n_brev = np->n_lrev;
                    }
                } else if (vp->v_type == VDIR && (np->n_flag & NMODIFIED)) {
                    nfs_invaldircache(vp, 0);
                    error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
                    np->n_direofoffset = 0;
                    if (error)
                        return (error);
                }
            }
#endif
            /*
             * Don't cache symlinks.
             */
            if (np->n_flag & NQNFSNONCACHE
                || ((vp->v_flag & VROOT) && vp->v_type == VLNK)) {
                switch (vp->v_type) {
                case VREG:
                        return (nfs_readrpc(vp, uio));
                case VLNK:
                        return (nfs_readlinkrpc(vp, uio, cred));
                case VDIR:
                        break;
                default:
                        printf(" NQNFSNONCACHE: type %x unexpected\n",
                            vp->v_type);
                }
            }
            baddr = (caddr_t)0;
            switch (vp->v_type) {
            case VREG:
                nfsstats.biocache_reads++;

                error = 0;
                if (uio->uio_offset >= np->n_size) {
                        break;
                }
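                /*
                 * Read through UBC: map a window of the vnode's pages
                 * with ubc_alloc(), copy to the caller with uiomove(),
                 * then release the window.  ubc_alloc() may trim
                 * bytelen to what it actually mapped, so we loop one
                 * window at a time until uio_resid is drained or we
                 * hit EOF.
                 */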
                while (uio->uio_resid > 0) {
                        void *win;
                        vsize_t bytelen = MIN(np->n_size - uio->uio_offset,
                            uio->uio_resid);

                        if (bytelen == 0)
                                break;
                        win = ubc_alloc(&vp->v_uobj, uio->uio_offset,
                            &bytelen, UBC_READ);
                        error = uiomove(win, bytelen, uio);
                        ubc_release(win, 0);
                        if (error) {
                                break;
                        }
                }
                n = 0;
                break;

            case VLNK:
                nfsstats.biocache_readlinks++;
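                /*
                 * The link target is cached in a single buffer at
                 * logical block 0, NFS_MAXPATHLEN bytes long; after
                 * the rpc, b_resid says how much of it is unused.
                 */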
                bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, p);
                if (!bp)
                        return (EINTR);
                if ((bp->b_flags & B_DONE) == 0) {
                        bp->b_flags |= B_READ;
                        error = nfs_doio(bp, p);
                        if (error) {
                                brelse(bp);
                                return (error);
                        }
                }
                n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
                got_buf = 1;
                on = 0;
                break;
            case VDIR:
diragain:
                nfsstats.biocache_readdirs++;
                ndp = nfs_searchdircache(vp, uio->uio_offset,
                    (nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
                if (!ndp) {
                        /*
                         * We've been handed a cookie that is not
                         * in the cache. If we're not translating
                         * 32 <-> 64, it may be a value that was
                         * flushed out of the cache because it grew
                         * too big. Let the server judge if it's
                         * valid or not. In the translation case,
                         * we have no way of validating this value,
                         * so punt.
                         */
                        if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
                                return (EINVAL);
                        ndp = nfs_enterdircache(vp, uio->uio_offset,
                            uio->uio_offset, 0, 0);
                }

                if (uio->uio_offset != 0 &&
                    ndp->dc_cookie == np->n_direofoffset) {
                        nfsstats.direofcache_hits++;
                        return (0);
                }

                bp = nfs_getcacheblk(vp, ndp->dc_blkno, NFS_DIRBLKSIZ, p);
                if (!bp)
                        return (EINTR);
                if ((bp->b_flags & B_DONE) == 0) {
                        bp->b_flags |= B_READ;
                        bp->b_dcookie = ndp->dc_blkcookie;
                        error = nfs_doio(bp, p);
                        if (error) {
                                /*
                                 * Yuck! The directory has been modified on the
                                 * server. Punt and let the userland code
                                 * deal with it.
                                 */
                                brelse(bp);
                                if (error == NFSERR_BAD_COOKIE) {
                                        nfs_invaldircache(vp, 0);
                                        nfs_vinvalbuf(vp, 0, cred, p, 1);
                                        error = EINVAL;
                                }
                                return (error);
                        }
                }

                /*
                 * Just return if we hit EOF right away with this
                 * block. Always check here, because direofoffset
                 * may have been set by an nfsiod since the last
                 * check.
                 */
                if (np->n_direofoffset != 0 &&
                    ndp->dc_blkcookie == np->n_direofoffset) {
                        brelse(bp);
                        return (0);
                }

                /*
                 * Find the entry we were looking for in the block.
                 */

                en = ndp->dc_entry;

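                /*
                 * Directory blocks hold packed struct dirent records;
                 * d_reclen gives the distance to the next record, so
                 * we step from entry to entry until we reach entry
                 * number en or run off the end of the valid data (edp).
                 */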
                pdp = dp = (struct dirent *)bp->b_data;
                edp = bp->b_data + bp->b_bcount - bp->b_resid;
                enn = 0;
                while (enn < en && (caddr_t)dp < edp) {
                        pdp = dp;
                        dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
                        enn++;
                }

                /*
                 * If the entry number was bigger than the number of
                 * entries in the block, or the cookie of the previous
                 * entry doesn't match, the directory cache is
                 * stale. Flush it and try again (i.e. go to
                 * the server).
                 */
                if ((caddr_t)dp >= edp || (caddr_t)dp + dp->d_reclen > edp ||
                    (en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
#ifdef DEBUG
                        printf("invalid cache: %p %p %p off %lx %lx\n",
                            pdp, dp, edp,
                            (unsigned long)uio->uio_offset,
                            (unsigned long)NFS_GETCOOKIE(pdp));
#endif
                        brelse(bp);
                        nfs_invaldircache(vp, 0);
                        nfs_vinvalbuf(vp, 0, cred, p, 0);
                        goto diragain;
                }

                on = (caddr_t)dp - bp->b_data;

                /*
                 * Cache all entries that may be exported to the
                 * user, as they may be thrown back at us. The
                 * NFSBIO_CACHECOOKIES flag indicates that all
                 * entries are being 'exported', so cache them all.
                 */

                if (en == 0 && pdp == dp) {
                        dp = (struct dirent *)
                            ((caddr_t)dp + dp->d_reclen);
                        enn++;
                }

                if (uio->uio_resid < (bp->b_bcount - bp->b_resid - on)) {
                        n = uio->uio_resid;
                        enough = 1;
                } else
                        n = bp->b_bcount - bp->b_resid - on;

                ep = bp->b_data + on + n;

                /*
                 * Find last complete entry to copy, caching entries
                 * (if requested) as we go.
                 */

                while ((caddr_t)dp < ep && (caddr_t)dp + dp->d_reclen <= ep) {
                        if (cflag & NFSBIO_CACHECOOKIES) {
                                nndp = nfs_enterdircache(vp,
                                    NFS_GETCOOKIE(pdp), ndp->dc_blkcookie,
                                    enn, bp->b_lblkno);
                                if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
                                        NFS_STASHCOOKIE32(pdp,
                                            nndp->dc_cookie32);
                                }
                        }
                        pdp = dp;
                        dp = (struct dirent *)((caddr_t)dp + dp->d_reclen);
                        enn++;
                }

                /*
                 * If the last requested entry was not the last in the
                 * buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
                 * cache the cookie of the last requested one, and
                 * set the offset to it.
                 */

                if ((on + n) < bp->b_bcount - bp->b_resid) {
                        curoff = NFS_GETCOOKIE(pdp);
                        nndp = nfs_enterdircache(vp, curoff,
                            ndp->dc_blkcookie, enn, bp->b_lblkno);
                        if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
                                NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
                                curoff = nndp->dc_cookie32;
                        }
                } else
                        curoff = bp->b_dcookie;

                /*
                 * Always cache the entry for the next block,
                 * so that readaheads can use it.
                 */
                nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie,
                    0, 0);
                if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
                        if (curoff == bp->b_dcookie) {
                                NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
                                curoff = nndp->dc_cookie32;
                        }
                }

                n = ((caddr_t)pdp + pdp->d_reclen) - (bp->b_data + on);

                /*
                 * If not eof and read aheads are enabled, start one.
                 * (You need the current block first, so that you have the
                 *  directory offset cookie of the next block.)
                 */
                if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
                    np->n_direofoffset == 0 && !(np->n_flag & NQNFSNONCACHE)) {
                        rabp = nfs_getcacheblk(vp, nndp->dc_blkno,
                            NFS_DIRBLKSIZ, p);
                        if (rabp) {
                            if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
                                rabp->b_dcookie = nndp->dc_cookie;
                                rabp->b_flags |= (B_READ | B_ASYNC);
                                if (nfs_asyncio(rabp)) {
                                    rabp->b_flags |= B_INVAL;
                                    brelse(rabp);
                                }
                            } else
                                brelse(rabp);
                        }
                }
                got_buf = 1;
                break;
            default:
                printf(" nfsbioread: type %x unexpected\n", vp->v_type);
                break;
            }

            if (n > 0) {
                if (!baddr)
                        baddr = bp->b_data;
                error = uiomove(baddr + on, (int)n, uio);
            }
            switch (vp->v_type) {
            case VREG:
                break;
            case VLNK:
                n = 0;
                break;
            case VDIR:
                if (np->n_flag & NQNFSNONCACHE)
                        bp->b_flags |= B_INVAL;
                uio->uio_offset = curoff;
                if (enough)
                        n = 0;
                break;
            default:
                printf(" nfsbioread: type %x unexpected\n", vp->v_type);
            }
            if (got_buf)
                brelse(bp);
        } while (error == 0 && uio->uio_resid > 0 && n > 0);
        return (error);
}

/*
 * Vnode op for write using bio
 */
int
nfs_write(v)
        void *v;
{
        struct vop_write_args /* {
                struct vnode *a_vp;
                struct uio *a_uio;
                int a_ioflag;
                struct ucred *a_cred;
        } */ *ap = v;
        struct uio *uio = ap->a_uio;
        struct proc *p = uio->uio_procp;
        struct vnode *vp = ap->a_vp;
        struct nfsnode *np = VTONFS(vp);
        struct ucred *cred = ap->a_cred;
        int ioflag = ap->a_ioflag;
        struct vattr vattr;
        struct nfsmount *nmp = VFSTONFS(vp->v_mount);
        void *win;
        voff_t oldoff, origoff;
        vsize_t bytelen;
        int error = 0, iomode, must_commit;

#ifdef DIAGNOSTIC
        if (uio->uio_rw != UIO_WRITE)
                panic("nfs_write mode");
        if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
                panic("nfs_write proc");
#endif
        if (vp->v_type != VREG)
                return (EIO);
        if (np->n_flag & NWRITEERR) {
                np->n_flag &= ~NWRITEERR;
                return (np->n_error);
        }
#ifndef NFS_V2_ONLY
        if ((nmp->nm_flag & NFSMNT_NFSV3) &&
            !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
                (void)nfs_fsinfo(nmp, vp, cred, p);
#endif
        if (ioflag & (IO_APPEND | IO_SYNC)) {
                if (np->n_flag & NMODIFIED) {
                        np->n_attrstamp = 0;
                        error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
                        if (error)
                                return (error);
                }
                if (ioflag & IO_APPEND) {
                        np->n_attrstamp = 0;
                        error = VOP_GETATTR(vp, &vattr, cred, p);
                        if (error)
                                return (error);
                        uio->uio_offset = np->n_size;
                }
        }
        if (uio->uio_offset < 0)
                return (EINVAL);
        if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
                return (EFBIG);
        if (uio->uio_resid == 0)
                return (0);
        /*
         * Maybe this should be above the vnode op call, but so long as
         * file servers have no limits, I don't think it matters
         */
        if (p && uio->uio_offset + uio->uio_resid >
            p->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
                psignal(p, SIGXFSZ);
                return (EFBIG);
        }

        /*
         * update the cached write creds for this node.
         */

        if (np->n_wcred) {
                crfree(np->n_wcred);
        }
        np->n_wcred = cred;
        crhold(cred);

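        /*
         * For uncachable (nqnfs) nodes with a single iovec, bypass the
         * page cache entirely and do one synchronous FILESYNC write rpc.
         */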
        if ((np->n_flag & NQNFSNONCACHE) && uio->uio_iovcnt == 1) {
                iomode = NFSV3WRITE_FILESYNC;
                error = nfs_writerpc(vp, uio, &iomode, &must_commit);
                if (must_commit)
                        nfs_clearcommit(vp->v_mount);
                return (error);
        }

        origoff = uio->uio_offset;
        do {
                oldoff = uio->uio_offset;
                bytelen = uio->uio_resid;

#ifndef NFS_V2_ONLY
                /*
                 * Check for a valid write lease.
                 */
                if ((nmp->nm_flag & NFSMNT_NQNFS) &&
                    NQNFS_CKINVALID(vp, np, ND_WRITE)) {
                        do {
                                error = nqnfs_getlease(vp, ND_WRITE, cred, p);
                        } while (error == NQNFS_EXPIRED);
                        if (error)
                                return (error);
                        if (np->n_lrev != np->n_brev ||
                            (np->n_flag & NQNFSNONCACHE)) {
                                error = nfs_vinvalbuf(vp, V_SAVE, cred, p, 1);
                                if (error)
                                        return (error);
                                np->n_brev = np->n_lrev;
                        }
                }
#endif
                nfsstats.biocache_writes++;

                np->n_flag |= NMODIFIED;
                if (np->n_size < uio->uio_offset + bytelen) {
                        np->n_size = uio->uio_offset + bytelen;
                }
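                /*
                 * If the write is page-aligned at both ends, the pages
                 * will be overwritten in full, so UBC_FAULTBUSY lets UBC
                 * hand back fresh busy pages instead of faulting the old
                 * contents in from the server first.
                 */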
                if ((uio->uio_offset & PAGE_MASK) == 0 &&
                    ((uio->uio_offset + bytelen) & PAGE_MASK) == 0) {
                        win = ubc_alloc(&vp->v_uobj, uio->uio_offset,
                            &bytelen, UBC_WRITE | UBC_FAULTBUSY);
                } else {
                        win = ubc_alloc(&vp->v_uobj, uio->uio_offset,
                            &bytelen, UBC_WRITE);
                }
                error = uiomove(win, bytelen, uio);
                ubc_release(win, 0);
                if (error) {
                        break;
                }

                /*
                 * update UVM's notion of the size now that we've
                 * copied the data into the vnode's pages.
                 */

                if (vp->v_size < uio->uio_offset) {
                        uvm_vnp_setsize(vp, uio->uio_offset);
                }

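                /*
                 * Each time the write crosses an nm_wsize boundary, push
                 * the chunk just completed out with a weak pageout
                 * (PGO_WEAK defers the v3 commit), so dirty data flows
                 * to the server in write-size units as we go.
                 */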
                if ((oldoff & ~(nmp->nm_wsize - 1)) !=
                    (uio->uio_offset & ~(nmp->nm_wsize - 1))) {
                        simple_lock(&vp->v_uobj.vmobjlock);
                        error = (vp->v_uobj.pgops->pgo_put)(&vp->v_uobj,
                            trunc_page(oldoff & ~(nmp->nm_wsize - 1)),
                            round_page((uio->uio_offset + nmp->nm_wsize - 1) &
                                ~(nmp->nm_wsize - 1)),
                            PGO_CLEANIT | PGO_WEAK);
                }
        } while (uio->uio_resid > 0);
        if ((np->n_flag & NQNFSNONCACHE) || (ioflag & IO_SYNC)) {
                simple_lock(&vp->v_uobj.vmobjlock);
                error = (vp->v_uobj.pgops->pgo_put)(&vp->v_uobj,
                    trunc_page(origoff & ~(nmp->nm_wsize - 1)),
                    round_page((uio->uio_offset + nmp->nm_wsize - 1) &
                        ~(nmp->nm_wsize - 1)),
                    PGO_CLEANIT | PGO_SYNCIO);
        }
        return error;
}

/*
 * Get an nfs cache block.
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 */
struct buf *
nfs_getcacheblk(vp, bn, size, p)
        struct vnode *vp;
        daddr_t bn;
        int size;
        struct proc *p;
{
        struct buf *bp;
        struct nfsmount *nmp = VFSTONFS(vp->v_mount);

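        /*
         * On interruptible mounts, ask getblk() to catch signals
         * (PCATCH); if the wait is interrupted it returns NULL, and we
         * retry every 2*hz ticks, bailing out once nfs_sigintr()
         * reports a pending signal.
         */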
        if (nmp->nm_flag & NFSMNT_INT) {
                bp = getblk(vp, bn, size, PCATCH, 0);
                while (bp == NULL) {
                        if (nfs_sigintr(nmp, NULL, p))
                                return (NULL);
                        bp = getblk(vp, bn, size, 0, 2 * hz);
                }
        } else
                bp = getblk(vp, bn, size, 0, 0);
        return (bp);
}

/*
 * Flush and invalidate all dirty buffers. If another process is already
 * doing the flush, just wait for completion.
 */
int
nfs_vinvalbuf(vp, flags, cred, p, intrflg)
        struct vnode *vp;
        int flags;
        struct ucred *cred;
        struct proc *p;
        int intrflg;
{
        struct nfsnode *np = VTONFS(vp);
        struct nfsmount *nmp = VFSTONFS(vp->v_mount);
        int error = 0, slpflag, slptimeo;

        if ((nmp->nm_flag & NFSMNT_INT) == 0)
                intrflg = 0;
        if (intrflg) {
                slpflag = PCATCH;
                slptimeo = 2 * hz;
        } else {
                slpflag = 0;
                slptimeo = 0;
        }
        /*
         * First wait for any other process doing a flush to complete.
         */
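        /*
         * NFLUSHINPROG/NFLUSHWANT form a simple handshake on &np->n_flag:
         * the active flusher holds NFLUSHINPROG, waiters set NFLUSHWANT
         * and tsleep() on the flag word, and the flusher wakeup()s them
         * when it is done.
         */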
        while (np->n_flag & NFLUSHINPROG) {
                np->n_flag |= NFLUSHWANT;
                error = tsleep((caddr_t)&np->n_flag, PRIBIO + 2, "nfsvinval",
                    slptimeo);
                if (error && intrflg && nfs_sigintr(nmp, NULL, p))
                        return (EINTR);
        }

        /*
         * Now, flush as required.
         */
        np->n_flag |= NFLUSHINPROG;
        error = vinvalbuf(vp, flags, cred, p, slpflag, 0);
        while (error) {
                if (intrflg && nfs_sigintr(nmp, NULL, p)) {
                        np->n_flag &= ~NFLUSHINPROG;
                        if (np->n_flag & NFLUSHWANT) {
                                np->n_flag &= ~NFLUSHWANT;
                                wakeup((caddr_t)&np->n_flag);
                        }
                        return (EINTR);
                }
                error = vinvalbuf(vp, flags, cred, p, 0, slptimeo);
        }
        np->n_flag &= ~(NMODIFIED | NFLUSHINPROG);
        if (np->n_flag & NFLUSHWANT) {
                np->n_flag &= ~NFLUSHWANT;
                wakeup((caddr_t)&np->n_flag);
        }
        return (0);
}

/*
 * Initiate asynchronous I/O. Return an error if no nfsiods are available.
 * This is mainly to avoid queueing async I/O requests when the nfsiods
 * are all hung on a dead server.
 */

int
nfs_asyncio(bp)
        struct buf *bp;
{
        int i;
        struct nfsmount *nmp;
        int gotiod, slpflag = 0, slptimeo = 0, error;

        if (nfs_numasync == 0)
                return (EIO);

        nmp = VFSTONFS(bp->b_vp->v_mount);
again:
        if (nmp->nm_flag & NFSMNT_INT)
                slpflag = PCATCH;
        gotiod = FALSE;

        /*
         * Find a free iod to process this request.
         */

        for (i = 0; i < NFS_MAXASYNCDAEMON; i++)
                if (nfs_iodwant[i]) {
                        /*
                         * Found one, so wake it up and tell it which
                         * mount to process.
                         */
                        nfs_iodwant[i] = NULL;
                        nfs_iodmount[i] = nmp;
                        nmp->nm_bufqiods++;
                        wakeup((caddr_t)&nfs_iodwant[i]);
                        gotiod = TRUE;
                        break;
                }
        /*
         * If none are free, we may already have an iod working on this mount
         * point. If so, it will process our request.
         */
        if (!gotiod && nmp->nm_bufqiods > 0)
                gotiod = TRUE;

        /*
         * If we have an iod which can process the request, then queue
         * the buffer.
         */
        if (gotiod) {
                /*
                 * Ensure that the queue never grows too large.
                 */
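                /*
                 * The bound is two queued buffers per configured iod;
                 * past that we throttle the producer, sleeping on
                 * nm_bufq until an iod drains the queue or the mount
                 * is interrupted.
                 */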
                while (nmp->nm_bufqlen >= 2 * nfs_numasync) {
                        nmp->nm_bufqwant = TRUE;
                        error = tsleep(&nmp->nm_bufq, slpflag | PRIBIO,
                            "nfsaio", slptimeo);
                        if (error) {
                                if (nfs_sigintr(nmp, NULL, bp->b_proc))
                                        return (EINTR);
                                if (slpflag == PCATCH) {
                                        slpflag = 0;
                                        slptimeo = 2 * hz;
                                }
                        }
                        /*
                         * We might have lost our iod while sleeping,
                         * so check and loop if necessary.
                         */
                        if (nmp->nm_bufqiods == 0)
                                goto again;
                }
                TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
                nmp->nm_bufqlen++;
                return (0);
        }

        /*
         * All the iods are busy on other mounts, so return EIO to
         * force the caller to process the i/o synchronously.
         */
        return (EIO);
}

/*
 * Do an I/O operation to/from a cache block. This may be called
 * synchronously or from an nfsiod.
 */
int
nfs_doio(bp, p)
        struct buf *bp;
        struct proc *p;
{
        struct uio *uiop;
        struct vnode *vp;
        struct nfsnode *np;
        struct nfsmount *nmp;
        int error = 0, diff, len, iomode, must_commit = 0;
        struct uio uio;
        struct iovec io;

        vp = bp->b_vp;
        np = VTONFS(vp);
        nmp = VFSTONFS(vp->v_mount);
        uiop = &uio;
        uiop->uio_iov = &io;
        uiop->uio_iovcnt = 1;
        uiop->uio_segflg = UIO_SYSSPACE;
        uiop->uio_procp = p;

        /*
         * Historically, paging was done with physio, but no more...
         */
        if (bp->b_flags & B_PHYS) {
                /*
                 * ...though reading /dev/drum still gets us here.
                 */
                io.iov_len = uiop->uio_resid = bp->b_bcount;
                /* mapping was done by vmapbuf() */
                io.iov_base = bp->b_data;
                uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
                if (bp->b_flags & B_READ) {
                        uiop->uio_rw = UIO_READ;
                        nfsstats.read_physios++;
                        error = nfs_readrpc(vp, uiop);
                } else {
                        iomode = NFSV3WRITE_DATASYNC;
                        uiop->uio_rw = UIO_WRITE;
                        nfsstats.write_physios++;
                        error = nfs_writerpc(vp, uiop, &iomode, &must_commit);
                }
                if (error) {
                        bp->b_flags |= B_ERROR;
                        bp->b_error = error;
                }
        } else if (bp->b_flags & B_READ) {
                io.iov_len = uiop->uio_resid = bp->b_bcount;
                io.iov_base = bp->b_data;
                uiop->uio_rw = UIO_READ;
                switch (vp->v_type) {
                case VREG:
                        uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
                        nfsstats.read_bios++;
                        error = nfs_readrpc(vp, uiop);
                        if (!error && uiop->uio_resid) {

                                /*
                                 * If len > 0, there is a hole in the file and
                                 * no writes after the hole have been pushed to
                                 * the server yet.
                                 * Just zero fill the rest of the valid area.
                                 */

                                diff = bp->b_bcount - uiop->uio_resid;
                                len = np->n_size -
                                    ((((off_t)bp->b_blkno) << DEV_BSHIFT)
                                    + diff);
                                if (len > 0) {
                                        len = MIN(len, uiop->uio_resid);
                                        memset((char *)bp->b_data + diff,
                                            0, len);
                                }
                        }
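                        /*
                         * If this vnode backs a running executable and
                         * the file changed on the server, the resident
                         * text is now stale; kill the process rather
                         * than let it run a mix of old and new pages.
                         */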
                        if (p && (vp->v_flag & VTEXT) &&
                            (((nmp->nm_flag & NFSMNT_NQNFS) &&
                              NQNFS_CKINVALID(vp, np, ND_READ) &&
                              np->n_lrev != np->n_brev) ||
                             (!(nmp->nm_flag & NFSMNT_NQNFS) &&
                              np->n_mtime != np->n_vattr->va_mtime.tv_sec))) {
                                uprintf("Process killed due to "
                                    "text file modification\n");
                                psignal(p, SIGKILL);
                                p->p_holdcnt++;
                        }
                        break;
                case VLNK:
                        uiop->uio_offset = (off_t)0;
                        nfsstats.readlink_bios++;
                        error = nfs_readlinkrpc(vp, uiop, curproc->p_ucred);
                        break;
                case VDIR:
                        nfsstats.readdir_bios++;
                        uiop->uio_offset = bp->b_dcookie;
                        if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
                                error = nfs_readdirplusrpc(vp, uiop,
                                    curproc->p_ucred);
                                if (error == NFSERR_NOTSUPP)
                                        nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
                        }
                        if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
                                error = nfs_readdirrpc(vp, uiop,
                                    curproc->p_ucred);
                        if (!error) {
                                bp->b_dcookie = uiop->uio_offset;
                        }
                        break;
                default:
                        printf("nfs_doio: type %x unexpected\n", vp->v_type);
                        break;
                }
                if (error) {
                        bp->b_flags |= B_ERROR;
                        bp->b_error = error;
                }
        } else {
                /*
                 * If B_NEEDCOMMIT is set, a commit rpc may do the trick.
                 * If not, an actual write will have to be scheduled.
                 */

                io.iov_base = bp->b_data;
                io.iov_len = uiop->uio_resid = bp->b_bcount;
                uiop->uio_offset = (((off_t)bp->b_blkno) << DEV_BSHIFT);
                uiop->uio_rw = UIO_WRITE;
                nfsstats.write_bios++;
                iomode = NFSV3WRITE_UNSTABLE;
                error = nfs_writerpc(vp, uiop, &iomode, &must_commit);
        }
        bp->b_resid = uiop->uio_resid;
        if (must_commit)
                nfs_clearcommit(vp->v_mount);
        biodone(bp);
        return (error);
}

/*
 * Vnode op for VM getpages.
 */

int
nfs_getpages(v)
        void *v;
{
        struct vop_getpages_args /* {
                struct vnode *a_vp;
                voff_t a_offset;
                struct vm_page **a_m;
                int *a_count;
                int a_centeridx;
                vm_prot_t a_access_type;
                int a_advice;
                int a_flags;
        } */ *ap = v;

        struct vnode *vp = ap->a_vp;
        struct uvm_object *uobj = &vp->v_uobj;
        struct nfsnode *np = VTONFS(vp);
        struct vm_page *pg, **pgs;
        off_t origoffset;
        int i, error, npages;
        boolean_t v3 = NFS_ISV3(vp);
        boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
        UVMHIST_FUNC("nfs_getpages"); UVMHIST_CALLED(ubchist);

        /*
         * update the cached read creds for this node.
         */

        if (np->n_rcred) {
                crfree(np->n_rcred);
        }
        np->n_rcred = curproc->p_ucred;
        crhold(np->n_rcred);

        /*
         * call the genfs code to get the pages.
         */

        npages = *ap->a_count;
        error = genfs_getpages(v);
        if (error || !write || !v3) {
                return error;
        }

        /*
         * this is a write fault, update the commit info.
         */

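        /*
         * These pages are about to be dirtied again, so any commit
         * state covering them is stale: drop them from both the
         * committed and to-be-committed ranges and clear
         * PG_NEEDCOMMIT/PG_RDONLY so the fault can map them writable.
         */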
        origoffset = ap->a_offset;
        pgs = ap->a_m;

        lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
        nfs_del_committed_range(vp, origoffset, npages << PAGE_SHIFT);
        nfs_del_tobecommitted_range(vp, origoffset, npages << PAGE_SHIFT);
        simple_lock(&uobj->vmobjlock);
        for (i = 0; i < npages; i++) {
                pg = pgs[i];
                if (pg == NULL || pg == PGO_DONTCARE) {
                        continue;
                }
                pg->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
        }
        simple_unlock(&uobj->vmobjlock);
        lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
        return 0;
}

int
nfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
        struct uvm_object *uobj = &vp->v_uobj;
        struct nfsnode *np = VTONFS(vp);
        off_t origoffset, commitoff;
        uint32_t commitbytes;
        int error, i;
        int bytes;
        boolean_t v3 = NFS_ISV3(vp);
        boolean_t weak = flags & PGO_WEAK;
        UVMHIST_FUNC("nfs_gop_write"); UVMHIST_CALLED(ubchist);

        /* XXX for now, skip the v3 stuff. */
        v3 = FALSE;

        /*
         * for NFSv2, just write normally.
         */

        if (!v3) {
                return genfs_gop_write(vp, pgs, npages, flags);
        }

        /*
         * for NFSv3, use delayed writes and the "commit" operation
         * to avoid sync writes.
         */

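        /*
         * Pages are first written UNSTABLE and tracked in the node's
         * "to be committed" range.  A weak flush (PGO_WEAK) only
         * extends that range; a strong flush sends a COMMIT rpc for
         * the whole pending range, moves it to the "committed" range,
         * and clears PG_NEEDCOMMIT on the pages.
         */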
        origoffset = pgs[0]->offset;
        bytes = npages << PAGE_SHIFT;
        lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
        if (nfs_in_committed_range(vp, origoffset, bytes)) {
                goto committed;
        }
        if (nfs_in_tobecommitted_range(vp, origoffset, bytes)) {
                if (weak) {
                        lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
                        return 0;
                } else {
                        commitoff = np->n_pushlo;
                        commitbytes = (uint32_t)(np->n_pushhi - np->n_pushlo);
                        goto commit;
                }
        } else {
                commitoff = origoffset;
                commitbytes = npages << PAGE_SHIFT;
        }
        simple_lock(&uobj->vmobjlock);
        for (i = 0; i < npages; i++) {
                pgs[i]->flags |= PG_NEEDCOMMIT | PG_RDONLY;
                pgs[i]->flags &= ~PG_CLEAN;
        }
        simple_unlock(&uobj->vmobjlock);
        lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
        error = genfs_gop_write(vp, pgs, npages, flags);
        if (error) {
                return error;
        }
        lockmgr(&np->n_commitlock, LK_EXCLUSIVE, NULL);
        if (weak) {
                nfs_add_tobecommitted_range(vp, origoffset,
                    npages << PAGE_SHIFT);
        } else {
commit:
                error = nfs_commit(vp, commitoff, commitbytes, curproc);
                nfs_del_tobecommitted_range(vp, commitoff, commitbytes);
committed:
                simple_lock(&uobj->vmobjlock);
                for (i = 0; i < npages; i++) {
                        pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
                }
                simple_unlock(&uobj->vmobjlock);
        }
        lockmgr(&np->n_commitlock, LK_RELEASE, NULL);
        return error;
}