nfs_bio.c revision 1.128.4.5

1 1.128.4.5 yamt /* $NetBSD: nfs_bio.c,v 1.128.4.5 2007/10/27 11:36:16 yamt Exp $ */
2 1.15 cgd
3 1.1 cgd /*
4 1.12 mycroft * Copyright (c) 1989, 1993
5 1.12 mycroft * The Regents of the University of California. All rights reserved.
6 1.1 cgd *
7 1.1 cgd * This code is derived from software contributed to Berkeley by
8 1.1 cgd * Rick Macklem at The University of Guelph.
9 1.1 cgd *
10 1.1 cgd * Redistribution and use in source and binary forms, with or without
11 1.1 cgd * modification, are permitted provided that the following conditions
12 1.1 cgd * are met:
13 1.1 cgd * 1. Redistributions of source code must retain the above copyright
14 1.1 cgd * notice, this list of conditions and the following disclaimer.
15 1.1 cgd * 2. Redistributions in binary form must reproduce the above copyright
16 1.1 cgd * notice, this list of conditions and the following disclaimer in the
17 1.1 cgd * documentation and/or other materials provided with the distribution.
18 1.107 agc * 3. Neither the name of the University nor the names of its contributors
19 1.1 cgd * may be used to endorse or promote products derived from this software
20 1.1 cgd * without specific prior written permission.
21 1.1 cgd *
22 1.1 cgd * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
23 1.1 cgd * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 1.1 cgd * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 1.1 cgd * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
26 1.1 cgd * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 1.1 cgd * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 1.1 cgd * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 1.1 cgd * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 1.1 cgd * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 1.1 cgd * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 1.1 cgd * SUCH DAMAGE.
33 1.1 cgd *
34 1.24 fvdl * @(#)nfs_bio.c 8.9 (Berkeley) 3/30/95
35 1.1 cgd */
36 1.71 lukem
37 1.71 lukem #include <sys/cdefs.h>
38 1.128.4.5 yamt __KERNEL_RCSID(0, "$NetBSD: nfs_bio.c,v 1.128.4.5 2007/10/27 11:36:16 yamt Exp $");
39 1.1 cgd
40 1.51 bjh21 #include "opt_nfs.h"
41 1.54 chs #include "opt_ddb.h"
42 1.51 bjh21
43 1.8 mycroft #include <sys/param.h>
44 1.8 mycroft #include <sys/systm.h>
45 1.12 mycroft #include <sys/resourcevar.h>
46 1.24 fvdl #include <sys/signalvar.h>
47 1.8 mycroft #include <sys/proc.h>
48 1.8 mycroft #include <sys/buf.h>
49 1.8 mycroft #include <sys/vnode.h>
50 1.8 mycroft #include <sys/mount.h>
51 1.12 mycroft #include <sys/kernel.h>
52 1.23 christos #include <sys/namei.h>
53 1.34 fvdl #include <sys/dirent.h>
54 1.54 chs #include <sys/malloc.h>
55 1.128.4.1 yamt #include <sys/kauth.h>
56 1.1 cgd
57 1.41 mrg #include <uvm/uvm_extern.h>
58 1.54 chs #include <uvm/uvm.h>
59 1.41 mrg
60 1.12 mycroft #include <nfs/rpcv2.h>
61 1.24 fvdl #include <nfs/nfsproto.h>
62 1.8 mycroft #include <nfs/nfs.h>
63 1.8 mycroft #include <nfs/nfsmount.h>
64 1.24 fvdl #include <nfs/nfsnode.h>
65 1.23 christos #include <nfs/nfs_var.h>
66 1.1 cgd
67 1.12 mycroft extern int nfs_numasync;
68 1.74 chs extern int nfs_commitsize;
69 1.24 fvdl extern struct nfsstats nfsstats;
70 1.1 cgd
71 1.91 yamt static int nfs_doio_read __P((struct buf *, struct uio *));
72 1.91 yamt static int nfs_doio_write __P((struct buf *, struct uio *));
73 1.91 yamt static int nfs_doio_phys __P((struct buf *, struct uio *));
74 1.91 yamt
75 1.1 cgd /*
76 1.1 cgd * Vnode op for read using bio
77 1.1 cgd * Any similarity to readip() is purely coincidental
78 1.1 cgd */
79 1.23 christos int
80 1.34 fvdl nfs_bioread(vp, uio, ioflag, cred, cflag)
81 1.48 augustss struct vnode *vp;
82 1.48 augustss struct uio *uio;
83 1.34 fvdl int ioflag, cflag;
84 1.128.4.1 yamt kauth_cred_t cred;
85 1.1 cgd {
86 1.48 augustss struct nfsnode *np = VTONFS(vp);
87 1.23 christos struct buf *bp = NULL, *rabp;
88 1.24 fvdl struct nfsmount *nmp = VFSTONFS(vp->v_mount);
89 1.35 fvdl struct nfsdircache *ndp = NULL, *nndp = NULL;
90 1.128.4.4 yamt void *baddr;
91 1.54 chs int got_buf = 0, error = 0, n = 0, on = 0, en, enn;
92 1.34 fvdl int enough = 0;
93 1.128.4.1 yamt struct dirent *dp, *pdp, *edp, *ep;
94 1.54 chs off_t curoff = 0;
95 1.128.4.1 yamt int advice;
96 1.128.4.1 yamt struct lwp *l = curlwp;
97 1.1 cgd
98 1.1 cgd #ifdef DIAGNOSTIC
99 1.1 cgd if (uio->uio_rw != UIO_READ)
100 1.1 cgd panic("nfs_read mode");
101 1.1 cgd #endif
102 1.1 cgd if (uio->uio_resid == 0)
103 1.1 cgd return (0);
104 1.34 fvdl if (vp->v_type != VDIR && uio->uio_offset < 0)
105 1.1 cgd return (EINVAL);
106 1.51 bjh21 #ifndef NFS_V2_ONLY
107 1.34 fvdl if ((nmp->nm_flag & NFSMNT_NFSV3) &&
108 1.34 fvdl !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
109 1.128.4.1 yamt (void)nfs_fsinfo(nmp, vp, cred, l);
110 1.51 bjh21 #endif
111 1.34 fvdl if (vp->v_type != VDIR &&
112 1.34 fvdl (uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
113 1.33 fvdl return (EFBIG);
114 1.54 chs
115 1.1 cgd /*
116 1.12 mycroft * For nfs, cache consistency can only be maintained approximately.
117 1.12 mycroft * Although RFC1094 does not specify the criteria, the following is
118 1.12 mycroft * believed to be compatible with the reference port.
119 1.128.4.2 yamt *
120 1.1 cgd * If the file's modify time on the server has changed since the
121 1.1 cgd * last read rpc or you have written to the file,
122 1.1 cgd * you may have lost data cache consistency with the
123 1.1 cgd * server, so flush all of the file's data out of the cache.
124 1.1 cgd * Then force a getattr rpc to ensure that you have up to date
125 1.1 cgd * attributes.
126 1.1 cgd * NB: This implies that cache data can be read when up to
127 1.1 cgd * NFS_ATTRTIMEO seconds out of date. If you find that you need current
128 1.1 cgd * attributes this could be forced by setting n_attrstamp to 0 before
129 1.12 mycroft * the VOP_GETATTR() call.
130 1.1 cgd */
131 1.54 chs
132 1.128.4.2 yamt if (vp->v_type != VLNK) {
133 1.128.4.1 yamt error = nfs_flushstalebuf(vp, cred, l,
134 1.123 yamt NFS_FLUSHSTALEBUF_MYWRITE);
135 1.123 yamt if (error)
136 1.123 yamt return error;
137 1.1 cgd }
138 1.54 chs
139 1.1 cgd do {
140 1.26 fvdl /*
141 1.26 fvdl * Don't cache the root symlink.
142 1.26 fvdl */
143 1.128.4.5 yamt if ((vp->v_vflag & VV_ROOT) && vp->v_type == VLNK) {
144 1.128.4.2 yamt return (nfs_readlinkrpc(vp, uio, cred));
145 1.12 mycroft }
146 1.128.4.4 yamt baddr = (void *)0;
147 1.1 cgd switch (vp->v_type) {
148 1.1 cgd case VREG:
149 1.1 cgd nfsstats.biocache_reads++;
150 1.12 mycroft
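/*
 * Regular-file reads are served out of the page cache: ubc_uiomove()
 * copies from a UBC window over the vnode's pages, and missing pages
 * are filled by VOP_GETPAGES() (and ultimately read RPCs) rather than
 * through nfs_getcacheblk().
 */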
151 1.128.4.1 yamt advice = IO_ADV_DECODE(ioflag);
152 1.54 chs error = 0;
153 1.54 chs while (uio->uio_resid > 0) {
154 1.128.4.1 yamt vsize_t bytelen;
155 1.54 chs
156 1.128.4.1 yamt nfs_delayedtruncate(vp);
157 1.128.4.1 yamt if (np->n_size <= uio->uio_offset) {
158 1.54 chs break;
159 1.128.4.1 yamt }
160 1.128.4.1 yamt bytelen =
161 1.128.4.1 yamt MIN(np->n_size - uio->uio_offset, uio->uio_resid);
162 1.128.4.4 yamt error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
163 1.128.4.4 yamt advice, UBC_READ | UBC_PARTIALOK |
164 1.128.4.4 yamt (UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0));
165 1.54 chs if (error) {
166 1.128.4.1 yamt /*
167 1.128.4.1 yamt * XXXkludge
168 1.128.4.1 yamt * the file has been truncated on the server.
169 1.128.4.1 yamt * there isn't much we can do.
170 1.128.4.1 yamt */
171 1.128.4.1 yamt if (uio->uio_offset >= np->n_size) {
172 1.128.4.1 yamt /* end of file */
173 1.128.4.1 yamt error = 0;
174 1.128.4.1 yamt } else {
175 1.128.4.1 yamt break;
176 1.128.4.1 yamt }
177 1.12 mycroft }
178 1.12 mycroft }
179 1.54 chs break;
180 1.12 mycroft
181 1.1 cgd case VLNK:
182 1.1 cgd nfsstats.biocache_readlinks++;
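/*
 * The symlink target is cached in a single NFS_MAXPATHLEN buffer
 * at block 0; nfs_doio() fills it with a readlink RPC on first use.
 */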
183 1.128.4.1 yamt bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, l);
184 1.12 mycroft if (!bp)
185 1.12 mycroft return (EINTR);
186 1.12 mycroft if ((bp->b_flags & B_DONE) == 0) {
187 1.12 mycroft bp->b_flags |= B_READ;
188 1.128.4.1 yamt error = nfs_doio(bp);
189 1.24 fvdl if (error) {
190 1.128.4.5 yamt brelse(bp, 0);
191 1.12 mycroft return (error);
192 1.12 mycroft }
193 1.12 mycroft }
194 1.63 chs n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
195 1.12 mycroft got_buf = 1;
196 1.1 cgd on = 0;
197 1.1 cgd break;
198 1.1 cgd case VDIR:
199 1.34 fvdl diragain:
200 1.34 fvdl nfsstats.biocache_readdirs++;
201 1.35 fvdl ndp = nfs_searchdircache(vp, uio->uio_offset,
202 1.35 fvdl (nmp->nm_flag & NFSMNT_XLATECOOKIE), 0);
203 1.35 fvdl if (!ndp) {
204 1.35 fvdl /*
205 1.35 fvdl * We've been handed a cookie that is not
206 1.35 fvdl * in the cache. If we're not translating
207 1.35 fvdl * 32 <-> 64, it may be a value that was
208 1.35 fvdl * flushed out of the cache because it grew
209 1.35 fvdl * too big. Let the server judge if it's
210 1.35 fvdl * valid or not. In the translation case,
211 1.35 fvdl * we have no way of validating this value,
212 1.35 fvdl * so punt.
213 1.35 fvdl */
214 1.35 fvdl if (nmp->nm_flag & NFSMNT_XLATECOOKIE)
215 1.35 fvdl return (EINVAL);
216 1.128 perry ndp = nfs_enterdircache(vp, uio->uio_offset,
217 1.35 fvdl uio->uio_offset, 0, 0);
218 1.35 fvdl }
219 1.35 fvdl
220 1.125 yamt if (NFS_EOFVALID(np) &&
221 1.35 fvdl ndp->dc_cookie == np->n_direofoffset) {
222 1.120 yamt nfs_putdircache(np, ndp);
223 1.35 fvdl nfsstats.direofcache_hits++;
224 1.18 mycroft return (0);
225 1.35 fvdl }
226 1.35 fvdl
227 1.128.4.1 yamt bp = nfs_getcacheblk(vp, NFSDC_BLKNO(ndp), NFS_DIRBLKSIZ, l);
228 1.12 mycroft if (!bp)
229 1.24 fvdl return (EINTR);
230 1.12 mycroft if ((bp->b_flags & B_DONE) == 0) {
231 1.24 fvdl bp->b_flags |= B_READ;
232 1.35 fvdl bp->b_dcookie = ndp->dc_blkcookie;
233 1.128.4.1 yamt error = nfs_doio(bp);
234 1.24 fvdl if (error) {
235 1.34 fvdl /*
236 1.34 fvdl * Yuck! The directory has been modified on the
237 1.34 fvdl * server. Punt and let the userland code
238 1.34 fvdl * deal with it.
239 1.34 fvdl */
240 1.120 yamt nfs_putdircache(np, ndp);
241 1.128.4.5 yamt brelse(bp, 0);
242 1.128.4.2 yamt /*
243 1.128.4.2 yamt * nfs_request maps NFSERR_BAD_COOKIE to EINVAL.
244 1.128.4.2 yamt */
245 1.128.4.2 yamt if (error == EINVAL) { /* NFSERR_BAD_COOKIE */
246 1.35 fvdl nfs_invaldircache(vp, 0);
247 1.128.4.1 yamt nfs_vinvalbuf(vp, 0, cred, l, 1);
248 1.12 mycroft }
249 1.34 fvdl return (error);
250 1.38 fvdl }
251 1.40 fvdl }
252 1.40 fvdl
253 1.40 fvdl /*
254 1.40 fvdl * Just return if we hit EOF right away with this
255 1.40 fvdl * block. Always check here, because direofoffset
256 1.40 fvdl * may have been set by an nfsiod since the last
257 1.40 fvdl * check.
258 1.127 yamt *
259 1.127 yamt * also, empty block implies EOF.
260 1.40 fvdl */
261 1.127 yamt
262 1.127 yamt if (bp->b_bcount == bp->b_resid ||
263 1.127 yamt (NFS_EOFVALID(np) &&
264 1.127 yamt ndp->dc_blkcookie == np->n_direofoffset)) {
265 1.127 yamt KASSERT(bp->b_bcount != bp->b_resid ||
266 1.127 yamt ndp->dc_blkcookie == bp->b_dcookie);
267 1.120 yamt nfs_putdircache(np, ndp);
268 1.128.4.5 yamt brelse(bp, BC_NOCACHE);
269 1.127 yamt return 0;
270 1.12 mycroft }
271 1.12 mycroft
272 1.12 mycroft /*
273 1.34 fvdl * Find the entry we were looking for in the block.
274 1.34 fvdl */
275 1.34 fvdl
276 1.34 fvdl en = ndp->dc_entry;
277 1.34 fvdl
278 1.34 fvdl pdp = dp = (struct dirent *)bp->b_data;
279 1.128.4.4 yamt edp = (struct dirent *)(void *)((char *)bp->b_data + bp->b_bcount -
280 1.128.4.1 yamt bp->b_resid);
281 1.34 fvdl enn = 0;
282 1.128.4.1 yamt while (enn < en && dp < edp) {
283 1.34 fvdl pdp = dp;
284 1.128.4.1 yamt dp = _DIRENT_NEXT(dp);
285 1.34 fvdl enn++;
286 1.34 fvdl }
287 1.34 fvdl
288 1.34 fvdl /*
289 1.34 fvdl * If the entry number was bigger than the number of
290 1.34 fvdl * entries in the block, or the cookie of the previous
291 1.34 fvdl * entry doesn't match, the directory cache is
292 1.34 fvdl * stale. Flush it and try again (i.e. go to
293 1.34 fvdl * the server).
294 1.34 fvdl */
295 1.128.4.1 yamt if (dp >= edp || (struct dirent *)_DIRENT_NEXT(dp) > edp ||
296 1.35 fvdl (en > 0 && NFS_GETCOOKIE(pdp) != ndp->dc_cookie)) {
297 1.34 fvdl #ifdef DEBUG
298 1.37 thorpej printf("invalid cache: %p %p %p off %lx %lx\n",
299 1.37 thorpej pdp, dp, edp,
300 1.34 fvdl (unsigned long)uio->uio_offset,
301 1.34 fvdl (unsigned long)NFS_GETCOOKIE(pdp));
302 1.34 fvdl #endif
303 1.120 yamt nfs_putdircache(np, ndp);
304 1.128.4.5 yamt brelse(bp, 0);
305 1.35 fvdl nfs_invaldircache(vp, 0);
306 1.128.4.1 yamt nfs_vinvalbuf(vp, 0, cred, l, 0);
307 1.34 fvdl goto diragain;
308 1.34 fvdl }
309 1.34 fvdl
310 1.128.4.4 yamt on = (char *)dp - (char *)bp->b_data;
311 1.34 fvdl
312 1.34 fvdl /*
313 1.34 fvdl * Cache all entries that may be exported to the
314 1.34 fvdl * user, as they may be thrown back at us. The
315 1.34 fvdl * NFSBIO_CACHECOOKIES flag indicates that all
316 1.34 fvdl * entries are being 'exported', so cache them all.
317 1.34 fvdl */
318 1.34 fvdl
319 1.34 fvdl if (en == 0 && pdp == dp) {
320 1.128.4.1 yamt dp = _DIRENT_NEXT(dp);
321 1.34 fvdl enn++;
322 1.34 fvdl }
323 1.34 fvdl
324 1.65 chs if (uio->uio_resid < (bp->b_bcount - bp->b_resid - on)) {
325 1.34 fvdl n = uio->uio_resid;
326 1.34 fvdl enough = 1;
327 1.34 fvdl } else
328 1.65 chs n = bp->b_bcount - bp->b_resid - on;
329 1.34 fvdl
330 1.128.4.4 yamt ep = (struct dirent *)(void *)((char *)bp->b_data + on + n);
331 1.34 fvdl
332 1.34 fvdl /*
333 1.34 fvdl * Find last complete entry to copy, caching entries
334 1.34 fvdl * (if requested) as we go.
335 1.34 fvdl */
336 1.34 fvdl
337 1.128.4.1 yamt while (dp < ep && (struct dirent *)_DIRENT_NEXT(dp) <= ep) {
338 1.35 fvdl if (cflag & NFSBIO_CACHECOOKIES) {
339 1.35 fvdl nndp = nfs_enterdircache(vp, NFS_GETCOOKIE(pdp),
340 1.35 fvdl ndp->dc_blkcookie, enn, bp->b_lblkno);
341 1.35 fvdl if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
342 1.35 fvdl NFS_STASHCOOKIE32(pdp,
343 1.35 fvdl nndp->dc_cookie32);
344 1.35 fvdl }
345 1.120 yamt nfs_putdircache(np, nndp);
346 1.35 fvdl }
347 1.34 fvdl pdp = dp;
348 1.128.4.1 yamt dp = _DIRENT_NEXT(dp);
349 1.34 fvdl enn++;
350 1.34 fvdl }
351 1.120 yamt nfs_putdircache(np, ndp);
352 1.34 fvdl
353 1.34 fvdl /*
354 1.34 fvdl * If the last requested entry was not the last in the
355 1.128 perry * buffer (happens if NFS_DIRFRAGSIZ < NFS_DIRBLKSIZ),
356 1.34 fvdl * cache the cookie of the last requested one, and
357 1.34 fvdl * set the offset to it.
358 1.34 fvdl */
359 1.34 fvdl
360 1.65 chs if ((on + n) < bp->b_bcount - bp->b_resid) {
361 1.34 fvdl curoff = NFS_GETCOOKIE(pdp);
362 1.35 fvdl nndp = nfs_enterdircache(vp, curoff, ndp->dc_blkcookie,
363 1.35 fvdl enn, bp->b_lblkno);
364 1.35 fvdl if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
365 1.35 fvdl NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
366 1.35 fvdl curoff = nndp->dc_cookie32;
367 1.35 fvdl }
368 1.120 yamt nfs_putdircache(np, nndp);
369 1.34 fvdl } else
370 1.34 fvdl curoff = bp->b_dcookie;
371 1.34 fvdl
372 1.35 fvdl /*
373 1.35 fvdl * Always cache the entry for the next block,
374 1.35 fvdl * so that readaheads can use it.
375 1.35 fvdl */
376 1.35 fvdl nndp = nfs_enterdircache(vp, bp->b_dcookie, bp->b_dcookie, 0,0);
377 1.35 fvdl if (nmp->nm_flag & NFSMNT_XLATECOOKIE) {
378 1.35 fvdl if (curoff == bp->b_dcookie) {
379 1.35 fvdl NFS_STASHCOOKIE32(pdp, nndp->dc_cookie32);
380 1.35 fvdl curoff = nndp->dc_cookie32;
381 1.35 fvdl }
382 1.35 fvdl }
383 1.35 fvdl
384 1.128.4.4 yamt n = (char *)_DIRENT_NEXT(pdp) - ((char *)bp->b_data + on);
385 1.34 fvdl
386 1.34 fvdl /*
387 1.12 mycroft * If not eof and read aheads are enabled, start one.
388 1.12 mycroft * (You need the current block first, so that you have the
389 1.24 fvdl * directory offset cookie of the next block.)
390 1.12 mycroft */
391 1.12 mycroft if (nfs_numasync > 0 && nmp->nm_readahead > 0 &&
392 1.128.4.2 yamt !NFS_EOFVALID(np)) {
393 1.122 yamt rabp = nfs_getcacheblk(vp, NFSDC_BLKNO(nndp),
394 1.128.4.1 yamt NFS_DIRBLKSIZ, l);
395 1.12 mycroft if (rabp) {
396 1.12 mycroft if ((rabp->b_flags & (B_DONE | B_DELWRI)) == 0) {
397 1.35 fvdl rabp->b_dcookie = nndp->dc_cookie;
398 1.12 mycroft rabp->b_flags |= (B_READ | B_ASYNC);
399 1.54 chs if (nfs_asyncio(rabp)) {
400 1.128.4.5 yamt brelse(rabp, BC_INVAL);
401 1.12 mycroft }
402 1.19 mycroft } else
403 1.128.4.5 yamt brelse(rabp, 0);
404 1.12 mycroft }
405 1.12 mycroft }
406 1.120 yamt nfs_putdircache(np, nndp);
407 1.12 mycroft got_buf = 1;
408 1.1 cgd break;
409 1.24 fvdl default:
410 1.29 christos printf(" nfsbioread: type %x unexpected\n",vp->v_type);
411 1.23 christos break;
412 1.54 chs }
413 1.12 mycroft
414 1.12 mycroft if (n > 0) {
415 1.12 mycroft if (!baddr)
416 1.12 mycroft baddr = bp->b_data;
417 1.128.4.4 yamt error = uiomove((char *)baddr + on, (int)n, uio);
418 1.1 cgd }
419 1.1 cgd switch (vp->v_type) {
420 1.24 fvdl case VREG:
421 1.24 fvdl break;
422 1.1 cgd case VLNK:
423 1.1 cgd n = 0;
424 1.1 cgd break;
425 1.1 cgd case VDIR:
426 1.34 fvdl uio->uio_offset = curoff;
427 1.34 fvdl if (enough)
428 1.34 fvdl n = 0;
429 1.1 cgd break;
430 1.24 fvdl default:
431 1.29 christos printf(" nfsbioread: type %x unexpected\n",vp->v_type);
432 1.24 fvdl }
433 1.12 mycroft if (got_buf)
434 1.128.4.5 yamt brelse(bp, 0);
435 1.12 mycroft } while (error == 0 && uio->uio_resid > 0 && n > 0);
436 1.1 cgd return (error);
437 1.1 cgd }
438 1.1 cgd
439 1.1 cgd /*
440 1.1 cgd * Vnode op for write using bio
441 1.1 cgd */
442 1.23 christos int
443 1.23 christos nfs_write(v)
444 1.23 christos void *v;
445 1.23 christos {
446 1.12 mycroft struct vop_write_args /* {
447 1.24 fvdl struct vnode *a_vp;
448 1.12 mycroft struct uio *a_uio;
449 1.12 mycroft int a_ioflag;
450 1.128.4.1 yamt kauth_cred_t a_cred;
451 1.23 christos } */ *ap = v;
452 1.48 augustss struct uio *uio = ap->a_uio;
453 1.128.4.1 yamt struct lwp *l = curlwp;
454 1.48 augustss struct vnode *vp = ap->a_vp;
455 1.12 mycroft struct nfsnode *np = VTONFS(vp);
456 1.128.4.1 yamt kauth_cred_t cred = ap->a_cred;
457 1.24 fvdl struct nfsmount *nmp = VFSTONFS(vp->v_mount);
458 1.69 chs voff_t oldoff, origoff;
459 1.69 chs vsize_t bytelen;
460 1.128.4.4 yamt int error = 0;
461 1.124 chs int ioflag = ap->a_ioflag;
462 1.124 chs int extended = 0, wrotedata = 0;
463 1.1 cgd
464 1.1 cgd #ifdef DIAGNOSTIC
465 1.1 cgd if (uio->uio_rw != UIO_WRITE)
466 1.1 cgd panic("nfs_write mode");
467 1.1 cgd #endif
468 1.1 cgd if (vp->v_type != VREG)
469 1.1 cgd return (EIO);
470 1.12 mycroft if (np->n_flag & NWRITEERR) {
471 1.12 mycroft np->n_flag &= ~NWRITEERR;
472 1.12 mycroft return (np->n_error);
473 1.12 mycroft }
474 1.51 bjh21 #ifndef NFS_V2_ONLY
475 1.34 fvdl if ((nmp->nm_flag & NFSMNT_NFSV3) &&
476 1.34 fvdl !(nmp->nm_iflag & NFSMNT_GOTFSINFO))
477 1.128.4.1 yamt (void)nfs_fsinfo(nmp, vp, cred, l);
478 1.51 bjh21 #endif
479 1.128.4.4 yamt if (ioflag & IO_APPEND) {
480 1.128.4.4 yamt NFS_INVALIDATE_ATTRCACHE(np);
481 1.128.4.4 yamt error = nfs_flushstalebuf(vp, cred, l,
482 1.128.4.4 yamt NFS_FLUSHSTALEBUF_MYWRITE);
483 1.128.4.4 yamt if (error)
484 1.128.4.4 yamt return (error);
485 1.128.4.4 yamt uio->uio_offset = np->n_size;
486 1.1 cgd }
487 1.1 cgd if (uio->uio_offset < 0)
488 1.1 cgd return (EINVAL);
489 1.33 fvdl if ((uio->uio_offset + uio->uio_resid) > nmp->nm_maxfilesize)
490 1.33 fvdl return (EFBIG);
491 1.1 cgd if (uio->uio_resid == 0)
492 1.1 cgd return (0);
493 1.1 cgd /*
494 1.1 cgd * Maybe this should be above the vnode op call, but so long as
495 1.1 cgd * file servers have no limits, i don't think it matters
496 1.1 cgd * file servers have no limits, I don't think it matters
497 1.128.4.1 yamt if (l && l->l_proc && uio->uio_offset + uio->uio_resid >
498 1.128.4.1 yamt l->l_proc->p_rlimit[RLIMIT_FSIZE].rlim_cur) {
499 1.128.4.4 yamt mutex_enter(&proclist_mutex);
500 1.128.4.1 yamt psignal(l->l_proc, SIGXFSZ);
501 1.128.4.4 yamt mutex_exit(&proclist_mutex);
502 1.1 cgd return (EFBIG);
503 1.1 cgd }
504 1.54 chs
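/*
 * Copy the data into the page cache with ubc_uiomove(). Each time the
 * write crosses an nm_wsize boundary, the dirty pages behind it are
 * pushed with VOP_PUTPAGES(); IO_SYNC writes are flushed synchronously
 * once the loop is done.
 */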
505 1.69 chs origoff = uio->uio_offset;
506 1.1 cgd do {
507 1.128.4.4 yamt bool overwrite; /* if we are overwriting whole pages */
508 1.85 yamt u_quad_t oldsize;
509 1.69 chs oldoff = uio->uio_offset;
510 1.69 chs bytelen = uio->uio_resid;
511 1.12 mycroft
512 1.1 cgd nfsstats.biocache_writes++;
513 1.54 chs
514 1.85 yamt oldsize = np->n_size;
515 1.12 mycroft np->n_flag |= NMODIFIED;
516 1.54 chs if (np->n_size < uio->uio_offset + bytelen) {
517 1.54 chs np->n_size = uio->uio_offset + bytelen;
518 1.12 mycroft }
519 1.128.4.4 yamt overwrite = false;
520 1.128.4.4 yamt if ((uio->uio_offset & PAGE_MASK) == 0) {
521 1.128.4.5 yamt if ((vp->v_vflag & VV_MAPPED) == 0 &&
522 1.128.4.4 yamt bytelen > PAGE_SIZE) {
523 1.128.4.4 yamt bytelen = trunc_page(bytelen);
524 1.128.4.4 yamt overwrite = true;
525 1.128.4.4 yamt } else if ((bytelen & PAGE_MASK) == 0 &&
526 1.128.4.4 yamt uio->uio_offset >= vp->v_size) {
527 1.128.4.4 yamt overwrite = true;
528 1.128.4.4 yamt }
529 1.128.4.4 yamt }
530 1.128.4.4 yamt if (vp->v_size < uio->uio_offset + bytelen) {
531 1.128.4.4 yamt uvm_vnp_setwritesize(vp, uio->uio_offset + bytelen);
532 1.128.4.4 yamt }
533 1.128.4.4 yamt error = ubc_uiomove(&vp->v_uobj, uio, bytelen,
534 1.128.4.4 yamt UVM_ADV_RANDOM, UBC_WRITE | UBC_PARTIALOK |
535 1.128.4.4 yamt (overwrite ? UBC_FAULTBUSY : 0) |
536 1.128.4.4 yamt (UBC_WANT_UNMAP(vp) ? UBC_UNMAP : 0));
537 1.69 chs if (error) {
538 1.128.4.4 yamt uvm_vnp_setwritesize(vp, vp->v_size);
539 1.128.4.4 yamt if (overwrite && np->n_size != oldsize) {
540 1.85 yamt /*
541 1.85 yamt * backout size and free pages past eof.
542 1.85 yamt */
543 1.85 yamt np->n_size = oldsize;
544 1.108 pk simple_lock(&vp->v_interlock);
545 1.85 yamt (void)VOP_PUTPAGES(vp, round_page(vp->v_size),
546 1.85 yamt 0, PGO_SYNCIO | PGO_FREE);
547 1.85 yamt }
548 1.69 chs break;
549 1.69 chs }
550 1.124 chs wrotedata = 1;
551 1.69 chs
552 1.69 chs /*
553 1.69 chs * update UVM's notion of the size now that we've
554 1.69 chs * copied the data into the vnode's pages.
555 1.69 chs */
556 1.69 chs
557 1.69 chs if (vp->v_size < uio->uio_offset) {
558 1.69 chs uvm_vnp_setsize(vp, uio->uio_offset);
559 1.84 jdolecek extended = 1;
560 1.69 chs }
561 1.69 chs
562 1.69 chs if ((oldoff & ~(nmp->nm_wsize - 1)) !=
563 1.54 chs (uio->uio_offset & ~(nmp->nm_wsize - 1))) {
564 1.72 chs simple_lock(&vp->v_interlock);
565 1.72 chs error = VOP_PUTPAGES(vp,
566 1.69 chs trunc_page(oldoff & ~(nmp->nm_wsize - 1)),
567 1.69 chs round_page((uio->uio_offset + nmp->nm_wsize - 1) &
568 1.78 chs ~(nmp->nm_wsize - 1)), PGO_CLEANIT);
569 1.52 fvdl }
570 1.54 chs } while (uio->uio_resid > 0);
571 1.124 chs if (wrotedata)
572 1.84 jdolecek VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
573 1.128.4.4 yamt if (error == 0 && (ioflag & IO_SYNC) != 0) {
574 1.72 chs simple_lock(&vp->v_interlock);
575 1.72 chs error = VOP_PUTPAGES(vp,
576 1.69 chs trunc_page(origoff & ~(nmp->nm_wsize - 1)),
577 1.69 chs round_page((uio->uio_offset + nmp->nm_wsize - 1) &
578 1.69 chs ~(nmp->nm_wsize - 1)),
579 1.72 chs PGO_CLEANIT | PGO_SYNCIO);
580 1.69 chs }
581 1.54 chs return error;
582 1.12 mycroft }
583 1.12 mycroft
584 1.12 mycroft /*
585 1.12 mycroft * Get an nfs cache block.
586 1.12 mycroft * Allocate a new one if the block isn't currently in the cache
587 1.12 mycroft * and return the block marked busy. If the calling process is
588 1.12 mycroft * interrupted by a signal for an interruptible mount point, return
589 1.12 mycroft * NULL.
590 1.12 mycroft */
591 1.12 mycroft struct buf *
592 1.128.4.1 yamt nfs_getcacheblk(vp, bn, size, l)
593 1.12 mycroft struct vnode *vp;
594 1.12 mycroft daddr_t bn;
595 1.12 mycroft int size;
596 1.128.4.1 yamt struct lwp *l;
597 1.12 mycroft {
598 1.48 augustss struct buf *bp;
599 1.12 mycroft struct nfsmount *nmp = VFSTONFS(vp->v_mount);
600 1.12 mycroft
601 1.12 mycroft if (nmp->nm_flag & NFSMNT_INT) {
602 1.12 mycroft bp = getblk(vp, bn, size, PCATCH, 0);
603 1.54 chs while (bp == NULL) {
604 1.128.4.1 yamt if (nfs_sigintr(nmp, NULL, l))
605 1.54 chs return (NULL);
606 1.12 mycroft bp = getblk(vp, bn, size, 0, 2 * hz);
607 1.12 mycroft }
608 1.12 mycroft } else
609 1.12 mycroft bp = getblk(vp, bn, size, 0, 0);
610 1.12 mycroft return (bp);
611 1.12 mycroft }
612 1.12 mycroft
613 1.12 mycroft /*
614 1.12 mycroft * Flush and invalidate all dirty buffers. If another process is already
615 1.12 mycroft * doing the flush, just wait for completion.
616 1.12 mycroft */
617 1.23 christos int
618 1.128.4.1 yamt nfs_vinvalbuf(vp, flags, cred, l, intrflg)
619 1.12 mycroft struct vnode *vp;
620 1.12 mycroft int flags;
621 1.128.4.1 yamt kauth_cred_t cred;
622 1.128.4.1 yamt struct lwp *l;
623 1.12 mycroft int intrflg;
624 1.12 mycroft {
625 1.48 augustss struct nfsnode *np = VTONFS(vp);
626 1.12 mycroft struct nfsmount *nmp = VFSTONFS(vp->v_mount);
627 1.12 mycroft int error = 0, slpflag, slptimeo;
628 1.12 mycroft
629 1.12 mycroft if ((nmp->nm_flag & NFSMNT_INT) == 0)
630 1.12 mycroft intrflg = 0;
631 1.12 mycroft if (intrflg) {
632 1.12 mycroft slpflag = PCATCH;
633 1.12 mycroft slptimeo = 2 * hz;
634 1.12 mycroft } else {
635 1.12 mycroft slpflag = 0;
636 1.12 mycroft slptimeo = 0;
637 1.12 mycroft }
638 1.12 mycroft /*
639 1.12 mycroft * First wait for any other process doing a flush to complete.
640 1.12 mycroft */
641 1.103 yamt simple_lock(&vp->v_interlock);
642 1.12 mycroft while (np->n_flag & NFLUSHINPROG) {
643 1.12 mycroft np->n_flag |= NFLUSHWANT;
644 1.103 yamt error = ltsleep(&np->n_flag, PRIBIO + 2, "nfsvinval",
645 1.103 yamt slptimeo, &vp->v_interlock);
646 1.128.4.1 yamt if (error && intrflg && nfs_sigintr(nmp, NULL, l)) {
647 1.103 yamt simple_unlock(&vp->v_interlock);
648 1.103 yamt return EINTR;
649 1.103 yamt }
650 1.12 mycroft }
651 1.12 mycroft
652 1.12 mycroft /*
653 1.12 mycroft * Now, flush as required.
654 1.12 mycroft */
655 1.12 mycroft np->n_flag |= NFLUSHINPROG;
656 1.103 yamt simple_unlock(&vp->v_interlock);
657 1.128.4.1 yamt error = vinvalbuf(vp, flags, cred, l, slpflag, 0);
658 1.12 mycroft while (error) {
659 1.128.4.1 yamt if (intrflg && nfs_sigintr(nmp, NULL, l)) {
660 1.103 yamt error = EINTR;
661 1.103 yamt break;
662 1.12 mycroft }
663 1.128.4.1 yamt error = vinvalbuf(vp, flags, cred, l, 0, slptimeo);
664 1.12 mycroft }
665 1.103 yamt simple_lock(&vp->v_interlock);
666 1.103 yamt if (error == 0)
667 1.103 yamt np->n_flag &= ~NMODIFIED;
668 1.103 yamt np->n_flag &= ~NFLUSHINPROG;
669 1.12 mycroft if (np->n_flag & NFLUSHWANT) {
670 1.12 mycroft np->n_flag &= ~NFLUSHWANT;
671 1.103 yamt wakeup(&np->n_flag);
672 1.12 mycroft }
673 1.103 yamt simple_unlock(&vp->v_interlock);
674 1.103 yamt return error;
675 1.12 mycroft }
676 1.12 mycroft
677 1.12 mycroft /*
678 1.128 perry * nfs_flushstalebuf: flush cache if it's stale.
679 1.123 yamt *
680 1.123 yamt * => caller shouldn't own any pages or buffers which belong to the vnode.
681 1.123 yamt */
682 1.123 yamt
683 1.123 yamt int
684 1.128.4.1 yamt nfs_flushstalebuf(struct vnode *vp, kauth_cred_t cred, struct lwp *l,
685 1.123 yamt int flags)
686 1.123 yamt {
687 1.123 yamt struct nfsnode *np = VTONFS(vp);
688 1.123 yamt struct vattr vattr;
689 1.123 yamt int error;
690 1.123 yamt
691 1.123 yamt if (np->n_flag & NMODIFIED) {
692 1.123 yamt if ((flags & NFS_FLUSHSTALEBUF_MYWRITE) == 0
693 1.123 yamt || vp->v_type != VREG) {
694 1.128.4.1 yamt error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
695 1.123 yamt if (error)
696 1.123 yamt return error;
697 1.123 yamt if (vp->v_type == VDIR) {
698 1.123 yamt nfs_invaldircache(vp, 0);
699 1.123 yamt }
700 1.123 yamt } else {
701 1.123 yamt /*
702 1.123 yamt * XXX assuming writes are ours.
703 1.123 yamt */
704 1.123 yamt }
705 1.123 yamt NFS_INVALIDATE_ATTRCACHE(np);
706 1.128.4.1 yamt error = VOP_GETATTR(vp, &vattr, cred, l);
707 1.123 yamt if (error)
708 1.123 yamt return error;
709 1.123 yamt np->n_mtime = vattr.va_mtime;
710 1.123 yamt } else {
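/*
 * No cached writes of our own: fetch the attributes and toss the
 * cached data only if the server's mtime has changed since we
 * last looked.
 */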
711 1.128.4.1 yamt error = VOP_GETATTR(vp, &vattr, cred, l);
712 1.123 yamt if (error)
713 1.123 yamt return error;
714 1.123 yamt if (timespeccmp(&np->n_mtime, &vattr.va_mtime, !=)) {
715 1.123 yamt if (vp->v_type == VDIR) {
716 1.123 yamt nfs_invaldircache(vp, 0);
717 1.123 yamt }
718 1.128.4.1 yamt error = nfs_vinvalbuf(vp, V_SAVE, cred, l, 1);
719 1.123 yamt if (error)
720 1.123 yamt return error;
721 1.123 yamt np->n_mtime = vattr.va_mtime;
722 1.123 yamt }
723 1.123 yamt }
724 1.123 yamt
725 1.123 yamt return error;
726 1.123 yamt }
727 1.123 yamt
728 1.123 yamt /*
729 1.12 mycroft * Initiate asynchronous I/O. Return an error if no nfsiods are available.
730 1.12 mycroft * This is mainly to avoid queueing async I/O requests when the nfsiods
731 1.12 mycroft * are all hung on a dead server.
732 1.12 mycroft */
733 1.69 chs
734 1.23 christos int
735 1.54 chs nfs_asyncio(bp)
736 1.48 augustss struct buf *bp;
737 1.12 mycroft {
738 1.128.4.4 yamt struct nfs_iod *iod;
739 1.48 augustss struct nfsmount *nmp;
740 1.128.4.4 yamt int slptimeo = 0, error;
741 1.128.4.4 yamt bool catch = false;
742 1.12 mycroft
743 1.12 mycroft if (nfs_numasync == 0)
744 1.12 mycroft return (EIO);
745 1.30 thorpej
746 1.30 thorpej nmp = VFSTONFS(bp->b_vp->v_mount);
747 1.30 thorpej again:
748 1.30 thorpej if (nmp->nm_flag & NFSMNT_INT)
749 1.128.4.4 yamt catch = true;
750 1.128 perry
751 1.30 thorpej /*
752 1.30 thorpej * Find a free iod to process this request.
753 1.30 thorpej */
754 1.30 thorpej
755 1.128.4.4 yamt mutex_enter(&nfs_iodlist_lock);
756 1.128.4.4 yamt iod = LIST_FIRST(&nfs_iodlist_idle);
757 1.128.4.4 yamt if (iod) {
758 1.128.4.4 yamt /*
759 1.128.4.4 yamt * Found one, so wake it up and tell it which
760 1.128.4.4 yamt * mount to process.
761 1.128.4.4 yamt */
762 1.128.4.4 yamt LIST_REMOVE(iod, nid_idle);
763 1.128.4.4 yamt mutex_enter(&iod->nid_lock);
764 1.128.4.4 yamt mutex_exit(&nfs_iodlist_lock);
765 1.128.4.4 yamt KASSERT(iod->nid_mount == NULL);
766 1.128.4.4 yamt iod->nid_mount = nmp;
767 1.128.4.4 yamt cv_signal(&iod->nid_cv);
768 1.128.4.4 yamt mutex_enter(&nmp->nm_lock);
769 1.128.4.4 yamt mutex_exit(&iod->nid_lock);
770 1.128.4.4 yamt nmp->nm_bufqiods++;
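/*
 * A new iod raises the queue limit (2 * nm_bufqiods), so wake up
 * anyone waiting for queue space below.
 */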
771 1.128.4.4 yamt if (nmp->nm_bufqlen < 2 * nmp->nm_bufqiods) {
772 1.128.4.4 yamt cv_broadcast(&nmp->nm_aiocv);
773 1.30 thorpej }
774 1.128.4.4 yamt } else {
775 1.128.4.4 yamt mutex_exit(&nfs_iodlist_lock);
776 1.128.4.4 yamt mutex_enter(&nmp->nm_lock);
777 1.99 yamt }
778 1.99 yamt
779 1.128.4.4 yamt KASSERT(mutex_owned(&nmp->nm_lock));
780 1.30 thorpej
781 1.30 thorpej /*
782 1.30 thorpej * If we have an iod which can process the request, then queue
783 1.128 perry * the buffer. However, even if we have an iod, do not initiate
784 1.111 jonathan * queue cleaning if curproc is the pageout daemon. If the NFS mount
785 1.111 jonathan * is via local loopback, we may put curproc (pagedaemon) to sleep
786 1.111 jonathan * waiting for the writes to complete. But the server (ourself)
787 1.111 jonathan * may block the write, waiting for its (i.e., our) pagedaemon
788 1.111 jonathan * to produce clean pages to handle the write: deadlock.
789 1.111 jonathan * XXX: start non-loopback mounts straight away? If "lots free",
790 1.111 jonathan * let pagedaemon start loopback writes anyway?
791 1.30 thorpej */
792 1.128.4.4 yamt if (nmp->nm_bufqiods > 0) {
793 1.128 perry
794 1.30 thorpej /*
795 1.30 thorpej * Ensure that the queue never grows too large.
796 1.30 thorpej */
797 1.128.4.4 yamt if (curlwp == uvm.pagedaemon_lwp) {
798 1.112 jonathan /* Enqueue for later, to avoid free-page deadlock */
799 1.128.4.4 yamt } else while (nmp->nm_bufqlen >= 2 * nmp->nm_bufqiods) {
800 1.128.4.4 yamt if (catch) {
801 1.128.4.4 yamt error = cv_timedwait_sig(&nmp->nm_aiocv,
802 1.128.4.4 yamt &nmp->nm_lock, slptimeo);
803 1.128.4.4 yamt } else {
804 1.128.4.4 yamt error = cv_timedwait(&nmp->nm_aiocv,
805 1.128.4.4 yamt &nmp->nm_lock, slptimeo);
806 1.128.4.4 yamt }
807 1.30 thorpej if (error) {
808 1.128.4.4 yamt if (nfs_sigintr(nmp, NULL, curlwp)) {
809 1.128.4.4 yamt mutex_exit(&nmp->nm_lock);
810 1.30 thorpej return (EINTR);
811 1.128.4.4 yamt }
812 1.128.4.4 yamt if (catch) {
813 1.128.4.4 yamt catch = false;
814 1.30 thorpej slptimeo = 2 * hz;
815 1.30 thorpej }
816 1.30 thorpej }
817 1.74 chs
818 1.30 thorpej /*
819 1.30 thorpej * We might have lost our iod while sleeping,
820 1.128.4.4 yamt * so check and loop if necessary.
821 1.30 thorpej */
822 1.74 chs
823 1.128.4.4 yamt if (nmp->nm_bufqiods == 0) {
824 1.128.4.4 yamt mutex_exit(&nmp->nm_lock);
825 1.30 thorpej goto again;
826 1.128.4.4 yamt }
827 1.30 thorpej }
828 1.30 thorpej TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
829 1.30 thorpej nmp->nm_bufqlen++;
830 1.128.4.4 yamt mutex_exit(&nmp->nm_lock);
831 1.12 mycroft return (0);
832 1.74 chs }
833 1.128.4.4 yamt mutex_exit(&nmp->nm_lock);
834 1.24 fvdl
835 1.24 fvdl /*
836 1.30 thorpej * All the iods are busy on other mounts, so return EIO to
837 1.30 thorpej * force the caller to process the i/o synchronously.
838 1.24 fvdl */
839 1.74 chs
840 1.30 thorpej return (EIO);
841 1.12 mycroft }
842 1.12 mycroft
843 1.12 mycroft /*
844 1.91 yamt * nfs_doio for read.
845 1.12 mycroft */
846 1.91 yamt static int
847 1.91 yamt nfs_doio_read(bp, uiop)
848 1.48 augustss struct buf *bp;
849 1.91 yamt struct uio *uiop;
850 1.12 mycroft {
851 1.91 yamt struct vnode *vp = bp->b_vp;
852 1.91 yamt struct nfsnode *np = VTONFS(vp);
853 1.91 yamt struct nfsmount *nmp = VFSTONFS(vp->v_mount);
854 1.91 yamt int error = 0;
855 1.12 mycroft
856 1.91 yamt uiop->uio_rw = UIO_READ;
857 1.91 yamt switch (vp->v_type) {
858 1.91 yamt case VREG:
859 1.12 mycroft nfsstats.read_bios++;
860 1.54 chs error = nfs_readrpc(vp, uiop);
861 1.54 chs if (!error && uiop->uio_resid) {
862 1.91 yamt int diff, len;
863 1.54 chs
864 1.12 mycroft /*
865 1.119 yamt * If uio_resid > 0, there is a hole in the file and
866 1.12 mycroft * no writes after the hole have been pushed to
867 1.119 yamt * the server yet or the file has been truncated
868 1.119 yamt * on the server.
869 1.12 mycroft * Just zero fill the rest of the valid area.
870 1.12 mycroft */
871 1.54 chs
872 1.119 yamt KASSERT(vp->v_size >=
873 1.119 yamt uiop->uio_offset + uiop->uio_resid);
874 1.12 mycroft diff = bp->b_bcount - uiop->uio_resid;
875 1.119 yamt len = uiop->uio_resid;
876 1.119 yamt memset((char *)bp->b_data + diff, 0, len);
877 1.128.4.1 yamt uiop->uio_resid = 0;
878 1.12 mycroft }
879 1.128.4.1 yamt #if 0
880 1.128.4.1 yamt if (uiop->uio_lwp && (vp->v_flag & VTEXT) &&
881 1.128.4.2 yamt timespeccmp(&np->n_mtime, &np->n_vattr->va_mtime, !=)) {
882 1.128.4.1 yamt killproc(uiop->uio_lwp->l_proc, "process text file was modified");
883 1.86 thorpej #if 0 /* XXX NJWLWP */
884 1.128.4.1 yamt uiop->uio_lwp->l_proc->p_holdcnt++;
885 1.86 thorpej #endif
886 1.12 mycroft }
887 1.128.4.1 yamt #endif
888 1.12 mycroft break;
889 1.91 yamt case VLNK:
890 1.91 yamt KASSERT(uiop->uio_offset == (off_t)0);
891 1.12 mycroft nfsstats.readlink_bios++;
892 1.128.4.1 yamt error = nfs_readlinkrpc(vp, uiop, np->n_rcred);
893 1.12 mycroft break;
894 1.91 yamt case VDIR:
895 1.12 mycroft nfsstats.readdir_bios++;
896 1.34 fvdl uiop->uio_offset = bp->b_dcookie;
897 1.117 christos #ifndef NFS_V2_ONLY
898 1.24 fvdl if (nmp->nm_flag & NFSMNT_RDIRPLUS) {
899 1.128.4.1 yamt error = nfs_readdirplusrpc(vp, uiop,
900 1.128.4.2 yamt curlwp->l_cred);
901 1.128.4.2 yamt /*
902 1.128.4.2 yamt * nfs_request maps NFSERR_NOTSUPP to ENOTSUP.
903 1.128.4.2 yamt */
904 1.128.4.2 yamt if (error == ENOTSUP)
905 1.24 fvdl nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
906 1.24 fvdl }
907 1.117 christos #else
908 1.117 christos nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
909 1.117 christos #endif
910 1.24 fvdl if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
911 1.128.4.1 yamt error = nfs_readdirrpc(vp, uiop,
912 1.128.4.2 yamt curlwp->l_cred);
913 1.34 fvdl if (!error) {
914 1.34 fvdl bp->b_dcookie = uiop->uio_offset;
915 1.34 fvdl }
916 1.24 fvdl break;
917 1.91 yamt default:
918 1.91 yamt printf("nfs_doio: type %x unexpected\n", vp->v_type);
919 1.12 mycroft break;
920 1.91 yamt }
921 1.91 yamt if (error) {
922 1.12 mycroft bp->b_error = error;
923 1.91 yamt }
924 1.91 yamt return error;
925 1.91 yamt }
926 1.91 yamt
927 1.91 yamt /*
928 1.91 yamt * nfs_doio for write.
929 1.91 yamt */
930 1.91 yamt static int
931 1.91 yamt nfs_doio_write(bp, uiop)
932 1.91 yamt struct buf *bp;
933 1.91 yamt struct uio *uiop;
934 1.91 yamt {
935 1.91 yamt struct vnode *vp = bp->b_vp;
936 1.91 yamt struct nfsnode *np = VTONFS(vp);
937 1.96 yamt struct nfsmount *nmp = VFSTONFS(vp->v_mount);
938 1.91 yamt int iomode;
939 1.128.4.3 yamt bool stalewriteverf = false;
940 1.91 yamt int i, npages = (bp->b_bcount + PAGE_SIZE - 1) >> PAGE_SHIFT;
941 1.91 yamt struct vm_page *pgs[npages];
942 1.117 christos #ifndef NFS_V2_ONLY
943 1.128.4.3 yamt bool needcommit = true; /* need only COMMIT RPC */
944 1.117 christos #else
945 1.128.4.3 yamt bool needcommit = false; /* need only COMMIT RPC */
946 1.117 christos #endif
947 1.128.4.3 yamt bool pageprotected;
948 1.91 yamt struct uvm_object *uobj = &vp->v_uobj;
949 1.91 yamt int error;
950 1.91 yamt off_t off, cnt;
951 1.91 yamt
952 1.91 yamt if ((bp->b_flags & B_ASYNC) != 0 && NFS_ISV3(vp)) {
953 1.91 yamt iomode = NFSV3WRITE_UNSTABLE;
954 1.12 mycroft } else {
955 1.91 yamt iomode = NFSV3WRITE_FILESYNC;
956 1.91 yamt }
957 1.74 chs
958 1.117 christos #ifndef NFS_V2_ONLY
959 1.96 yamt again:
960 1.117 christos #endif
961 1.128.4.3 yamt rw_enter(&nmp->nm_writeverflock, RW_READER);
962 1.96 yamt
963 1.91 yamt for (i = 0; i < npages; i++) {
964 1.94 yamt pgs[i] = uvm_pageratop((vaddr_t)bp->b_data + (i << PAGE_SHIFT));
965 1.100 yamt if (pgs[i]->uobject == uobj &&
966 1.100 yamt pgs[i]->offset == uiop->uio_offset + (i << PAGE_SHIFT)) {
967 1.101 yamt KASSERT(pgs[i]->flags & PG_BUSY);
968 1.100 yamt /*
969 1.100 yamt * this page belongs to our object.
970 1.100 yamt */
971 1.100 yamt simple_lock(&uobj->vmobjlock);
972 1.115 yamt /*
973 1.115 yamt * write out the page stably if it's about to
974 1.115 yamt * be released, because we can't resend it
975 1.115 yamt * after a server crash.
976 1.115 yamt *
977 1.115 yamt * XXX assuming PG_RELEASED|PG_PAGEOUT won't be
978 1.115 yamt * changed until the page is unbusied.
979 1.115 yamt */
980 1.100 yamt if (pgs[i]->flags & (PG_RELEASED|PG_PAGEOUT))
981 1.100 yamt iomode = NFSV3WRITE_FILESYNC;
982 1.115 yamt /*
983 1.115 yamt * if we meet a page which hasn't been sent yet,
984 1.115 yamt * we need to do a WRITE RPC.
985 1.115 yamt */
986 1.100 yamt if ((pgs[i]->flags & PG_NEEDCOMMIT) == 0)
987 1.128.4.3 yamt needcommit = false;
988 1.100 yamt simple_unlock(&uobj->vmobjlock);
989 1.100 yamt } else {
990 1.100 yamt iomode = NFSV3WRITE_FILESYNC;
991 1.128.4.3 yamt needcommit = false;
992 1.91 yamt }
993 1.91 yamt }
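/*
 * For an unstable write, mark the pages as needing a later commit and
 * write-protect them, so that any modification before the commit
 * faults back into nfs_getpages(), which clears PG_NEEDCOMMIT again.
 */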
994 1.91 yamt if (!needcommit && iomode == NFSV3WRITE_UNSTABLE) {
995 1.100 yamt simple_lock(&uobj->vmobjlock);
996 1.91 yamt for (i = 0; i < npages; i++) {
997 1.91 yamt pgs[i]->flags |= PG_NEEDCOMMIT | PG_RDONLY;
998 1.91 yamt pmap_page_protect(pgs[i], VM_PROT_READ);
999 1.91 yamt }
1000 1.100 yamt simple_unlock(&uobj->vmobjlock);
1001 1.128.4.3 yamt pageprotected = true; /* pages can't be modified during i/o. */
1002 1.102 yamt } else
1003 1.128.4.3 yamt pageprotected = false;
1004 1.74 chs
1005 1.91 yamt /*
1006 1.91 yamt * Send the data to the server if necessary,
1007 1.91 yamt * otherwise just send a commit rpc.
1008 1.91 yamt */
1009 1.117 christos #ifndef NFS_V2_ONLY
1010 1.91 yamt if (needcommit) {
1011 1.74 chs
1012 1.74 chs /*
1013 1.74 chs * If the buffer is in the range that we already committed,
1014 1.74 chs * there's nothing to do.
1015 1.74 chs *
1016 1.74 chs * If it's in the range that we need to commit, push the
1017 1.74 chs * whole range at once, otherwise only push the buffer.
1018 1.74 chs * In both these cases, acquire the commit lock to avoid
1019 1.74 chs * other processes modifying the range.
1020 1.74 chs */
1021 1.74 chs
1022 1.88 yamt off = uiop->uio_offset;
1023 1.88 yamt cnt = bp->b_bcount;
1024 1.128.4.3 yamt mutex_enter(&np->n_commitlock);
1025 1.74 chs if (!nfs_in_committed_range(vp, off, bp->b_bcount)) {
1026 1.128.4.3 yamt bool pushedrange;
1027 1.74 chs if (nfs_in_tobecommitted_range(vp, off, bp->b_bcount)) {
1028 1.128.4.3 yamt pushedrange = true;
1029 1.74 chs off = np->n_pushlo;
1030 1.74 chs cnt = np->n_pushhi - np->n_pushlo;
1031 1.74 chs } else {
1032 1.128.4.3 yamt pushedrange = false;
1033 1.74 chs }
1034 1.128.4.1 yamt error = nfs_commit(vp, off, cnt, curlwp);
1035 1.74 chs if (error == 0) {
1036 1.74 chs if (pushedrange) {
1037 1.74 chs nfs_merge_commit_ranges(vp);
1038 1.74 chs } else {
1039 1.74 chs nfs_add_committed_range(vp, off, cnt);
1040 1.74 chs }
1041 1.74 chs }
1042 1.95 yamt } else {
1043 1.95 yamt error = 0;
1044 1.74 chs }
1045 1.128.4.3 yamt mutex_exit(&np->n_commitlock);
1046 1.128.4.3 yamt rw_exit(&nmp->nm_writeverflock);
1047 1.74 chs if (!error) {
1048 1.97 yamt /*
1049 1.97 yamt * pages are now on stable storage.
1050 1.97 yamt */
1051 1.93 yamt uiop->uio_resid = 0;
1052 1.74 chs simple_lock(&uobj->vmobjlock);
1053 1.74 chs for (i = 0; i < npages; i++) {
1054 1.74 chs pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
1055 1.74 chs }
1056 1.74 chs simple_unlock(&uobj->vmobjlock);
1057 1.91 yamt return 0;
1058 1.74 chs } else if (error == NFSERR_STALEWRITEVERF) {
1059 1.96 yamt nfs_clearcommit(vp->v_mount);
1060 1.96 yamt goto again;
1061 1.96 yamt }
1062 1.96 yamt if (error) {
1063 1.96 yamt bp->b_error = np->n_error = error;
1064 1.96 yamt np->n_flag |= NWRITEERR;
1065 1.74 chs }
1066 1.96 yamt return error;
1067 1.91 yamt }
1068 1.117 christos #endif
1069 1.91 yamt off = uiop->uio_offset;
1070 1.91 yamt cnt = bp->b_bcount;
1071 1.91 yamt uiop->uio_rw = UIO_WRITE;
1072 1.91 yamt nfsstats.write_bios++;
1073 1.102 yamt error = nfs_writerpc(vp, uiop, &iomode, pageprotected, &stalewriteverf);
1074 1.117 christos #ifndef NFS_V2_ONLY
1075 1.91 yamt if (!error && iomode == NFSV3WRITE_UNSTABLE) {
1076 1.97 yamt /*
1077 1.97 yamt * we need to commit pages later.
1078 1.97 yamt */
1079 1.128.4.3 yamt mutex_enter(&np->n_commitlock);
1080 1.74 chs nfs_add_tobecommitted_range(vp, off, cnt);
1081 1.97 yamt /*
1082 1.97 yamt * if there are too many uncommitted pages, commit them now.
1083 1.97 yamt */
1084 1.74 chs if (np->n_pushhi - np->n_pushlo > nfs_commitsize) {
1085 1.74 chs off = np->n_pushlo;
1086 1.74 chs cnt = nfs_commitsize >> 1;
1087 1.128.4.1 yamt error = nfs_commit(vp, off, cnt, curlwp);
1088 1.74 chs if (!error) {
1089 1.74 chs nfs_add_committed_range(vp, off, cnt);
1090 1.74 chs nfs_del_tobecommitted_range(vp, off, cnt);
1091 1.74 chs }
1092 1.97 yamt if (error == NFSERR_STALEWRITEVERF) {
1093 1.128.4.3 yamt stalewriteverf = true;
1094 1.97 yamt error = 0; /* it isn't a real error */
1095 1.97 yamt }
1096 1.97 yamt } else {
1097 1.97 yamt /*
1098 1.97 yamt * re-dirty pages so that they will be passed
1099 1.97 yamt * to us later again.
1100 1.97 yamt */
1101 1.97 yamt simple_lock(&uobj->vmobjlock);
1102 1.97 yamt for (i = 0; i < npages; i++) {
1103 1.97 yamt pgs[i]->flags &= ~PG_CLEAN;
1104 1.97 yamt }
1105 1.97 yamt simple_unlock(&uobj->vmobjlock);
1106 1.74 chs }
1107 1.128.4.3 yamt mutex_exit(&np->n_commitlock);
1108 1.117 christos } else
1109 1.117 christos #endif
1110 1.117 christos if (!error) {
1111 1.97 yamt /*
1112 1.97 yamt * pages are now on stable storage.
1113 1.97 yamt */
1114 1.128.4.3 yamt mutex_enter(&np->n_commitlock);
1115 1.74 chs nfs_del_committed_range(vp, off, cnt);
1116 1.128.4.3 yamt mutex_exit(&np->n_commitlock);
1117 1.74 chs simple_lock(&uobj->vmobjlock);
1118 1.74 chs for (i = 0; i < npages; i++) {
1119 1.74 chs pgs[i]->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
1120 1.74 chs }
1121 1.74 chs simple_unlock(&uobj->vmobjlock);
1122 1.91 yamt } else {
1123 1.97 yamt /*
1124 1.97 yamt * we got an error.
1125 1.97 yamt */
1126 1.97 yamt bp->b_error = np->n_error = error;
1127 1.97 yamt np->n_flag |= NWRITEERR;
1128 1.54 chs }
1129 1.96 yamt
1130 1.128.4.3 yamt rw_exit(&nmp->nm_writeverflock);
1131 1.96 yamt
1132 1.96 yamt if (stalewriteverf) {
1133 1.54 chs nfs_clearcommit(vp->v_mount);
1134 1.74 chs }
1135 1.91 yamt return error;
1136 1.91 yamt }
1137 1.91 yamt
1138 1.91 yamt /*
1139 1.91 yamt * nfs_doio for B_PHYS.
1140 1.91 yamt */
1141 1.91 yamt static int
1142 1.91 yamt nfs_doio_phys(bp, uiop)
1143 1.91 yamt struct buf *bp;
1144 1.91 yamt struct uio *uiop;
1145 1.91 yamt {
1146 1.91 yamt struct vnode *vp = bp->b_vp;
1147 1.91 yamt int error;
1148 1.91 yamt
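/* b_blkno is in DEV_BSIZE units; convert it to a byte offset. */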
1149 1.91 yamt uiop->uio_offset = ((off_t)bp->b_blkno) << DEV_BSHIFT;
1150 1.91 yamt if (bp->b_flags & B_READ) {
1151 1.91 yamt uiop->uio_rw = UIO_READ;
1152 1.91 yamt nfsstats.read_physios++;
1153 1.91 yamt error = nfs_readrpc(vp, uiop);
1154 1.91 yamt } else {
1155 1.91 yamt int iomode = NFSV3WRITE_DATASYNC;
1156 1.128.4.3 yamt bool stalewriteverf;
1157 1.96 yamt struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1158 1.91 yamt
1159 1.91 yamt uiop->uio_rw = UIO_WRITE;
1160 1.91 yamt nfsstats.write_physios++;
1161 1.128.4.3 yamt rw_enter(&nmp->nm_writeverflock, RW_READER);
1162 1.128.4.3 yamt error = nfs_writerpc(vp, uiop, &iomode, false, &stalewriteverf);
1163 1.128.4.3 yamt rw_exit(&nmp->nm_writeverflock);
1164 1.91 yamt if (stalewriteverf) {
1165 1.91 yamt nfs_clearcommit(bp->b_vp->v_mount);
1166 1.91 yamt }
1167 1.91 yamt }
1168 1.91 yamt if (error) {
1169 1.91 yamt bp->b_error = error;
1170 1.91 yamt }
1171 1.91 yamt return error;
1172 1.91 yamt }
1173 1.91 yamt
1174 1.91 yamt /*
1175 1.91 yamt * Do an I/O operation to/from a cache block. This may be called
1176 1.91 yamt * synchronously or from an nfsiod.
1177 1.91 yamt */
1178 1.91 yamt int
1179 1.128.4.1 yamt nfs_doio(bp)
1180 1.91 yamt struct buf *bp;
1181 1.91 yamt {
1182 1.91 yamt int error;
1183 1.91 yamt struct uio uio;
1184 1.91 yamt struct uio *uiop = &uio;
1185 1.91 yamt struct iovec io;
1186 1.91 yamt UVMHIST_FUNC("nfs_doio"); UVMHIST_CALLED(ubchist);
1187 1.91 yamt
1188 1.91 yamt uiop->uio_iov = &io;
1189 1.91 yamt uiop->uio_iovcnt = 1;
1190 1.91 yamt uiop->uio_offset = (((off_t)bp->b_blkno) << DEV_BSHIFT);
1191 1.128.4.1 yamt UIO_SETUP_SYSSPACE(uiop);
1192 1.91 yamt io.iov_base = bp->b_data;
1193 1.91 yamt io.iov_len = uiop->uio_resid = bp->b_bcount;
1194 1.91 yamt
1195 1.91 yamt /*
1196 1.91 yamt * Historically, paging was done with physio, but no more...
1197 1.91 yamt */
1198 1.91 yamt if (bp->b_flags & B_PHYS) {
1199 1.91 yamt /*
1200 1.91 yamt * ...though reading /dev/drum still gets us here.
1201 1.91 yamt */
1202 1.91 yamt error = nfs_doio_phys(bp, uiop);
1203 1.91 yamt } else if (bp->b_flags & B_READ) {
1204 1.91 yamt error = nfs_doio_read(bp, uiop);
1205 1.91 yamt } else {
1206 1.91 yamt error = nfs_doio_write(bp, uiop);
1207 1.91 yamt }
1208 1.91 yamt bp->b_resid = uiop->uio_resid;
1209 1.54 chs biodone(bp);
1210 1.54 chs return (error);
1211 1.54 chs }
1212 1.54 chs
1213 1.54 chs /*
1214 1.54 chs * Vnode op for VM getpages.
1215 1.54 chs */
1216 1.69 chs
1217 1.54 chs int
1218 1.54 chs nfs_getpages(v)
1219 1.54 chs void *v;
1220 1.54 chs {
1221 1.54 chs struct vop_getpages_args /* {
1222 1.54 chs struct vnode *a_vp;
1223 1.54 chs voff_t a_offset;
1224 1.67 chs struct vm_page **a_m;
1225 1.54 chs int *a_count;
1226 1.54 chs int a_centeridx;
1227 1.54 chs vm_prot_t a_access_type;
1228 1.54 chs int a_advice;
1229 1.54 chs int a_flags;
1230 1.54 chs } */ *ap = v;
1231 1.54 chs
1232 1.54 chs struct vnode *vp = ap->a_vp;
1233 1.69 chs struct uvm_object *uobj = &vp->v_uobj;
1234 1.54 chs struct nfsnode *np = VTONFS(vp);
1235 1.80 enami const int npages = *ap->a_count;
1236 1.80 enami struct vm_page *pg, **pgs, *opgs[npages];
1237 1.74 chs off_t origoffset, len;
1238 1.80 enami int i, error;
1239 1.128.4.3 yamt bool v3 = NFS_ISV3(vp);
1240 1.128.4.3 yamt bool write = (ap->a_access_type & VM_PROT_WRITE) != 0;
1241 1.128.4.3 yamt bool locked = (ap->a_flags & PGO_LOCKED) != 0;
1242 1.54 chs
1243 1.54 chs /*
1244 1.80 enami * call the genfs code to get the pages. `pgs' may be NULL
1245 1.80 enami * when doing read-ahead.
1246 1.54 chs */
1247 1.54 chs
1248 1.80 enami pgs = ap->a_m;
1249 1.81 enami if (write && locked && v3) {
1250 1.80 enami KASSERT(pgs != NULL);
1251 1.80 enami #ifdef DEBUG
1252 1.80 enami
1253 1.80 enami /*
1254 1.80 enami * If PGO_LOCKED is set, real pages shouldn't exist
1255 1.80 enami * in the array.
1256 1.80 enami */
1257 1.80 enami
1258 1.80 enami for (i = 0; i < npages; i++)
1259 1.80 enami KDASSERT(pgs[i] == NULL || pgs[i] == PGO_DONTCARE);
1260 1.80 enami #endif
1261 1.80 enami memcpy(opgs, pgs, npages * sizeof(struct vm_page *));
1262 1.80 enami }
1263 1.69 chs error = genfs_getpages(v);
1264 1.76 chs if (error) {
1265 1.80 enami return (error);
1266 1.76 chs }
1267 1.76 chs
1268 1.76 chs /*
1269 1.76 chs * for read faults where the nfs node is not yet marked NMODIFIED,
1270 1.76 chs * set PG_RDONLY on the pages so that we come back here if someone
1271 1.76 chs * tries to modify later via the mapping that will be entered for
1272 1.76 chs * this fault.
1273 1.76 chs */
1274 1.76 chs
1275 1.76 chs if (!write && (np->n_flag & NMODIFIED) == 0 && pgs != NULL) {
1276 1.76 chs if (!locked) {
1277 1.76 chs simple_lock(&uobj->vmobjlock);
1278 1.76 chs }
1279 1.76 chs for (i = 0; i < npages; i++) {
1280 1.76 chs pg = pgs[i];
1281 1.76 chs if (pg == NULL || pg == PGO_DONTCARE) {
1282 1.76 chs continue;
1283 1.76 chs }
1284 1.76 chs pg->flags |= PG_RDONLY;
1285 1.76 chs }
1286 1.76 chs if (!locked) {
1287 1.76 chs simple_unlock(&uobj->vmobjlock);
1288 1.76 chs }
1289 1.76 chs }
1290 1.76 chs if (!write) {
1291 1.80 enami return (0);
1292 1.54 chs }
1293 1.54 chs
1294 1.54 chs /*
1295 1.69 chs * this is a write fault, update the commit info.
1296 1.54 chs */
1297 1.54 chs
1298 1.69 chs origoffset = ap->a_offset;
1299 1.74 chs len = npages << PAGE_SHIFT;
1300 1.54 chs
1301 1.76 chs if (v3) {
1302 1.128.4.3 yamt if (!locked) {
1303 1.128.4.3 yamt mutex_enter(&np->n_commitlock);
1304 1.128.4.3 yamt } else {
1305 1.128.4.4 yamt if (!mutex_tryenter(&np->n_commitlock)) {
1306 1.80 enami
1307 1.128.4.3 yamt /*
1308 1.128.4.3 yamt * Since PGO_LOCKED is set, we need to unbusy
1309 1.128.4.3 yamt * all pages fetched by genfs_getpages() above,
1310 1.128.4.3 yamt * tell the caller that there are no pages
1311 1.128.4.3 yamt * available and put back the original pgs array.
1312 1.128.4.3 yamt */
1313 1.80 enami
1314 1.128.4.3 yamt uvm_lock_pageq();
1315 1.128.4.3 yamt uvm_page_unbusy(pgs, npages);
1316 1.128.4.3 yamt uvm_unlock_pageq();
1317 1.128.4.3 yamt *ap->a_count = 0;
1318 1.128.4.3 yamt memcpy(pgs, opgs,
1319 1.128.4.3 yamt npages * sizeof(struct vm_page *));
1320 1.128.4.4 yamt return EBUSY;
1321 1.128.4.3 yamt }
1322 1.80 enami }
1323 1.76 chs nfs_del_committed_range(vp, origoffset, len);
1324 1.76 chs nfs_del_tobecommitted_range(vp, origoffset, len);
1325 1.76 chs }
1326 1.80 enami np->n_flag |= NMODIFIED;
1327 1.73 chs if (!locked) {
1328 1.73 chs simple_lock(&uobj->vmobjlock);
1329 1.73 chs }
1330 1.54 chs for (i = 0; i < npages; i++) {
1331 1.69 chs pg = pgs[i];
1332 1.69 chs if (pg == NULL || pg == PGO_DONTCARE) {
1333 1.54 chs continue;
1334 1.54 chs }
1335 1.74 chs pg->flags &= ~(PG_NEEDCOMMIT | PG_RDONLY);
1336 1.54 chs }
1337 1.73 chs if (!locked) {
1338 1.73 chs simple_unlock(&uobj->vmobjlock);
1339 1.73 chs }
1340 1.76 chs if (v3) {
1341 1.128.4.3 yamt mutex_exit(&np->n_commitlock);
1342 1.76 chs }
1343 1.80 enami return (0);
1344 1.1 cgd }