/*	$NetBSD: nfs_clbio.c,v 1.3 2016/11/18 08:31:30 pgoyette Exp $	*/
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
/* __FBSDID("FreeBSD: head/sys/fs/nfsclient/nfs_clbio.c 304026 2016-08-12 22:44:59Z rmacklem "); */
__RCSID("$NetBSD: nfs_clbio.c,v 1.3 2016/11/18 08:31:30 pgoyette Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfs_kdtrace.h>

extern int newnfs_directio_allow_mmap;
extern struct nfsstatsv1 nfsstatsv1;
extern struct mtx ncl_iod_mutex;
extern int ncl_numasync;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
extern int newnfs_directio_enable;
extern int nfs_keep_dirty_on_error;

int ncl_pbuf_freecnt = -1;	/* start out unlimited */

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, int ioflag);

/*
 * Vnode op for VM getpages.
 */
int
ncl_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	npages = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		printf("ncl_getpages: called with non-merged cache vnode\n");
		return (VM_PAGER_ERROR);
	}

	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
			mtx_unlock(&np->n_mtx);
			printf("ncl_getpages: called on non-cacheable vnode\n");
			return (VM_PAGER_ERROR);
		} else
			mtx_unlock(&np->n_mtx);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		/* We'll never get here for v4, because we always have fsinfo */
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 *
	 * XXXGL: is that true for NFS, where short read can occur???
	 */
	VM_OBJECT_WLOCK(object);
	if (pages[npages - 1]->valid != 0 && --npages == 0)
		goto out;
	VM_OBJECT_WUNLOCK(object);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	count = npages << PAGE_SHIFT;
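	/*
	 * Illustrative arithmetic (values assumed, not from this code):
	 * with 4KB pages (PAGE_SHIFT == 12), npages = 4 gives
	 * count = 4 << 12 = 16384, i.e. one contiguous 16KB read.
	 */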
	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncl_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		printf("ncl_getpages: error %d\n", error);
		return (VM_PAGER_ERROR);
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_WLOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error
			 * occurred we may have hit a zero-fill
			 * section.  We leave valid set to 0, and page
			 * is freed by vm_page_readahead_finish() if
			 * its index is not equal to requested, or
			 * page is zeroed and set valid by
			 * vm_pager_get_pages() for requested page.
			 */
			;
		}
	}
out:
	VM_OBJECT_WUNLOCK(object);
	if (ap->a_rbehind)
		*ap->a_rbehind = 0;
	if (ap->a_rahead)
		*ap->a_rahead = 0;
	return (VM_PAGER_OK);
}

/*
 * Vnode op for VM putpages.
 */
int
ncl_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	/* Set the cred to n_writecred for the write rpcs. */
	if (np->n_writecred != NULL)
		cred = crhold(np->n_writecred);
	else
		cred = crhold(curthread->td_ucred);	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
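	/*
	 * Illustrative note (assumed values): btoc() rounds a byte count
	 * up to whole pages, so with 4KB pages count = 16384 gives
	 * npages = 4, while count = 10000 gives npages = 3.
	 */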
	offset = IDX_TO_OFF(pages[0]->pindex);

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&np->n_mtx);
	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
		mtx_unlock(&np->n_mtx);
		printf("ncl_putpages: called on non-cacheable vnode\n");
		mtx_lock(&np->n_mtx);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_ERROR;

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
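	/*
	 * Worked example of the clamp above (assumed numbers): with
	 * np->n_size = 20000, offset = 16384 and count = 8192, count is
	 * reduced to 20000 - 16384 = 3616 so the write stops at EOF.
	 */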
	mtx_unlock(&np->n_mtx);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSWRITE_UNSTABLE;
	else
		iomode = NFSWRITE_FILESYNC;

	error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0);
	crfree(cred);

	pmap_qremove(kva, npages);
	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error == 0 || !nfs_keep_dirty_on_error) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
		if (must_commit)
			ncl_clearcommit(vp->v_mount);
	}
	return rtvals[0];
}

/*
 * For nfs, cache consistency can only be maintained approximately.
 * Although RFC1094 does not specify the criteria, the following is
 * believed to be compatible with the reference port.
 * For nfs:
 * If the file's modify time on the server has changed since the
 * last read rpc or you have written to the file,
 * you may have lost data cache consistency with the
 * server, so flush all of the file's data out of the cache.
 * Then force a getattr rpc to ensure that you have up to date
 * attributes.
 * NB: This implies that cache data can be read when up to
 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
 * attributes this could be forced by setting n_attrstamp to 0 before
 * the VOP_GETATTR() call.
 */
static inline int
nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
{
	int error = 0;
	struct vattr vattr;
	struct nfsnode *np = VTONFS(vp);
	int old_lock;

	/*
	 * Grab the exclusive lock before checking whether the cache is
	 * consistent.
	 * XXX - We can make this cheaper later (by acquiring cheaper locks).
	 * But for now, this suffices.
	 */
	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		ncl_downgrade_vnlock(vp, old_lock);
		return (EBADF);
	}

	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
		}
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		mtx_unlock(&np->n_mtx);
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			mtx_unlock(&np->n_mtx);
			if (vp->v_type == VDIR)
				ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
		mtx_unlock(&np->n_mtx);
	}
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return error;
}

/*
 * Vnode op for read using bio
 */
int
ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;
	off_t tmp_off;

	KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
		(void) newnfs_iosize(nmp);

	tmp_off = uio->uio_offset + uio->uio_resid;
	if (vp->v_type != VDIR &&
	    (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
		mtx_unlock(&nmp->nm_mtx);
		return (EFBIG);
	}
	mtx_unlock(&nmp->nm_mtx);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching/no readaheads.  Just read data into the user buffer. */
		return ncl_readrpc(vp, uio, cred);

	biosize = vp->v_bufobj.bo_bsize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);

	error = nfs_bioread_check_cons(vp, td, cred);
	if (error)
		return error;

	do {
		u_quad_t nsize;

		mtx_lock(&np->n_mtx);
		nsize = np->n_size;
		mtx_unlock(&np->n_mtx);

		switch (vp->v_type) {
		case VREG:
			NFSINCRGLOBAL(nfsstatsv1.biocache_reads);
			lbn = uio->uio_offset / biosize;
			on = uio->uio_offset - (lbn * biosize);
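			/*
			 * Worked example (assumed values): with
			 * biosize = 8192 and uio_offset = 20000,
			 * lbn = 20000 / 8192 = 2 and
			 * on = 20000 - 2 * 8192 = 3616, i.e. the read
			 * starts 3616 bytes into the third logical block.
			 */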

			/*
			 * Start the read ahead(s), as required.
			 */
			if (nmp->nm_readahead > 0) {
				for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
				    (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
					rabn = lbn + 1 + nra;
					if (incore(&vp->v_bufobj, rabn) == NULL) {
						rabp = nfs_getcacheblk(vp, rabn, biosize, td);
						if (!rabp) {
							error = newnfs_sigintr(nmp, td);
							return (error ? error : EINTR);
						}
						if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
							rabp->b_flags |= B_ASYNC;
							rabp->b_iocmd = BIO_READ;
							vfs_busy_pages(rabp, 0);
							if (ncl_asyncio(nmp, rabp, cred, td)) {
								rabp->b_flags |= B_INVAL;
								rabp->b_ioflags |= BIO_ERROR;
								vfs_unbusy_pages(rabp);
								brelse(rabp);
								break;
							}
						} else {
							brelse(rabp);
						}
					}
				}
			}

			/* Note that bcount is *not* DEV_BSIZE aligned. */
			bcount = biosize;
			if ((off_t)lbn * biosize >= nsize) {
				bcount = 0;
			} else if ((off_t)(lbn + 1) * biosize > nsize) {
				bcount = nsize - (off_t)lbn * biosize;
			}
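			/*
			 * Example of the EOF clamp above (assumed numbers):
			 * with biosize = 8192 and nsize = 20000, lbn = 2
			 * straddles EOF, so bcount = 20000 - 16384 = 3616,
			 * while any lbn >= 3 lies past EOF and bcount = 0.
			 */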
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}

			/*
			 * If B_CACHE is not set, we must issue the read.  If this
			 * fails, we return an error.
			 */

			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
					return (error);
				}
			}

			/*
			 * on is the offset into the current bp.  Figure out how many
			 * bytes we can copy out of the bp.  Note that bcount is
			 * NOT DEV_BSIZE aligned.
			 *
			 * Then figure out how many bytes we can copy into the uio.
			 */

			n = 0;
			if (on < bcount)
				n = MIN((unsigned)(bcount - on), uio->uio_resid);
			break;
		case VLNK:
			NFSINCRGLOBAL(nfsstatsv1.biocache_readlinks);
			bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					brelse(bp);
					return (error);
				}
			}
			n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			on = 0;
			break;
		case VDIR:
			NFSINCRGLOBAL(nfsstatsv1.biocache_readdirs);
			if (np->n_direofoffset
			    && uio->uio_offset >= np->n_direofoffset) {
				return (0);
			}
			lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
			on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
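			/*
			 * Illustrative arithmetic (assuming the usual
			 * NFS_DIRBLKSIZ of 8192): uio_offset = 20480 gives
			 * lbn = 2 and on = 20480 & 8191 = 4096, i.e. halfway
			 * into the third directory block.
			 */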
			bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
				}
				while (error == NFSERR_BAD_COOKIE) {
					ncl_invaldir(vp);
					error = ncl_vinvalbuf(vp, 0, td, 1);
					/*
					 * Yuck! The directory has been modified on the
					 * server.  The only way to get the block is by
					 * reading from the beginning to get all the
					 * offset cookies.
					 *
					 * Leave the last bp intact unless there is an error.
					 * Loop back up to the while if the error is another
					 * NFSERR_BAD_COOKIE (double yuch!).
					 */
					for (i = 0; i <= lbn && !error; i++) {
						if (np->n_direofoffset
						    && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
							return (0);
						bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
						if (!bp) {
							error = newnfs_sigintr(nmp, td);
							return (error ? error : EINTR);
						}
						if ((bp->b_flags & B_CACHE) == 0) {
							bp->b_iocmd = BIO_READ;
							vfs_busy_pages(bp, 0);
							error = ncl_doio(vp, bp, cred, td, 0);
							/*
							 * no error + B_INVAL == directory EOF,
							 * use the block.
							 */
							if (error == 0 && (bp->b_flags & B_INVAL))
								break;
						}
						/*
						 * An error will throw away the block and the
						 * for loop will break out.  If no error and this
						 * is not the block we want, we throw away the
						 * block and go for the next one via the for loop.
						 */
						if (error || i < lbn)
							brelse(bp);
					}
				}
				/*
				 * The above while is repeated if we hit another cookie
				 * error.  If we hit an error and it wasn't a cookie error,
				 * we give up.
				 */
				if (error)
					return (error);
			}

			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have the
			 *  directory offset cookie of the next block.)
			 */
			if (nmp->nm_readahead > 0 &&
			    (bp->b_flags & B_INVAL) == 0 &&
			    (np->n_direofoffset == 0 ||
			    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
			    incore(&vp->v_bufobj, lbn + 1) == NULL) {
				rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
				if (rabp) {
					if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
						rabp->b_flags |= B_ASYNC;
						rabp->b_iocmd = BIO_READ;
						vfs_busy_pages(rabp, 0);
						if (ncl_asyncio(nmp, rabp, cred, td)) {
							rabp->b_flags |= B_INVAL;
							rabp->b_ioflags |= BIO_ERROR;
							vfs_unbusy_pages(rabp);
							brelse(rabp);
						}
					} else {
						brelse(rabp);
					}
				}
			}
			/*
			 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
			 * chopped for the EOF condition, we cannot tell how large
			 * NFS directories are going to be until we hit EOF.  So
			 * an NFS directory buffer is *not* chopped to its EOF.  Now,
			 * it just so happens that b_resid will effectively chop it
			 * to EOF.  *BUT* this information is lost if the buffer goes
			 * away and is reconstituted into a B_CACHE state ( due to
			 * being VMIO ) later.  So we keep track of the directory eof
			 * in np->n_direofoffset and chop it off as an extra step
			 * right here.
			 */
			n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
			if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
				n = np->n_direofoffset - uio->uio_offset;
			break;
		default:
			printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
			bp = NULL;
			break;
		}

		if (n > 0) {
			error = vn_io_fault_uiomove(bp->b_data + on, (int)n, uio);
		}
		if (vp->v_type == VLNK)
			n = 0;
		if (bp != NULL)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}

/*
 * The NFS write path cannot handle iovec arrays with more than one
 * entry, so we break up iovecs accordingly (restricting each to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed.  The first a copy from the
 * user buffer to a staging buffer and then a second copy from the staging
 * buffer to mbufs.  This can be optimized by copying from the user buffer
 * directly into mbufs and passing the chain down, but that requires a
 * fair amount of re-working of the relevant codepaths (and can be done
 * later).
 */
static int
nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;
	int wsize;

	mtx_lock(&nmp->nm_mtx);
	wsize = nmp->nm_wsize;
	mtx_unlock(&nmp->nm_mtx);
	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSWRITE_FILESYNC;
			error = ncl_writerpc(vp, &uio, cred, &iomode,
			    &must_commit, 0);
			KASSERT((must_commit == 0),
			    ("ncl_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since
		 * the user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down.  But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&ncl_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			KASSERT(uiop->uio_segflg == UIO_USERSPACE ||
			    uiop->uio_segflg == UIO_SYSSPACE,
			    ("nfs_directio_write: Bad uio_segflg"));
			if (uiop->uio_segflg == UIO_USERSPACE) {
				error = copyin(uiop->uio_iov->iov_base,
				    t_iov->iov_base, size);
				if (error != 0)
					goto err_free;
			} else
				/*
				 * UIO_SYSSPACE may never happen, but handle
				 * it just in case it does.
				 */
				bcopy(uiop->uio_iov->iov_base, t_iov->iov_base,
				    size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			error = ncl_asyncio(nmp, bp, NOCRED, td);
err_free:
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				bp->b_vp = NULL;
				relpbuf(bp, &ncl_pbuf_freecnt);
				if (error == EINTR)
					return (error);
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}

/*
 * Vnode op for write using bio
 */
int
ncl_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount, noncontig_write, obcount;
	int bp_cached, n, on, error = 0, error1, wouldcommit;
	size_t orig_resid, local_resid;
	off_t orig_size, tmp_off;

	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("ncl_write proc"));
	if (vp->v_type != VREG)
		return (EIO);
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		mtx_unlock(&np->n_mtx);
		return (np->n_error);
	} else
		mtx_unlock(&np->n_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_wsize == 0)
		(void) newnfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}

	orig_resid = uio->uio_resid;
	mtx_lock(&np->n_mtx);
	orig_size = np->n_size;
	mtx_unlock(&np->n_mtx);

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		uio->uio_offset = np->n_size;
		mtx_unlock(&np->n_mtx);
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	tmp_off = uio->uio_offset + uio->uio_resid;
	if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return nfs_directio_write(vp, uio, cred, ioflag);

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, td))
		return (EFBIG);

	biosize = vp->v_bufobj.bo_bsize;
	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return error.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
	wouldcommit = 0;
	if (!(ioflag & IO_SYNC)) {
		int nflag;

		mtx_lock(&np->n_mtx);
		nflag = np->n_flag;
		mtx_unlock(&np->n_mtx);
		if (nflag & NMODIFIED) {
			BO_LOCK(&vp->v_bufobj);
			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
				    b_bobufs) {
					if (bp->b_flags & B_NEEDCOMMIT)
						wouldcommit += bp->b_bcount;
				}
			}
			BO_UNLOCK(&vp->v_bufobj);
		}
	}

	do {
		if (!(ioflag & IO_SYNC)) {
			wouldcommit += biosize;
			if (wouldcommit > nmp->nm_wcommitsize) {
				np->n_attrstamp = 0;
				KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
				error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
				if (error)
					return (error);
				wouldcommit = biosize;
			}
		}

		NFSINCRGLOBAL(nfsstatsv1.biocache_writes);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset - (lbn * biosize);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);
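		/*
		 * Worked example (assumed values): biosize = 8192,
		 * uio_offset = 20000 and uio_resid = 10000 give lbn = 2,
		 * on = 3616 and n = MIN(8192 - 3616, 10000) = 4576, so this
		 * pass fills out block 2 and the loop continues at the next
		 * block boundary.
		 */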
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NHASBEENLOCKED) == 0 &&
		    (nmp->nm_flag & NFSMNT_NONCONTIGWR) != 0)
			noncontig_write = 1;
		else
			noncontig_write = 0;
		if ((uio->uio_offset == np->n_size ||
		    (noncontig_write != 0 &&
		    lbn == (np->n_size / biosize) &&
		    uio->uio_offset + n > np->n_size)) && n) {
			mtx_unlock(&np->n_mtx);
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			obcount = np->n_size - (lbn * biosize);
			bp = nfs_getcacheblk(vp, lbn, obcount, td);

			if (bp != NULL) {
				long save;

				mtx_lock(&np->n_mtx);
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
				mtx_unlock(&np->n_mtx);

				save = bp->b_flags & B_CACHE;
				bcount = on + n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
				if (noncontig_write != 0 && on > obcount)
					vfs_bio_bzero_buf(bp, obcount, on -
					    obcount);
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			mtx_unlock(&np->n_mtx);
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			mtx_lock(&np->n_mtx);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
			mtx_unlock(&np->n_mtx);
		}

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thusly,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		bp_cached = 1;
		if (on == 0 && n == bcount) {
			if ((bp->b_flags & B_CACHE) == 0)
				bp_cached = 0;
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = ncl_doio(vp, bp, cred, td, 0);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		mtx_lock(&np->n_mtx);
		np->n_flag |= NMODIFIED;
		mtx_unlock(&np->n_mtx);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * If there has been a file lock applied to this file
		 * or vfs.nfs.old_noncontig_writing is set, do the following:
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * If vfs.nfs.old_noncontig_writing is not set and there has
		 * not been file locking done on this file:
		 * Relax coherency a bit for the sake of performance and
		 * expand the current dirty region to contain the new
		 * write even if it means we mark some non-dirty data as
		 * dirty.
		 */

		if (noncontig_write == 0 && bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		local_resid = uio->uio_resid;
		error = vn_io_fault_uiomove((char *)bp->b_data + on, n, uio);

		if (error != 0 && !bp_cached) {
			/*
			 * This block has no other content than what
			 * possibly was written by the faulty uiomove.
			 * Release it, forgetting the data pages, to
			 * prevent the leak of uninitialized data to
			 * usermode.
			 */
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			uio->uio_offset -= local_resid - uio->uio_resid;
			uio->uio_resid = local_resid;
			break;
		}

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		/*
		 * Get the partial update on the progress made from
		 * uiomove, if an error occurred.
		 */
		if (error != 0)
			n = local_resid - uio->uio_resid;

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n > 0) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_valid(bp, on, n);
		}

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error1 = bwrite(bp);
			if (error1 != 0) {
				if (error == 0)
					error = error1;
				break;
			}
		} else if ((n + on) == biosize) {
			bp->b_flags |= B_ASYNC;
			(void) ncl_writebp(bp, 0, NULL);
		} else {
			bdwrite(bp);
		}

		if (error != 0)
			break;
	} while (uio->uio_resid > 0 && n > 0);

	if (error != 0) {
		if (ioflag & IO_UNIT) {
			VATTR_NULL(&vattr);
			vattr.va_size = orig_size;
			/* IO_SYNC is handled implicitly */
			(void)VOP_SETATTR(vp, &vattr, cred);
			uio->uio_offset -= orig_resid - uio->uio_resid;
			uio->uio_resid = orig_resid;
		}
	}

	return (error);
}

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy.  If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		sigset_t oldset;

		newnfs_set_sigmask(td, &oldset);
		bp = getblk(vp, bn, size, PCATCH, 0, 0);
		newnfs_restore_sigmask(td, &oldset);
		while (bp == NULL) {
			if (newnfs_sigintr(nmp, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0, 0);
	}

	if (vp->v_type == VREG)
		bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
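	/*
	 * Illustrative mapping (assumed sizes): with bo_bsize = 8192 and
	 * DEV_BSIZE = 512 there are 16 device blocks per logical block,
	 * so bn = 2 yields b_blkno = 32.
	 */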
1295 1.1 dholland return (bp);
1296 1.1 dholland }
1297 1.1 dholland
1298 1.1 dholland /*
1299 1.1 dholland * Flush and invalidate all dirty buffers. If another process is already
1300 1.1 dholland * doing the flush, just wait for completion.
1301 1.1 dholland */
1302 1.1 dholland int
1303 1.1 dholland ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
1304 1.1 dholland {
1305 1.1 dholland struct nfsnode *np = VTONFS(vp);
1306 1.1 dholland struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1307 1.1 dholland int error = 0, slpflag, slptimeo;
1308 1.1 dholland int old_lock = 0;
1309 1.1 dholland
1310 1.1 dholland ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");
1311 1.1 dholland
1312 1.1 dholland if ((nmp->nm_flag & NFSMNT_INT) == 0)
1313 1.1 dholland intrflg = 0;
1314 1.1 dholland if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
1315 1.1 dholland intrflg = 1;
1316 1.1 dholland if (intrflg) {
1317 1.1 dholland slpflag = PCATCH;
1318 1.1 dholland slptimeo = 2 * hz;
1319 1.1 dholland } else {
1320 1.1 dholland slpflag = 0;
1321 1.1 dholland slptimeo = 0;
1322 1.1 dholland }
1323 1.1 dholland
1324 1.1 dholland old_lock = ncl_upgrade_vnlock(vp);
1325 1.1 dholland if (vp->v_iflag & VI_DOOMED) {
1326 1.1 dholland /*
1327 1.1 dholland * Since vgonel() uses the generic vinvalbuf() to flush
1328 1.1 dholland * dirty buffers and it does not call this function, it
1329 1.1 dholland * is safe to just return OK when VI_DOOMED is set.
1330 1.1 dholland */
1331 1.1 dholland ncl_downgrade_vnlock(vp, old_lock);
1332 1.1 dholland return (0);
1333 1.1 dholland }
1334 1.1 dholland
1335 1.1 dholland /*
1336 1.1 dholland * Now, flush as required.
1337 1.1 dholland */
1338 1.1 dholland if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
1339 1.1 dholland VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
1340 1.1 dholland vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
1341 1.1 dholland VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
1342 1.1 dholland /*
1343 1.1 dholland * If the page clean was interrupted, fail the invalidation.
1344 1.1  dholland 		 * Otherwise we run the risk of losing dirty pages in the
1345 1.1 dholland * vinvalbuf() call below.
1346 1.1 dholland */
1347 1.1 dholland if (intrflg && (error = newnfs_sigintr(nmp, td)))
1348 1.1 dholland goto out;
1349 1.1 dholland }
1350 1.1 dholland
1351 1.1 dholland error = vinvalbuf(vp, flags, slpflag, 0);
1352 1.1 dholland while (error) {
1353 1.1 dholland if (intrflg && (error = newnfs_sigintr(nmp, td)))
1354 1.1 dholland goto out;
1355 1.1 dholland error = vinvalbuf(vp, flags, 0, slptimeo);
1356 1.1 dholland }
1357 1.1 dholland if (NFSHASPNFS(nmp)) {
1358 1.1 dholland nfscl_layoutcommit(vp, td);
1359 1.1 dholland /*
1360 1.1 dholland * Invalidate the attribute cache, since writes to a DS
1361 1.1 dholland * won't update the size attribute.
1362 1.1 dholland */
1363 1.1 dholland mtx_lock(&np->n_mtx);
1364 1.1 dholland np->n_attrstamp = 0;
1365 1.1 dholland } else
1366 1.1 dholland mtx_lock(&np->n_mtx);
1367 1.1 dholland if (np->n_directio_asyncwr == 0)
1368 1.1 dholland np->n_flag &= ~NMODIFIED;
1369 1.1 dholland mtx_unlock(&np->n_mtx);
1370 1.1 dholland out:
1371 1.1 dholland ncl_downgrade_vnlock(vp, old_lock);
1372 1.1  dholland 	return (error);
1373 1.1 dholland }
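
/*
 * Illustrative caller sketch (an assumed pattern, not code from this
 * file): a caller that has detected a server-side change and must
 * discard the cache would typically do
 *
 *	error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
 *
 * i.e. push dirty buffers back first (V_SAVE) and let signals
 * interrupt the flush on interruptible mounts (intrflg = 1).
 */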
1374 1.1 dholland
1375 1.1 dholland /*
1376 1.1 dholland * Initiate asynchronous I/O. Return an error if no nfsiods are available.
1377 1.1 dholland * This is mainly to avoid queueing async I/O requests when the nfsiods
1378 1.1 dholland * are all hung on a dead server.
1379 1.1 dholland *
1380 1.1 dholland * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
1381 1.1 dholland * is eventually dequeued by the async daemon, ncl_doio() *will*.
1382 1.1 dholland */
1383 1.1 dholland int
1384 1.1 dholland ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
1385 1.1 dholland {
1386 1.1 dholland int iod;
1387 1.1 dholland int gotiod;
1388 1.1 dholland int slpflag = 0;
1389 1.1 dholland int slptimeo = 0;
1390 1.1 dholland int error, error2;
1391 1.1 dholland
1392 1.1 dholland /*
1393 1.1  dholland 	 * Commits are usually short and sweet, so let's save some CPU and
1394 1.1 dholland * leave the async daemons for more important rpc's (such as reads
1395 1.1 dholland * and writes).
1396 1.1 dholland *
1397 1.1 dholland * Readdirplus RPCs do vget()s to acquire the vnodes for entries
1398 1.1 dholland * in the directory in order to update attributes. This can deadlock
1399 1.1 dholland * with another thread that is waiting for async I/O to be done by
1400 1.1 dholland * an nfsiod thread while holding a lock on one of these vnodes.
1401 1.1 dholland * To avoid this deadlock, don't allow the async nfsiod threads to
1402 1.1 dholland * perform Readdirplus RPCs.
1403 1.1 dholland */
1404 1.1 dholland mtx_lock(&ncl_iod_mutex);
1405 1.1 dholland if ((bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
1406 1.1 dholland (nmp->nm_bufqiods > ncl_numasync / 2)) ||
1407 1.1 dholland (bp->b_vp->v_type == VDIR && (nmp->nm_flag & NFSMNT_RDIRPLUS))) {
1408 1.1 dholland mtx_unlock(&ncl_iod_mutex);
1409 1.1  dholland 		return (EIO);
1410 1.1 dholland }
1411 1.1 dholland again:
1412 1.1 dholland if (nmp->nm_flag & NFSMNT_INT)
1413 1.1 dholland slpflag = PCATCH;
1414 1.1 dholland gotiod = FALSE;
1415 1.1 dholland
1416 1.1 dholland /*
1417 1.1 dholland * Find a free iod to process this request.
1418 1.1 dholland */
1419 1.1 dholland for (iod = 0; iod < ncl_numasync; iod++)
1420 1.1 dholland if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
1421 1.1 dholland gotiod = TRUE;
1422 1.1 dholland break;
1423 1.1 dholland }
1424 1.1 dholland
1425 1.1 dholland /*
1426 1.1 dholland * Try to create one if none are free.
1427 1.1 dholland */
1428 1.1 dholland if (!gotiod)
1429 1.1 dholland ncl_nfsiodnew();
1430 1.1 dholland else {
1431 1.1 dholland /*
1432 1.1 dholland * Found one, so wake it up and tell it which
1433 1.1 dholland * mount to process.
1434 1.1 dholland */
1435 1.1 dholland NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
1436 1.1 dholland iod, nmp));
1437 1.1 dholland ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
1438 1.1 dholland ncl_iodmount[iod] = nmp;
1439 1.1 dholland nmp->nm_bufqiods++;
1440 1.1 dholland wakeup(&ncl_iodwant[iod]);
1441 1.1 dholland }
1442 1.1 dholland
1443 1.1 dholland /*
1444 1.1 dholland * If none are free, we may already have an iod working on this mount
1445 1.1 dholland * point. If so, it will process our request.
1446 1.1 dholland */
1447 1.1 dholland if (!gotiod) {
1448 1.1 dholland if (nmp->nm_bufqiods > 0) {
1449 1.1 dholland NFS_DPF(ASYNCIO,
1450 1.1 dholland ("ncl_asyncio: %d iods are already processing mount %p\n",
1451 1.1 dholland nmp->nm_bufqiods, nmp));
1452 1.1 dholland gotiod = TRUE;
1453 1.1 dholland }
1454 1.1 dholland }
1455 1.1 dholland
1456 1.1 dholland /*
1457 1.1 dholland * If we have an iod which can process the request, then queue
1458 1.1 dholland * the buffer.
1459 1.1 dholland */
1460 1.1 dholland if (gotiod) {
1461 1.1 dholland /*
1462 1.1 dholland * Ensure that the queue never grows too large. We still want
1463 1.2  wiz      		 * the I/O done asynchronously, so we block rather than return EIO.
1464 1.1 dholland */
1465 1.1  dholland 		while (nmp->nm_bufqlen >= 2 * ncl_numasync) {
1466 1.1 dholland NFS_DPF(ASYNCIO,
1467 1.1 dholland ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
1468 1.1 dholland nmp->nm_bufqwant = TRUE;
1469 1.1 dholland error = newnfs_msleep(td, &nmp->nm_bufq,
1470 1.1 dholland &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
1471 1.1 dholland slptimeo);
1472 1.1 dholland if (error) {
1473 1.1 dholland error2 = newnfs_sigintr(nmp, td);
1474 1.1 dholland if (error2) {
1475 1.1 dholland mtx_unlock(&ncl_iod_mutex);
1476 1.1 dholland return (error2);
1477 1.1 dholland }
1478 1.1 dholland if (slpflag == PCATCH) {
1479 1.1 dholland slpflag = 0;
1480 1.1 dholland slptimeo = 2 * hz;
1481 1.1 dholland }
1482 1.1 dholland }
1483 1.1 dholland /*
1484 1.1 dholland * We might have lost our iod while sleeping,
1485 1.3 pgoyette * so check and loop if necessary.
1486 1.1 dholland */
1487 1.1 dholland goto again;
1488 1.1 dholland }
1489 1.1 dholland
1490 1.1 dholland /* We might have lost our nfsiod */
1491 1.1 dholland if (nmp->nm_bufqiods == 0) {
1492 1.1 dholland NFS_DPF(ASYNCIO,
1493 1.1 dholland ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1494 1.1 dholland goto again;
1495 1.1 dholland }
1496 1.1 dholland
1497 1.1 dholland if (bp->b_iocmd == BIO_READ) {
1498 1.1 dholland if (bp->b_rcred == NOCRED && cred != NOCRED)
1499 1.1 dholland bp->b_rcred = crhold(cred);
1500 1.1 dholland } else {
1501 1.1 dholland if (bp->b_wcred == NOCRED && cred != NOCRED)
1502 1.1 dholland bp->b_wcred = crhold(cred);
1503 1.1 dholland }
1504 1.1 dholland
1505 1.1 dholland if (bp->b_flags & B_REMFREE)
1506 1.1 dholland bremfreef(bp);
1507 1.1 dholland BUF_KERNPROC(bp);
1508 1.1 dholland TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
1509 1.1 dholland nmp->nm_bufqlen++;
1510 1.1 dholland if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1511 1.1 dholland mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
1512 1.1 dholland VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
1513 1.1 dholland VTONFS(bp->b_vp)->n_directio_asyncwr++;
1514 1.1 dholland mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
1515 1.1 dholland }
1516 1.1 dholland mtx_unlock(&ncl_iod_mutex);
1517 1.1 dholland return (0);
1518 1.1 dholland }
1519 1.1 dholland
1520 1.1 dholland mtx_unlock(&ncl_iod_mutex);
1521 1.1 dholland
1522 1.1 dholland /*
1523 1.1 dholland * All the iods are busy on other mounts, so return EIO to
1524 1.1  dholland 	 * force the caller to process the I/O synchronously.
1525 1.1 dholland */
1526 1.1 dholland NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
1527 1.1 dholland return (EIO);
1528 1.1 dholland }
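
/*
 * Illustrative fallback sketch (an assumed caller pattern, not code
 * from this file): when ncl_asyncio() returns EIO because no nfsiod
 * can take the buffer, the caller performs the I/O synchronously:
 *
 *	if (ncl_asyncio(nmp, bp, NOCRED, td) != 0)
 *		(void)ncl_doio(bp->b_vp, bp, cr, td, 1);
 */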
1529 1.1 dholland
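/*
 * Perform a direct write that was queued for an nfsiod.  The uio is
 * handed over through bp->b_caller1; the iovec base, the iovec array
 * and the uio itself are M_NFSDIRECTIO allocations made by the
 * direct-write submission path, and all three are freed here once the
 * FILESYNC write RPC has completed.
 */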
1530 1.1 dholland void
1531 1.1 dholland ncl_doio_directwrite(struct buf *bp)
1532 1.1 dholland {
1533 1.1 dholland int iomode, must_commit;
1534 1.1 dholland struct uio *uiop = (struct uio *)bp->b_caller1;
1535 1.1 dholland char *iov_base = uiop->uio_iov->iov_base;
1536 1.1 dholland
1537 1.1 dholland iomode = NFSWRITE_FILESYNC;
1538 1.1 dholland uiop->uio_td = NULL; /* NULL since we're in nfsiod */
1539 1.1 dholland ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
1540 1.1 dholland KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
1541 1.1 dholland free(iov_base, M_NFSDIRECTIO);
1542 1.1 dholland free(uiop->uio_iov, M_NFSDIRECTIO);
1543 1.1 dholland free(uiop, M_NFSDIRECTIO);
1544 1.1 dholland if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1545 1.1 dholland struct nfsnode *np = VTONFS(bp->b_vp);
1546 1.1 dholland mtx_lock(&np->n_mtx);
1547 1.1 dholland if (NFSHASPNFS(VFSTONFS(vnode_mount(bp->b_vp)))) {
1548 1.1 dholland /*
1549 1.1 dholland * Invalidate the attribute cache, since writes to a DS
1550 1.1 dholland * won't update the size attribute.
1551 1.1 dholland */
1552 1.1 dholland np->n_attrstamp = 0;
1553 1.1 dholland }
1554 1.1 dholland np->n_directio_asyncwr--;
1555 1.1 dholland if (np->n_directio_asyncwr == 0) {
1556 1.1 dholland np->n_flag &= ~NMODIFIED;
1557 1.1 dholland if ((np->n_flag & NFSYNCWAIT)) {
1558 1.1 dholland np->n_flag &= ~NFSYNCWAIT;
1559 1.1 dholland wakeup((caddr_t)&np->n_directio_asyncwr);
1560 1.1 dholland }
1561 1.1 dholland }
1562 1.1 dholland mtx_unlock(&np->n_mtx);
1563 1.1 dholland }
1564 1.1 dholland bp->b_vp = NULL;
1565 1.1 dholland relpbuf(bp, &ncl_pbuf_freecnt);
1566 1.1 dholland }
1567 1.1 dholland
1568 1.1 dholland /*
1569 1.1 dholland * Do an I/O operation to/from a cache block. This may be called
1570 1.1 dholland * synchronously or from an nfsiod.
1571 1.1 dholland */
1572 1.1 dholland int
1573 1.1 dholland ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
1574 1.1 dholland int called_from_strategy)
1575 1.1 dholland {
1576 1.1 dholland struct uio *uiop;
1577 1.1 dholland struct nfsnode *np;
1578 1.1 dholland struct nfsmount *nmp;
1579 1.1 dholland int error = 0, iomode, must_commit = 0;
1580 1.1 dholland struct uio uio;
1581 1.1 dholland struct iovec io;
1582 1.1 dholland struct proc *p = td ? td->td_proc : NULL;
1583 1.1 dholland uint8_t iocmd;
1584 1.1 dholland
1585 1.1 dholland np = VTONFS(vp);
1586 1.1 dholland nmp = VFSTONFS(vp->v_mount);
1587 1.1 dholland uiop = &uio;
1588 1.1 dholland uiop->uio_iov = &io;
1589 1.1 dholland uiop->uio_iovcnt = 1;
1590 1.1 dholland uiop->uio_segflg = UIO_SYSSPACE;
1591 1.1 dholland uiop->uio_td = td;
1592 1.1 dholland
1593 1.1 dholland /*
1594 1.1 dholland * clear BIO_ERROR and B_INVAL state prior to initiating the I/O. We
1595 1.1 dholland * do this here so we do not have to do it in all the code that
1596 1.1 dholland * calls us.
1597 1.1 dholland */
1598 1.1 dholland bp->b_flags &= ~B_INVAL;
1599 1.1 dholland bp->b_ioflags &= ~BIO_ERROR;
1600 1.1 dholland
1601 1.1 dholland KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
1602 1.1 dholland iocmd = bp->b_iocmd;
1603 1.1 dholland if (iocmd == BIO_READ) {
1604 1.1 dholland io.iov_len = uiop->uio_resid = bp->b_bcount;
1605 1.1 dholland io.iov_base = bp->b_data;
1606 1.1 dholland uiop->uio_rw = UIO_READ;
1607 1.1 dholland
1608 1.1 dholland switch (vp->v_type) {
1609 1.1 dholland case VREG:
1610 1.1 dholland uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
1611 1.3 pgoyette NFSINCRGLOBAL(nfsstatsv1.read_bios);
1612 1.1 dholland error = ncl_readrpc(vp, uiop, cr);
1613 1.1 dholland
1614 1.1 dholland if (!error) {
1615 1.1 dholland if (uiop->uio_resid) {
1616 1.1 dholland /*
1617 1.1 dholland * If we had a short read with no error, we must have
1618 1.1 dholland * hit a file hole. We should zero-fill the remainder.
1619 1.1 dholland * This can also occur if the server hits the file EOF.
1620 1.1 dholland *
1621 1.1  dholland 				 * Holes could formerly occur due to pending
1622 1.1  dholland 				 * writes, but that is no longer possible.
1623 1.1 dholland */
1624 1.1 dholland int nread = bp->b_bcount - uiop->uio_resid;
1625 1.1 dholland ssize_t left = uiop->uio_resid;
1626 1.1 dholland
1627 1.1 dholland if (left > 0)
1628 1.1 dholland bzero((char *)bp->b_data + nread, left);
1629 1.1 dholland uiop->uio_resid = 0;
1630 1.1 dholland }
1631 1.1 dholland }
1632 1.1 dholland /* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
1633 1.1 dholland if (p && (vp->v_vflag & VV_TEXT)) {
1634 1.1 dholland mtx_lock(&np->n_mtx);
1635 1.1 dholland if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
1636 1.1 dholland mtx_unlock(&np->n_mtx);
1637 1.1 dholland PROC_LOCK(p);
1638 1.1 dholland killproc(p, "text file modification");
1639 1.1 dholland PROC_UNLOCK(p);
1640 1.1 dholland } else
1641 1.1 dholland mtx_unlock(&np->n_mtx);
1642 1.1 dholland }
1643 1.1 dholland break;
1644 1.1 dholland case VLNK:
1645 1.1 dholland uiop->uio_offset = (off_t)0;
1646 1.3 pgoyette NFSINCRGLOBAL(nfsstatsv1.readlink_bios);
1647 1.1 dholland error = ncl_readlinkrpc(vp, uiop, cr);
1648 1.1 dholland break;
1649 1.1 dholland case VDIR:
1650 1.3 pgoyette NFSINCRGLOBAL(nfsstatsv1.readdir_bios);
1651 1.1 dholland uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
1652 1.1 dholland if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
1653 1.1 dholland error = ncl_readdirplusrpc(vp, uiop, cr, td);
1654 1.1 dholland if (error == NFSERR_NOTSUPP)
1655 1.1 dholland nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
1656 1.1 dholland }
1657 1.1 dholland if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
1658 1.1 dholland error = ncl_readdirrpc(vp, uiop, cr, td);
1659 1.1 dholland /*
1660 1.1 dholland * end-of-directory sets B_INVAL but does not generate an
1661 1.1 dholland * error.
1662 1.1 dholland */
1663 1.1 dholland if (error == 0 && uiop->uio_resid == bp->b_bcount)
1664 1.1 dholland bp->b_flags |= B_INVAL;
1665 1.1 dholland break;
1666 1.1 dholland default:
1667 1.3 pgoyette printf("ncl_doio: type %x unexpected\n", vp->v_type);
1668 1.1 dholland break;
1669 1.3 pgoyette }
1670 1.1 dholland if (error) {
1671 1.1 dholland bp->b_ioflags |= BIO_ERROR;
1672 1.1 dholland bp->b_error = error;
1673 1.1 dholland }
1674 1.1 dholland } else {
1675 1.1 dholland /*
1676 1.1 dholland * If we only need to commit, try to commit
1677 1.1 dholland */
1678 1.1 dholland if (bp->b_flags & B_NEEDCOMMIT) {
1679 1.1 dholland int retv;
1680 1.1 dholland off_t off;
1681 1.1 dholland
1682 1.1 dholland off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
1683 1.1 dholland retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff,
1684 1.1 dholland bp->b_wcred, td);
1685 1.1 dholland if (retv == 0) {
1686 1.1 dholland bp->b_dirtyoff = bp->b_dirtyend = 0;
1687 1.1 dholland bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1688 1.1 dholland bp->b_resid = 0;
1689 1.1 dholland bufdone(bp);
1690 1.1 dholland return (0);
1691 1.1 dholland }
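			/*
			 * A stale write verifier means the server has
			 * rebooted since the data was written, so every
			 * uncommitted buffer on this mount must be written
			 * again rather than just committed.
			 */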
1692 1.1 dholland if (retv == NFSERR_STALEWRITEVERF) {
1693 1.1 dholland ncl_clearcommit(vp->v_mount);
1694 1.1 dholland }
1695 1.1 dholland }
1696 1.1 dholland
1697 1.1 dholland /*
1698 1.1  dholland 		 * Set up for the actual write
1699 1.1 dholland */
1700 1.1 dholland mtx_lock(&np->n_mtx);
1701 1.1 dholland if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
1702 1.1 dholland bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
1703 1.1 dholland mtx_unlock(&np->n_mtx);
1704 1.1 dholland
1705 1.1 dholland if (bp->b_dirtyend > bp->b_dirtyoff) {
1706 1.1 dholland io.iov_len = uiop->uio_resid = bp->b_dirtyend
1707 1.1 dholland - bp->b_dirtyoff;
1708 1.1 dholland uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
1709 1.1 dholland + bp->b_dirtyoff;
1710 1.1 dholland io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1711 1.1 dholland uiop->uio_rw = UIO_WRITE;
1712 1.3 pgoyette NFSINCRGLOBAL(nfsstatsv1.write_bios);
1713 1.1 dholland
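			/*
			 * Plain async writes (no pending commit, not
			 * B_NOCACHE and not part of a cluster) can go out
			 * UNSTABLE; everything else is pushed FILESYNC so
			 * the data is on stable storage when the RPC
			 * completes.
			 */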
1714 1.1 dholland if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
1715 1.1 dholland iomode = NFSWRITE_UNSTABLE;
1716 1.1 dholland else
1717 1.1 dholland iomode = NFSWRITE_FILESYNC;
1718 1.1 dholland
1719 1.1 dholland error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
1720 1.1 dholland called_from_strategy);
1721 1.1 dholland
1722 1.1 dholland /*
1723 1.1 dholland * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
1724 1.1 dholland * to cluster the buffers needing commit. This will allow
1725 1.1 dholland * the system to submit a single commit rpc for the whole
1726 1.1 dholland * cluster. We can do this even if the buffer is not 100%
1727 1.1 dholland * dirty (relative to the NFS blocksize), so we optimize the
1728 1.1 dholland * append-to-file-case.
1729 1.1 dholland *
1730 1.1 dholland * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
1731 1.1 dholland * cleared because write clustering only works for commit
1732 1.1 dholland * rpc's, not for the data portion of the write).
1733 1.1 dholland */
1734 1.1 dholland
1735 1.1 dholland if (!error && iomode == NFSWRITE_UNSTABLE) {
1736 1.1 dholland bp->b_flags |= B_NEEDCOMMIT;
1737 1.1 dholland if (bp->b_dirtyoff == 0
1738 1.1 dholland && bp->b_dirtyend == bp->b_bcount)
1739 1.1 dholland bp->b_flags |= B_CLUSTEROK;
1740 1.1 dholland } else {
1741 1.1 dholland bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1742 1.1 dholland }
1743 1.1 dholland
1744 1.1 dholland /*
1745 1.1 dholland * For an interrupted write, the buffer is still valid
1746 1.1 dholland * and the write hasn't been pushed to the server yet,
1747 1.1 dholland * so we can't set BIO_ERROR and report the interruption
1748 1.1 dholland * by setting B_EINTR. For the B_ASYNC case, B_EINTR
1749 1.1 dholland * is not relevant, so the rpc attempt is essentially
1750 1.1 dholland * a noop. For the case of a V3 write rpc not being
1751 1.1 dholland * committed to stable storage, the block is still
1752 1.1 dholland * dirty and requires either a commit rpc or another
1753 1.1 dholland * write rpc with iomode == NFSV3WRITE_FILESYNC before
1754 1.1 dholland * the block is reused. This is indicated by setting
1755 1.1 dholland * the B_DELWRI and B_NEEDCOMMIT flags.
1756 1.1 dholland *
1757 1.1 dholland * EIO is returned by ncl_writerpc() to indicate a recoverable
1758 1.1 dholland * write error and is handled as above, except that
1759 1.1 dholland * B_EINTR isn't set. One cause of this is a stale stateid
1760 1.1 dholland * error for the RPC that indicates recovery is required,
1761 1.1 dholland * when called with called_from_strategy != 0.
1762 1.1 dholland *
1763 1.1 dholland * If the buffer is marked B_PAGING, it does not reside on
1764 1.1 dholland * the vp's paging queues so we cannot call bdirty(). The
1765 1.1 dholland * bp in this case is not an NFS cache block so we should
1766 1.1 dholland * be safe. XXX
1767 1.1 dholland *
1768 1.1 dholland * The logic below breaks up errors into recoverable and
1769 1.1 dholland * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
1770 1.1 dholland * and keep the buffer around for potential write retries.
1771 1.1 dholland * For the latter (eg ESTALE), we toss the buffer away (B_INVAL)
1772 1.1 dholland * and save the error in the nfsnode. This is less than ideal
1773 1.1  dholland 			 * but necessary. Keeping such buffers around could eventually
1774 1.1  dholland 			 * exhaust the buffer cache (they can never be written out, so
1775 1.1  dholland 			 * they will constantly be re-dirtied). It also causes
1776 1.1 dholland * all sorts of vfs panics. For non-recoverable write errors,
1777 1.1 dholland * also invalidate the attrcache, so we'll be forced to go over
1778 1.1 dholland * the wire for this object, returning an error to user on next
1779 1.1 dholland * call (most of the time).
1780 1.1 dholland */
1781 1.1 dholland if (error == EINTR || error == EIO || error == ETIMEDOUT
1782 1.1 dholland || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
1783 1.1 dholland int s;
1784 1.1 dholland
1785 1.1 dholland s = splbio();
1786 1.1 dholland bp->b_flags &= ~(B_INVAL|B_NOCACHE);
1787 1.1 dholland if ((bp->b_flags & B_PAGING) == 0) {
1788 1.1 dholland bdirty(bp);
1789 1.1 dholland bp->b_flags &= ~B_DONE;
1790 1.1 dholland }
1791 1.1 dholland if ((error == EINTR || error == ETIMEDOUT) &&
1792 1.1 dholland (bp->b_flags & B_ASYNC) == 0)
1793 1.1 dholland bp->b_flags |= B_EINTR;
1794 1.1 dholland splx(s);
1795 1.1 dholland } else {
1796 1.1 dholland if (error) {
1797 1.1 dholland bp->b_ioflags |= BIO_ERROR;
1798 1.1 dholland bp->b_flags |= B_INVAL;
1799 1.1 dholland bp->b_error = np->n_error = error;
1800 1.1 dholland mtx_lock(&np->n_mtx);
1801 1.1 dholland np->n_flag |= NWRITEERR;
1802 1.1 dholland np->n_attrstamp = 0;
1803 1.1 dholland KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
1804 1.1 dholland mtx_unlock(&np->n_mtx);
1805 1.1 dholland }
1806 1.1 dholland bp->b_dirtyoff = bp->b_dirtyend = 0;
1807 1.1 dholland }
1808 1.1 dholland } else {
1809 1.1 dholland bp->b_resid = 0;
1810 1.1 dholland bufdone(bp);
1811 1.1 dholland return (0);
1812 1.1 dholland }
1813 1.1 dholland }
1814 1.1 dholland bp->b_resid = uiop->uio_resid;
1815 1.1 dholland if (must_commit)
1816 1.1 dholland ncl_clearcommit(vp->v_mount);
1817 1.1 dholland bufdone(bp);
1818 1.1 dholland return (error);
1819 1.1 dholland }
1820 1.1 dholland
1821 1.1 dholland /*
1822 1.1 dholland * Used to aid in handling ftruncate() operations on the NFS client side.
1823 1.1 dholland * Truncation creates a number of special problems for NFS. We have to
1824 1.1 dholland * throw away VM pages and buffer cache buffers that are beyond EOF, and
1825 1.1 dholland * we have to properly handle VM pages or (potentially dirty) buffers
1826 1.1 dholland * that straddle the truncation point.
1827 1.1 dholland */
1828 1.1 dholland
1829 1.1 dholland int
1830 1.1 dholland ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
1831 1.1 dholland {
1832 1.1 dholland struct nfsnode *np = VTONFS(vp);
1833 1.1 dholland u_quad_t tsize;
1834 1.1 dholland int biosize = vp->v_bufobj.bo_bsize;
1835 1.1 dholland int error = 0;
1836 1.1 dholland
1837 1.1 dholland mtx_lock(&np->n_mtx);
1838 1.1 dholland tsize = np->n_size;
1839 1.1 dholland np->n_size = nsize;
1840 1.1 dholland mtx_unlock(&np->n_mtx);
1841 1.1 dholland
1842 1.1 dholland if (nsize < tsize) {
1843 1.1 dholland struct buf *bp;
1844 1.1 dholland daddr_t lbn;
1845 1.1 dholland int bufsize;
1846 1.1 dholland
1847 1.1 dholland /*
1848 1.1 dholland * vtruncbuf() doesn't get the buffer overlapping the
1849 1.1 dholland * truncation point. We may have a B_DELWRI and/or B_CACHE
1850 1.1 dholland * buffer that now needs to be truncated.
1851 1.1 dholland */
1852 1.1 dholland error = vtruncbuf(vp, cred, nsize, biosize);
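		/*
		 * For example (illustrative numbers): with biosize 8192
		 * and nsize 20000, lbn = 20000 / 8192 = 2 and bufsize =
		 * 20000 - 2 * 8192 = 3616, so the buffer straddling the
		 * truncation point is block 2 and only its first 3616
		 * bytes remain valid.
		 */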
1853 1.1 dholland lbn = nsize / biosize;
1854 1.1 dholland bufsize = nsize - (lbn * biosize);
1855 1.1 dholland bp = nfs_getcacheblk(vp, lbn, bufsize, td);
1856 1.1 dholland if (!bp)
1857 1.1  dholland 			return (EINTR);
1858 1.1 dholland if (bp->b_dirtyoff > bp->b_bcount)
1859 1.1 dholland bp->b_dirtyoff = bp->b_bcount;
1860 1.1 dholland if (bp->b_dirtyend > bp->b_bcount)
1861 1.1 dholland bp->b_dirtyend = bp->b_bcount;
1862 1.1 dholland bp->b_flags |= B_RELBUF; /* don't leave garbage around */
1863 1.1 dholland brelse(bp);
1864 1.1 dholland } else {
1865 1.1 dholland vnode_pager_setsize(vp, nsize);
1866 1.1 dholland }
1867 1.1  dholland 	return (error);
1868 1.1 dholland }
1869 1.1 dholland