/*	$NetBSD: nfs_clbio.c,v 1.1 2013/09/30 07:19:30 dholland Exp $	*/
/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * Rick Macklem at The University of Guelph.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)nfs_bio.c	8.9 (Berkeley) 3/30/95
 */

#include <sys/cdefs.h>
/* __FBSDID("FreeBSD: head/sys/fs/nfsclient/nfs_clbio.c 252072 2013-06-21 22:26:18Z rmacklem "); */
__RCSID("$NetBSD: nfs_clbio.c,v 1.1 2013/09/30 07:19:30 dholland Exp $");

#include "opt_kdtrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/rwlock.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>
#include <vm/vm_object.h>
#include <vm/vm_pager.h>
#include <vm/vnode_pager.h>

#include <fs/nfs/nfsport.h>
#include <fs/nfsclient/nfsmount.h>
#include <fs/nfsclient/nfs.h>
#include <fs/nfsclient/nfsnode.h>
#include <fs/nfsclient/nfs_kdtrace.h>

extern int newnfs_directio_allow_mmap;
extern struct nfsstats newnfsstats;
extern struct mtx ncl_iod_mutex;
extern int ncl_numasync;
extern enum nfsiod_state ncl_iodwant[NFS_MAXASYNCDAEMON];
extern struct nfsmount *ncl_iodmount[NFS_MAXASYNCDAEMON];
extern int newnfs_directio_enable;
extern int nfs_keep_dirty_on_error;

int ncl_pbuf_freecnt = -1;	/* start out unlimited */

static struct buf *nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size,
    struct thread *td);
static int nfs_directio_write(struct vnode *vp, struct uio *uiop,
    struct ucred *cred, int ioflag);

/*
 * Vnode op for VM getpages.
 */
int
ncl_getpages(struct vop_getpages_args *ap)
{
	int i, error, nextoff, size, toff, count, npages;
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	vm_object_t object;
	vm_page_t *pages;
	struct nfsnode *np;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	cred = curthread->td_ucred;		/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;

	if ((object = vp->v_object) == NULL) {
		ncl_printf("nfs_getpages: called with non-merged cache vnode??\n");
		return (VM_PAGER_ERROR);
	}

	if (newnfs_directio_enable && !newnfs_directio_allow_mmap) {
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
			mtx_unlock(&np->n_mtx);
			ncl_printf("nfs_getpages: called on non-cacheable vnode??\n");
			return (VM_PAGER_ERROR);
		} else
			mtx_unlock(&np->n_mtx);
	}

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		/* We'll never get here for v4, because we always have fsinfo */
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	npages = btoc(count);

	/*
	 * If the requested page is partially valid, just return it and
	 * allow the pager to zero-out the blanks.  Partially valid pages
	 * can only occur at the file EOF.
	 */
	VM_OBJECT_WLOCK(object);
	if (pages[ap->a_reqpage]->valid != 0) {
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_WUNLOCK(object);
		return (0);
	}
	VM_OBJECT_WUNLOCK(object);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodein);
	PCPU_ADD(cnt.v_vnodepgsin, npages);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = IDX_TO_OFF(pages[0]->pindex);
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_td = td;

	error = ncl_readrpc(vp, &uio, cred);
	pmap_qremove(kva, npages);

	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error && (uio.uio_resid == count)) {
		ncl_printf("nfs_getpages: error %d\n", error);
		VM_OBJECT_WLOCK(object);
		for (i = 0; i < npages; ++i) {
			if (i != ap->a_reqpage) {
				vm_page_lock(pages[i]);
				vm_page_free(pages[i]);
				vm_page_unlock(pages[i]);
			}
		}
		VM_OBJECT_WUNLOCK(object);
		return (VM_PAGER_ERROR);
	}

	/*
	 * Calculate the number of bytes read and validate only that number
	 * of bytes.  Note that due to pending writes, size may be 0.  This
	 * does not mean that the remaining data is invalid!
	 */

	size = count - uio.uio_resid;
	VM_OBJECT_WLOCK(object);
	for (i = 0, toff = 0; i < npages; i++, toff = nextoff) {
		vm_page_t m;
		nextoff = toff + PAGE_SIZE;
		m = pages[i];

		if (nextoff <= size) {
			/*
			 * Read operation filled an entire page
			 */
			m->valid = VM_PAGE_BITS_ALL;
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else if (size > toff) {
			/*
			 * Read operation filled a partial page.
			 */
			m->valid = 0;
			vm_page_set_valid_range(m, 0, size - toff);
			KASSERT(m->dirty == 0,
			    ("nfs_getpages: page %p is dirty", m));
		} else {
			/*
			 * Read operation was short.  If no error
			 * occurred we may have hit a zero-fill
			 * section.  We leave valid set to 0, and page
			 * is freed by vm_page_readahead_finish() if
			 * its index is not equal to requested, or
			 * page is zeroed and set valid by
			 * vm_pager_get_pages() for requested page.
			 */
			;
		}
		if (i != ap->a_reqpage)
			vm_page_readahead_finish(m);
	}
	VM_OBJECT_WUNLOCK(object);
	return (0);
}
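
/*
 * Illustrative note (editor's addition, not from the original source):
 * the validation loop at the end of ncl_getpages() is plain byte
 * arithmetic.  Assuming a 4 KB PAGE_SIZE and a 16 KB request
 * (npages == 4), a read RPC that returns 10000 bytes leaves
 * uio.uio_resid == 6384, so size == 10000 and:
 *
 *	page 0: toff     0, nextoff  4096 <= size -> fully valid
 *	page 1: toff  4096, nextoff  8192 <= size -> fully valid
 *	page 2: toff  8192, size > toff           -> valid range [0, 1808)
 *	page 3: toff 12288, size <= toff          -> left invalid (short read)
 */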

/*
 * Vnode op for VM putpages.
 */
int
ncl_putpages(struct vop_putpages_args *ap)
{
	struct uio uio;
	struct iovec iov;
	vm_offset_t kva;
	struct buf *bp;
	int iomode, must_commit, i, error, npages, count;
	off_t offset;
	int *rtvals;
	struct vnode *vp;
	struct thread *td;
	struct ucred *cred;
	struct nfsmount *nmp;
	struct nfsnode *np;
	vm_page_t *pages;

	vp = ap->a_vp;
	np = VTONFS(vp);
	td = curthread;				/* XXX */
	/* Set the cred to n_writecred for the write rpcs. */
	if (np->n_writecred != NULL)
		cred = crhold(np->n_writecred);
	else
		cred = crhold(curthread->td_ucred);	/* XXX */
	nmp = VFSTONFS(vp->v_mount);
	pages = ap->a_m;
	count = ap->a_count;
	rtvals = ap->a_rtvals;
	npages = btoc(count);
	offset = IDX_TO_OFF(pages[0]->pindex);

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
	} else
		mtx_unlock(&nmp->nm_mtx);

	mtx_lock(&np->n_mtx);
	if (newnfs_directio_enable && !newnfs_directio_allow_mmap &&
	    (np->n_flag & NNONCACHE) && (vp->v_type == VREG)) {
		mtx_unlock(&np->n_mtx);
		ncl_printf("ncl_putpages: called on non-cacheable vnode??\n");
		mtx_lock(&np->n_mtx);
	}

	for (i = 0; i < npages; i++)
		rtvals[i] = VM_PAGER_ERROR;

	/*
	 * When putting pages, do not extend file past EOF.
	 */
	if (offset + count > np->n_size) {
		count = np->n_size - offset;
		if (count < 0)
			count = 0;
	}
	mtx_unlock(&np->n_mtx);

	/*
	 * We use only the kva address for the buffer, but this is extremely
	 * convenient and fast.
	 */
	bp = getpbuf(&ncl_pbuf_freecnt);

	kva = (vm_offset_t) bp->b_data;
	pmap_qenter(kva, pages, npages);
	PCPU_INC(cnt.v_vnodeout);
	PCPU_ADD(cnt.v_vnodepgsout, count);

	iov.iov_base = (caddr_t) kva;
	iov.iov_len = count;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_resid = count;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_td = td;

	if ((ap->a_sync & VM_PAGER_PUT_SYNC) == 0)
		iomode = NFSWRITE_UNSTABLE;
	else
		iomode = NFSWRITE_FILESYNC;

	error = ncl_writerpc(vp, &uio, cred, &iomode, &must_commit, 0);
	crfree(cred);

	pmap_qremove(kva, npages);
	relpbuf(bp, &ncl_pbuf_freecnt);

	if (error == 0 || !nfs_keep_dirty_on_error) {
		vnode_pager_undirty_pages(pages, rtvals, count - uio.uio_resid);
		if (must_commit)
			ncl_clearcommit(vp->v_mount);
	}
	return rtvals[0];
}
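
/*
 * Illustrative note (editor's addition): the iomode chosen above trades
 * RPC cost against durability.  A background pageout (no
 * VM_PAGER_PUT_SYNC) is sent NFSWRITE_UNSTABLE, letting the server reply
 * before the data hits stable storage, with a later commit RPC finishing
 * the job.  When ncl_writerpc() reports must_commit (the server's write
 * verifier changed, e.g. across a server reboot), ncl_clearcommit() makes
 * uncommitted buffers eligible to be written again rather than trusted.
 * A VM_PAGER_PUT_SYNC pageout uses NFSWRITE_FILESYNC and needs no commit
 * step.
 */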

/*
 * For nfs, cache consistency can only be maintained approximately.
 * Although RFC1094 does not specify the criteria, the following is
 * believed to be compatible with the reference port.
 * For nfs:
 * If the file's modify time on the server has changed since the
 * last read rpc or you have written to the file,
 * you may have lost data cache consistency with the
 * server, so flush all of the file's data out of the cache.
 * Then force a getattr rpc to ensure that you have up to date
 * attributes.
 * NB: This implies that cache data can be read when up to
 * NFS_ATTRTIMEO seconds out of date. If you find that you need current
 * attributes this could be forced by setting n_attrstamp to 0 before
 * the VOP_GETATTR() call.
 */
static inline int
nfs_bioread_check_cons(struct vnode *vp, struct thread *td, struct ucred *cred)
{
	int error = 0;
	struct vattr vattr;
	struct nfsnode *np = VTONFS(vp);
	int old_lock;

	/*
	 * Grab the exclusive lock before checking whether the cache is
	 * consistent.
	 * XXX - We can make this cheaper later (by acquiring cheaper locks).
	 * But for now, this suffices.
	 */
	old_lock = ncl_upgrade_vnlock(vp);
	if (vp->v_iflag & VI_DOOMED) {
		ncl_downgrade_vnlock(vp, old_lock);
		return (EBADF);
	}

	mtx_lock(&np->n_mtx);
	if (np->n_flag & NMODIFIED) {
		mtx_unlock(&np->n_mtx);
		if (vp->v_type != VREG) {
			if (vp->v_type != VDIR)
				panic("nfs: bioread, not dir");
			ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
		}
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			goto out;
		mtx_lock(&np->n_mtx);
		np->n_mtime = vattr.va_mtime;
		mtx_unlock(&np->n_mtx);
	} else {
		mtx_unlock(&np->n_mtx);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		if ((np->n_flag & NSIZECHANGED)
		    || (NFS_TIMESPEC_COMPARE(&np->n_mtime, &vattr.va_mtime))) {
			mtx_unlock(&np->n_mtx);
			if (vp->v_type == VDIR)
				ncl_invaldir(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				goto out;
			mtx_lock(&np->n_mtx);
			np->n_mtime = vattr.va_mtime;
			np->n_flag &= ~NSIZECHANGED;
		}
		mtx_unlock(&np->n_mtx);
	}
out:
	ncl_downgrade_vnlock(vp, old_lock);
	return error;
}
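
/*
 * Illustrative note (editor's addition): this is the close-to-open
 * consistency step.  A sketch of the timeline it guards against,
 * assuming clients A and B share a file:
 *
 *	A: reads, caches blocks           (n_mtime matches the server)
 *	B: writes, server mtime advances
 *	A: next ncl_bioread() -> nfs_bioread_check_cons() sees the
 *	   NFS_TIMESPEC_COMPARE mismatch, invalidates A's cached
 *	   buffers, and re-reads from the server.
 *
 * As the comment above notes, stale reads remain possible within the
 * attribute-cache window.
 */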

/*
 * Vnode op for read using bio
 */
int
ncl_bioread(struct vnode *vp, struct uio *uio, int ioflag, struct ucred *cred)
{
	struct nfsnode *np = VTONFS(vp);
	int biosize, i;
	struct buf *bp, *rabp;
	struct thread *td;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn, rabn;
	int bcount;
	int seqcount;
	int nra, error = 0, n = 0, on = 0;
	off_t tmp_off;

	KASSERT(uio->uio_rw == UIO_READ, ("ncl_read mode"));
	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)	/* XXX VDIR cookies can be negative */
		return (EINVAL);
	td = uio->uio_td;

	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_rsize == 0 || nmp->nm_readdirsize == 0)
		(void) newnfs_iosize(nmp);

	tmp_off = uio->uio_offset + uio->uio_resid;
	if (vp->v_type != VDIR &&
	    (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)) {
		mtx_unlock(&nmp->nm_mtx);
		return (EFBIG);
	}
	mtx_unlock(&nmp->nm_mtx);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && (vp->v_type == VREG))
		/* No caching / no readaheads.  Just read data into the user buffer. */
		return ncl_readrpc(vp, uio, cred);

	biosize = vp->v_bufobj.bo_bsize;
	seqcount = (int)((off_t)(ioflag >> IO_SEQSHIFT) * biosize / BKVASIZE);

	error = nfs_bioread_check_cons(vp, td, cred);
	if (error)
		return error;

	do {
		u_quad_t nsize;

		mtx_lock(&np->n_mtx);
		nsize = np->n_size;
		mtx_unlock(&np->n_mtx);

		switch (vp->v_type) {
		case VREG:
			NFSINCRGLOBAL(newnfsstats.biocache_reads);
			lbn = uio->uio_offset / biosize;
			on = uio->uio_offset - (lbn * biosize);

			/*
			 * Start the read ahead(s), as required.
			 */
			if (nmp->nm_readahead > 0) {
				for (nra = 0; nra < nmp->nm_readahead && nra < seqcount &&
				    (off_t)(lbn + 1 + nra) * biosize < nsize; nra++) {
					rabn = lbn + 1 + nra;
					if (incore(&vp->v_bufobj, rabn) == NULL) {
						rabp = nfs_getcacheblk(vp, rabn, biosize, td);
						if (!rabp) {
							error = newnfs_sigintr(nmp, td);
							return (error ? error : EINTR);
						}
						if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
							rabp->b_flags |= B_ASYNC;
							rabp->b_iocmd = BIO_READ;
							vfs_busy_pages(rabp, 0);
							if (ncl_asyncio(nmp, rabp, cred, td)) {
								rabp->b_flags |= B_INVAL;
								rabp->b_ioflags |= BIO_ERROR;
								vfs_unbusy_pages(rabp);
								brelse(rabp);
								break;
							}
						} else {
							brelse(rabp);
						}
					}
				}
			}

			/* Note that bcount is *not* DEV_BSIZE aligned. */
			bcount = biosize;
			if ((off_t)lbn * biosize >= nsize) {
				bcount = 0;
			} else if ((off_t)(lbn + 1) * biosize > nsize) {
				bcount = nsize - (off_t)lbn * biosize;
			}
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}

			/*
			 * If B_CACHE is not set, we must issue the read.  If this
			 * fails, we return an error.
			 */

			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
					return (error);
				}
			}

			/*
			 * on is the offset into the current bp.  Figure out how many
			 * bytes we can copy out of the bp.  Note that bcount is
			 * NOT DEV_BSIZE aligned.
			 *
			 * Then figure out how many bytes we can copy into the uio.
			 */

			n = 0;
			if (on < bcount)
				n = MIN((unsigned)(bcount - on), uio->uio_resid);
			break;
		case VLNK:
			NFSINCRGLOBAL(newnfsstats.biocache_readlinks);
			bp = nfs_getcacheblk(vp, (daddr_t)0, NFS_MAXPATHLEN, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					bp->b_ioflags |= BIO_ERROR;
					brelse(bp);
					return (error);
				}
			}
			n = MIN(uio->uio_resid, NFS_MAXPATHLEN - bp->b_resid);
			on = 0;
			break;
		case VDIR:
			NFSINCRGLOBAL(newnfsstats.biocache_readdirs);
			if (np->n_direofoffset
			    && uio->uio_offset >= np->n_direofoffset) {
				return (0);
			}
			lbn = (uoff_t)uio->uio_offset / NFS_DIRBLKSIZ;
			on = uio->uio_offset & (NFS_DIRBLKSIZ - 1);
			bp = nfs_getcacheblk(vp, lbn, NFS_DIRBLKSIZ, td);
			if (!bp) {
				error = newnfs_sigintr(nmp, td);
				return (error ? error : EINTR);
			}
			if ((bp->b_flags & B_CACHE) == 0) {
				bp->b_iocmd = BIO_READ;
				vfs_busy_pages(bp, 0);
				error = ncl_doio(vp, bp, cred, td, 0);
				if (error) {
					brelse(bp);
				}
				while (error == NFSERR_BAD_COOKIE) {
					ncl_invaldir(vp);
					error = ncl_vinvalbuf(vp, 0, td, 1);
					/*
					 * Yuck! The directory has been modified on the
					 * server.  The only way to get the block is by
					 * reading from the beginning to get all the
					 * offset cookies.
					 *
					 * Leave the last bp intact unless there is an error.
					 * Loop back up to the while if the error is another
					 * NFSERR_BAD_COOKIE (double yuck!).
					 */
					for (i = 0; i <= lbn && !error; i++) {
						if (np->n_direofoffset
						    && (i * NFS_DIRBLKSIZ) >= np->n_direofoffset)
							return (0);
						bp = nfs_getcacheblk(vp, i, NFS_DIRBLKSIZ, td);
						if (!bp) {
							error = newnfs_sigintr(nmp, td);
							return (error ? error : EINTR);
						}
						if ((bp->b_flags & B_CACHE) == 0) {
							bp->b_iocmd = BIO_READ;
							vfs_busy_pages(bp, 0);
							error = ncl_doio(vp, bp, cred, td, 0);
							/*
							 * no error + B_INVAL == directory EOF,
							 * use the block.
							 */
							if (error == 0 && (bp->b_flags & B_INVAL))
								break;
						}
						/*
						 * An error will throw away the block and the
						 * for loop will break out.  If no error and this
						 * is not the block we want, we throw away the
						 * block and go for the next one via the for loop.
						 */
						if (error || i < lbn)
							brelse(bp);
					}
				}
				/*
				 * The above while is repeated if we hit another cookie
				 * error.  If we hit an error and it wasn't a cookie error,
				 * we give up.
				 */
				if (error)
					return (error);
			}

			/*
			 * If not eof and read aheads are enabled, start one.
			 * (You need the current block first, so that you have the
			 * directory offset cookie of the next block.)
			 */
			if (nmp->nm_readahead > 0 &&
			    (bp->b_flags & B_INVAL) == 0 &&
			    (np->n_direofoffset == 0 ||
			    (lbn + 1) * NFS_DIRBLKSIZ < np->n_direofoffset) &&
			    incore(&vp->v_bufobj, lbn + 1) == NULL) {
				rabp = nfs_getcacheblk(vp, lbn + 1, NFS_DIRBLKSIZ, td);
				if (rabp) {
					if ((rabp->b_flags & (B_CACHE|B_DELWRI)) == 0) {
						rabp->b_flags |= B_ASYNC;
						rabp->b_iocmd = BIO_READ;
						vfs_busy_pages(rabp, 0);
						if (ncl_asyncio(nmp, rabp, cred, td)) {
							rabp->b_flags |= B_INVAL;
							rabp->b_ioflags |= BIO_ERROR;
							vfs_unbusy_pages(rabp);
							brelse(rabp);
						}
					} else {
						brelse(rabp);
					}
				}
			}
			/*
			 * Unlike VREG files, whose buffer size ( bp->b_bcount ) is
			 * chopped for the EOF condition, we cannot tell how large
			 * NFS directories are going to be until we hit EOF.  So
			 * an NFS directory buffer is *not* chopped to its EOF.  Now,
			 * it just so happens that b_resid will effectively chop it
			 * to EOF.  *BUT* this information is lost if the buffer goes
			 * away and is reconstituted into a B_CACHE state ( due to
			 * being VMIO ) later.  So we keep track of the directory eof
			 * in np->n_direofoffset and chop it off as an extra step
			 * right here.
			 */
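			/*
			 * Illustrative note (editor's addition), assuming
			 * the usual 8 KB NFS_DIRBLKSIZ: if the final READDIR
			 * reply filled only 5 KB of this block, b_resid is
			 * 3072; with the caller already 1024 bytes into the
			 * block (on == 1024), the lmin() below yields at
			 * most 8192 - 3072 - 1024 == 4096 bytes, so the copy
			 * stops at the real end of the directory data rather
			 * than at the end of the buffer.
			 */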
			n = lmin(uio->uio_resid, NFS_DIRBLKSIZ - bp->b_resid - on);
			if (np->n_direofoffset && n > np->n_direofoffset - uio->uio_offset)
				n = np->n_direofoffset - uio->uio_offset;
			break;
		default:
			ncl_printf(" ncl_bioread: type %x unexpected\n", vp->v_type);
			bp = NULL;
			break;
		}

		if (n > 0) {
			error = vn_io_fault_uiomove(bp->b_data + on, (int)n, uio);
		}
		if (vp->v_type == VLNK)
			n = 0;
		if (bp != NULL)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);
	return (error);
}
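
/*
 * Illustrative note (editor's addition): the VREG case in ncl_bioread()
 * above is driven by simple block arithmetic.  Assuming a 32 KB biosize,
 * a read at offset 40000 gives
 *
 *	lbn = 40000 / 32768 = 1
 *	on  = 40000 - (1 * 32768) = 7232
 *
 * so the copy comes out of cache block 1 starting 7232 bytes in, and n
 * is capped at MIN(bcount - on, uio_resid) = MIN(25536, uio_resid) for
 * a full-sized block.
 */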

/*
 * The NFS write path cannot handle iovecs with len > 1. So we need to
 * break up iovecs accordingly (restricting them to wsize).
 * For the SYNC case, we can do this with 1 copy (user buffer -> mbuf).
 * For the ASYNC case, 2 copies are needed. The first a copy from the
 * user buffer to a staging buffer and then a second copy from the staging
 * buffer to mbufs. This can be optimized by copying from the user buffer
 * directly into mbufs and passing the chain down, but that requires a
 * fair amount of re-working of the relevant codepaths (and can be done
 * later).
 */
static int
nfs_directio_write(struct vnode *vp, struct uio *uiop, struct ucred *cred,
    int ioflag)
{
	int error;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	struct thread *td = uiop->uio_td;
	int size;
	int wsize;

	mtx_lock(&nmp->nm_mtx);
	wsize = nmp->nm_wsize;
	mtx_unlock(&nmp->nm_mtx);
	if (ioflag & IO_SYNC) {
		int iomode, must_commit;
		struct uio uio;
		struct iovec iov;
do_sync:
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			iov.iov_base = uiop->uio_iov->iov_base;
			iov.iov_len = size;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = uiop->uio_offset;
			uio.uio_resid = size;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw = UIO_WRITE;
			uio.uio_td = td;
			iomode = NFSWRITE_FILESYNC;
			error = ncl_writerpc(vp, &uio, cred, &iomode,
			    &must_commit, 0);
			KASSERT((must_commit == 0),
			    ("ncl_directio_write: Did not commit write"));
			if (error)
				return (error);
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	} else {
		struct uio *t_uio;
		struct iovec *t_iov;
		struct buf *bp;

		/*
		 * Break up the write into blocksize chunks and hand these
		 * over to nfsiod's for write back.
		 * Unfortunately, this incurs a copy of the data, since the
		 * user could modify the buffer before the write is
		 * initiated.
		 *
		 * The obvious optimization here is that one of the 2 copies
		 * in the async write path can be eliminated by copying the
		 * data here directly into mbufs and passing the mbuf chain
		 * down.  But that will require a fair amount of re-working
		 * of the code and can be done if there's enough interest
		 * in NFS directio access.
		 */
		while (uiop->uio_resid > 0) {
			size = MIN(uiop->uio_resid, wsize);
			size = MIN(uiop->uio_iov->iov_len, size);
			bp = getpbuf(&ncl_pbuf_freecnt);
			t_uio = malloc(sizeof(struct uio), M_NFSDIRECTIO, M_WAITOK);
			t_iov = malloc(sizeof(struct iovec), M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_base = malloc(size, M_NFSDIRECTIO, M_WAITOK);
			t_iov->iov_len = size;
			t_uio->uio_iov = t_iov;
			t_uio->uio_iovcnt = 1;
			t_uio->uio_offset = uiop->uio_offset;
			t_uio->uio_resid = size;
			t_uio->uio_segflg = UIO_SYSSPACE;
			t_uio->uio_rw = UIO_WRITE;
			t_uio->uio_td = td;
			KASSERT(uiop->uio_segflg == UIO_USERSPACE ||
			    uiop->uio_segflg == UIO_SYSSPACE,
			    ("nfs_directio_write: Bad uio_segflg"));
			if (uiop->uio_segflg == UIO_USERSPACE) {
				error = copyin(uiop->uio_iov->iov_base,
				    t_iov->iov_base, size);
				if (error != 0)
					goto err_free;
			} else
				/*
				 * UIO_SYSSPACE may never happen, but handle
				 * it just in case it does.
				 */
				bcopy(uiop->uio_iov->iov_base, t_iov->iov_base,
				    size);
			bp->b_flags |= B_DIRECT;
			bp->b_iocmd = BIO_WRITE;
			if (cred != NOCRED) {
				crhold(cred);
				bp->b_wcred = cred;
			} else
				bp->b_wcred = NOCRED;
			bp->b_caller1 = (void *)t_uio;
			bp->b_vp = vp;
			error = ncl_asyncio(nmp, bp, NOCRED, td);
err_free:
			if (error) {
				free(t_iov->iov_base, M_NFSDIRECTIO);
				free(t_iov, M_NFSDIRECTIO);
				free(t_uio, M_NFSDIRECTIO);
				bp->b_vp = NULL;
				relpbuf(bp, &ncl_pbuf_freecnt);
				if (error == EINTR)
					return (error);
				goto do_sync;
			}
			uiop->uio_offset += size;
			uiop->uio_resid -= size;
			if (uiop->uio_iov->iov_len <= size) {
				uiop->uio_iovcnt--;
				uiop->uio_iov++;
			} else {
				uiop->uio_iov->iov_base =
				    (char *)uiop->uio_iov->iov_base + size;
				uiop->uio_iov->iov_len -= size;
			}
		}
	}
	return (0);
}
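
/*
 * Illustrative note (editor's addition): both paths above chop the
 * request at wsize and iovec boundaries.  Assuming wsize == 32768, a
 * single-iovec 100000-byte write goes out as chunks of 32768, 32768,
 * 32768 and 1696 bytes: each chunk is a separate FILESYNC RPC on the
 * sync path, or a separate staging buffer handed to an nfsiod on the
 * async path (which falls back to the do_sync loop when ncl_asyncio()
 * fails with anything other than EINTR).
 */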

/*
 * Vnode op for write using bio
 */
int
ncl_write(struct vop_write_args *ap)
{
	int biosize;
	struct uio *uio = ap->a_uio;
	struct thread *td = uio->uio_td;
	struct vnode *vp = ap->a_vp;
	struct nfsnode *np = VTONFS(vp);
	struct ucred *cred = ap->a_cred;
	int ioflag = ap->a_ioflag;
	struct buf *bp;
	struct vattr vattr;
	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
	daddr_t lbn;
	int bcount;
	int bp_cached, n, on, error = 0, error1;
	size_t orig_resid, local_resid;
	off_t orig_size, tmp_off;

	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("ncl_write proc"));
	if (vp->v_type != VREG)
		return (EIO);
	mtx_lock(&np->n_mtx);
	if (np->n_flag & NWRITEERR) {
		np->n_flag &= ~NWRITEERR;
		mtx_unlock(&np->n_mtx);
		return (np->n_error);
	} else
		mtx_unlock(&np->n_mtx);
	mtx_lock(&nmp->nm_mtx);
	if ((nmp->nm_flag & NFSMNT_NFSV3) != 0 &&
	    (nmp->nm_state & NFSSTA_GOTFSINFO) == 0) {
		mtx_unlock(&nmp->nm_mtx);
		(void)ncl_fsinfo(nmp, vp, cred, td);
		mtx_lock(&nmp->nm_mtx);
	}
	if (nmp->nm_wsize == 0)
		(void) newnfs_iosize(nmp);
	mtx_unlock(&nmp->nm_mtx);

	/*
	 * Synchronously flush pending buffers if we are in synchronous
	 * mode or if we are appending.
	 */
	if (ioflag & (IO_APPEND | IO_SYNC)) {
		mtx_lock(&np->n_mtx);
		if (np->n_flag & NMODIFIED) {
			mtx_unlock(&np->n_mtx);
#ifdef notyet /* Needs matching nonblock semantics elsewhere, too. */
			/*
			 * Require non-blocking, synchronous writes to
			 * dirty files to inform the program it needs
			 * to fsync(2) explicitly.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
#endif
flush_and_restart:
			np->n_attrstamp = 0;
			KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
			error = ncl_vinvalbuf(vp, V_SAVE, td, 1);
			if (error)
				return (error);
		} else
			mtx_unlock(&np->n_mtx);
	}

	orig_resid = uio->uio_resid;
	mtx_lock(&np->n_mtx);
	orig_size = np->n_size;
	mtx_unlock(&np->n_mtx);

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 */
	if (ioflag & IO_APPEND) {
		np->n_attrstamp = 0;
		KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
		error = VOP_GETATTR(vp, &vattr, cred);
		if (error)
			return (error);
		mtx_lock(&np->n_mtx);
		uio->uio_offset = np->n_size;
		mtx_unlock(&np->n_mtx);
	}

	if (uio->uio_offset < 0)
		return (EINVAL);
	tmp_off = uio->uio_offset + uio->uio_resid;
	if (tmp_off > nmp->nm_maxfilesize || tmp_off < uio->uio_offset)
		return (EFBIG);
	if (uio->uio_resid == 0)
		return (0);

	if (newnfs_directio_enable && (ioflag & IO_DIRECT) && vp->v_type == VREG)
		return nfs_directio_write(vp, uio, cred, ioflag);

	/*
	 * Maybe this should be above the vnode op call, but so long as
	 * file servers have no limits, I don't think it matters.
	 */
	if (vn_rlimit_fsize(vp, uio, td))
		return (EFBIG);

	biosize = vp->v_bufobj.bo_bsize;
	/*
	 * Find all of this file's B_NEEDCOMMIT buffers.  If our writes
	 * would exceed the local maximum per-file write commit size when
	 * combined with those, we must decide whether to flush,
	 * go synchronous, or return error.  We don't bother checking
	 * IO_UNIT -- we just make all writes atomic anyway, as there's
	 * no point optimizing for something that really won't ever happen.
	 */
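	/*
	 * Illustrative note (editor's addition): a worked example of the
	 * accounting below, assuming nm_wcommitsize == 1 MB.  If the file
	 * already has 900 KB of B_NEEDCOMMIT buffers and this write adds
	 * uio_resid == 200 KB, wouldcommit reaches 1.1 MB and the code
	 * takes flush_and_restart rather than let uncommitted data pin
	 * the buffer cache.  A single write larger than 1 MB is instead
	 * converted to IO_SYNC (or fails with EAGAIN under IO_NDELAY).
	 */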
	if (!(ioflag & IO_SYNC)) {
		int nflag;

		mtx_lock(&np->n_mtx);
		nflag = np->n_flag;
		mtx_unlock(&np->n_mtx);
		int needrestart = 0;
		if (nmp->nm_wcommitsize < uio->uio_resid) {
			/*
			 * If this request could not possibly be completed
			 * without exceeding the maximum outstanding write
			 * commit size, see if we can convert it into a
			 * synchronous write operation.
			 */
			if (ioflag & IO_NDELAY)
				return (EAGAIN);
			ioflag |= IO_SYNC;
			if (nflag & NMODIFIED)
				needrestart = 1;
		} else if (nflag & NMODIFIED) {
			int wouldcommit = 0;
			BO_LOCK(&vp->v_bufobj);
			if (vp->v_bufobj.bo_dirty.bv_cnt != 0) {
				TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd,
				    b_bobufs) {
					if (bp->b_flags & B_NEEDCOMMIT)
						wouldcommit += bp->b_bcount;
				}
			}
			BO_UNLOCK(&vp->v_bufobj);
			/*
			 * Since we're not operating synchronously and
			 * bypassing the buffer cache, we are in a commit
			 * and holding all of these buffers whether
			 * transmitted or not.  If not limited, this
			 * will lead to the buffer cache deadlocking,
			 * as no one else can flush our uncommitted buffers.
			 */
			wouldcommit += uio->uio_resid;
			/*
			 * If we would initially exceed the maximum
			 * outstanding write commit size, flush and restart.
			 */
			if (wouldcommit > nmp->nm_wcommitsize)
				needrestart = 1;
		}
		if (needrestart)
			goto flush_and_restart;
	}

	do {
		NFSINCRGLOBAL(newnfsstats.biocache_writes);
		lbn = uio->uio_offset / biosize;
		on = uio->uio_offset - (lbn * biosize);
		n = MIN((unsigned)(biosize - on), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.
		 */
		mtx_lock(&np->n_mtx);
		if (uio->uio_offset == np->n_size && n) {
			mtx_unlock(&np->n_mtx);
			/*
			 * Get the buffer (in its pre-append state to maintain
			 * B_CACHE if it was previously set).  Resize the
			 * nfsnode after we have locked the buffer to prevent
			 * readers from reading garbage.
			 */
			bcount = on;
			bp = nfs_getcacheblk(vp, lbn, bcount, td);

			if (bp != NULL) {
				long save;

				mtx_lock(&np->n_mtx);
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
				mtx_unlock(&np->n_mtx);

				save = bp->b_flags & B_CACHE;
				bcount += n;
				allocbuf(bp, bcount);
				bp->b_flags |= save;
			}
		} else {
			/*
			 * Obtain the locked cache block first, and then
			 * adjust the file's size as appropriate.
			 */
			bcount = on + n;
			if ((off_t)lbn * biosize + bcount < np->n_size) {
				if ((off_t)(lbn + 1) * biosize < np->n_size)
					bcount = biosize;
				else
					bcount = np->n_size - (off_t)lbn * biosize;
			}
			mtx_unlock(&np->n_mtx);
			bp = nfs_getcacheblk(vp, lbn, bcount, td);
			mtx_lock(&np->n_mtx);
			if (uio->uio_offset + n > np->n_size) {
				np->n_size = uio->uio_offset + n;
				np->n_flag |= NMODIFIED;
				vnode_pager_setsize(vp, np->n_size);
			}
			mtx_unlock(&np->n_mtx);
		}

		if (!bp) {
			error = newnfs_sigintr(nmp, td);
			if (!error)
				error = EINTR;
			break;
		}

		/*
		 * Issue a READ if B_CACHE is not set.  In special-append
		 * mode, B_CACHE is based on the buffer prior to the write
		 * op and is typically set, avoiding the read.  If a read
		 * is required in special append mode, the server will
		 * probably send us a short-read since we extended the file
		 * on our end, resulting in b_resid == 0 and, thusly,
		 * B_CACHE getting set.
		 *
		 * We can also avoid issuing the read if the write covers
		 * the entire buffer.  We have to make sure the buffer state
		 * is reasonable in this case since we will not be initiating
		 * I/O.  See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * B_CACHE may also be set due to the buffer being cached
		 * normally.
		 */

		bp_cached = 1;
		if (on == 0 && n == bcount) {
			if ((bp->b_flags & B_CACHE) == 0)
				bp_cached = 0;
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~B_INVAL;
			bp->b_ioflags &= ~BIO_ERROR;
		}

		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_iocmd = BIO_READ;
			vfs_busy_pages(bp, 0);
			error = ncl_doio(vp, bp, cred, td, 0);
			if (error) {
				brelse(bp);
				break;
			}
		}
		if (bp->b_wcred == NOCRED)
			bp->b_wcred = crhold(cred);
		mtx_lock(&np->n_mtx);
		np->n_flag |= NMODIFIED;
		mtx_unlock(&np->n_mtx);

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur but there is an append race where it
		 * might occur XXX, so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */

		if (bp->b_dirtyend > bcount) {
			ncl_printf("NFS append race @%lx:%d\n",
			    (long)bp->b_blkno * DEV_BSIZE,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */
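		/*
		 * Illustrative note (editor's addition): with a dirty
		 * region of [0, 512) already recorded, a new write at
		 * on == 1024, n == 512 has on > b_dirtyend, so the test
		 * below pushes the old region out with bwrite() and
		 * retries from "again"; a write at on == 512 is
		 * contiguous and simply grows the region to
		 * [0, on + n) == [0, 1024) in the dirtyoff/dirtyend
		 * update further down.
		 */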

		if (bp->b_dirtyend > 0 &&
		    (on > bp->b_dirtyend || (on + n) < bp->b_dirtyoff)) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		local_resid = uio->uio_resid;
		error = vn_io_fault_uiomove((char *)bp->b_data + on, n, uio);

		if (error != 0 && !bp_cached) {
			/*
			 * This block has no other content than what
			 * possibly was written by the faulty uiomove.
			 * Release it, forgetting the data pages, to
			 * prevent the leak of uninitialized data to
			 * usermode.
			 */
			bp->b_ioflags |= BIO_ERROR;
			brelse(bp);
			uio->uio_offset -= local_resid - uio->uio_resid;
			uio->uio_resid = local_resid;
			break;
		}

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		/*
		 * Get the partial update on the progress made from
		 * uiomove, if an error occurred.
		 */
		if (error != 0)
			n = local_resid - uio->uio_resid;

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 */
		if (n > 0) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = on;
				bp->b_dirtyend = on + n;
			}
			vfs_bio_set_valid(bp, on, n);
		}

		/*
		 * If IO_SYNC do bwrite().
		 *
		 * IO_INVAL appears to be unused.  The idea appears to be
		 * to turn off caching in this case.  Very odd.  XXX
		 */
		if ((ioflag & IO_SYNC)) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error1 = bwrite(bp);
			if (error1 != 0) {
				if (error == 0)
					error = error1;
				break;
			}
		} else if ((n + on) == biosize) {
			bp->b_flags |= B_ASYNC;
			(void) ncl_writebp(bp, 0, NULL);
		} else {
			bdwrite(bp);
		}

		if (error != 0)
			break;
	} while (uio->uio_resid > 0 && n > 0);

	if (error != 0) {
		if (ioflag & IO_UNIT) {
			VATTR_NULL(&vattr);
			vattr.va_size = orig_size;
			/* IO_SYNC is handled implicitly */
			(void)VOP_SETATTR(vp, &vattr, cred);
			uio->uio_offset -= orig_resid - uio->uio_resid;
			uio->uio_resid = orig_resid;
		}
	}

	return (error);
}
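
/*
 * Illustrative note (editor's addition): the tail of the loop in
 * ncl_write() picks one of three strategies per buffer: bwrite() for
 * IO_SYNC (the caller sleeps until the write RPC completes), an async
 * ncl_writebp() when the write ends exactly on a biosize boundary (the
 * buffer is complete, so start it immediately), and bdwrite() for a
 * partial buffer, on the bet that the application will fill in the rest
 * before the buffer is pushed out.
 */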

/*
 * Get an nfs cache block.
 *
 * Allocate a new one if the block isn't currently in the cache
 * and return the block marked busy. If the calling process is
 * interrupted by a signal for an interruptible mount point, return
 * NULL.
 *
 * The caller must carefully deal with the possible B_INVAL state of
 * the buffer.  ncl_doio() clears B_INVAL (and ncl_asyncio() clears it
 * indirectly), so synchronous reads can be issued without worrying about
 * the B_INVAL state.  We have to be a little more careful when dealing
 * with writes (see comments in nfs_write()) when extending a file past
 * its EOF.
 */
static struct buf *
nfs_getcacheblk(struct vnode *vp, daddr_t bn, int size, struct thread *td)
{
	struct buf *bp;
	struct mount *mp;
	struct nfsmount *nmp;

	mp = vp->v_mount;
	nmp = VFSTONFS(mp);

	if (nmp->nm_flag & NFSMNT_INT) {
		sigset_t oldset;

		newnfs_set_sigmask(td, &oldset);
		bp = getblk(vp, bn, size, PCATCH, 0, 0);
		newnfs_restore_sigmask(td, &oldset);
		while (bp == NULL) {
			if (newnfs_sigintr(nmp, td))
				return (NULL);
			bp = getblk(vp, bn, size, 0, 2 * hz, 0);
		}
	} else {
		bp = getblk(vp, bn, size, 0, 0, 0);
	}

	if (vp->v_type == VREG)
		bp->b_blkno = bn * (vp->v_bufobj.bo_bsize / DEV_BSIZE);
	return (bp);
}
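
/*
 * Illustrative note (editor's addition): the b_blkno conversion above
 * maps logical block numbers into DEV_BSIZE units.  Assuming
 * bo_bsize == 32768 and DEV_BSIZE == 512, block bn == 3 gets
 * b_blkno = 3 * (32768 / 512) = 192, i.e. byte offset 98304.
 */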
1317 1.1 dholland
1318 1.1 dholland /*
1319 1.1 dholland * Flush and invalidate all dirty buffers. If another process is already
1320 1.1 dholland * doing the flush, just wait for completion.
1321 1.1 dholland */
1322 1.1 dholland int
1323 1.1 dholland ncl_vinvalbuf(struct vnode *vp, int flags, struct thread *td, int intrflg)
1324 1.1 dholland {
1325 1.1 dholland struct nfsnode *np = VTONFS(vp);
1326 1.1 dholland struct nfsmount *nmp = VFSTONFS(vp->v_mount);
1327 1.1 dholland int error = 0, slpflag, slptimeo;
1328 1.1 dholland int old_lock = 0;
1329 1.1 dholland
1330 1.1 dholland ASSERT_VOP_LOCKED(vp, "ncl_vinvalbuf");
1331 1.1 dholland
1332 1.1 dholland if ((nmp->nm_flag & NFSMNT_INT) == 0)
1333 1.1 dholland intrflg = 0;
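	/*
	 * During a forced unmount the server may be unreachable, so
	 * force interruptible sleeps to keep the unmount from hanging.
	 */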
1334 1.1 dholland if ((nmp->nm_mountp->mnt_kern_flag & MNTK_UNMOUNTF))
1335 1.1 dholland intrflg = 1;
1336 1.1 dholland if (intrflg) {
1337 1.1 dholland slpflag = PCATCH;
1338 1.1 dholland slptimeo = 2 * hz;
1339 1.1 dholland } else {
1340 1.1 dholland slpflag = 0;
1341 1.1 dholland slptimeo = 0;
1342 1.1 dholland }
1343 1.1 dholland
1344 1.1 dholland old_lock = ncl_upgrade_vnlock(vp);
1345 1.1 dholland if (vp->v_iflag & VI_DOOMED) {
1346 1.1 dholland /*
1347 1.1 dholland * Since vgonel() uses the generic vinvalbuf() to flush
1348 1.1 dholland * dirty buffers and it does not call this function, it
1349 1.1 dholland * is safe to just return OK when VI_DOOMED is set.
1350 1.1 dholland */
1351 1.1 dholland ncl_downgrade_vnlock(vp, old_lock);
1352 1.1 dholland return (0);
1353 1.1 dholland }
1354 1.1 dholland
1355 1.1 dholland /*
1356 1.1 dholland * Now, flush as required.
1357 1.1 dholland */
1358 1.1 dholland if ((flags & V_SAVE) && (vp->v_bufobj.bo_object != NULL)) {
1359 1.1 dholland VM_OBJECT_WLOCK(vp->v_bufobj.bo_object);
1360 1.1 dholland vm_object_page_clean(vp->v_bufobj.bo_object, 0, 0, OBJPC_SYNC);
1361 1.1 dholland VM_OBJECT_WUNLOCK(vp->v_bufobj.bo_object);
1362 1.1 dholland /*
1363 1.1 dholland * If the page clean was interrupted, fail the invalidation.
1364 1.1 dholland 		 * Otherwise we run the risk of losing dirty pages in the
1365 1.1 dholland * vinvalbuf() call below.
1366 1.1 dholland */
1367 1.1 dholland if (intrflg && (error = newnfs_sigintr(nmp, td)))
1368 1.1 dholland goto out;
1369 1.1 dholland }
1370 1.1 dholland
1371 1.1 dholland error = vinvalbuf(vp, flags, slpflag, 0);
1372 1.1 dholland while (error) {
1373 1.1 dholland if (intrflg && (error = newnfs_sigintr(nmp, td)))
1374 1.1 dholland goto out;
1375 1.1 dholland error = vinvalbuf(vp, flags, 0, slptimeo);
1376 1.1 dholland }
1377 1.1 dholland if (NFSHASPNFS(nmp)) {
1378 1.1 dholland nfscl_layoutcommit(vp, td);
1379 1.1 dholland /*
1380 1.1 dholland * Invalidate the attribute cache, since writes to a DS
1381 1.1 dholland * won't update the size attribute.
1382 1.1 dholland */
1383 1.1 dholland mtx_lock(&np->n_mtx);
1384 1.1 dholland np->n_attrstamp = 0;
1385 1.1 dholland } else
1386 1.1 dholland mtx_lock(&np->n_mtx);
1387 1.1 dholland if (np->n_directio_asyncwr == 0)
1388 1.1 dholland np->n_flag &= ~NMODIFIED;
1389 1.1 dholland mtx_unlock(&np->n_mtx);
1390 1.1 dholland out:
1391 1.1 dholland ncl_downgrade_vnlock(vp, old_lock);
1392 1.1 dholland 	return (error);
1393 1.1 dholland }
1394 1.1 dholland
1395 1.1 dholland /*
1396 1.1 dholland * Initiate asynchronous I/O. Return an error if no nfsiods are available.
1397 1.1 dholland * This is mainly to avoid queueing async I/O requests when the nfsiods
1398 1.1 dholland * are all hung on a dead server.
1399 1.1 dholland *
1400 1.1 dholland * Note: ncl_asyncio() does not clear (BIO_ERROR|B_INVAL) but when the bp
1401 1.1 dholland * is eventually dequeued by the async daemon, ncl_doio() *will*.
1402 1.1 dholland */
1403 1.1 dholland int
1404 1.1 dholland ncl_asyncio(struct nfsmount *nmp, struct buf *bp, struct ucred *cred, struct thread *td)
1405 1.1 dholland {
1406 1.1 dholland int iod;
1407 1.1 dholland int gotiod;
1408 1.1 dholland int slpflag = 0;
1409 1.1 dholland int slptimeo = 0;
1410 1.1 dholland int error, error2;
1411 1.1 dholland
1412 1.1 dholland /*
1413 1.1 dholland 	 * Commits are usually short and sweet, so let's save some CPU and
1414 1.1 dholland 	 * leave the async daemons for more important RPCs (such as reads
1415 1.1 dholland 	 * and writes).
1416 1.1 dholland *
1417 1.1 dholland * Readdirplus RPCs do vget()s to acquire the vnodes for entries
1418 1.1 dholland * in the directory in order to update attributes. This can deadlock
1419 1.1 dholland * with another thread that is waiting for async I/O to be done by
1420 1.1 dholland * an nfsiod thread while holding a lock on one of these vnodes.
1421 1.1 dholland * To avoid this deadlock, don't allow the async nfsiod threads to
1422 1.1 dholland * perform Readdirplus RPCs.
1423 1.1 dholland */
1424 1.1 dholland mtx_lock(&ncl_iod_mutex);
1425 1.1 dholland if ((bp->b_iocmd == BIO_WRITE && (bp->b_flags & B_NEEDCOMMIT) &&
1426 1.1 dholland (nmp->nm_bufqiods > ncl_numasync / 2)) ||
1427 1.1 dholland (bp->b_vp->v_type == VDIR && (nmp->nm_flag & NFSMNT_RDIRPLUS))) {
1428 1.1 dholland mtx_unlock(&ncl_iod_mutex);
1429 1.1 dholland 		return (EIO);
1430 1.1 dholland }
1431 1.1 dholland again:
1432 1.1 dholland if (nmp->nm_flag & NFSMNT_INT)
1433 1.1 dholland slpflag = PCATCH;
1434 1.1 dholland gotiod = FALSE;
1435 1.1 dholland
1436 1.1 dholland /*
1437 1.1 dholland * Find a free iod to process this request.
1438 1.1 dholland */
1439 1.1 dholland for (iod = 0; iod < ncl_numasync; iod++)
1440 1.1 dholland if (ncl_iodwant[iod] == NFSIOD_AVAILABLE) {
1441 1.1 dholland gotiod = TRUE;
1442 1.1 dholland break;
1443 1.1 dholland }
1444 1.1 dholland
1445 1.1 dholland /*
1446 1.1 dholland * Try to create one if none are free.
1447 1.1 dholland */
1448 1.1 dholland if (!gotiod)
1449 1.1 dholland ncl_nfsiodnew();
1450 1.1 dholland else {
1451 1.1 dholland /*
1452 1.1 dholland * Found one, so wake it up and tell it which
1453 1.1 dholland * mount to process.
1454 1.1 dholland */
1455 1.1 dholland NFS_DPF(ASYNCIO, ("ncl_asyncio: waking iod %d for mount %p\n",
1456 1.1 dholland iod, nmp));
1457 1.1 dholland ncl_iodwant[iod] = NFSIOD_NOT_AVAILABLE;
1458 1.1 dholland ncl_iodmount[iod] = nmp;
1459 1.1 dholland nmp->nm_bufqiods++;
1460 1.1 dholland wakeup(&ncl_iodwant[iod]);
1461 1.1 dholland }
1462 1.1 dholland
1463 1.1 dholland /*
1464 1.1 dholland * If none are free, we may already have an iod working on this mount
1465 1.1 dholland * point. If so, it will process our request.
1466 1.1 dholland */
1467 1.1 dholland if (!gotiod) {
1468 1.1 dholland if (nmp->nm_bufqiods > 0) {
1469 1.1 dholland NFS_DPF(ASYNCIO,
1470 1.1 dholland ("ncl_asyncio: %d iods are already processing mount %p\n",
1471 1.1 dholland nmp->nm_bufqiods, nmp));
1472 1.1 dholland gotiod = TRUE;
1473 1.1 dholland }
1474 1.1 dholland }
1475 1.1 dholland
1476 1.1 dholland /*
1477 1.1 dholland * If we have an iod which can process the request, then queue
1478 1.1 dholland * the buffer.
1479 1.1 dholland */
1480 1.1 dholland if (gotiod) {
1481 1.1 dholland /*
1482 1.1 dholland * Ensure that the queue never grows too large. We still want
1483 1.1 dholland 		 * to asynchronize, so we block rather than return EIO.
1484 1.1 dholland */
1485 1.1 dholland while (nmp->nm_bufqlen >= 2*ncl_numasync) {
1486 1.1 dholland NFS_DPF(ASYNCIO,
1487 1.1 dholland ("ncl_asyncio: waiting for mount %p queue to drain\n", nmp));
1488 1.1 dholland nmp->nm_bufqwant = TRUE;
1489 1.1 dholland error = newnfs_msleep(td, &nmp->nm_bufq,
1490 1.1 dholland &ncl_iod_mutex, slpflag | PRIBIO, "nfsaio",
1491 1.1 dholland slptimeo);
1492 1.1 dholland if (error) {
1493 1.1 dholland error2 = newnfs_sigintr(nmp, td);
1494 1.1 dholland if (error2) {
1495 1.1 dholland mtx_unlock(&ncl_iod_mutex);
1496 1.1 dholland return (error2);
1497 1.1 dholland }
1498 1.1 dholland if (slpflag == PCATCH) {
1499 1.1 dholland slpflag = 0;
1500 1.1 dholland slptimeo = 2 * hz;
1501 1.1 dholland }
1502 1.1 dholland }
1503 1.1 dholland /*
1504 1.1 dholland * We might have lost our iod while sleeping,
1505 1.1 dholland 			 * so check and loop if necessary.
1506 1.1 dholland */
1507 1.1 dholland goto again;
1508 1.1 dholland }
1509 1.1 dholland
1510 1.1 dholland /* We might have lost our nfsiod */
1511 1.1 dholland if (nmp->nm_bufqiods == 0) {
1512 1.1 dholland NFS_DPF(ASYNCIO,
1513 1.1 dholland ("ncl_asyncio: no iods after mount %p queue was drained, looping\n", nmp));
1514 1.1 dholland goto again;
1515 1.1 dholland }
1516 1.1 dholland
1517 1.1 dholland if (bp->b_iocmd == BIO_READ) {
1518 1.1 dholland if (bp->b_rcred == NOCRED && cred != NOCRED)
1519 1.1 dholland bp->b_rcred = crhold(cred);
1520 1.1 dholland } else {
1521 1.1 dholland if (bp->b_wcred == NOCRED && cred != NOCRED)
1522 1.1 dholland bp->b_wcred = crhold(cred);
1523 1.1 dholland }
1524 1.1 dholland
1525 1.1 dholland if (bp->b_flags & B_REMFREE)
1526 1.1 dholland bremfreef(bp);
1527 1.1 dholland BUF_KERNPROC(bp);
1528 1.1 dholland TAILQ_INSERT_TAIL(&nmp->nm_bufq, bp, b_freelist);
1529 1.1 dholland nmp->nm_bufqlen++;
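		/*
		 * For an asynchronous direct write, record the pending RPC
		 * on the nfsnode so flush/fsync paths know the file is
		 * still modified; ncl_doio_directwrite() drops the count
		 * again when the write completes.
		 */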
1530 1.1 dholland if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1531 1.1 dholland mtx_lock(&(VTONFS(bp->b_vp))->n_mtx);
1532 1.1 dholland VTONFS(bp->b_vp)->n_flag |= NMODIFIED;
1533 1.1 dholland VTONFS(bp->b_vp)->n_directio_asyncwr++;
1534 1.1 dholland mtx_unlock(&(VTONFS(bp->b_vp))->n_mtx);
1535 1.1 dholland }
1536 1.1 dholland mtx_unlock(&ncl_iod_mutex);
1537 1.1 dholland return (0);
1538 1.1 dholland }
1539 1.1 dholland
1540 1.1 dholland mtx_unlock(&ncl_iod_mutex);
1541 1.1 dholland
1542 1.1 dholland /*
1543 1.1 dholland * All the iods are busy on other mounts, so return EIO to
1544 1.1 dholland * force the caller to process the i/o synchronously.
1545 1.1 dholland */
1546 1.1 dholland NFS_DPF(ASYNCIO, ("ncl_asyncio: no iods available, i/o is synchronous\n"));
1547 1.1 dholland return (EIO);
1548 1.1 dholland }
1549 1.1 dholland
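/*
 * Illustrative fallback (a sketch, not code from this file): a
 * strategy-style caller that gets EIO back is expected to perform
 * the I/O synchronously itself, where vp and cr stand for the
 * caller's vnode and credential:
 *
 *	if (ncl_asyncio(nmp, bp, cr, td) == EIO)
 *		(void)ncl_doio(vp, bp, cr, td, 1);
 */
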
1550 1.1 dholland void
1551 1.1 dholland ncl_doio_directwrite(struct buf *bp)
1552 1.1 dholland {
1553 1.1 dholland int iomode, must_commit;
1554 1.1 dholland struct uio *uiop = (struct uio *)bp->b_caller1;
1555 1.1 dholland char *iov_base = uiop->uio_iov->iov_base;
1556 1.1 dholland
1557 1.1 dholland iomode = NFSWRITE_FILESYNC;
1558 1.1 dholland uiop->uio_td = NULL; /* NULL since we're in nfsiod */
1559 1.1 dholland ncl_writerpc(bp->b_vp, uiop, bp->b_wcred, &iomode, &must_commit, 0);
1560 1.1 dholland KASSERT((must_commit == 0), ("ncl_doio_directwrite: Did not commit write"));
1561 1.1 dholland free(iov_base, M_NFSDIRECTIO);
1562 1.1 dholland free(uiop->uio_iov, M_NFSDIRECTIO);
1563 1.1 dholland free(uiop, M_NFSDIRECTIO);
1564 1.1 dholland if ((bp->b_flags & B_DIRECT) && bp->b_iocmd == BIO_WRITE) {
1565 1.1 dholland struct nfsnode *np = VTONFS(bp->b_vp);
1566 1.1 dholland mtx_lock(&np->n_mtx);
1567 1.1 dholland if (NFSHASPNFS(VFSTONFS(vnode_mount(bp->b_vp)))) {
1568 1.1 dholland /*
1569 1.1 dholland * Invalidate the attribute cache, since writes to a DS
1570 1.1 dholland * won't update the size attribute.
1571 1.1 dholland */
1572 1.1 dholland np->n_attrstamp = 0;
1573 1.1 dholland }
1574 1.1 dholland np->n_directio_asyncwr--;
1575 1.1 dholland if (np->n_directio_asyncwr == 0) {
1576 1.1 dholland np->n_flag &= ~NMODIFIED;
1577 1.1 dholland if ((np->n_flag & NFSYNCWAIT)) {
1578 1.1 dholland np->n_flag &= ~NFSYNCWAIT;
1579 1.1 dholland wakeup((caddr_t)&np->n_directio_asyncwr);
1580 1.1 dholland }
1581 1.1 dholland }
1582 1.1 dholland mtx_unlock(&np->n_mtx);
1583 1.1 dholland }
1584 1.1 dholland bp->b_vp = NULL;
1585 1.1 dholland relpbuf(bp, &ncl_pbuf_freecnt);
1586 1.1 dholland }
1587 1.1 dholland
1588 1.1 dholland /*
1589 1.1 dholland * Do an I/O operation to/from a cache block. This may be called
1590 1.1 dholland * synchronously or from an nfsiod.
1591 1.1 dholland */
1592 1.1 dholland int
1593 1.1 dholland ncl_doio(struct vnode *vp, struct buf *bp, struct ucred *cr, struct thread *td,
1594 1.1 dholland int called_from_strategy)
1595 1.1 dholland {
1596 1.1 dholland struct uio *uiop;
1597 1.1 dholland struct nfsnode *np;
1598 1.1 dholland struct nfsmount *nmp;
1599 1.1 dholland int error = 0, iomode, must_commit = 0;
1600 1.1 dholland struct uio uio;
1601 1.1 dholland struct iovec io;
1602 1.1 dholland struct proc *p = td ? td->td_proc : NULL;
1603 1.1 dholland uint8_t iocmd;
1604 1.1 dholland
1605 1.1 dholland np = VTONFS(vp);
1606 1.1 dholland nmp = VFSTONFS(vp->v_mount);
1607 1.1 dholland uiop = &uio;
1608 1.1 dholland uiop->uio_iov = &io;
1609 1.1 dholland uiop->uio_iovcnt = 1;
1610 1.1 dholland uiop->uio_segflg = UIO_SYSSPACE;
1611 1.1 dholland uiop->uio_td = td;
1612 1.1 dholland
1613 1.1 dholland /*
1614 1.1 dholland * clear BIO_ERROR and B_INVAL state prior to initiating the I/O. We
1615 1.1 dholland * do this here so we do not have to do it in all the code that
1616 1.1 dholland * calls us.
1617 1.1 dholland */
1618 1.1 dholland bp->b_flags &= ~B_INVAL;
1619 1.1 dholland bp->b_ioflags &= ~BIO_ERROR;
1620 1.1 dholland
1621 1.1 dholland KASSERT(!(bp->b_flags & B_DONE), ("ncl_doio: bp %p already marked done", bp));
1622 1.1 dholland iocmd = bp->b_iocmd;
1623 1.1 dholland if (iocmd == BIO_READ) {
1624 1.1 dholland io.iov_len = uiop->uio_resid = bp->b_bcount;
1625 1.1 dholland io.iov_base = bp->b_data;
1626 1.1 dholland uiop->uio_rw = UIO_READ;
1627 1.1 dholland
1628 1.1 dholland switch (vp->v_type) {
1629 1.1 dholland case VREG:
1630 1.1 dholland uiop->uio_offset = ((off_t)bp->b_blkno) * DEV_BSIZE;
1631 1.1 dholland NFSINCRGLOBAL(newnfsstats.read_bios);
1632 1.1 dholland error = ncl_readrpc(vp, uiop, cr);
1633 1.1 dholland
1634 1.1 dholland if (!error) {
1635 1.1 dholland if (uiop->uio_resid) {
1636 1.1 dholland /*
1637 1.1 dholland * If we had a short read with no error, we must have
1638 1.1 dholland * hit a file hole. We should zero-fill the remainder.
1639 1.1 dholland * This can also occur if the server hits the file EOF.
1640 1.1 dholland *
1641 1.1 dholland 				 * Holes used to occur here due to pending
1642 1.1 dholland 				 * writes, but that is no longer possible.
1643 1.1 dholland */
1644 1.1 dholland int nread = bp->b_bcount - uiop->uio_resid;
1645 1.1 dholland ssize_t left = uiop->uio_resid;
1646 1.1 dholland
1647 1.1 dholland if (left > 0)
1648 1.1 dholland bzero((char *)bp->b_data + nread, left);
1649 1.1 dholland uiop->uio_resid = 0;
1650 1.1 dholland }
1651 1.1 dholland }
1652 1.1 dholland /* ASSERT_VOP_LOCKED(vp, "ncl_doio"); */
1653 1.1 dholland if (p && (vp->v_vflag & VV_TEXT)) {
1654 1.1 dholland mtx_lock(&np->n_mtx);
1655 1.1 dholland if (NFS_TIMESPEC_COMPARE(&np->n_mtime, &np->n_vattr.na_mtime)) {
1656 1.1 dholland mtx_unlock(&np->n_mtx);
1657 1.1 dholland PROC_LOCK(p);
1658 1.1 dholland killproc(p, "text file modification");
1659 1.1 dholland PROC_UNLOCK(p);
1660 1.1 dholland } else
1661 1.1 dholland mtx_unlock(&np->n_mtx);
1662 1.1 dholland }
1663 1.1 dholland break;
1664 1.1 dholland case VLNK:
1665 1.1 dholland uiop->uio_offset = (off_t)0;
1666 1.1 dholland NFSINCRGLOBAL(newnfsstats.readlink_bios);
1667 1.1 dholland error = ncl_readlinkrpc(vp, uiop, cr);
1668 1.1 dholland break;
1669 1.1 dholland case VDIR:
1670 1.1 dholland NFSINCRGLOBAL(newnfsstats.readdir_bios);
1671 1.1 dholland uiop->uio_offset = ((u_quad_t)bp->b_lblkno) * NFS_DIRBLKSIZ;
1672 1.1 dholland if ((nmp->nm_flag & NFSMNT_RDIRPLUS) != 0) {
1673 1.1 dholland error = ncl_readdirplusrpc(vp, uiop, cr, td);
1674 1.1 dholland if (error == NFSERR_NOTSUPP)
1675 1.1 dholland nmp->nm_flag &= ~NFSMNT_RDIRPLUS;
1676 1.1 dholland }
1677 1.1 dholland if ((nmp->nm_flag & NFSMNT_RDIRPLUS) == 0)
1678 1.1 dholland error = ncl_readdirrpc(vp, uiop, cr, td);
1679 1.1 dholland /*
1680 1.1 dholland * end-of-directory sets B_INVAL but does not generate an
1681 1.1 dholland * error.
1682 1.1 dholland */
1683 1.1 dholland if (error == 0 && uiop->uio_resid == bp->b_bcount)
1684 1.1 dholland bp->b_flags |= B_INVAL;
1685 1.1 dholland break;
1686 1.1 dholland default:
1687 1.1 dholland ncl_printf("ncl_doio: type %x unexpected\n", vp->v_type);
1688 1.1 dholland break;
1689 1.1 dholland 		}
1690 1.1 dholland if (error) {
1691 1.1 dholland bp->b_ioflags |= BIO_ERROR;
1692 1.1 dholland bp->b_error = error;
1693 1.1 dholland }
1694 1.1 dholland } else {
1695 1.1 dholland /*
1696 1.1 dholland * If we only need to commit, try to commit
1697 1.1 dholland */
1698 1.1 dholland if (bp->b_flags & B_NEEDCOMMIT) {
1699 1.1 dholland int retv;
1700 1.1 dholland off_t off;
1701 1.1 dholland
1702 1.1 dholland off = ((u_quad_t)bp->b_blkno) * DEV_BSIZE + bp->b_dirtyoff;
1703 1.1 dholland retv = ncl_commit(vp, off, bp->b_dirtyend-bp->b_dirtyoff,
1704 1.1 dholland bp->b_wcred, td);
1705 1.1 dholland if (retv == 0) {
1706 1.1 dholland bp->b_dirtyoff = bp->b_dirtyend = 0;
1707 1.1 dholland bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1708 1.1 dholland bp->b_resid = 0;
1709 1.1 dholland bufdone(bp);
1710 1.1 dholland return (0);
1711 1.1 dholland }
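			/*
			 * A stale write verifier means the server has
			 * rebooted since the data was written unstable, so
			 * buffers marked B_NEEDCOMMIT on this mount must be
			 * written again rather than merely committed.
			 */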
1712 1.1 dholland if (retv == NFSERR_STALEWRITEVERF) {
1713 1.1 dholland ncl_clearcommit(vp->v_mount);
1714 1.1 dholland }
1715 1.1 dholland }
1716 1.1 dholland
1717 1.1 dholland /*
1718 1.1 dholland 		 * Set up for the actual write.
1719 1.1 dholland */
1720 1.1 dholland mtx_lock(&np->n_mtx);
1721 1.1 dholland if ((off_t)bp->b_blkno * DEV_BSIZE + bp->b_dirtyend > np->n_size)
1722 1.1 dholland bp->b_dirtyend = np->n_size - (off_t)bp->b_blkno * DEV_BSIZE;
1723 1.1 dholland mtx_unlock(&np->n_mtx);
1724 1.1 dholland
1725 1.1 dholland if (bp->b_dirtyend > bp->b_dirtyoff) {
1726 1.1 dholland io.iov_len = uiop->uio_resid = bp->b_dirtyend
1727 1.1 dholland - bp->b_dirtyoff;
1728 1.1 dholland uiop->uio_offset = (off_t)bp->b_blkno * DEV_BSIZE
1729 1.1 dholland + bp->b_dirtyoff;
1730 1.1 dholland io.iov_base = (char *)bp->b_data + bp->b_dirtyoff;
1731 1.1 dholland uiop->uio_rw = UIO_WRITE;
1732 1.1 dholland NFSINCRGLOBAL(newnfsstats.write_bios);
1733 1.1 dholland
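			/*
			 * Only a plain async write (B_ASYNC set and none of
			 * B_NEEDCOMMIT, B_NOCACHE or B_CLUSTER) may go out
			 * unstable, leaving the buffer dirty until a later
			 * commit; everything else is pushed FILESYNC.
			 */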
1734 1.1 dholland if ((bp->b_flags & (B_ASYNC | B_NEEDCOMMIT | B_NOCACHE | B_CLUSTER)) == B_ASYNC)
1735 1.1 dholland iomode = NFSWRITE_UNSTABLE;
1736 1.1 dholland else
1737 1.1 dholland iomode = NFSWRITE_FILESYNC;
1738 1.1 dholland
1739 1.1 dholland error = ncl_writerpc(vp, uiop, cr, &iomode, &must_commit,
1740 1.1 dholland called_from_strategy);
1741 1.1 dholland
1742 1.1 dholland /*
1743 1.1 dholland * When setting B_NEEDCOMMIT also set B_CLUSTEROK to try
1744 1.1 dholland * to cluster the buffers needing commit. This will allow
1745 1.1 dholland * the system to submit a single commit rpc for the whole
1746 1.1 dholland * cluster. We can do this even if the buffer is not 100%
1747 1.1 dholland * dirty (relative to the NFS blocksize), so we optimize the
1748 1.1 dholland * append-to-file-case.
1749 1.1 dholland *
1750 1.1 dholland * (when clearing B_NEEDCOMMIT, B_CLUSTEROK must also be
1751 1.1 dholland * cleared because write clustering only works for commit
1752 1.1 dholland * rpc's, not for the data portion of the write).
1753 1.1 dholland */
1754 1.1 dholland
1755 1.1 dholland if (!error && iomode == NFSWRITE_UNSTABLE) {
1756 1.1 dholland bp->b_flags |= B_NEEDCOMMIT;
1757 1.1 dholland if (bp->b_dirtyoff == 0
1758 1.1 dholland && bp->b_dirtyend == bp->b_bcount)
1759 1.1 dholland bp->b_flags |= B_CLUSTEROK;
1760 1.1 dholland } else {
1761 1.1 dholland bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
1762 1.1 dholland }
1763 1.1 dholland
1764 1.1 dholland /*
1765 1.1 dholland * For an interrupted write, the buffer is still valid
1766 1.1 dholland * and the write hasn't been pushed to the server yet,
1767 1.1 dholland * so we can't set BIO_ERROR and report the interruption
1768 1.1 dholland * by setting B_EINTR. For the B_ASYNC case, B_EINTR
1769 1.1 dholland * is not relevant, so the rpc attempt is essentially
1770 1.1 dholland * a noop. For the case of a V3 write rpc not being
1771 1.1 dholland * committed to stable storage, the block is still
1772 1.1 dholland * dirty and requires either a commit rpc or another
1773 1.1 dholland * write rpc with iomode == NFSV3WRITE_FILESYNC before
1774 1.1 dholland * the block is reused. This is indicated by setting
1775 1.1 dholland * the B_DELWRI and B_NEEDCOMMIT flags.
1776 1.1 dholland *
1777 1.1 dholland * EIO is returned by ncl_writerpc() to indicate a recoverable
1778 1.1 dholland * write error and is handled as above, except that
1779 1.1 dholland * B_EINTR isn't set. One cause of this is a stale stateid
1780 1.1 dholland * error for the RPC that indicates recovery is required,
1781 1.1 dholland * when called with called_from_strategy != 0.
1782 1.1 dholland *
1783 1.1 dholland * If the buffer is marked B_PAGING, it does not reside on
1784 1.1 dholland * the vp's paging queues so we cannot call bdirty(). The
1785 1.1 dholland * bp in this case is not an NFS cache block so we should
1786 1.1 dholland * be safe. XXX
1787 1.1 dholland *
1788 1.1 dholland * The logic below breaks up errors into recoverable and
1789 1.1 dholland * unrecoverable. For the former, we clear B_INVAL|B_NOCACHE
1790 1.1 dholland * and keep the buffer around for potential write retries.
1791 1.1 dholland 			 * For the latter (e.g. ESTALE), we toss the buffer away (B_INVAL)
1792 1.1 dholland 			 * and save the error in the nfsnode. This is less than ideal
1793 1.1 dholland 			 * but necessary. Keeping such buffers around could eventually
1794 1.1 dholland 			 * cause buffer exhaustion (they can never be written out,
1795 1.1 dholland 			 * so they are constantly re-dirtied). It also causes all
1796 1.1 dholland 			 * sorts of vfs panics. For non-recoverable write errors, we
1797 1.1 dholland 			 * also invalidate the attrcache, so we'll be forced to go
1798 1.1 dholland 			 * over the wire for this object, returning an error to the
1799 1.1 dholland 			 * user on the next call (most of the time).
1800 1.1 dholland */
1801 1.1 dholland if (error == EINTR || error == EIO || error == ETIMEDOUT
1802 1.1 dholland || (!error && (bp->b_flags & B_NEEDCOMMIT))) {
1803 1.1 dholland int s;
1804 1.1 dholland
1805 1.1 dholland s = splbio();
1806 1.1 dholland bp->b_flags &= ~(B_INVAL|B_NOCACHE);
1807 1.1 dholland if ((bp->b_flags & B_PAGING) == 0) {
1808 1.1 dholland bdirty(bp);
1809 1.1 dholland bp->b_flags &= ~B_DONE;
1810 1.1 dholland }
1811 1.1 dholland if ((error == EINTR || error == ETIMEDOUT) &&
1812 1.1 dholland (bp->b_flags & B_ASYNC) == 0)
1813 1.1 dholland bp->b_flags |= B_EINTR;
1814 1.1 dholland splx(s);
1815 1.1 dholland } else {
1816 1.1 dholland if (error) {
1817 1.1 dholland bp->b_ioflags |= BIO_ERROR;
1818 1.1 dholland bp->b_flags |= B_INVAL;
1819 1.1 dholland bp->b_error = np->n_error = error;
1820 1.1 dholland mtx_lock(&np->n_mtx);
1821 1.1 dholland np->n_flag |= NWRITEERR;
1822 1.1 dholland np->n_attrstamp = 0;
1823 1.1 dholland KDTRACE_NFS_ATTRCACHE_FLUSH_DONE(vp);
1824 1.1 dholland mtx_unlock(&np->n_mtx);
1825 1.1 dholland }
1826 1.1 dholland bp->b_dirtyoff = bp->b_dirtyend = 0;
1827 1.1 dholland }
1828 1.1 dholland } else {
1829 1.1 dholland bp->b_resid = 0;
1830 1.1 dholland bufdone(bp);
1831 1.1 dholland return (0);
1832 1.1 dholland }
1833 1.1 dholland }
1834 1.1 dholland bp->b_resid = uiop->uio_resid;
1835 1.1 dholland if (must_commit)
1836 1.1 dholland ncl_clearcommit(vp->v_mount);
1837 1.1 dholland bufdone(bp);
1838 1.1 dholland return (error);
1839 1.1 dholland }
1840 1.1 dholland
1841 1.1 dholland /*
1842 1.1 dholland * Used to aid in handling ftruncate() operations on the NFS client side.
1843 1.1 dholland * Truncation creates a number of special problems for NFS. We have to
1844 1.1 dholland * throw away VM pages and buffer cache buffers that are beyond EOF, and
1845 1.1 dholland * we have to properly handle VM pages or (potentially dirty) buffers
1846 1.1 dholland * that straddle the truncation point.
1847 1.1 dholland */
1849 1.1 dholland int
1850 1.1 dholland ncl_meta_setsize(struct vnode *vp, struct ucred *cred, struct thread *td, u_quad_t nsize)
1851 1.1 dholland {
1852 1.1 dholland struct nfsnode *np = VTONFS(vp);
1853 1.1 dholland u_quad_t tsize;
1854 1.1 dholland int biosize = vp->v_bufobj.bo_bsize;
1855 1.1 dholland int error = 0;
1856 1.1 dholland
1857 1.1 dholland mtx_lock(&np->n_mtx);
1858 1.1 dholland tsize = np->n_size;
1859 1.1 dholland np->n_size = nsize;
1860 1.1 dholland mtx_unlock(&np->n_mtx);
1861 1.1 dholland
1862 1.1 dholland if (nsize < tsize) {
1863 1.1 dholland struct buf *bp;
1864 1.1 dholland daddr_t lbn;
1865 1.1 dholland int bufsize;
1866 1.1 dholland
1867 1.1 dholland /*
1868 1.1 dholland * vtruncbuf() doesn't get the buffer overlapping the
1869 1.1 dholland * truncation point. We may have a B_DELWRI and/or B_CACHE
1870 1.1 dholland * buffer that now needs to be truncated.
1871 1.1 dholland */
1872 1.1 dholland error = vtruncbuf(vp, cred, nsize, biosize);
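		/*
		 * Compute the logical block that now contains EOF and the
		 * number of valid bytes remaining in it; e.g. with an 8K
		 * biosize, nsize 12288 yields lbn 1 and bufsize 4096.
		 */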
1873 1.1 dholland lbn = nsize / biosize;
1874 1.1 dholland bufsize = nsize - (lbn * biosize);
1875 1.1 dholland bp = nfs_getcacheblk(vp, lbn, bufsize, td);
1876 1.1 dholland if (!bp)
1877 1.1 dholland 			return (EINTR);
1878 1.1 dholland if (bp->b_dirtyoff > bp->b_bcount)
1879 1.1 dholland bp->b_dirtyoff = bp->b_bcount;
1880 1.1 dholland if (bp->b_dirtyend > bp->b_bcount)
1881 1.1 dholland bp->b_dirtyend = bp->b_bcount;
1882 1.1 dholland bp->b_flags |= B_RELBUF; /* don't leave garbage around */
1883 1.1 dholland brelse(bp);
1884 1.1 dholland } else {
1885 1.1 dholland vnode_pager_setsize(vp, nsize);
1886 1.1 dholland }
1887 1.1 dholland 	return (error);
1888 1.1 dholland }
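
/*
 * Illustrative use (a sketch, not code from this file): a setattr
 * path shrinking a file would typically call
 *
 *	error = ncl_meta_setsize(vp, cred, td, vap->va_size);
 *
 * before pushing the new size to the server, so that the buffer
 * cache and the VM object agree with the new EOF.
 */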
1889 1.1 dholland
1890