/*	$NetBSD: genfs_io.c,v 1.53.2.16 2012/08/01 22:34:15 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.53.2.16 2012/08/01 22:34:15 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/kmem.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>
#include <sys/buf.h>
#include <sys/radixtree.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/syncfs/syncfs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>
#include <uvm/uvm_page_array.h>

static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
    off_t, enum uio_rw);
static void genfs_dio_iodone(struct buf *);

static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
    void (*)(struct buf *));
static void genfs_rel_pages(struct vm_page **, unsigned int);
static void genfs_markdirty(struct vnode *);

int genfs_maxdio = MAXPHYS;

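/*
 * genfs_rel_pages: unbusy the given pages.  pages that are still PG_FAKE
 * (i.e. were never filled with valid data) are marked PG_RELEASED so that
 * the unbusy frees them.
 */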
static void
genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
{
	unsigned int i;

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE)
			continue;
		KASSERT(uvm_page_locked_p(pg));
		if (pg->flags & PG_FAKE) {
			pg->flags |= PG_RELEASED;
		}
	}
	mutex_enter(&uvm_pageqlock);
	uvm_page_unbusy(pgs, npages);
	mutex_exit(&uvm_pageqlock);
}

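/*
 * genfs_markdirty: put the vnode on the syncer's worklist (if it isn't
 * there already) and, for writably-mmap'ed vnodes, note that the mapping
 * may now be dirty.  called with the vnode's interlock held.
 */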
static void
genfs_markdirty(struct vnode *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));
	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
		vp->v_iflag |= VI_WRMAPDIRTY;
	}
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */

int
genfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ * const ap = v;

	off_t diskeof, memeof;
	int i, error, npages;
	const int flags = ap->a_flags;
	struct vnode * const vp = ap->a_vp;
	struct uvm_object * const uobj = &vp->v_uobj;
	kauth_cred_t const cred = curlwp->l_cred;	/* XXXUBC curlwp */
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
	const bool glocked = (flags & PGO_GLOCKHELD) != 0;
	const bool need_wapbl = blockalloc && vp->v_mount->mnt_wapbl;
	bool has_trans_wapbl = false;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
	    vp->v_type == VLNK || vp->v_type == VBLK);

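	/*
	 * we may restart from here: if v_size changes while the object
	 * lock is dropped below, the sizes computed from it must be
	 * recomputed.
	 */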
startover:
	error = 0;
	const voff_t origvsize = vp->v_size;
	const off_t origoffset = ap->a_offset;
	const int orignpages = *ap->a_count;

	GOP_SIZE(vp, origvsize, &diskeof, 0);
	if (flags & PGO_PASTEOF) {
		off_t newsize;
#if defined(DIAGNOSTIC)
		off_t writeeof;
#endif /* defined(DIAGNOSTIC) */

		newsize = MAX(origvsize,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
#if defined(DIAGNOSTIC)
		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
		if (newsize > round_page(writeeof)) {
			panic("%s: past eof: %" PRId64 " vs. %" PRId64,
			    __func__, newsize, round_page(writeeof));
		}
#endif /* defined(DIAGNOSTIC) */
	} else {
		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			mutex_exit(uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
		    origoffset, *ap->a_count, memeof,0);
		error = EINVAL;
		goto out_err;
	}

	/* uobj is locked */

	if ((flags & PGO_NOTIMESTAMP) == 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		int updflags = 0;

		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
			updflags = GOP_UPDATE_ACCESSED;
		}
		if (memwrite) {
			updflags |= GOP_UPDATE_MODIFIED;
		}
		if (updflags != 0) {
			GOP_MARKUPDATE(vp, updflags);
		}
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		int nfound;
		struct vm_page *pg;

		KASSERT(!glocked);
		npages = *ap->a_count;
#if defined(DEBUG)
		for (i = 0; i < npages; i++) {
			pg = ap->a_m[i];
			KASSERT(pg == NULL || pg == PGO_DONTCARE);
		}
#endif /* defined(DEBUG) */
		nfound = uvn_findpages(uobj, origoffset, &npages,
		    ap->a_m, NULL,
		    UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
		KASSERT(npages == *ap->a_count);
		if (nfound == 0) {
			error = EBUSY;
			goto out_err;
		}
		/*
		 * lock and unlock g_glock to ensure that no one is truncating
		 * the file behind us.
		 */
		if (!genfs_node_rdtrylock(vp)) {
			genfs_rel_pages(ap->a_m, npages);

			/*
			 * restore the array.
			 */

			for (i = 0; i < npages; i++) {
				pg = ap->a_m[i];

				if (pg != NULL && pg != PGO_DONTCARE) {
					ap->a_m[i] = NULL;
				}
				KASSERT(ap->a_m[i] == NULL ||
				    ap->a_m[i] == PGO_DONTCARE);
			}
		} else {
			genfs_node_unlock(vp);
		}
		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
		if (error == 0 && memwrite) {
			for (i = 0; i < npages; i++) {
				pg = ap->a_m[i];
				if (pg == NULL || pg == PGO_DONTCARE) {
					continue;
				}
				if (uvm_pagegetdirty(pg) ==
				    UVM_PAGE_STATUS_CLEAN) {
					uvm_pagemarkdirty(pg,
					    UVM_PAGE_STATUS_UNKNOWN);
				}
			}
			genfs_markdirty(vp);
		}
		goto out_err;
	}
	mutex_exit(uobj->vmobjlock);

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	const int fs_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
	const int dev_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
	const int fs_bsize = 1 << fs_bshift;
#define	blk_mask	(fs_bsize - 1)
#define	trunc_blk(x)	((x) & ~blk_mask)
#define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)
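	/*
	 * e.g. with an 8KB filesystem block (fs_bshift == 13),
	 * trunc_blk(0x2345) == 0x2000 and round_blk(0x2345) == 0x4000.
	 */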

	const int orignmempages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignmempages;
	const off_t startoffset = trunc_blk(origoffset);
	const off_t endoffset = MIN(
	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
	    round_page(memeof));
	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;

	const int pgs_size = sizeof(struct vm_page *) *
	    ((endoffset - startoffset) >> PAGE_SHIFT);
	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];

	if (pgs_size > sizeof(pgs_onstack)) {
		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
		if (pgs == NULL) {
			pgs = pgs_onstack;
			error = ENOMEM;
			goto out_err;
		}
	} else {
		pgs = pgs_onstack;
		(void)memset(pgs, 0, pgs_size);
	}

	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
	    ridx, npages, startoffset, endoffset);

	if (!has_trans_wapbl) {
		fstrans_start(vp->v_mount, FSTRANS_SHARED);
		/*
		 * XXX: This assumes that we come here only via
		 * the mmio path
		 */
		if (need_wapbl) {
			error = WAPBL_BEGIN(vp->v_mount);
			if (error) {
				fstrans_done(vp->v_mount);
				goto out_err_free;
			}
		}
		has_trans_wapbl = true;
	}

	/*
	 * hold g_glock to prevent a race with truncate.
	 *
	 * check if our idea of v_size is still valid.
	 */

	KASSERT(!glocked || genfs_node_wrlocked(vp));
	if (!glocked) {
		if (blockalloc) {
			genfs_node_wrlock(vp);
		} else {
			genfs_node_rdlock(vp);
		}
	}
	mutex_enter(uobj->vmobjlock);
	if (vp->v_size < origvsize) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		if (pgs != pgs_onstack)
			kmem_free(pgs, pgs_size);
		goto startover;
	}

	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx], NULL,
	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		KASSERT(async != 0);
		genfs_rel_pages(&pgs[ridx], orignmempages);
		mutex_exit(uobj->vmobjlock);
		error = EBUSY;
		goto out_err_free;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (overwrite) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			/*
			 * it's the caller's responsibility to allocate blocks
			 * beforehand for the overwrite case.
			 */
			pg->flags &= ~PG_RDONLY;
			/*
			 * mark the page DIRTY.
			 * otherwise another thread can do putpages and pull
			 * our vnode from the syncer's queue before our caller
			 * does ubc_release.  note that putpages won't see
			 * CLEAN pages even if they are BUSY.
			 */
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[ridx + i];

		if ((pg->flags & PG_FAKE) ||
		    (memwrite && (pg->flags & PG_RDONLY) != 0)) {
			break;
		}
	}
	if (i == npages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		npages += ridx;
		goto out;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignmempages) {
		int npgs;

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		genfs_rel_pages(&pgs[ridx], orignmempages);
		memset(pgs, 0, pgs_size);

		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		if (uvn_findpages(uobj, startoffset, &npgs, pgs, NULL,
		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
			if (!glocked) {
				genfs_node_unlock(vp);
			}
			KASSERT(async != 0);
			genfs_rel_pages(pgs, npages);
			mutex_exit(uobj->vmobjlock);
			error = EBUSY;
			goto out_err_free;
		}
	}

	mutex_exit(uobj->vmobjlock);

    {
	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	bool sawhole = false;

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;
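	/*
	 * e.g. with diskeof 0x1800, startoffset 0x1000 and two 4KB pages
	 * (totalbytes 0x2000): bytes == 0x800 and tailbytes == 0x1800,
	 * i.e. the last 0x1800 bytes are zeroed below rather than read.
	 */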

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | (async ? 0 : UVMPAGER_MAPIN_WAITOK));
	if (kva == 0) {
		error = EBUSY;
		goto mapin_fail;
	}

	mbp = getiobuf(vp, true);
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_cflags = BC_BUSY;
	if (async) {
		mbp->b_flags = B_READ | B_ASYNC;
		mbp->b_iodone = uvm_aio_biodone;
	} else {
		mbp->b_flags = B_READ;
		mbp->b_iodone = NULL;
	}
	if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * skip over pages which are not PG_FAKE since in that case they have
	 * valid data that we need to preserve.
	 */

	tailstart = bytes;
	while (tailbytes > 0) {
		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);

		KASSERT(len <= tailbytes);
		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
			memset((void *)(kva + tailstart), 0, len);
			UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
			    kva, tailstart, len, 0);
		}
		tailstart += len;
		tailbytes -= len;
	}

	/*
	 * now loop over the pages, reading as needed.
	 */

	bp = NULL;
	off_t offset;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		int run;
		daddr_t lbn, blkno;
		int pidx;
		struct vnode *devvp;

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			if ((pgs[pidx]->flags & PG_RDONLY)) {
				sawhole = true;
			}
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
			    lbn,error,0,0);
			skipbytes += bytes;
			bytes = 0;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			int pcount;

			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno == (daddr_t)-1) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%x -> RDONLY", lbn,0,0,0);

			sawhole = true;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			if (!blockalloc) {
				mutex_enter(uobj->vmobjlock);
				for (i = 0; i < holepages; i++) {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
				mutex_exit(uobj->vmobjlock);
			}
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
			    vp, bp, vp->v_numoutput, 0);
			bp = getiobuf(vp, true);
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);
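		/*
		 * e.g. with 8KB blocks (fs_bshift 13) and 512-byte device
		 * blocks (dev_bshift 9), an offset 4KB into lbn yields
		 * blkno + 8.
		 */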

		UVMHIST_LOG(ubchist,
		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    bp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		error = 0;
		goto out_err_free;
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}

	/* Remove the mapping (make KVA available as soon as possible) */
	uvm_pagermapout(kva, npages);

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * if blockalloc is false, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * if blockalloc is true, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && blockalloc) {
		error = GOP_ALLOC(vp, startoffset,
		    npages << PAGE_SHIFT, 0, cred);
		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
		    startoffset, npages << PAGE_SHIFT, error,0);
		if (!error) {
			mutex_enter(uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				struct vm_page *pg = pgs[i];

				if (pg == NULL) {
					continue;
				}
				pg->flags &= ~PG_RDONLY;
				uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
				UVMHIST_LOG(ubchist, "mark dirty pg %p",
				    pg,0,0,0);
			}
			mutex_exit(uobj->vmobjlock);
		}
	}

	putiobuf(mbp);
    }

mapin_fail:
	if (!glocked) {
		genfs_node_unlock(vp);
	}
	mutex_enter(uobj->vmobjlock);

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		genfs_rel_pages(pgs, npages);
		mutex_exit(uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
		goto out_err_free;
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
	error = 0;
	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
		    pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			/*
			 * we've read the page's contents from the backing
			 * storage.
			 *
			 * for a read fault, we keep them CLEAN.
			 */
			KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
			pg->flags &= ~PG_FAKE;
		}
		KASSERT(!blockalloc || (pg->flags & PG_RDONLY) == 0);
		if (i < ridx || i >= ridx + orignmempages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
			    pg, pg->offset,0,0);
			KASSERT(!overwrite);
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_FAKE && overwrite) {
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pageenqueue(pg);
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
		} else if (memwrite && !overwrite &&
		    uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
			/*
			 * for a write fault, start dirtiness tracking of
			 * requested pages.
			 */
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
		}
	}
	mutex_exit(&uvm_pageqlock);
	if (memwrite) {
		genfs_markdirty(vp);
	}
	mutex_exit(uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignmempages * sizeof(struct vm_page *));
	}

out_err_free:
	if (pgs != NULL && pgs != pgs_onstack)
		kmem_free(pgs, pgs_size);
out_err:
	if (has_trans_wapbl) {
		if (need_wapbl)
			WAPBL_END(vp->v_mount);
		fstrans_done(vp->v_mount);
	}
	return error;
}

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.  we return with the
 *	object unlocked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *	thus, a caller might want to unlock higher level resources
 *	(e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the page queue lock.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.  the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).  if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!UVM_PAGE_STATUS_CLEAN) because no
 *	one has had a chance to modify it yet.  if the PG_BUSY page is
 *	being paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).  in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).  also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 */

int
genfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ * const ap = v;

	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
	    ap->a_flags, NULL);
}
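
/*
 * an illustrative (not compiled) sketch of the locking protocol described
 * above: the caller enters with the object lock held, and VOP_PUTPAGES
 * returns with it released.  the flag combination here is only an example.
 */
#if 0 /* example only */
	mutex_enter(vp->v_uobj.vmobjlock);
	error = VOP_PUTPAGES(vp, 0, 0, PGO_ALLPAGES | PGO_CLEANIT | PGO_SYNCIO);
	/* the object lock has been released by VOP_PUTPAGES */
#endif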

int
genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
    int origflags, struct vm_page **busypg)
{
	struct uvm_object * const uobj = &vp->v_uobj;
	kmutex_t * const slock = uobj->vmobjlock;
	off_t nextoff;
	/* Even for strange MAXPHYS, the shift rounds down to a page */
#define maxpages (MAXPHYS >> PAGE_SHIFT)
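	/* e.g. 64KB MAXPHYS with 4KB pages gives a 16-page cluster */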
	unsigned int i;
	unsigned int npages, nback;
	unsigned int freeflag;
	int error;
	struct vm_page *pgs[maxpages], *pg;
	struct uvm_page_array a;
	bool wasclean, needs_clean;
	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
	struct lwp * const l = curlwp ? curlwp : &lwp0;
	int flags;
	bool written;		/* true if we wrote out any pages */
	bool need_wapbl;
	bool has_trans;
	bool tryclean;		/* try to pull the vnode off the syncer's list */
	bool onworklst;
	const bool integrity_sync =
	    (origflags & (PGO_LAZY|PGO_SYNCIO)) == PGO_SYNCIO;
	const bool dirtyonly = (origflags & (PGO_DEACTIVATE|PGO_FREE)) == 0;

	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
	KASSERT(startoff < endoff || endoff == 0);

	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
	    vp, uobj->uo_npages, startoff, endoff - startoff);

	has_trans = false;
	need_wapbl = (!pagedaemon && vp->v_mount && vp->v_mount->mnt_wapbl &&
	    (origflags & PGO_JOURNALLOCKED) == 0);

retry:
	flags = origflags;
	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);

	/*
	 * shortcut if we have no pages to process.
	 */

	if (uobj->uo_npages == 0 || (dirtyonly &&
	    radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
	    UVM_PAGE_DIRTY_TAG))) {
		if (vp->v_iflag & VI_ONWORKLST) {
			vp->v_iflag &= ~VI_WRMAPDIRTY;
			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
				vn_syncer_remove_from_worklist(vp);
		}
		if (has_trans) {
			if (need_wapbl)
				WAPBL_END(vp->v_mount);
			fstrans_done(vp->v_mount);
		}
		mutex_exit(slock);
		return (0);
	}

	/*
	 * the vnode has pages, set up to process the request.
	 */

	if (!has_trans && (flags & PGO_CLEANIT) != 0) {
		mutex_exit(slock);
		if (pagedaemon) {
			error = fstrans_start_nowait(vp->v_mount, FSTRANS_LAZY);
			if (error)
				return error;
		} else
			fstrans_start(vp->v_mount, FSTRANS_LAZY);
		if (need_wapbl) {
			error = WAPBL_BEGIN(vp->v_mount);
			if (error) {
				fstrans_done(vp->v_mount);
				return error;
			}
		}
		has_trans = true;
		mutex_enter(slock);
		goto retry;
	}

	error = 0;
	wasclean = (vp->v_numoutput == 0);

	/*
	 * if this vnode is known not to have dirty pages,
	 * don't bother to clean it out.
	 */

	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
#if !defined(DEBUG)
		if (dirtyonly) {
			goto skip_scan;
		}
#endif /* !defined(DEBUG) */
		flags &= ~PGO_CLEANIT;
	}

	/*
	 * start the loop to scan pages.
	 */

	written = false;
	nextoff = startoff;
	if (endoff == 0 || flags & PGO_ALLPAGES) {
		endoff = trunc_page(LLONG_MAX);
	}
	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
	tryclean = true;
	uvm_page_array_init(&a);
	for (;;) {
		bool protected;

		/*
		 * if we are asked to sync for integrity, we should wait on
		 * pages being written back by other threads as well.
		 */

		pg = uvm_page_array_fill_and_peek(&a, uobj, nextoff, 0,
		    dirtyonly ? (UVM_PAGE_ARRAY_FILL_DIRTY |
		    (integrity_sync ? UVM_PAGE_ARRAY_FILL_WRITEBACK : 0)) : 0);
		if (pg == NULL) {
			break;
		}

		KASSERT(pg->uobject == uobj);
		KASSERT((pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		    (pg->flags & (PG_BUSY)) != 0);
		KASSERT(pg->offset >= startoff);
		KASSERT(pg->offset >= nextoff);
		KASSERT(!dirtyonly ||
		    uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN ||
		    radix_tree_get_tag(&uobj->uo_pages,
		    pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG));
		if (pg->offset >= endoff) {
			break;
		}

		/*
		 * a preempt point.
		 */

		if ((l->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
		    != 0) {
			nextoff = pg->offset; /* visit this page again */
			mutex_exit(slock);
			preempt();
			/*
			 * as we dropped the object lock, our cached pages can
			 * be stale.
			 */
			uvm_page_array_clear(&a);
			mutex_enter(slock);
			continue;
		}

		/*
		 * if the current page is busy, wait for it to become unbusy.
		 */

		if ((pg->flags & PG_BUSY) != 0) {
			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
			if ((pg->flags & (PG_RELEASED|PG_PAGEOUT)) != 0
			    && (flags & PGO_BUSYFAIL) != 0) {
				UVMHIST_LOG(ubchist, "busyfail %p", pg,
				    0,0,0);
				error = EDEADLK;
				if (busypg != NULL)
					*busypg = pg;
				break;
			}
			if (pagedaemon) {
				/*
				 * someone has taken the page while we
				 * dropped the lock for fstrans_start.
				 */
				break;
			}
			/*
			 * don't bother to wait on others' activities
			 * unless we are asked to sync for integrity.
			 */
			if (!integrity_sync) {
				wasclean = false;
				nextoff = pg->offset + PAGE_SIZE;
				uvm_page_array_advance(&a);
				continue;
			}
			nextoff = pg->offset; /* visit this page again */
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
			/*
			 * as we dropped the object lock, our cached pages can
			 * be stale.
			 */
			uvm_page_array_clear(&a);
			mutex_enter(slock);
			continue;
		}

		nextoff = pg->offset + PAGE_SIZE;
		uvm_page_array_advance(&a);

		/*
		 * if we're freeing, remove all mappings of the page now.
		 * if we're cleaning, check if the page needs to be cleaned.
		 */

		protected = false;
		if (flags & PGO_FREE) {
			pmap_page_protect(pg, VM_PROT_NONE);
			protected = true;
		} else if (flags & PGO_CLEANIT) {

			/*
			 * if we still have some hope to pull this vnode off
			 * from the syncer queue, write-protect the page.
			 */

			if (tryclean && wasclean) {

				/*
				 * uobj pages get wired only by uvm_fault
				 * where uobj is locked.
				 */

				if (pg->wire_count == 0) {
					pmap_page_protect(pg,
					    VM_PROT_READ|VM_PROT_EXECUTE);
					protected = true;
				} else {
					/*
					 * give up.
					 */
					tryclean = false;
				}
			}
		}

		if (flags & PGO_CLEANIT) {
			needs_clean = uvm_pagecheckdirty(pg, protected);
		} else {
			needs_clean = false;
		}

		/*
		 * if we're cleaning, build a cluster.
		 * the cluster will consist of pages which are currently dirty.
		 * if not cleaning, just operate on the one page.
		 */

		if (needs_clean) {
			unsigned int nforw;
			unsigned int fpflags;

			KDASSERT((vp->v_iflag & VI_ONWORKLST));
			wasclean = false;
			memset(pgs, 0, sizeof(pgs));
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "genfs_putpages");

			fpflags = UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY;

			/*
			 * XXX PG_PAGER1 incompatibility check.
			 *
			 * this is a kludge for nfs.  nfs has two kinds of
			 * dirty pages:
			 *	- not written to the server yet
			 *	- written to the server but not committed yet
			 * the latter is marked as PG_NEEDCOMMIT. (== PG_PAGER1)
			 * nfs doesn't want them being clustered together.
			 *
			 * probably it's better to make PG_NEEDCOMMIT a
			 * first-class citizen for uvm/genfs.
			 */
			if ((pg->flags & PG_PAGER1) != 0) {
				fpflags |= UFP_ONLYPAGER1;
			} else {
				fpflags |= UFP_NOPAGER1;
			}

			/*
			 * first look backward.
			 *
			 * because we always scan pages in ascending order,
			 * a backward scan can be useful only for the first
			 * page in the range.
			 */
			if (startoff == pg->offset) {
				npages = MIN(maxpages >> 1,
				    pg->offset >> PAGE_SHIFT);
				nback = npages;
				uvn_findpages(uobj, pg->offset - PAGE_SIZE,
				    &nback, &pgs[0], NULL,
				    fpflags | UFP_BACKWARD);
				if (nback) {
					memmove(&pgs[0], &pgs[npages - nback],
					    nback * sizeof(pgs[0]));
					if (npages - nback < nback)
						memset(&pgs[nback], 0,
						    (npages - nback) *
						    sizeof(pgs[0]));
					else
						memset(&pgs[npages - nback], 0,
						    nback * sizeof(pgs[0]));
				}
			} else {
				nback = 0;
			}

			/*
			 * then plug in our page of interest.
			 */

			pgs[nback] = pg;

			/*
			 * then look forward to fill in the remaining space in
			 * the array of pages.
			 *
			 * pass our cached array of pages so that hopefully
			 * uvn_findpages can find some good pages in it.
			 */

			nforw = maxpages - nback - 1;
			uvn_findpages(uobj, pg->offset + PAGE_SIZE,
			    &nforw, &pgs[nback + 1], &a, fpflags);
			npages = nback + 1 + nforw;
		} else {
			pgs[0] = pg;
			npages = 1;
			nback = 0;
		}

		/*
		 * apply FREE or DEACTIVATE options if requested.
		 */

		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			mutex_enter(&uvm_pageqlock);
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *tpg = pgs[i];

			KASSERT(tpg->uobject == uobj);
			KASSERT(i == 0 ||
			    pgs[i-1]->offset + PAGE_SIZE == tpg->offset);
			KASSERT(!needs_clean || uvm_pagegetdirty(pgs[i]) !=
			    UVM_PAGE_STATUS_DIRTY);
			if (needs_clean) {
				/*
				 * mark pages as WRITEBACK so that concurrent
				 * fsync can find and wait for our activities.
				 */
				radix_tree_set_tag(&uobj->uo_pages,
				    pgs[i]->offset >> PAGE_SHIFT,
				    UVM_PAGE_WRITEBACK_TAG);
			}
			if (tpg->offset < startoff || tpg->offset >= endoff)
				continue;
			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
				uvm_pagedeactivate(tpg);
			} else if (flags & PGO_FREE) {
				pmap_page_protect(tpg, VM_PROT_NONE);
				if (tpg->flags & PG_BUSY) {
					tpg->flags |= freeflag;
					if (pagedaemon) {
						uvm_pageout_start(1);
						uvm_pagedequeue(tpg);
					}
				} else {

					/*
					 * ``page is not busy''
					 * implies that npages is 1
					 * and needs_clean is false.
					 */

					KASSERT(npages == 1);
					KASSERT(!needs_clean);
					KASSERT(pg == tpg);
					KASSERT(nextoff ==
					    tpg->offset + PAGE_SIZE);
					uvm_pagefree(tpg);
					if (pagedaemon)
						uvmexp.pdfreed++;
				}
			}
		}
		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			mutex_exit(&uvm_pageqlock);
		}
		if (needs_clean) {
			mutex_exit(slock);
			KASSERT(nextoff == pg->offset + PAGE_SIZE);
			KASSERT(nback < npages);
			nextoff = pg->offset + ((npages - nback) << PAGE_SHIFT);
			KASSERT(pgs[nback] == pg);
			KASSERT(nextoff == pgs[npages - 1]->offset + PAGE_SIZE);

			/*
			 * start the i/o.
			 */
			error = GOP_WRITE(vp, pgs, npages, flags);
			written = true;
			/*
			 * as we dropped the object lock, our cached pages can
			 * be stale.
			 */
			uvm_page_array_clear(&a);
			mutex_enter(slock);
			if (error) {
				break;
			}
		}
	}
	uvm_page_array_fini(&a);

	/*
	 * update ctime/mtime if the modification we started writing out might
	 * be from an mmap'ed write.
	 *
	 * this is necessary when an application keeps a file mmapped and
	 * repeatedly modifies it via the window.  note that, because we
	 * don't always write-protect pages when cleaning, such modifications
	 * might not involve any page faults.
	 */

	if (written && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
	}

	/*
	 * if we no longer have any possibly dirty pages, take us off the
	 * syncer list.
	 */

	if ((vp->v_iflag & VI_ONWORKLST) != 0 &&
	    radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
	    UVM_PAGE_DIRTY_TAG)) {
		vp->v_iflag &= ~VI_WRMAPDIRTY;
		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
			vn_syncer_remove_from_worklist(vp);
	}

#if !defined(DEBUG)
skip_scan:
#endif /* !defined(DEBUG) */

	/*
	 * if we found or started any i/o and we're asked to sync for
	 * integrity, wait for all writes to finish.
	 */

	if (!wasclean && integrity_sync) {
		while (vp->v_numoutput != 0)
			cv_wait(&vp->v_cv, slock);
	}
	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
	mutex_exit(slock);

	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
		/*
		 * in the case of PGO_RECLAIM, ensure that the vnode ends up
		 * clean.  retrying is not a big deal because, in many cases,
		 * uobj->uo_npages is already 0 here.
		 */
		mutex_enter(slock);
		goto retry;
	}

	if (has_trans) {
		if (need_wapbl)
			WAPBL_END(vp->v_mount);
		fstrans_done(vp->v_mount);
	}

	return (error);
}

int
genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	off_t off;
	vaddr_t kva;
	size_t len;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
	    vp, pgs, npages, flags);

	off = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
	len = npages << PAGE_SHIFT;

	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
	    uvm_aio_biodone);

	return error;
}

/*
 * genfs_gop_write_rwmap:
 *
 * a variant of genfs_gop_write.  it's used by UDF for its directory buffers.
 * this maps pages with PROT_WRITE so that VOP_STRATEGY can modify
 * the contents before writing them out to the underlying storage.
 */

int
genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	off_t off;
	vaddr_t kva;
	size_t len;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
	    vp, pgs, npages, flags);

	off = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	len = npages << PAGE_SHIFT;

	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
	    uvm_aio_biodone);

	return error;
}

/*
 * Backend routine for doing I/O to vnode pages.  Pages are already locked
 * and mapped into kernel memory.  Here we just look up the underlying
 * device block addresses and call the strategy routine.
 */

static int
genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
    enum uio_rw rw, void (*iodone)(struct buf *))
{
	int s, error;
	int fs_bshift, dev_bshift;
	off_t eof, offset, startoffset;
	size_t bytes, iobytes, skipbytes;
	struct buf *mbp, *bp;
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool lazy = (flags & PGO_LAZY) != 0;
1387 1.35 uebayasi const bool iowrite = rw == UIO_WRITE;
1388 1.35 uebayasi const int brw = iowrite ? B_WRITE : B_READ;
1389 1.1 pooka UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
1390 1.1 pooka
1391 1.1 pooka UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
1392 1.1 pooka vp, kva, len, flags);
1393 1.1 pooka
1394 1.1 pooka KASSERT(vp->v_size <= vp->v_writesize);
1395 1.1 pooka GOP_SIZE(vp, vp->v_writesize, &eof, 0);
1396 1.1 pooka if (vp->v_type != VBLK) {
1397 1.1 pooka fs_bshift = vp->v_mount->mnt_fs_bshift;
1398 1.1 pooka dev_bshift = vp->v_mount->mnt_dev_bshift;
1399 1.1 pooka } else {
1400 1.1 pooka fs_bshift = DEV_BSHIFT;
1401 1.1 pooka dev_bshift = DEV_BSHIFT;
1402 1.1 pooka }
1403 1.1 pooka error = 0;
1404 1.1 pooka startoffset = off;
1405 1.1 pooka bytes = MIN(len, eof - startoffset);
1406 1.1 pooka skipbytes = 0;
1407 1.1 pooka KASSERT(bytes != 0);
1408 1.1 pooka
1409 1.35 uebayasi if (iowrite) {
1410 1.53.2.16 yamt /*
1411 1.53.2.16 yamt * why += 2?
1412 1.53.2.16 yamt * 1 for biodone, 1 for uvm_aio_aiodone.
1413 1.53.2.16 yamt */
1414 1.49 rmind mutex_enter(vp->v_interlock);
1415 1.1 pooka vp->v_numoutput += 2;
1416 1.49 rmind mutex_exit(vp->v_interlock);
1417 1.1 pooka }
1418 1.2 ad mbp = getiobuf(vp, true);
1419 1.1 pooka UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
1420 1.1 pooka vp, mbp, vp->v_numoutput, bytes);
1421 1.1 pooka mbp->b_bufsize = len;
1422 1.1 pooka mbp->b_data = (void *)kva;
1423 1.1 pooka mbp->b_resid = mbp->b_bcount = bytes;
1424 1.2 ad mbp->b_cflags = BC_BUSY | BC_AGE;
1425 1.2 ad if (async) {
1426 1.2 ad mbp->b_flags = brw | B_ASYNC;
1427 1.2 ad mbp->b_iodone = iodone;
1428 1.2 ad } else {
1429 1.2 ad mbp->b_flags = brw;
1430 1.2 ad mbp->b_iodone = NULL;
1431 1.2 ad }
1432 1.1 pooka if (curlwp == uvm.pagedaemon_lwp)
1433 1.1 pooka BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
1434 1.53.2.14 yamt else if (async || lazy)
1435 1.1 pooka BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
1436 1.1 pooka else
1437 1.1 pooka BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
1438 1.1 pooka
1439 1.1 pooka bp = NULL;
1440 1.1 pooka for (offset = startoffset;
1441 1.1 pooka bytes > 0;
1442 1.1 pooka offset += iobytes, bytes -= iobytes) {
1443 1.36 uebayasi int run;
1444 1.36 uebayasi daddr_t lbn, blkno;
1445 1.36 uebayasi struct vnode *devvp;
1446 1.36 uebayasi
1447 1.36 uebayasi /*
1448 1.36 uebayasi * bmap the file to find out the blkno to read from and
1449 1.36 uebayasi * how much we can read in one i/o. if bmap returns an error,
1450 1.36 uebayasi * skip the rest of the top-level i/o.
1451 1.36 uebayasi */
1452 1.36 uebayasi
1453 1.1 pooka lbn = offset >> fs_bshift;
1454 1.1 pooka error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
1455 1.1 pooka if (error) {
 1456 1.36 uebayasi 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
 1457 1.36 uebayasi 			    lbn, error, 0, 0);
1458 1.1 pooka skipbytes += bytes;
1459 1.1 pooka bytes = 0;
1460 1.36 uebayasi goto loopdone;
1461 1.1 pooka }
1462 1.1 pooka
 1463 1.36 uebayasi 		/*
 1464 1.36 uebayasi 		 * see how many bytes can be transferred with this i/o:
 1465 1.36 uebayasi 		 * no more than the rest of the contiguous run of blocks
 1466 1.36 uebayasi 		 * that bmap found, and no more than the bytes remaining.
 1467 1.36 uebayasi 		 */
1468 1.36 uebayasi
1469 1.1 pooka iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
1470 1.1 pooka bytes);
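		/*
		 * worked example with hypothetical numbers: 8KB fs blocks
		 * (fs_bshift == 13), offset == 20480, so lbn == 2; if bmap
		 * returned run == 3 (blocks 2..5 contiguous), then
		 * ((2 + 1 + 3) << 13) - 20480 == 28672 bytes may be
		 * transferred in this chunk.
		 */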
1471 1.36 uebayasi
 1472 1.36 uebayasi 		/*
 1473 1.36 uebayasi 		 * if this block isn't allocated, skip it.  for reads,
 1474 1.36 uebayasi 		 * zero the corresponding part of the buffer instead
 1475 1.36 uebayasi 		 * of reading it.
 1476 1.36 uebayasi 		 */
1477 1.36 uebayasi
1478 1.1 pooka if (blkno == (daddr_t)-1) {
1479 1.35 uebayasi if (!iowrite) {
1480 1.1 pooka memset((char *)kva + (offset - startoffset), 0,
1481 1.36 uebayasi iobytes);
1482 1.1 pooka }
1483 1.1 pooka skipbytes += iobytes;
1484 1.1 pooka continue;
1485 1.1 pooka }
1486 1.1 pooka
1487 1.36 uebayasi /*
1488 1.36 uebayasi * allocate a sub-buf for this piece of the i/o
1489 1.36 uebayasi * (or just use mbp if there's only 1 piece),
1490 1.36 uebayasi * and start it going.
1491 1.36 uebayasi */
1492 1.36 uebayasi
1493 1.1 pooka if (offset == startoffset && iobytes == bytes) {
1494 1.1 pooka bp = mbp;
1495 1.1 pooka } else {
1496 1.1 pooka UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
1497 1.1 pooka vp, bp, vp->v_numoutput, 0);
1498 1.2 ad bp = getiobuf(vp, true);
1499 1.1 pooka nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
1500 1.1 pooka }
1501 1.1 pooka bp->b_lblkno = 0;
1502 1.1 pooka
1503 1.1 pooka /* adjust physical blkno for partial blocks */
1504 1.1 pooka bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
1505 1.1 pooka dev_bshift);
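		/*
		 * e.g. with hypothetical numbers fs_bshift == 13,
		 * dev_bshift == 9, offset == 20992, lbn == 2: the i/o
		 * starts 20992 - 16384 == 4608 bytes into the fs block,
		 * which is 4608 >> 9 == 9 sectors past blkno.
		 */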
1506 1.36 uebayasi
1507 1.1 pooka UVMHIST_LOG(ubchist,
1508 1.36 uebayasi "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
1509 1.36 uebayasi bp, offset, bp->b_bcount, bp->b_blkno);
1510 1.1 pooka
1511 1.1 pooka VOP_STRATEGY(devvp, bp);
1512 1.1 pooka }
1513 1.36 uebayasi
1514 1.36 uebayasi loopdone:
1515 1.1 pooka if (skipbytes) {
1516 1.1 pooka UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
1517 1.1 pooka }
1518 1.1 pooka nestiobuf_done(mbp, skipbytes, error);
1519 1.1 pooka if (async) {
1520 1.1 pooka UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
1521 1.1 pooka return (0);
1522 1.1 pooka }
1523 1.1 pooka UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
1524 1.1 pooka error = biowait(mbp);
1525 1.1 pooka s = splbio();
1526 1.1 pooka (*iodone)(mbp);
1527 1.1 pooka splx(s);
1528 1.1 pooka UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
1529 1.1 pooka return (error);
1530 1.1 pooka }
1531 1.1 pooka
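/*
 * Compatibility getpages for filesystems which do not implement
 * VOP_GETPAGES natively: find or allocate the pages, then fill each
 * still-PG_FAKE page with a single-page VOP_READ.
 */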
1532 1.1 pooka int
1533 1.1 pooka genfs_compat_getpages(void *v)
1534 1.1 pooka {
1535 1.1 pooka struct vop_getpages_args /* {
1536 1.1 pooka struct vnode *a_vp;
1537 1.1 pooka voff_t a_offset;
1538 1.1 pooka struct vm_page **a_m;
1539 1.1 pooka int *a_count;
1540 1.1 pooka int a_centeridx;
1541 1.1 pooka vm_prot_t a_access_type;
1542 1.1 pooka int a_advice;
1543 1.1 pooka int a_flags;
1544 1.1 pooka } */ *ap = v;
1545 1.1 pooka
1546 1.1 pooka off_t origoffset;
1547 1.1 pooka struct vnode *vp = ap->a_vp;
1548 1.1 pooka struct uvm_object *uobj = &vp->v_uobj;
1549 1.1 pooka struct vm_page *pg, **pgs;
1550 1.1 pooka vaddr_t kva;
1551 1.1 pooka int i, error, orignpages, npages;
1552 1.1 pooka struct iovec iov;
1553 1.1 pooka struct uio uio;
1554 1.1 pooka kauth_cred_t cred = curlwp->l_cred;
1555 1.35 uebayasi const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
1556 1.1 pooka
1557 1.1 pooka error = 0;
1558 1.1 pooka origoffset = ap->a_offset;
1559 1.1 pooka orignpages = *ap->a_count;
1560 1.1 pooka pgs = ap->a_m;
1561 1.1 pooka
1562 1.1 pooka if (ap->a_flags & PGO_LOCKED) {
1563 1.53.2.6 yamt uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m, NULL,
 1564 1.35 uebayasi 		    UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
1565 1.1 pooka
1566 1.38 chs error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
1567 1.38 chs if (error == 0 && memwrite) {
1568 1.38 chs genfs_markdirty(vp);
1569 1.38 chs }
1570 1.38 chs return error;
1571 1.1 pooka }
1572 1.1 pooka if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
1573 1.49 rmind mutex_exit(uobj->vmobjlock);
1574 1.38 chs return EINVAL;
1575 1.1 pooka }
1576 1.1 pooka if ((ap->a_flags & PGO_SYNCIO) == 0) {
1577 1.49 rmind mutex_exit(uobj->vmobjlock);
1578 1.1 pooka return 0;
1579 1.1 pooka }
1580 1.1 pooka npages = orignpages;
1581 1.53.2.6 yamt uvn_findpages(uobj, origoffset, &npages, pgs, NULL, UFP_ALL);
1582 1.49 rmind mutex_exit(uobj->vmobjlock);
1583 1.1 pooka kva = uvm_pagermapin(pgs, npages,
1584 1.1 pooka UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
1585 1.1 pooka for (i = 0; i < npages; i++) {
1586 1.1 pooka pg = pgs[i];
1587 1.1 pooka if ((pg->flags & PG_FAKE) == 0) {
1588 1.1 pooka continue;
1589 1.1 pooka }
1590 1.1 pooka iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
1591 1.1 pooka iov.iov_len = PAGE_SIZE;
1592 1.1 pooka uio.uio_iov = &iov;
1593 1.1 pooka uio.uio_iovcnt = 1;
1594 1.1 pooka uio.uio_offset = origoffset + (i << PAGE_SHIFT);
1595 1.1 pooka uio.uio_rw = UIO_READ;
1596 1.1 pooka uio.uio_resid = PAGE_SIZE;
1597 1.1 pooka UIO_SETUP_SYSSPACE(&uio);
1598 1.1 pooka /* XXX vn_lock */
1599 1.1 pooka error = VOP_READ(vp, &uio, 0, cred);
1600 1.1 pooka if (error) {
1601 1.1 pooka break;
1602 1.1 pooka }
1603 1.1 pooka if (uio.uio_resid) {
1604 1.1 pooka memset(iov.iov_base, 0, uio.uio_resid);
1605 1.1 pooka }
1606 1.1 pooka }
1607 1.1 pooka uvm_pagermapout(kva, npages);
1608 1.49 rmind mutex_enter(uobj->vmobjlock);
1609 1.2 ad mutex_enter(&uvm_pageqlock);
1610 1.1 pooka for (i = 0; i < npages; i++) {
1611 1.1 pooka pg = pgs[i];
1612 1.1 pooka if (error && (pg->flags & PG_FAKE) != 0) {
1613 1.1 pooka pg->flags |= PG_RELEASED;
1614 1.1 pooka } else {
1615 1.53.2.1 yamt uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
1616 1.1 pooka uvm_pageactivate(pg);
1617 1.1 pooka }
1618 1.1 pooka }
1619 1.1 pooka if (error) {
1620 1.1 pooka uvm_page_unbusy(pgs, npages);
1621 1.1 pooka }
1622 1.2 ad mutex_exit(&uvm_pageqlock);
1623 1.38 chs if (error == 0 && memwrite) {
1624 1.38 chs genfs_markdirty(vp);
1625 1.38 chs }
1626 1.49 rmind mutex_exit(uobj->vmobjlock);
1627 1.38 chs return error;
1628 1.1 pooka }
1629 1.1 pooka
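/*
 * Compatibility gop_write: emulate a pageout with VOP_WRITE, then
 * fake up a struct buf carrying the error so that uvm_aio_aiodone()
 * can clean up and unbusy the pages as usual.
 */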
1630 1.1 pooka int
1631 1.1 pooka genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
1632 1.1 pooka int flags)
1633 1.1 pooka {
1634 1.1 pooka off_t offset;
1635 1.1 pooka struct iovec iov;
1636 1.1 pooka struct uio uio;
1637 1.1 pooka kauth_cred_t cred = curlwp->l_cred;
1638 1.1 pooka struct buf *bp;
1639 1.1 pooka vaddr_t kva;
1640 1.2 ad int error;
1641 1.1 pooka
1642 1.1 pooka offset = pgs[0]->offset;
1643 1.1 pooka kva = uvm_pagermapin(pgs, npages,
1644 1.1 pooka UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
1645 1.1 pooka
1646 1.1 pooka iov.iov_base = (void *)kva;
1647 1.1 pooka iov.iov_len = npages << PAGE_SHIFT;
1648 1.1 pooka uio.uio_iov = &iov;
1649 1.1 pooka uio.uio_iovcnt = 1;
1650 1.1 pooka uio.uio_offset = offset;
1651 1.1 pooka uio.uio_rw = UIO_WRITE;
1652 1.1 pooka uio.uio_resid = npages << PAGE_SHIFT;
1653 1.1 pooka UIO_SETUP_SYSSPACE(&uio);
1654 1.1 pooka /* XXX vn_lock */
1655 1.1 pooka error = VOP_WRITE(vp, &uio, 0, cred);
1656 1.1 pooka
1657 1.49 rmind mutex_enter(vp->v_interlock);
1658 1.2 ad vp->v_numoutput++;
1659 1.49 rmind mutex_exit(vp->v_interlock);
1660 1.1 pooka
1661 1.2 ad bp = getiobuf(vp, true);
1662 1.2 ad bp->b_cflags = BC_BUSY | BC_AGE;
1663 1.1 pooka bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
1664 1.1 pooka bp->b_data = (char *)kva;
1665 1.1 pooka bp->b_bcount = npages << PAGE_SHIFT;
1666 1.1 pooka bp->b_bufsize = npages << PAGE_SHIFT;
1667 1.1 pooka bp->b_resid = 0;
1668 1.1 pooka bp->b_error = error;
1669 1.1 pooka uvm_aio_aiodone(bp);
1670 1.1 pooka return (error);
1671 1.1 pooka }
1672 1.1 pooka
 1673 1.1 pooka /*
 1674 1.1 pooka  * Process a uio using direct I/O.  If we reach a part of the request
 1675 1.1 pooka  * which cannot be processed in this fashion (misaligned, past EOF, or
 1676 1.1 pooka  * on error), just return.  The caller must handle the remainder of the
 1677 1.1 pooka  * request using buffered I/O before trying direct I/O again.
 1678 1.1 pooka  */
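/*
 * Hedged usage sketch (hypothetical, loosely modeled on the ufs read
 * path; not part of this file): a filesystem tries direct I/O first
 * and lets its normal buffered loop consume whatever is left, since
 * this routine advances the uio past the part it completed:
 *
 *	if ((ioflag & IO_DIRECT) != 0)
 *		genfs_directio(vp, uio, ioflag);
 *	while (uio->uio_resid > 0) {
 *		... buffered path, e.g. via ubc_uiomove() ...
 *	}
 */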
1679 1.1 pooka
1680 1.1 pooka void
1681 1.1 pooka genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
1682 1.1 pooka {
1683 1.1 pooka struct vmspace *vs;
1684 1.1 pooka struct iovec *iov;
1685 1.1 pooka vaddr_t va;
1686 1.1 pooka size_t len;
1687 1.1 pooka const int mask = DEV_BSIZE - 1;
1688 1.1 pooka int error;
1689 1.16 joerg bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
1690 1.16 joerg (ioflag & IO_JOURNALLOCKED) == 0);
1691 1.1 pooka
1692 1.1 pooka /*
1693 1.1 pooka * We only support direct I/O to user space for now.
1694 1.1 pooka */
1695 1.1 pooka
1696 1.1 pooka if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
1697 1.1 pooka return;
1698 1.1 pooka }
1699 1.1 pooka
1700 1.1 pooka /*
1701 1.1 pooka * If the vnode is mapped, we would need to get the getpages lock
1702 1.53 yamt * to stabilize the bmap, but then we would get into trouble while
1703 1.1 pooka * locking the pages if the pages belong to this same vnode (or a
1704 1.1 pooka * multi-vnode cascade to the same effect). Just fall back to
1705 1.1 pooka * buffered I/O if the vnode is mapped to avoid this mess.
1706 1.1 pooka */
1707 1.1 pooka
1708 1.1 pooka if (vp->v_vflag & VV_MAPPED) {
1709 1.1 pooka return;
1710 1.1 pooka }
1711 1.1 pooka
1712 1.16 joerg if (need_wapbl) {
1713 1.13 hannken error = WAPBL_BEGIN(vp->v_mount);
1714 1.13 hannken if (error)
1715 1.13 hannken return;
1716 1.13 hannken }
1717 1.13 hannken
1718 1.1 pooka /*
1719 1.1 pooka * Do as much of the uio as possible with direct I/O.
1720 1.1 pooka */
1721 1.1 pooka
1722 1.1 pooka vs = uio->uio_vmspace;
1723 1.1 pooka while (uio->uio_resid) {
1724 1.1 pooka iov = uio->uio_iov;
1725 1.1 pooka if (iov->iov_len == 0) {
1726 1.1 pooka uio->uio_iov++;
1727 1.1 pooka uio->uio_iovcnt--;
1728 1.1 pooka continue;
1729 1.1 pooka }
1730 1.1 pooka va = (vaddr_t)iov->iov_base;
1731 1.1 pooka len = MIN(iov->iov_len, genfs_maxdio);
1732 1.1 pooka len &= ~mask;
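		/*
		 * e.g. with DEV_BSIZE == 512 (mask == 511), a 70000 byte
		 * iovec is trimmed to 69632 bytes (136 sectors); the
		 * remaining 368 bytes are left for buffered i/o.
		 */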
1733 1.1 pooka
1734 1.1 pooka /*
1735 1.1 pooka * If the next chunk is smaller than DEV_BSIZE or extends past
1736 1.1 pooka * the current EOF, then fall back to buffered I/O.
1737 1.1 pooka */
1738 1.1 pooka
1739 1.1 pooka if (len == 0 || uio->uio_offset + len > vp->v_size) {
1740 1.13 hannken break;
1741 1.1 pooka }
1742 1.1 pooka
1743 1.1 pooka /*
1744 1.1 pooka * Check alignment. The file offset must be at least
1745 1.1 pooka * sector-aligned. The exact constraint on memory alignment
1746 1.1 pooka * is very hardware-dependent, but requiring sector-aligned
1747 1.1 pooka * addresses there too is safe.
1748 1.1 pooka */
1749 1.1 pooka
1750 1.1 pooka if (uio->uio_offset & mask || va & mask) {
1751 1.13 hannken break;
1752 1.1 pooka }
1753 1.1 pooka error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
1754 1.1 pooka uio->uio_rw);
1755 1.1 pooka if (error) {
1756 1.1 pooka break;
1757 1.1 pooka }
1758 1.1 pooka iov->iov_base = (char *)iov->iov_base + len;
1759 1.1 pooka iov->iov_len -= len;
1760 1.1 pooka uio->uio_offset += len;
1761 1.1 pooka uio->uio_resid -= len;
1762 1.1 pooka }
1763 1.13 hannken
1764 1.16 joerg if (need_wapbl)
1765 1.13 hannken WAPBL_END(vp->v_mount);
1766 1.1 pooka }
1767 1.1 pooka
1768 1.1 pooka /*
1769 1.1 pooka * Iodone routine for direct I/O. We don't do much here since the request is
1770 1.1 pooka * always synchronous, so the caller will do most of the work after biowait().
1771 1.1 pooka */
1772 1.1 pooka
1773 1.1 pooka static void
1774 1.1 pooka genfs_dio_iodone(struct buf *bp)
1775 1.1 pooka {
1776 1.1 pooka
1777 1.1 pooka KASSERT((bp->b_flags & B_ASYNC) == 0);
1778 1.2 ad if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
1779 1.2 ad mutex_enter(bp->b_objlock);
1780 1.1 pooka vwakeup(bp);
1781 1.2 ad mutex_exit(bp->b_objlock);
1782 1.1 pooka }
1783 1.1 pooka putiobuf(bp);
1784 1.1 pooka }
1785 1.1 pooka
1786 1.1 pooka /*
1787 1.1 pooka * Process one chunk of a direct I/O request.
1788 1.1 pooka */
1789 1.1 pooka
1790 1.1 pooka static int
1791 1.1 pooka genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
1792 1.1 pooka off_t off, enum uio_rw rw)
1793 1.1 pooka {
1794 1.1 pooka struct vm_map *map;
1795 1.1 pooka struct pmap *upm, *kpm;
1796 1.1 pooka size_t klen = round_page(uva + len) - trunc_page(uva);
1797 1.1 pooka off_t spoff, epoff;
1798 1.1 pooka vaddr_t kva, puva;
1799 1.1 pooka paddr_t pa;
1800 1.1 pooka vm_prot_t prot;
1801 1.1 pooka int error, rv, poff, koff;
1802 1.13 hannken const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
1803 1.1 pooka (rw == UIO_WRITE ? PGO_FREE : 0);
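	/*
	 * klen covers the user buffer rounded out to page boundaries.
	 * hypothetical example with 4KB pages: uva == 0x12345 and
	 * len == 0x3000 give trunc_page(uva) == 0x12000 and
	 * round_page(uva + len) == 0x16000, so klen == 0x4000 (4 pages)
	 * and the i/o starts 0x345 bytes in (the koff computed below).
	 */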
1804 1.1 pooka
1805 1.1 pooka /*
1806 1.1 pooka * For writes, verify that this range of the file already has fully
1807 1.1 pooka * allocated backing store. If there are any holes, just punt and
1808 1.1 pooka * make the caller take the buffered write path.
1809 1.1 pooka */
1810 1.1 pooka
1811 1.1 pooka if (rw == UIO_WRITE) {
1812 1.1 pooka daddr_t lbn, elbn, blkno;
1813 1.1 pooka int bsize, bshift, run;
1814 1.1 pooka
1815 1.1 pooka bshift = vp->v_mount->mnt_fs_bshift;
1816 1.1 pooka bsize = 1 << bshift;
1817 1.1 pooka lbn = off >> bshift;
1818 1.1 pooka elbn = (off + len + bsize - 1) >> bshift;
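		/*
		 * e.g. with hypothetical numbers bshift == 13 (8KB
		 * blocks), off == 4096 and len == 32768: lbn == 0 and
		 * elbn == 5, so fs blocks 0..4 must all be allocated.
		 */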
1819 1.1 pooka while (lbn < elbn) {
1820 1.1 pooka error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
1821 1.1 pooka if (error) {
1822 1.1 pooka return error;
1823 1.1 pooka }
1824 1.1 pooka if (blkno == (daddr_t)-1) {
1825 1.1 pooka return ENOSPC;
1826 1.1 pooka }
1827 1.1 pooka lbn += 1 + run;
1828 1.1 pooka }
1829 1.1 pooka }
1830 1.1 pooka
1831 1.1 pooka /*
1832 1.1 pooka * Flush any cached pages for parts of the file that we're about to
1833 1.1 pooka * access. If we're writing, invalidate pages as well.
1834 1.1 pooka */
1835 1.1 pooka
1836 1.1 pooka spoff = trunc_page(off);
1837 1.1 pooka epoff = round_page(off + len);
1838 1.49 rmind mutex_enter(vp->v_interlock);
1839 1.1 pooka error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
1840 1.1 pooka if (error) {
1841 1.1 pooka return error;
1842 1.1 pooka }
1843 1.1 pooka
1844 1.1 pooka /*
1845 1.1 pooka * Wire the user pages and remap them into kernel memory.
1846 1.1 pooka */
1847 1.1 pooka
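	/*
	 * note the inversion: a file read stores into the user buffer,
	 * so the user pages must be writable; a file write only reads
	 * from them.
	 */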
1848 1.1 pooka prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
1849 1.1 pooka error = uvm_vslock(vs, (void *)uva, len, prot);
1850 1.1 pooka if (error) {
1851 1.1 pooka return error;
1852 1.1 pooka }
1853 1.1 pooka
1854 1.1 pooka map = &vs->vm_map;
1855 1.1 pooka upm = vm_map_pmap(map);
1856 1.1 pooka kpm = vm_map_pmap(kernel_map);
1857 1.1 pooka puva = trunc_page(uva);
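	/*
	 * match the virtual color of the user mapping when allocating
	 * kva so the alias mapping cannot conflict in a virtually
	 * indexed cache.
	 */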
1858 1.51 matt kva = uvm_km_alloc(kernel_map, klen, atop(puva) & uvmexp.colormask,
1859 1.51 matt UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
1860 1.1 pooka for (poff = 0; poff < klen; poff += PAGE_SIZE) {
1861 1.1 pooka rv = pmap_extract(upm, puva + poff, &pa);
1862 1.1 pooka KASSERT(rv);
1863 1.51 matt pmap_kenter_pa(kva + poff, pa, prot, PMAP_WIRED);
1864 1.1 pooka }
1865 1.1 pooka pmap_update(kpm);
1866 1.1 pooka
1867 1.1 pooka /*
1868 1.1 pooka * Do the I/O.
1869 1.1 pooka */
1870 1.1 pooka
1871 1.1 pooka koff = uva - trunc_page(uva);
1872 1.1 pooka error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
1873 1.1 pooka genfs_dio_iodone);
1874 1.1 pooka
1875 1.1 pooka /*
1876 1.1 pooka * Tear down the kernel mapping.
1877 1.1 pooka */
1878 1.1 pooka
1879 1.51 matt pmap_kremove(kva, klen);
1880 1.1 pooka pmap_update(kpm);
1881 1.1 pooka uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
1882 1.1 pooka
1883 1.1 pooka /*
1884 1.1 pooka * Unwire the user pages.
1885 1.1 pooka */
1886 1.1 pooka
1887 1.1 pooka uvm_vsunlock(vs, (void *)uva, len);
1888 1.1 pooka return error;
1889 1.1 pooka }
1890