/*	$NetBSD: genfs_io.c,v 1.53.2.16 2012/08/01 22:34:15 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.53.2.16 2012/08/01 22:34:15 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/kmem.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>
#include <sys/buf.h>
#include <sys/radixtree.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/syncfs/syncfs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>
#include <uvm/uvm_page_array.h>

static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
    off_t, enum uio_rw);
static void genfs_dio_iodone(struct buf *);

static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
    void (*)(struct buf *));
static void genfs_rel_pages(struct vm_page **, unsigned int);
static void genfs_markdirty(struct vnode *);

int genfs_maxdio = MAXPHYS;

static void
genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
{
	unsigned int i;

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE)
			continue;
		KASSERT(uvm_page_locked_p(pg));
		if (pg->flags & PG_FAKE) {
			pg->flags |= PG_RELEASED;
		}
	}
	mutex_enter(&uvm_pageqlock);
	uvm_page_unbusy(pgs, npages);
	mutex_exit(&uvm_pageqlock);
}

static void
genfs_markdirty(struct vnode *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));
	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
		vp->v_iflag |= VI_WRMAPDIRTY;
	}
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */

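/*
 * A minimal usage sketch (not part of the original source): how a
 * caller typically requests one page through this interface.
 * "example_vp" and "example_off" are hypothetical.  VOP_GETPAGES is
 * entered with the object lock (vmobjlock) held and, for a
 * non-PGO_LOCKED request like this one, returns with it released;
 * error handling is elided.
 */
#if 0
	struct vm_page *pg = NULL;
	int npages = 1;
	int error;

	mutex_enter(example_vp->v_uobj.vmobjlock);
	error = VOP_GETPAGES(example_vp, trunc_page(example_off), &pg,
	    &npages, 0 /* centeridx */, VM_PROT_READ, 0 /* advice */,
	    PGO_SYNCIO);
#endif
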
int
genfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ * const ap = v;

	off_t diskeof, memeof;
	int i, error, npages;
	const int flags = ap->a_flags;
	struct vnode * const vp = ap->a_vp;
	struct uvm_object * const uobj = &vp->v_uobj;
	kauth_cred_t const cred = curlwp->l_cred;	/* XXXUBC curlwp */
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
	const bool glocked = (flags & PGO_GLOCKHELD) != 0;
	const bool need_wapbl = blockalloc && vp->v_mount->mnt_wapbl;
	bool has_trans_wapbl = false;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
	    vp->v_type == VLNK || vp->v_type == VBLK);

startover:
	error = 0;
	const voff_t origvsize = vp->v_size;
	const off_t origoffset = ap->a_offset;
	const int orignpages = *ap->a_count;

	GOP_SIZE(vp, origvsize, &diskeof, 0);
	if (flags & PGO_PASTEOF) {
		off_t newsize;
#if defined(DIAGNOSTIC)
		off_t writeeof;
#endif /* defined(DIAGNOSTIC) */

		newsize = MAX(origvsize,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
#if defined(DIAGNOSTIC)
		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
		if (newsize > round_page(writeeof)) {
			panic("%s: past eof: %" PRId64 " vs. %" PRId64,
			    __func__, newsize, round_page(writeeof));
		}
#endif /* defined(DIAGNOSTIC) */
	} else {
		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			mutex_exit(uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
		    origoffset, *ap->a_count, memeof,0);
		error = EINVAL;
		goto out_err;
	}

	/* uobj is locked */

	if ((flags & PGO_NOTIMESTAMP) == 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		int updflags = 0;

		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
			updflags = GOP_UPDATE_ACCESSED;
		}
		if (memwrite) {
			updflags |= GOP_UPDATE_MODIFIED;
		}
		if (updflags != 0) {
			GOP_MARKUPDATE(vp, updflags);
		}
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		int nfound;
		struct vm_page *pg;

		KASSERT(!glocked);
		npages = *ap->a_count;
#if defined(DEBUG)
		for (i = 0; i < npages; i++) {
			pg = ap->a_m[i];
			KASSERT(pg == NULL || pg == PGO_DONTCARE);
		}
#endif /* defined(DEBUG) */
		nfound = uvn_findpages(uobj, origoffset, &npages,
		    ap->a_m, NULL,
		    UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
		KASSERT(npages == *ap->a_count);
		if (nfound == 0) {
			error = EBUSY;
			goto out_err;
		}
		/*
		 * lock and unlock g_glock to ensure that no one is truncating
		 * the file behind us.
		 */
		if (!genfs_node_rdtrylock(vp)) {
			genfs_rel_pages(ap->a_m, npages);

			/*
			 * restore the array.
			 */

			for (i = 0; i < npages; i++) {
				pg = ap->a_m[i];

				if (pg != NULL && pg != PGO_DONTCARE) {
					ap->a_m[i] = NULL;
				}
				KASSERT(ap->a_m[i] == NULL ||
				    ap->a_m[i] == PGO_DONTCARE);
			}
		} else {
			genfs_node_unlock(vp);
		}
		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
		if (error == 0 && memwrite) {
			for (i = 0; i < npages; i++) {
				pg = ap->a_m[i];
				if (pg == NULL || pg == PGO_DONTCARE) {
					continue;
				}
				if (uvm_pagegetdirty(pg) ==
				    UVM_PAGE_STATUS_CLEAN) {
					uvm_pagemarkdirty(pg,
					    UVM_PAGE_STATUS_UNKNOWN);
				}
			}
			genfs_markdirty(vp);
		}
		goto out_err;
	}
	mutex_exit(uobj->vmobjlock);

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	const int fs_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
	const int dev_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
	const int fs_bsize = 1 << fs_bshift;
#define	blk_mask	(fs_bsize - 1)
#define	trunc_blk(x)	((x) & ~blk_mask)
#define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)
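
	/*
	 * Worked example (not in the original source): with 8 KB
	 * filesystem blocks, fs_bshift == 13, fs_bsize == 0x2000 and
	 * blk_mask == 0x1fff, so trunc_blk(0x2345) == 0x2000 and
	 * round_blk(0x2345) == 0x4000.
	 */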

	const int orignmempages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignmempages;
	const off_t startoffset = trunc_blk(origoffset);
	const off_t endoffset = MIN(
	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
	    round_page(memeof));
	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;

	const int pgs_size = sizeof(struct vm_page *) *
	    ((endoffset - startoffset) >> PAGE_SHIFT);
	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];

	if (pgs_size > sizeof(pgs_onstack)) {
		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
		if (pgs == NULL) {
			pgs = pgs_onstack;
			error = ENOMEM;
			goto out_err;
		}
	} else {
		pgs = pgs_onstack;
		(void)memset(pgs, 0, pgs_size);
	}

	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
	    ridx, npages, startoffset, endoffset);

	if (!has_trans_wapbl) {
		fstrans_start(vp->v_mount, FSTRANS_SHARED);
		/*
		 * XXX: This assumes that we come here only via
		 * the mmio path
		 */
		if (need_wapbl) {
			error = WAPBL_BEGIN(vp->v_mount);
			if (error) {
				fstrans_done(vp->v_mount);
				goto out_err_free;
			}
		}
		has_trans_wapbl = true;
	}

	/*
	 * hold g_glock to prevent a race with truncate.
	 *
	 * check if our idea of v_size is still valid.
	 */

	KASSERT(!glocked || genfs_node_wrlocked(vp));
	if (!glocked) {
		if (blockalloc) {
			genfs_node_wrlock(vp);
		} else {
			genfs_node_rdlock(vp);
		}
	}
	mutex_enter(uobj->vmobjlock);
	if (vp->v_size < origvsize) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		if (pgs != pgs_onstack)
			kmem_free(pgs, pgs_size);
		goto startover;
	}

	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx], NULL,
	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		KASSERT(async != 0);
		genfs_rel_pages(&pgs[ridx], orignmempages);
		mutex_exit(uobj->vmobjlock);
		error = EBUSY;
		goto out_err_free;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (overwrite) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			/*
			 * it's caller's responsibility to allocate blocks
			 * beforehand for the overwrite case.
			 */
			pg->flags &= ~PG_RDONLY;
			/*
			 * mark the page DIRTY.
			 * otherwise another thread can do putpages and pull
			 * our vnode from syncer's queue before our caller does
			 * ubc_release.  note that putpages won't see CLEAN
			 * pages even if they are BUSY.
			 */
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[ridx + i];

		if ((pg->flags & PG_FAKE) ||
		    (memwrite && (pg->flags & PG_RDONLY) != 0)) {
			break;
		}
	}
	if (i == npages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		npages += ridx;
		goto out;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignmempages) {
		int npgs;

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		genfs_rel_pages(&pgs[ridx], orignmempages);
		memset(pgs, 0, pgs_size);

		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		if (uvn_findpages(uobj, startoffset, &npgs, pgs, NULL,
		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
			if (!glocked) {
				genfs_node_unlock(vp);
			}
			KASSERT(async != 0);
			genfs_rel_pages(pgs, npages);
			mutex_exit(uobj->vmobjlock);
			error = EBUSY;
			goto out_err_free;
		}
	}

	mutex_exit(uobj->vmobjlock);

    {
	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	bool sawhole = false;

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | (async ? 0 : UVMPAGER_MAPIN_WAITOK));
	if (kva == 0) {
		error = EBUSY;
		goto mapin_fail;
	}

	mbp = getiobuf(vp, true);
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_cflags = BC_BUSY;
	if (async) {
		mbp->b_flags = B_READ | B_ASYNC;
		mbp->b_iodone = uvm_aio_biodone;
	} else {
		mbp->b_flags = B_READ;
		mbp->b_iodone = NULL;
	}
	if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * skip over pages which are not PG_FAKE since in that case they have
	 * valid data that we need to preserve.
	 */

	tailstart = bytes;
	while (tailbytes > 0) {
		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);

		KASSERT(len <= tailbytes);
		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
			memset((void *)(kva + tailstart), 0, len);
			UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
			    kva, tailstart, len, 0);
		}
		tailstart += len;
		tailbytes -= len;
	}
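
	/*
	 * Worked example (not in the original source): with 4 KB pages,
	 * if totalbytes == 0x2000 and bytes == 0x1800, then tailstart
	 * begins at 0x1800, the single iteration zeroes len == 0x800
	 * (finishing the second page), and the loop exits with
	 * tailbytes == 0.
	 */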

	/*
	 * now loop over the pages, reading as needed.
	 */

	bp = NULL;
	off_t offset;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		int run;
		daddr_t lbn, blkno;
		int pidx;
		struct vnode *devvp;

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			if ((pgs[pidx]->flags & PG_RDONLY)) {
				sawhole = true;
			}
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
			    lbn,error,0,0);
			skipbytes += bytes;
			bytes = 0;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			int pcount;

			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno == (daddr_t)-1) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%x -> RDONLY", lbn,0,0,0);

			sawhole = true;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			if (!blockalloc) {
				mutex_enter(uobj->vmobjlock);
				for (i = 0; i < holepages; i++) {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
				mutex_exit(uobj->vmobjlock);
			}
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
			    vp, bp, vp->v_numoutput, 0);
			bp = getiobuf(vp, true);
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);
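
		/*
		 * Worked example (not in the original source): with
		 * fs_bshift == 13 and dev_bshift == 9, an offset of
		 * 0x3200 lies 0x1200 bytes into lbn 1, so b_blkno is
		 * blkno + (0x1200 >> 9) == blkno + 9 sectors.
		 */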

		UVMHIST_LOG(ubchist,
		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    bp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		error = 0;
		goto out_err_free;
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}

	/* Remove the mapping (make KVA available as soon as possible) */
	uvm_pagermapout(kva, npages);

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * if blockalloc is false, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * if blockalloc is true, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && blockalloc) {
		error = GOP_ALLOC(vp, startoffset,
		    npages << PAGE_SHIFT, 0, cred);
		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
		    startoffset, npages << PAGE_SHIFT, error,0);
		if (!error) {
			mutex_enter(uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				struct vm_page *pg = pgs[i];

				if (pg == NULL) {
					continue;
				}
				pg->flags &= ~PG_RDONLY;
				uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
				UVMHIST_LOG(ubchist, "mark dirty pg %p",
				    pg,0,0,0);
			}
			mutex_exit(uobj->vmobjlock);
		}
	}

	putiobuf(mbp);
    }

mapin_fail:
	if (!glocked) {
		genfs_node_unlock(vp);
	}
	mutex_enter(uobj->vmobjlock);

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		genfs_rel_pages(pgs, npages);
		mutex_exit(uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
		goto out_err_free;
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
	error = 0;
	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
		    pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			/*
			 * we've read page's contents from the backing storage.
			 *
			 * for a read fault, we keep them CLEAN.
			 */
			KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
			pg->flags &= ~PG_FAKE;
		}
		KASSERT(!blockalloc || (pg->flags & PG_RDONLY) == 0);
		if (i < ridx || i >= ridx + orignmempages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
			    pg, pg->offset,0,0);
			KASSERT(!overwrite);
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_FAKE && overwrite) {
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pageenqueue(pg);
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
		} else if (memwrite && !overwrite &&
		    uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
			/*
			 * for a write fault, start dirtiness tracking of
			 * requested pages.
			 */
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
		}
	}
	mutex_exit(&uvm_pageqlock);
	if (memwrite) {
		genfs_markdirty(vp);
	}
	mutex_exit(uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignmempages * sizeof(struct vm_page *));
	}

out_err_free:
	if (pgs != NULL && pgs != pgs_onstack)
		kmem_free(pgs, pgs_size);
out_err:
	if (has_trans_wapbl) {
		if (need_wapbl)
			WAPBL_END(vp->v_mount);
		fstrans_done(vp->v_mount);
	}
	return error;
}

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.  we return with the
 *      object unlocked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *      thus, a caller might want to unlock higher level resources
 *      (e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 * => NOTE: we are allowed to lock the page queues, so the caller
 *      must not be holding the page queue lock.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *      this routine is holding the lock on the object.  the only time
 *      that it can run into a PG_BUSY page that it does not own is if
 *      some other process has started I/O on the page (e.g. either
 *      a pagein, or a pageout).  if the PG_BUSY page is being paged
 *      in, then it can not be dirty (!UVM_PAGE_STATUS_CLEAN) because no
 *      one has had a chance to modify it yet.  if the PG_BUSY page is
 *      being paged out then it means that someone else has already started
 *      cleaning the page for us (how nice!).  in this case, if we
 *      have syncio specified, then after we make our pass through the
 *      object we need to wait for the other PG_BUSY pages to clear
 *      off (i.e. we need to do an iosync).  also note that once a
 *      page is PG_BUSY it must stay in its object until it is un-busyed.
 */

int
genfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ * const ap = v;

	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
	    ap->a_flags, NULL);
}

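/*
 * A minimal usage sketch (not part of the original source): a
 * filesystem's fsync path typically flushes a vnode's pages like this
 * before syncing its metadata.  "example_vp" is hypothetical and error
 * handling is elided; VOP_PUTPAGES is entered with the object lock held
 * and returns with it released.
 */
#if 0
	mutex_enter(example_vp->v_uobj.vmobjlock);
	error = VOP_PUTPAGES(example_vp, 0, 0,
	    PGO_ALLPAGES | PGO_CLEANIT | PGO_SYNCIO);
#endif
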
int
genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
    int origflags, struct vm_page **busypg)
{
	struct uvm_object * const uobj = &vp->v_uobj;
	kmutex_t * const slock = uobj->vmobjlock;
	off_t nextoff;
	/* Even for strange MAXPHYS, the shift rounds down to a page */
#define maxpages (MAXPHYS >> PAGE_SHIFT)
	unsigned int i;
	unsigned int npages, nback;
	unsigned int freeflag;
	int error;
	struct vm_page *pgs[maxpages], *pg;
	struct uvm_page_array a;
	bool wasclean, needs_clean;
	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
	struct lwp * const l = curlwp ? curlwp : &lwp0;
	int flags;
	bool written;		/* if we write out any pages */
	bool need_wapbl;
	bool has_trans;
	bool tryclean;		/* try to pull off from the syncer's list */
	bool onworklst;
	const bool integrity_sync =
	    (origflags & (PGO_LAZY|PGO_SYNCIO)) == PGO_SYNCIO;
	const bool dirtyonly = (origflags & (PGO_DEACTIVATE|PGO_FREE)) == 0;

	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
	KASSERT(startoff < endoff || endoff == 0);

	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
	    vp, uobj->uo_npages, startoff, endoff - startoff);

	has_trans = false;
	need_wapbl = (!pagedaemon && vp->v_mount && vp->v_mount->mnt_wapbl &&
	    (origflags & PGO_JOURNALLOCKED) == 0);

retry:
	flags = origflags;
	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);

	/*
	 * shortcut if we have no pages to process.
	 */

	if (uobj->uo_npages == 0 || (dirtyonly &&
	    radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
	    UVM_PAGE_DIRTY_TAG))) {
		if (vp->v_iflag & VI_ONWORKLST) {
			vp->v_iflag &= ~VI_WRMAPDIRTY;
			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
				vn_syncer_remove_from_worklist(vp);
		}
		if (has_trans) {
			if (need_wapbl)
				WAPBL_END(vp->v_mount);
			fstrans_done(vp->v_mount);
		}
		mutex_exit(slock);
		return (0);
	}

	/*
	 * the vnode has pages, set up to process the request.
	 */

	if (!has_trans && (flags & PGO_CLEANIT) != 0) {
		mutex_exit(slock);
		if (pagedaemon) {
			error = fstrans_start_nowait(vp->v_mount, FSTRANS_LAZY);
			if (error)
				return error;
		} else
			fstrans_start(vp->v_mount, FSTRANS_LAZY);
		if (need_wapbl) {
			error = WAPBL_BEGIN(vp->v_mount);
			if (error) {
				fstrans_done(vp->v_mount);
				return error;
			}
		}
		has_trans = true;
		mutex_enter(slock);
		goto retry;
	}

	error = 0;
	wasclean = (vp->v_numoutput == 0);

	/*
	 * if this vnode is known not to have dirty pages,
	 * don't bother to clean it out.
	 */

	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
#if !defined(DEBUG)
		if (dirtyonly) {
			goto skip_scan;
		}
#endif /* !defined(DEBUG) */
		flags &= ~PGO_CLEANIT;
	}

	/*
	 * start the loop to scan pages.
	 */

	written = false;
	nextoff = startoff;
	if (endoff == 0 || flags & PGO_ALLPAGES) {
		endoff = trunc_page(LLONG_MAX);
	}
	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
	tryclean = true;
	uvm_page_array_init(&a);
	for (;;) {
		bool protected;

		/*
		 * if we are asked to sync for integrity, we should wait on
		 * pages being written back by other threads as well.
		 */

		pg = uvm_page_array_fill_and_peek(&a, uobj, nextoff, 0,
		    dirtyonly ? (UVM_PAGE_ARRAY_FILL_DIRTY |
		    (integrity_sync ? UVM_PAGE_ARRAY_FILL_WRITEBACK : 0)) : 0);
		if (pg == NULL) {
			break;
		}

		KASSERT(pg->uobject == uobj);
		KASSERT((pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		    (pg->flags & (PG_BUSY)) != 0);
		KASSERT(pg->offset >= startoff);
		KASSERT(pg->offset >= nextoff);
		KASSERT(!dirtyonly ||
		    uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN ||
		    radix_tree_get_tag(&uobj->uo_pages,
		    pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG));
		if (pg->offset >= endoff) {
			break;
		}

		/*
		 * a preempt point.
		 */

		if ((l->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
		    != 0) {
			nextoff = pg->offset; /* visit this page again */
			mutex_exit(slock);
			preempt();
			/*
			 * as we dropped the object lock, our cached pages can
			 * be stale.
			 */
			uvm_page_array_clear(&a);
			mutex_enter(slock);
			continue;
		}

		/*
		 * if the current page is busy, wait for it to become unbusy.
		 */

		if ((pg->flags & PG_BUSY) != 0) {
			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
			if ((pg->flags & (PG_RELEASED|PG_PAGEOUT)) != 0
			    && (flags & PGO_BUSYFAIL) != 0) {
				UVMHIST_LOG(ubchist, "busyfail %p", pg,
				    0,0,0);
				error = EDEADLK;
				if (busypg != NULL)
					*busypg = pg;
				break;
			}
			if (pagedaemon) {
				/*
				 * someone has taken the page while we
				 * dropped the lock for fstrans_start.
				 */
				break;
			}
			/*
			 * don't bother to wait on other's activities
			 * unless we are asked to sync for integrity.
			 */
			if (!integrity_sync) {
				wasclean = false;
				nextoff = pg->offset + PAGE_SIZE;
				uvm_page_array_advance(&a);
				continue;
			}
			nextoff = pg->offset; /* visit this page again */
			pg->flags |= PG_WANTED;
			UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
			/*
			 * as we dropped the object lock, our cached pages can
			 * be stale.
			 */
			uvm_page_array_clear(&a);
			mutex_enter(slock);
			continue;
		}

		nextoff = pg->offset + PAGE_SIZE;
		uvm_page_array_advance(&a);

		/*
		 * if we're freeing, remove all mappings of the page now.
		 * if we're cleaning, check if the page needs to be cleaned.
		 */

		protected = false;
		if (flags & PGO_FREE) {
			pmap_page_protect(pg, VM_PROT_NONE);
			protected = true;
		} else if (flags & PGO_CLEANIT) {

			/*
			 * if we still have some hope to pull this vnode off
			 * from the syncer queue, write-protect the page.
			 */

			if (tryclean && wasclean) {

				/*
				 * uobj pages get wired only by uvm_fault
				 * where uobj is locked.
				 */

				if (pg->wire_count == 0) {
					pmap_page_protect(pg,
					    VM_PROT_READ|VM_PROT_EXECUTE);
					protected = true;
				} else {
					/*
					 * give up.
					 */
					tryclean = false;
				}
			}
		}

		if (flags & PGO_CLEANIT) {
			needs_clean = uvm_pagecheckdirty(pg, protected);
		} else {
			needs_clean = false;
		}

		/*
		 * if we're cleaning, build a cluster.
		 * the cluster will consist of pages which are currently dirty.
		 * if not cleaning, just operate on the one page.
		 */

		if (needs_clean) {
			unsigned int nforw;
			unsigned int fpflags;

			KDASSERT((vp->v_iflag & VI_ONWORKLST));
			wasclean = false;
			memset(pgs, 0, sizeof(pgs));
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "genfs_putpages");

			fpflags = UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY;

			/*
			 * XXX PG_PAGER1 incompatibility check.
			 *
			 * this is a kludge for nfs.  nfs has two kinds of
			 * dirty pages:
			 *	- not written to the server yet
			 *	- written to the server but not committed yet
			 * the latter is marked as PG_NEEDCOMMIT. (== PG_PAGER1)
			 * nfs doesn't want them being clustered together.
			 *
			 * probably it's better to make PG_NEEDCOMMIT a
			 * first-class citizen for uvm/genfs.
			 */
			if ((pg->flags & PG_PAGER1) != 0) {
				fpflags |= UFP_ONLYPAGER1;
			} else {
				fpflags |= UFP_NOPAGER1;
			}

			/*
			 * first look backward.
			 *
			 * because we always scan pages in ascending order,
			 * the backward scan can be useful only for the first
			 * page in the range.
			 */
			if (startoff == pg->offset) {
				npages = MIN(maxpages >> 1,
				    pg->offset >> PAGE_SHIFT);
				nback = npages;
				uvn_findpages(uobj, pg->offset - PAGE_SIZE,
				    &nback, &pgs[0], NULL,
				    fpflags | UFP_BACKWARD);
				if (nback) {
					memmove(&pgs[0], &pgs[npages - nback],
					    nback * sizeof(pgs[0]));
					if (npages - nback < nback)
						memset(&pgs[nback], 0,
						    (npages - nback) *
						    sizeof(pgs[0]));
					else
						memset(&pgs[npages - nback], 0,
						    nback * sizeof(pgs[0]));
				}
			} else {
				nback = 0;
			}

			/*
			 * then plug in our page of interest.
			 */

			pgs[nback] = pg;

			/*
			 * then look forward to fill in the remaining space in
			 * the array of pages.
			 *
			 * pass our cached array of pages so that hopefully
			 * uvn_findpages can find some good pages in it.
			 */

			nforw = maxpages - nback - 1;
			uvn_findpages(uobj, pg->offset + PAGE_SIZE,
			    &nforw, &pgs[nback + 1], &a, fpflags);
			npages = nback + 1 + nforw;
		} else {
			pgs[0] = pg;
			npages = 1;
			nback = 0;
		}

		/*
		 * apply FREE or DEACTIVATE options if requested.
		 */

		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			mutex_enter(&uvm_pageqlock);
		}
		for (i = 0; i < npages; i++) {
			struct vm_page *tpg = pgs[i];

			KASSERT(tpg->uobject == uobj);
			KASSERT(i == 0 ||
			    pgs[i-1]->offset + PAGE_SIZE == tpg->offset);
			KASSERT(!needs_clean || uvm_pagegetdirty(pgs[i]) !=
			    UVM_PAGE_STATUS_DIRTY);
			if (needs_clean) {
				/*
				 * mark pages as WRITEBACK so that concurrent
				 * fsync can find and wait for our activities.
				 */
				radix_tree_set_tag(&uobj->uo_pages,
				    pgs[i]->offset >> PAGE_SHIFT,
				    UVM_PAGE_WRITEBACK_TAG);
			}
			if (tpg->offset < startoff || tpg->offset >= endoff)
				continue;
			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
				uvm_pagedeactivate(tpg);
			} else if (flags & PGO_FREE) {
				pmap_page_protect(tpg, VM_PROT_NONE);
				if (tpg->flags & PG_BUSY) {
					tpg->flags |= freeflag;
					if (pagedaemon) {
						uvm_pageout_start(1);
						uvm_pagedequeue(tpg);
					}
				} else {

					/*
					 * ``page is not busy''
					 * implies that npages is 1
					 * and needs_clean is false.
					 */

					KASSERT(npages == 1);
					KASSERT(!needs_clean);
					KASSERT(pg == tpg);
					KASSERT(nextoff ==
					    tpg->offset + PAGE_SIZE);
					uvm_pagefree(tpg);
					if (pagedaemon)
						uvmexp.pdfreed++;
				}
			}
		}
		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			mutex_exit(&uvm_pageqlock);
		}
		if (needs_clean) {
			mutex_exit(slock);
			KASSERT(nextoff == pg->offset + PAGE_SIZE);
			KASSERT(nback < npages);
			nextoff = pg->offset + ((npages - nback) << PAGE_SHIFT);
			KASSERT(pgs[nback] == pg);
			KASSERT(nextoff == pgs[npages - 1]->offset + PAGE_SIZE);

			/*
			 * start the i/o.
			 */
			error = GOP_WRITE(vp, pgs, npages, flags);
			written = true;
			/*
			 * as we dropped the object lock, our cached pages can
			 * be stale.
			 */
			uvm_page_array_clear(&a);
			mutex_enter(slock);
			if (error) {
				break;
			}
		}
	}
	uvm_page_array_fini(&a);

	/*
	 * update ctime/mtime if the modification we started writing out might
	 * be from mmap'ed write.
	 *
	 * this is necessary when an application keeps a file mmaped and
	 * repeatedly modifies it via the window.  note that, because we
	 * don't always write-protect pages when cleaning, such modifications
	 * might not involve any page faults.
	 */

	if (written && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
	}

	/*
	 * if we no longer have any possibly dirty pages, take us off the
	 * syncer list.
	 */

	if ((vp->v_iflag & VI_ONWORKLST) != 0 &&
	    radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
	    UVM_PAGE_DIRTY_TAG)) {
		vp->v_iflag &= ~VI_WRMAPDIRTY;
		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
			vn_syncer_remove_from_worklist(vp);
	}

#if !defined(DEBUG)
skip_scan:
#endif /* !defined(DEBUG) */

	/*
	 * if we found or started any i/o and we're asked to sync for
	 * integrity, wait for all writes to finish.
	 */

	if (!wasclean && integrity_sync) {
		while (vp->v_numoutput != 0)
			cv_wait(&vp->v_cv, slock);
	}
	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
	mutex_exit(slock);

	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
		/*
		 * in the case of PGO_RECLAIM, ensure to make the vnode clean.
		 * retrying is not a big deal because, in many cases,
		 * uobj->uo_npages is already 0 here.
		 */
		mutex_enter(slock);
		goto retry;
	}

	if (has_trans) {
		if (need_wapbl)
			WAPBL_END(vp->v_mount);
		fstrans_done(vp->v_mount);
	}

	return (error);
}

int
genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	off_t off;
	vaddr_t kva;
	size_t len;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
	    vp, pgs, npages, flags);

	off = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
	len = npages << PAGE_SHIFT;

	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
	    uvm_aio_biodone);

	return error;
}

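/*
 * A minimal sketch (not part of the original source, modeled loosely
 * on how filesystems such as ffs wire these routines up): a filesystem
 * points its genfs_ops at genfs_gop_write and supplies its own block
 * allocator for GOP_ALLOC.  "example_gop_alloc" is hypothetical.
 */
#if 0
static const struct genfs_ops example_genfsops = {
	.gop_size = genfs_size,
	.gop_alloc = example_gop_alloc,		/* fs-specific allocator */
	.gop_write = genfs_gop_write,
	.gop_markupdate = genfs_gop_markupdate,
};
#endif
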
/*
 * genfs_gop_write_rwmap:
 *
 * a variant of genfs_gop_write.  it's used by UDF for its directory buffers.
 * this maps pages with PROT_WRITE so that VOP_STRATEGY can modify
 * the contents before writing them out to the underlying storage.
 */

int
genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	off_t off;
	vaddr_t kva;
	size_t len;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
	    vp, pgs, npages, flags);

	off = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	len = npages << PAGE_SHIFT;

	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
	    uvm_aio_biodone);

	return error;
}

/*
 * Backend routine for doing I/O to vnode pages.  Pages are already locked
 * and mapped into kernel memory.  Here we just look up the underlying
 * device block addresses and call the strategy routine.
 */

static int
genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
    enum uio_rw rw, void (*iodone)(struct buf *))
{
	int s, error;
	int fs_bshift, dev_bshift;
	off_t eof, offset, startoffset;
	size_t bytes, iobytes, skipbytes;
	struct buf *mbp, *bp;
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool lazy = (flags & PGO_LAZY) != 0;
	const bool iowrite = rw == UIO_WRITE;
	const int brw = iowrite ? B_WRITE : B_READ;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
	    vp, kva, len, flags);

	KASSERT(vp->v_size <= vp->v_writesize);
	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
	if (vp->v_type != VBLK) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	error = 0;
	startoffset = off;
	bytes = MIN(len, eof - startoffset);
	skipbytes = 0;
	KASSERT(bytes != 0);

	if (iowrite) {
		/*
		 * why += 2?
		 * 1 for biodone, 1 for uvm_aio_aiodone.
		 */
		mutex_enter(vp->v_interlock);
		vp->v_numoutput += 2;
		mutex_exit(vp->v_interlock);
	}
	mbp = getiobuf(vp, true);
	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
	    vp, mbp, vp->v_numoutput, bytes);
	mbp->b_bufsize = len;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_cflags = BC_BUSY | BC_AGE;
	if (async) {
		mbp->b_flags = brw | B_ASYNC;
		mbp->b_iodone = iodone;
	} else {
		mbp->b_flags = brw;
		mbp->b_iodone = NULL;
	}
	if (curlwp == uvm.pagedaemon_lwp)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else if (async || lazy)
		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		int run;
		daddr_t lbn, blkno;
		struct vnode *devvp;

		/*
		 * bmap the file to find out the blkno of the underlying
		 * device block and how much we can transfer in one i/o.
		 * if bmap returns an error, skip the rest of the
		 * top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
			    lbn,error,0,0);
			skipbytes += bytes;
			bytes = 0;
			goto loopdone;
		}

		/*
		 * see how much can be transferred with this i/o:
		 * the size is clipped to the contiguous run of blocks
		 * that bmap reported.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);

		/*
		 * if this block isn't allocated, skip this piece of the
		 * i/o.  for reads, zero the corresponding part of the
		 * kernel mapping so the caller sees zeroes rather than
		 * stale data.
		 */

		if (blkno == (daddr_t)-1) {
			if (!iowrite) {
				memset((char *)kva + (offset - startoffset), 0,
				    iobytes);
			}
			skipbytes += iobytes;
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
			    vp, bp, vp->v_numoutput, 0);
			bp = getiobuf(vp, true);
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);

		UVMHIST_LOG(ubchist,
		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    bp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	if (skipbytes) {
		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
	}
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
		return (0);
	}
	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
	error = biowait(mbp);
	s = splbio();
	(*iodone)(mbp);
	splx(s);
	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
	return (error);
}

int
genfs_compat_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t origoffset;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs;
	vaddr_t kva;
	int i, error, orignpages, npages;
	struct iovec iov;
	struct uio uio;
	kauth_cred_t cred = curlwp->l_cred;
	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	pgs = ap->a_m;

	if (ap->a_flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m, NULL,
		    UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));

		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
		if (error == 0 && memwrite) {
			genfs_markdirty(vp);
		}
		return error;
	}
	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
		mutex_exit(uobj->vmobjlock);
		return EINVAL;
	}
	if ((ap->a_flags & PGO_SYNCIO) == 0) {
		mutex_exit(uobj->vmobjlock);
		return 0;
	}
	npages = orignpages;
	uvn_findpages(uobj, origoffset, &npages, pgs, NULL, UFP_ALL);
	mutex_exit(uobj->vmobjlock);
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if ((pg->flags & PG_FAKE) == 0) {
			continue;
		}
		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
		iov.iov_len = PAGE_SIZE;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
		uio.uio_rw = UIO_READ;
		uio.uio_resid = PAGE_SIZE;
		UIO_SETUP_SYSSPACE(&uio);
		/* XXX vn_lock */
		error = VOP_READ(vp, &uio, 0, cred);
		if (error) {
			break;
		}
		if (uio.uio_resid) {
			memset(iov.iov_base, 0, uio.uio_resid);
		}
	}
	uvm_pagermapout(kva, npages);
	mutex_enter(uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (error && (pg->flags & PG_FAKE) != 0) {
			pg->flags |= PG_RELEASED;
		} else {
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
			uvm_pageactivate(pg);
		}
	}
	if (error) {
		uvm_page_unbusy(pgs, npages);
	}
	mutex_exit(&uvm_pageqlock);
	if (error == 0 && memwrite) {
		genfs_markdirty(vp);
	}
	mutex_exit(uobj->vmobjlock);
	return error;
}

int
genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	off_t offset;
	struct iovec iov;
	struct uio uio;
	kauth_cred_t cred = curlwp->l_cred;
	struct buf *bp;
	vaddr_t kva;
	int error;

	offset = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

	iov.iov_base = (void *)kva;
	iov.iov_len = npages << PAGE_SHIFT;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_rw = UIO_WRITE;
	uio.uio_resid = npages << PAGE_SHIFT;
	UIO_SETUP_SYSSPACE(&uio);
	/* XXX vn_lock */
	error = VOP_WRITE(vp, &uio, 0, cred);

	mutex_enter(vp->v_interlock);
	vp->v_numoutput++;
	mutex_exit(vp->v_interlock);

	bp = getiobuf(vp, true);
	bp->b_cflags = BC_BUSY | BC_AGE;
	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
	bp->b_data = (char *)kva;
	bp->b_bcount = npages << PAGE_SHIFT;
	bp->b_bufsize = npages << PAGE_SHIFT;
	bp->b_resid = 0;
	bp->b_error = error;
	uvm_aio_aiodone(bp);
	return (error);
}

/*
 * Process a uio using direct I/O.  If we reach a part of the request
 * which cannot be processed in this fashion for some reason, just return.
 * The caller must handle some additional part of the request using
 * buffered I/O before trying direct I/O again.
 */

void
genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct vmspace *vs;
	struct iovec *iov;
	vaddr_t va;
	size_t len;
	const int mask = DEV_BSIZE - 1;
	int error;
	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
	    (ioflag & IO_JOURNALLOCKED) == 0);

	/*
	 * We only support direct I/O to user space for now.
	 */

	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
		return;
	}

	/*
	 * If the vnode is mapped, we would need to get the getpages lock
	 * to stabilize the bmap, but then we would get into trouble while
	 * locking the pages if the pages belong to this same vnode (or a
	 * multi-vnode cascade to the same effect).  Just fall back to
	 * buffered I/O if the vnode is mapped to avoid this mess.
	 */

	if (vp->v_vflag & VV_MAPPED) {
		return;
	}

	if (need_wapbl) {
		error = WAPBL_BEGIN(vp->v_mount);
		if (error)
			return;
	}

	/*
	 * Do as much of the uio as possible with direct I/O.
	 */

	vs = uio->uio_vmspace;
	while (uio->uio_resid) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		va = (vaddr_t)iov->iov_base;
		len = MIN(iov->iov_len, genfs_maxdio);
		len &= ~mask;

		/*
		 * If the next chunk is smaller than DEV_BSIZE or extends past
		 * the current EOF, then fall back to buffered I/O.
		 */

		if (len == 0 || uio->uio_offset + len > vp->v_size) {
			break;
		}

		/*
		 * Check alignment.  The file offset must be at least
		 * sector-aligned.  The exact constraint on memory alignment
		 * is very hardware-dependent, but requiring sector-aligned
		 * addresses there too is safe.
		 */

		if (uio->uio_offset & mask || va & mask) {
			break;
		}
		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
		    uio->uio_rw);
		if (error) {
			break;
		}
		iov->iov_base = (char *)iov->iov_base + len;
		iov->iov_len -= len;
		uio->uio_offset += len;
		uio->uio_resid -= len;
	}

	if (need_wapbl)
		WAPBL_END(vp->v_mount);
}

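/*
 * A minimal usage sketch (not part of the original source): a
 * filesystem's read/write vnode operation typically tries direct I/O
 * first when the caller asked for it, then falls through to the
 * buffered path for whatever this routine could not handle.  "vp",
 * "uio", "ioflag" and the "out" label are assumed from the caller's
 * context.
 */
#if 0
	if ((ioflag & IO_DIRECT) != 0)
		genfs_directio(vp, uio, ioflag);
	if (uio->uio_resid == 0)
		goto out;	/* everything went through direct I/O */
#endif
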
/*
 * Iodone routine for direct I/O.  We don't do much here since the request is
 * always synchronous, so the caller will do most of the work after biowait().
 */

static void
genfs_dio_iodone(struct buf *bp)
{

	KASSERT((bp->b_flags & B_ASYNC) == 0);
	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}

/*
 * Process one chunk of a direct I/O request.
 */

static int
genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
    off_t off, enum uio_rw rw)
{
	struct vm_map *map;
	struct pmap *upm, *kpm;
	size_t klen = round_page(uva + len) - trunc_page(uva);
	off_t spoff, epoff;
	vaddr_t kva, puva;
	paddr_t pa;
	vm_prot_t prot;
	int error, rv, poff, koff;
	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
	    (rw == UIO_WRITE ? PGO_FREE : 0);

	/*
	 * For writes, verify that this range of the file already has fully
	 * allocated backing store.  If there are any holes, just punt and
	 * make the caller take the buffered write path.
	 */

	if (rw == UIO_WRITE) {
		daddr_t lbn, elbn, blkno;
		int bsize, bshift, run;

		bshift = vp->v_mount->mnt_fs_bshift;
		bsize = 1 << bshift;
		lbn = off >> bshift;
		elbn = (off + len + bsize - 1) >> bshift;
		while (lbn < elbn) {
			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
			if (error) {
				return error;
			}
			if (blkno == (daddr_t)-1) {
				return ENOSPC;
			}
			lbn += 1 + run;
		}
	}

	/*
	 * Flush any cached pages for parts of the file that we're about to
	 * access.  If we're writing, invalidate pages as well.
	 */

	spoff = trunc_page(off);
	epoff = round_page(off + len);
	mutex_enter(vp->v_interlock);
	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
	if (error) {
		return error;
	}

	/*
	 * Wire the user pages and remap them into kernel memory.
	 */

	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
	error = uvm_vslock(vs, (void *)uva, len, prot);
	if (error) {
		return error;
	}

	map = &vs->vm_map;
	upm = vm_map_pmap(map);
	kpm = vm_map_pmap(kernel_map);
	puva = trunc_page(uva);
	kva = uvm_km_alloc(kernel_map, klen, atop(puva) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
		rv = pmap_extract(upm, puva + poff, &pa);
		KASSERT(rv);
		pmap_kenter_pa(kva + poff, pa, prot, PMAP_WIRED);
	}
	pmap_update(kpm);

	/*
	 * Do the I/O.
	 */

	koff = uva - trunc_page(uva);
	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
	    genfs_dio_iodone);

	/*
	 * Tear down the kernel mapping.
	 */

	pmap_kremove(kva, klen);
	pmap_update(kpm);
	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);

	/*
	 * Unwire the user pages.
	 */

	uvm_vsunlock(vs, (void *)uva, len);
	return error;
}