      1 /*	$NetBSD: genfs_io.c,v 1.36.2.52 2010/11/20 08:03:22 uebayasi Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1982, 1986, 1989, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. Neither the name of the University nor the names of its contributors
     16  *    may be used to endorse or promote products derived from this software
     17  *    without specific prior written permission.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  *
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.36.2.52 2010/11/20 08:03:22 uebayasi Exp $");
     35 
     36 #include "opt_xip.h"
     37 
     38 #include <sys/param.h>
     39 #include <sys/systm.h>
     40 #include <sys/proc.h>
     41 #include <sys/kernel.h>
     42 #include <sys/mount.h>
     43 #include <sys/namei.h>
     44 #include <sys/vnode.h>
     45 #include <sys/fcntl.h>
     46 #include <sys/kmem.h>
     47 #include <sys/poll.h>
     48 #include <sys/mman.h>
     49 #include <sys/file.h>
     50 #include <sys/kauth.h>
     51 #include <sys/fstrans.h>
     52 #include <sys/buf.h>
     53 #include <sys/once.h>
     54 
     55 #include <miscfs/genfs/genfs.h>
     56 #include <miscfs/genfs/genfs_node.h>
     57 #include <miscfs/specfs/specdev.h>
     58 
     59 #include <uvm/uvm.h>
     60 #include <uvm/uvm_pager.h>
     61 
     62 #ifdef XIP
     63 static int genfs_do_getpages_xip_io(struct vnode *, voff_t, struct vm_page **,
     64     int *, int, vm_prot_t, int, int, const int);
     65 static int genfs_do_getpages_xip_io_done(struct vnode *, voff_t, struct vm_page **,
     66     int *, int, vm_prot_t, int, int, const int);
     67 static int genfs_do_putpages_xip(struct vnode *, off_t, off_t, int,
     68     struct vm_page **);
     69 #endif
     70 static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
     71     off_t, enum uio_rw);
     72 static void genfs_dio_iodone(struct buf *);
     73 
     74 static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
     75     void (*)(struct buf *));
     76 static void genfs_rel_pages(struct vm_page **, int);
     77 static void genfs_markdirty(struct vnode *);
     78 
     79 int genfs_maxdio = MAXPHYS;
     80 
     81 static void
     82 genfs_rel_pages(struct vm_page **pgs, int npages)
     83 {
     84 	int i;
     85 
     86 	for (i = 0; i < npages; i++) {
     87 		struct vm_page *pg = pgs[i];
     88 
     89 		if (pg == NULL || pg == PGO_DONTCARE)
     90 			continue;
     91 		if (pg->flags & PG_FAKE) {
     92 			pg->flags |= PG_RELEASED;
     93 		}
     94 	}
     95 	mutex_enter(&uvm_pageqlock);
     96 	uvm_page_unbusy(pgs, npages);
     97 	mutex_exit(&uvm_pageqlock);
     98 }
     99 
    100 static void
    101 genfs_markdirty(struct vnode *vp)
    102 {
    103 	struct genfs_node * const gp = VTOG(vp);
    104 
    105 	KASSERT(mutex_owned(&vp->v_interlock));
    106 	gp->g_dirtygen++;
    107 	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
    108 		vn_syncer_add_to_worklist(vp, filedelay);
    109 	}
    110 	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
    111 		vp->v_iflag |= VI_WRMAPDIRTY;
    112 	}
    113 }
    114 
    115 /*
    116  * generic VM getpages routine.
    117  * Return PG_BUSY pages for the given range,
    118  * reading from backing store if necessary.
    119  */
    120 
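/*
 * Usage sketch (illustrative, not part of the original file): a file
 * system reaches this routine by pointing its VOP_GETPAGES entry at it
 * in the vnode op table, e.g.
 *
 *	{ &vop_getpages_desc, genfs_getpages },
 *	{ &vop_putpages_desc, genfs_putpages },
 *
 * and callers go through the VOP wrapper with the object locked:
 *
 *	mutex_enter(&vp->v_uobj.vmobjlock);
 *	error = VOP_GETPAGES(vp, off, pgs, &npages, 0,
 *	    VM_PROT_READ, advice, PGO_SYNCIO);
 */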
    121 int
    122 genfs_getpages(void *v)
    123 {
    124 	struct vop_getpages_args /* {
    125 		struct vnode *a_vp;
    126 		voff_t a_offset;
    127 		struct vm_page **a_m;
    128 		int *a_count;
    129 		int a_centeridx;
    130 		vm_prot_t a_access_type;
    131 		int a_advice;
    132 		int a_flags;
    133 	} */ * const ap = v;
    134 
    135 	off_t diskeof, memeof;
    136 	int i, error, npages;
    137 	const int flags = ap->a_flags;
    138 	struct vnode * const vp = ap->a_vp;
    139 	struct uvm_object * const uobj = &vp->v_uobj;
    140 	kauth_cred_t const cred = curlwp->l_cred;		/* XXXUBC curlwp */
    141 	const bool async = (flags & PGO_SYNCIO) == 0;
    142 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
    143 	bool has_trans = false;
    144 	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
    145 	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
    146 	const bool glocked = (flags & PGO_GLOCKHELD) != 0;
    147 	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
    148 
    149 	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
    150 	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
    151 
    152 	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
    153 	    vp->v_type == VLNK || vp->v_type == VBLK);
    154 
    155 startover:
    156 	error = 0;
    157 	const voff_t origvsize = vp->v_size;
    158 	const off_t origoffset = ap->a_offset;
    159 	const int orignpages = *ap->a_count;
    160 
    161 	GOP_SIZE(vp, origvsize, &diskeof, 0);
    162 	if (flags & PGO_PASTEOF) {
    163 		off_t newsize;
    164 #if defined(DIAGNOSTIC)
    165 		off_t writeeof;
    166 #endif /* defined(DIAGNOSTIC) */
    167 
    168 		newsize = MAX(origvsize,
    169 		    origoffset + (orignpages << PAGE_SHIFT));
    170 		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
    171 #if defined(DIAGNOSTIC)
    172 		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
    173 		if (newsize > round_page(writeeof)) {
    174 			panic("%s: past eof: %" PRId64 " vs. %" PRId64,
    175 			    __func__, newsize, round_page(writeeof));
    176 		}
    177 #endif /* defined(DIAGNOSTIC) */
    178 	} else {
    179 		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
    180 	}
     181 	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
    182 	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
    183 	KASSERT(orignpages > 0);
    184 
    185 	/*
    186 	 * Bounds-check the request.
    187 	 */
    188 
    189 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
    190 		if ((flags & PGO_LOCKED) == 0) {
    191 			mutex_exit(&uobj->vmobjlock);
    192 		}
    193 		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
    194 		    origoffset, *ap->a_count, memeof,0);
    195 		error = EINVAL;
    196 		goto out_err;
    197 	}
    198 
    199 	/* uobj is locked */
    200 
    201 	if ((flags & PGO_NOTIMESTAMP) == 0 &&
    202 	    (vp->v_type != VBLK ||
    203 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
    204 		int updflags = 0;
    205 
    206 		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
    207 			updflags = GOP_UPDATE_ACCESSED;
    208 		}
    209 		if (memwrite) {
    210 			updflags |= GOP_UPDATE_MODIFIED;
    211 		}
    212 		if (updflags != 0) {
    213 			GOP_MARKUPDATE(vp, updflags);
    214 		}
    215 	}
    216 
    217 	/*
    218 	 * For PGO_LOCKED requests, just return whatever's in memory.
    219 	 */
    220 
    221 	if (flags & PGO_LOCKED) {
    222 #if 0
    223 		genfs_getpages_mem();
    224 	} else {
    225 		genfs_getpages_io();
    226 	}
    227 }
    228 
    229 int
    230 genfs_getpages_mem()
    231 {
    232 #endif
    233 		int nfound;
    234 		struct vm_page *pg;
    235 
    236 #ifdef XIP
    237 		if ((ap->a_vp->v_vflag & VV_XIP) != 0) {
    238 			*ap->a_count = 0;
    239 			return 0;
    240 		}
    241 #endif
    242 
    243 		KASSERT(!glocked);
    244 		npages = *ap->a_count;
    245 #if defined(DEBUG)
    246 		for (i = 0; i < npages; i++) {
    247 			pg = ap->a_m[i];
    248 			KASSERT(pg == NULL || pg == PGO_DONTCARE);
    249 		}
    250 #endif /* defined(DEBUG) */
    251 		nfound = uvn_findpages(uobj, origoffset, &npages,
    252 		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
    253 		KASSERT(npages == *ap->a_count);
    254 		if (nfound == 0) {
    255 			error = EBUSY;
    256 			goto out_err;
    257 		}
    258 		if (!genfs_node_rdtrylock(vp)) {
    259 			genfs_rel_pages(ap->a_m, npages);
    260 
    261 			/*
    262 			 * restore the array.
    263 			 */
    264 
    265 			for (i = 0; i < npages; i++) {
    266 				pg = ap->a_m[i];
    267 
    268 				if (pg != NULL && pg != PGO_DONTCARE) {
    269 					ap->a_m[i] = NULL;
    270 				}
    271 				KASSERT(pg == NULL || pg == PGO_DONTCARE);
    272 			}
    273 		} else {
    274 			genfs_node_unlock(vp);
    275 		}
    276 		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
    277 		if (error == 0 && memwrite) {
    278 			genfs_markdirty(vp);
    279 		}
    280 		goto out_err;
    281 	}
    282 	mutex_exit(&uobj->vmobjlock);
    283 #if 0
    284 }
    285 
    286 int
    287 genfs_getpages_io()
    288 {
    289 #endif
    290 	/*
    291 	 * find the requested pages and make some simple checks.
    292 	 * leave space in the page array for a whole block.
    293 	 */
    294 
    295 #define	vp2fs_bshift(vp) \
    296 	(((vp)->v_type != VBLK) ? (vp)->v_mount->mnt_fs_bshift : DEV_BSHIFT)
    297 #define	vp2dev_bshift(vp) \
    298 	(((vp)->v_type != VBLK) ? (vp)->v_mount->mnt_dev_bshift : DEV_BSHIFT)
    299 
    300 	const int fs_bshift = vp2fs_bshift(vp);
    301 	const int dev_bshift = vp2dev_bshift(vp);
    302 	const int fs_bsize = 1 << fs_bshift;
    303 #define	blk_mask	(fs_bsize - 1)
    304 #define	trunc_blk(x)	((x) & ~blk_mask)
    305 #define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)
    306 
    307 	const int orignmempages = MIN(orignpages,
    308 	    round_page(memeof - origoffset) >> PAGE_SHIFT);
    309 	npages = orignmempages;
    310 	const off_t startoffset = trunc_blk(origoffset);
    311 	const off_t endoffset = MIN(
    312 	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
    313 	    round_page(memeof));
    314 	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
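	/*
	 * Worked example (illustrative numbers): with 4KB pages and an
	 * 8KB fs_bsize, origoffset = 0x3000 and orignpages = 2 give
	 * startoffset = trunc_blk(0x3000) = 0x2000 and endoffset =
	 * round_blk(0x5000) = 0x6000 (before the memeof clamp), so
	 * ridx = (0x3000 - 0x2000) >> PAGE_SHIFT = 1, i.e. one slot is
	 * reserved in front of the requested pages for block alignment.
	 */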
    315 
    316 	const int pgs_size = sizeof(struct vm_page *) *
    317 	    ((endoffset - startoffset) >> PAGE_SHIFT);
    318 	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];
    319 
    320 	if (pgs_size > sizeof(pgs_onstack)) {
    321 		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
    322 		if (pgs == NULL) {
    323 			pgs = pgs_onstack;
    324 			error = ENOMEM;
    325 			goto out_err;
    326 		}
    327 	} else {
    328 		pgs = pgs_onstack;
    329 		(void)memset(pgs, 0, pgs_size);
    330 	}
    331 
    332 	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
    333 	    ridx, npages, startoffset, endoffset);
    334 #if 0
    335 }
    336 
    337 int
    338 genfs_getpages_io_relock()
    339 {
    340 #endif
    341 	if (!has_trans) {
    342 		fstrans_start(vp->v_mount, FSTRANS_SHARED);
    343 		has_trans = true;
    344 	}
    345 
    346 	/*
    347 	 * hold g_glock to prevent a race with truncate.
    348 	 *
    349 	 * check if our idea of v_size is still valid.
    350 	 */
    351 
    352 	KASSERT(!glocked || genfs_node_wrlocked(vp));
    353 	if (!glocked) {
    354 		if (blockalloc) {
    355 			genfs_node_wrlock(vp);
    356 		} else {
    357 			genfs_node_rdlock(vp);
    358 		}
    359 	}
    360 	mutex_enter(&uobj->vmobjlock);
    361 	if (vp->v_size < origvsize) {
    362 		if (!glocked) {
    363 			genfs_node_unlock(vp);
    364 		}
    365 		if (pgs != pgs_onstack)
    366 			kmem_free(pgs, pgs_size);
    367 		goto startover;
    368 	}
    369 #if 0
    370 }
    371 
    372 int
    373 genfs_getpages_io_findpages()
    374 {
    375 #endif
    376 #ifdef XIP
    377 	if ((ap->a_vp->v_vflag & VV_XIP) != 0)
    378 		goto genfs_getpages_io_read_allocpages_done;
    379 #endif
    380 
    381 	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
    382 	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
    383 		if (!glocked) {
    384 			genfs_node_unlock(vp);
    385 		}
    386 		KASSERT(async != 0);
    387 		genfs_rel_pages(&pgs[ridx], orignmempages);
    388 		mutex_exit(&uobj->vmobjlock);
    389 		error = EBUSY;
    390 		goto out_err_free;
    391 	}
    392 
    393 	/*
    394 	 * if the pages are already resident, just return them.
    395 	 */
    396 
    397 	for (i = 0; i < npages; i++) {
    398 		struct vm_page *pg = pgs[ridx + i];
    399 
    400 		if ((pg->flags & PG_FAKE) ||
    401 		    (blockalloc && (pg->flags & PG_RDONLY))) {
    402 			break;
    403 		}
    404 	}
    405 	if (i == npages) {
    406 		if (!glocked) {
    407 			genfs_node_unlock(vp);
    408 		}
    409 		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
    410 		npages += ridx;
    411 		goto out;
    412 	}
    413 
    414 	/*
    415 	 * if PGO_OVERWRITE is set, don't bother reading the pages.
    416 	 */
    417 
    418 	if (overwrite) {
    419 #if 0
    420 		genfs_getpages_io_overwrite();
    421 	} else {
    422 		genfs_getpages_io_read();
    423 	}
    424 }
    425 
    426 int
    427 genfs_getpages_io_overwrite()
    428 {
    429 	{
    430 #endif
    431 		if (!glocked) {
    432 			genfs_node_unlock(vp);
    433 		}
    434 		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
    435 
    436 		for (i = 0; i < npages; i++) {
    437 			struct vm_page *pg = pgs[ridx + i];
    438 
    439 			pg->flags &= ~(PG_RDONLY|PG_CLEAN);
    440 		}
    441 		npages += ridx;
    442 		goto out;
    443 	}
    444 #if 0
    445 }
    446 
    447 int
    448 genfs_getpages_io_read()
    449 {
    450 #endif
    451 	/*
    452 	 * the page wasn't resident and we're not overwriting,
    453 	 * so we're going to have to do some i/o.
    454 	 * find any additional pages needed to cover the expanded range.
    455 	 */
    456 #if 0
    457 }
    458 
    459 int
    460 genfs_getpages_io_read_allocpages()
    461 {
    462 #endif
    463 	npages = (endoffset - startoffset) >> PAGE_SHIFT;
    464 	if (startoffset != origoffset || npages != orignmempages) {
    465 		int npgs;
    466 
    467 		/*
    468 		 * we need to avoid deadlocks caused by locking
    469 		 * additional pages at lower offsets than pages we
    470 		 * already have locked.  unlock them all and start over.
    471 		 */
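		/*
		 * (Concrete instance of the hazard, for illustration: if
		 * we kept the page at 0x3000 busy while sleeping on the
		 * page at 0x2000, and another thread held 0x2000 busy
		 * while sleeping on 0x3000, neither could make progress.
		 * Re-finding everything from startoffset locks the pages
		 * in ascending offset order.)
		 */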
    472 
    473 		genfs_rel_pages(&pgs[ridx], orignmempages);
    474 		memset(pgs, 0, pgs_size);
    475 
    476 		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
    477 		    startoffset, endoffset, 0,0);
    478 		npgs = npages;
    479 		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
    480 		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
    481 			if (!glocked) {
    482 				genfs_node_unlock(vp);
    483 			}
    484 			KASSERT(async != 0);
    485 			genfs_rel_pages(pgs, npages);
    486 			mutex_exit(&uobj->vmobjlock);
    487 			error = EBUSY;
    488 			goto out_err_free;
    489 		}
    490 	}
    491 #ifdef XIP
    492 genfs_getpages_io_read_allocpages_done:
    493 #endif
    494 #if 0
    495 }
    496 
    497 int
    498 genfs_getpages_io_read_bio()
    499 {
    500 #endif
    501 	mutex_exit(&uobj->vmobjlock);
    502 
    503     {
    504 	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
    505 	vaddr_t kva = 0;
    506 	struct buf *bp = NULL, *mbp = NULL;
    507 	bool sawhole = false;
    508 
    509 	/*
    510 	 * read the desired page(s).
    511 	 */
    512 
    513 	totalbytes = npages << PAGE_SHIFT;
    514 	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
    515 	tailbytes = totalbytes - bytes;
    516 	skipbytes = 0;
    517 
    518 #if 1
    519 	if ((ap->a_vp->v_vflag & VV_XIP) != 0)
    520 		goto genfs_getpages_bio_prepare_done;
    521 #endif
    522 #if 0
    523 }
    524 
    525 int
    526 genfs_getpages_io_read_bio_prepare()
    527 {
    528 #endif
    529 	kva = uvm_pagermapin(pgs, npages,
    530 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
    531 
    532 	mbp = getiobuf(vp, true);
    533 	mbp->b_bufsize = totalbytes;
    534 	mbp->b_data = (void *)kva;
    535 	mbp->b_resid = mbp->b_bcount = bytes;
    536 	mbp->b_cflags = BC_BUSY;
    537 	if (async) {
    538 		mbp->b_flags = B_READ | B_ASYNC;
    539 		mbp->b_iodone = uvm_aio_biodone;
    540 	} else {
    541 		mbp->b_flags = B_READ;
    542 		mbp->b_iodone = NULL;
    543 	}
    544 	if (async)
    545 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
    546 	else
    547 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
    548 #if 0
    549 }
    550 
    551 #endif
    552 #if 1
    553 genfs_getpages_bio_prepare_done:
    554 #endif
    555 
    556 	/*
    557 	 * if EOF is in the middle of the range, zero the part past EOF.
    558 	 * skip over pages which are not PG_FAKE since in that case they have
    559 	 * valid data that we need to preserve.
    560 	 */
    561 
    562 	tailstart = bytes;
    563 	while (tailbytes > 0) {
    564 		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);
    565 
    566 		KASSERT(len <= tailbytes);
    567 		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
    568 			memset((void *)(kva + tailstart), 0, len);
    569 			UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
    570 			    kva, tailstart, len, 0);
    571 		}
    572 		tailstart += len;
    573 		tailbytes -= len;
    574 	}
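	/*
	 * Worked example (illustrative): npages = 4 with diskeof only
	 * 0x1800 bytes past startoffset gives bytes = 0x1800, tailstart =
	 * 0x1800 and tailbytes = 0x2800; the first pass zeroes the last
	 * 0x800 bytes of page 1, then pages 2 and 3 are zeroed whole,
	 * each step skipped when the page is not PG_FAKE.
	 */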
    575 
    576 #if 1
    577 	if ((ap->a_vp->v_vflag & VV_XIP) != 0) {
    578 		error = genfs_do_getpages_xip_io(
    579 			ap->a_vp,
    580 			ap->a_offset,
    581 			pgs,
    582 			ap->a_count,
    583 			ap->a_centeridx,
    584 			ap->a_access_type,
    585 			ap->a_advice,
    586 			ap->a_flags,
    587 			orignmempages);
     588 		if (0)
    589 		goto loopdone;
    590 	}
    591 #endif
    592 #if 0
    593 }
    594 
    595 int
    596 genfs_getpages_io_read_bio_loop()
    597 {
    598 #endif
    599 	/*
    600 	 * now loop over the pages, reading as needed.
    601 	 */
    602 
    603 	bp = NULL;
    604 	off_t offset;
    605 	for (offset = startoffset;
    606 	    bytes > 0;
    607 	    offset += iobytes, bytes -= iobytes) {
    608 		int run;
    609 		daddr_t lbn, blkno;
    610 		int pidx;
    611 		struct vnode *devvp;
    612 
    613 		/*
    614 		 * skip pages which don't need to be read.
    615 		 */
    616 
    617 		pidx = (offset - startoffset) >> PAGE_SHIFT;
    618 #ifdef XIP
    619 	    if ((ap->a_vp->v_vflag & VV_XIP) == 0) {
    620 #endif
    621 		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
    622 			size_t b;
    623 
    624 			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
    625 			if ((pgs[pidx]->flags & PG_RDONLY)) {
    626 				sawhole = true;
    627 			}
    628 			b = MIN(PAGE_SIZE, bytes);
    629 			offset += b;
    630 			bytes -= b;
    631 			skipbytes += b;
    632 			pidx++;
    633 			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
    634 			    offset, 0,0,0);
    635 			if (bytes == 0) {
    636 				goto loopdone;
    637 			}
    638 		}
    639 #ifdef XIP
    640 	    }
    641 #endif
    642 
    643 		/*
    644 		 * bmap the file to find out the blkno to read from and
    645 		 * how much we can read in one i/o.  if bmap returns an error,
    646 		 * skip the rest of the top-level i/o.
    647 		 */
    648 
    649 		lbn = offset >> fs_bshift;
    650 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    651 		if (error) {
    652 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
    653 			    lbn,error,0,0);
    654 			skipbytes += bytes;
    655 			bytes = 0;
    656 			goto loopdone;
    657 		}
    658 
    659 		/*
    660 		 * see how many pages can be read with this i/o.
    661 		 * reduce the i/o size if necessary to avoid
    662 		 * overwriting pages with valid data.
    663 		 */
    664 
    665 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
    666 		    bytes);
    667 		if (offset + iobytes > round_page(offset)) {
    668 			int pcount;
    669 
    670 			pcount = 1;
    671 			while ((pidx + pcount < npages) && (
    672 #ifdef XIP
    673 			    /*
    674 			     * in XIP case, we don't know what page to read
    675 			     * at this point!
    676 			     */
    677 			    ((ap->a_vp->v_vflag & VV_XIP) != 0) ||
    678 #else
    679 			     0 ||
    680 #endif
    681 			     (pgs[pidx + pcount]->flags & PG_FAKE))) {
    682 				pcount++;
    683 			}
    684 			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
    685 			    (offset - trunc_page(offset)));
    686 		}
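		/*
		 * Example with illustrative numbers: 8KB blocks
		 * (fs_bshift = 13) and offset = 0x3000 give lbn = 1; with
		 * run = 2 the mapping is contiguous through lbn 3, so
		 * iobytes can cover up to ((1 + 1 + 2) << 13) - 0x3000 =
		 * 0x5000 bytes in one transfer, before the clamp above
		 * shortens it to avoid overwriting resident (!PG_FAKE)
		 * pages.
		 */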
    687 
    688 		/*
    689 		 * if this block isn't allocated, zero it instead of
    690 		 * reading it.  unless we are going to allocate blocks,
    691 		 * mark the pages we zeroed PG_RDONLY.
    692 		 */
    693 
    694 		if (blkno == (daddr_t)-1) {
    695 #ifdef XIP
    696 		    if ((ap->a_vp->v_vflag & VV_XIP) == 0) {
    697 #endif
    698 			int holepages = (round_page(offset + iobytes) -
    699 			    trunc_page(offset)) >> PAGE_SHIFT;
    700 			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
    701 
    702 			KASSERT((ap->a_vp->v_vflag & VV_XIP) == 0);
    703 
    704 			sawhole = true;
    705 			memset((char *)kva + (offset - startoffset), 0,
    706 			    iobytes);
    707 			skipbytes += iobytes;
    708 
    709 			for (i = 0; i < holepages; i++) {
    710 				if (memwrite) {
    711 					pgs[pidx + i]->flags &= ~PG_CLEAN;
    712 				}
    713 				if (!blockalloc) {
    714 					pgs[pidx + i]->flags |= PG_RDONLY;
    715 				}
    716 			}
    717 #ifdef XIP
    718 		    } else {
    719 			panic("XIP hole page is not supported yet");
    720 		    }
    721 #endif
    722 			continue;
    723 		}
    724 
    725 #ifdef XIP
    726 	    if ((ap->a_vp->v_vflag & VV_XIP) == 0) {
    727 #endif
    728 		/*
    729 		 * allocate a sub-buf for this piece of the i/o
    730 		 * (or just use mbp if there's only 1 piece),
    731 		 * and start it going.
    732 		 */
    733 
    734 		KASSERT((ap->a_vp->v_vflag & VV_XIP) == 0);
    735 
    736 		if (offset == startoffset && iobytes == bytes) {
    737 			bp = mbp;
    738 		} else {
    739 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
    740 			    vp, bp, vp->v_numoutput, 0);
    741 			bp = getiobuf(vp, true);
    742 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
    743 		}
    744 		bp->b_lblkno = 0;
    745 
    746 		/* adjust physical blkno for partial blocks */
    747 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
    748 		    dev_bshift);
    749 
    750 		UVMHIST_LOG(ubchist,
    751 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
    752 		    bp, offset, bp->b_bcount, bp->b_blkno);
    753 
    754 		VOP_STRATEGY(devvp, bp);
    755 	    }
    756 #ifdef XIP
    757 	    else {
    758 		/*
    759 		 * XIP page metadata assignment
    760 		 * - Unallocated block is redirected to the dedicated zero'ed
    761 		 *   page.
    762 		 */
    763 		const int npgs = MIN(
    764 			iobytes >> PAGE_SHIFT,
    765 			((1 + run) << fs_bshift) >> PAGE_SHIFT);
    766 		const daddr_t blk_off = blkno << dev_bshift;
    767 		const daddr_t fs_off = origoffset - startoffset;
    768 
    769 		for (i = ridx + pidx; i < npgs; i++) {
    770 			const daddr_t pg_off = pidx << PAGE_SHIFT;
    771 			struct vm_page *pg;
    772 
    773 			UVMHIST_LOG(ubchist,
    774 			    "xip blk_off=0x%lx fs_off=0x%lx pg_off=%lx",
    775 			    (long)blk_off, (long)fs_off, (long)pg_off, 0);
    776 
    777 			pg = uvn_findpage_xip(devvp, &vp->v_uobj,
    778 			    blk_off + fs_off + pg_off);
    779 			KASSERT(pg != NULL);
    780 			UVMHIST_LOG(ubchist,
    781 				"xip pgs %d => phys_addr=0x%lx (%p)",
    782 				i,
    783 				(long)pg->phys_addr,
    784 				pg,
    785 				0);
    786 			pgs[i] = pg;
    787 		}
    788 	    }
    789 #endif
    790 	}
    791 
    792 loopdone:
    793 #if 1
    794 	if ((ap->a_vp->v_vflag & VV_XIP) != 0) {
    795 		goto genfs_getpages_biodone_done;
    796 	}
    797 #endif
    798 #if 0
    799 
    800 int
    801 genfs_getpages_biodone()
    802 {
    803 #endif
    804 	nestiobuf_done(mbp, skipbytes, error);
    805 	if (async) {
    806 		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
    807 		if (!glocked) {
    808 			genfs_node_unlock(vp);
    809 		}
    810 		error = 0;
    811 		goto out_err_free;
    812 	}
    813 	if (bp != NULL) {
    814 		error = biowait(mbp);
    815 	}
    816 
    817 	/* Remove the mapping (make KVA available as soon as possible) */
    818 	uvm_pagermapout(kva, npages);
    819 
    820 	/*
     821 	 * if we encountered a hole then we have to do a little more work.
    822 	 * for read faults, we marked the page PG_RDONLY so that future
    823 	 * write accesses to the page will fault again.
    824 	 * for write faults, we must make sure that the backing store for
    825 	 * the page is completely allocated while the pages are locked.
    826 	 */
    827 
    828 	if (!error && sawhole && blockalloc) {
    829 		/*
    830 		 * XXX: This assumes that we come here only via
    831 		 * the mmio path
    832 		 */
    833 		if (vp->v_mount->mnt_wapbl) {
    834 			error = WAPBL_BEGIN(vp->v_mount);
    835 		}
    836 
    837 		if (!error) {
    838 			error = GOP_ALLOC(vp, startoffset,
    839 			    npages << PAGE_SHIFT, 0, cred);
    840 			if (vp->v_mount->mnt_wapbl) {
    841 				WAPBL_END(vp->v_mount);
    842 			}
    843 		}
    844 
    845 		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
    846 		    startoffset, npages << PAGE_SHIFT, error,0);
    847 		if (!error) {
    848 			for (i = 0; i < npages; i++) {
    849 				struct vm_page *pg = pgs[i];
    850 
    851 				if (pg == NULL) {
    852 					continue;
    853 				}
    854 				pg->flags &= ~(PG_CLEAN|PG_RDONLY);
    855 				UVMHIST_LOG(ubchist, "mark dirty pg %p",
    856 				    pg,0,0,0);
    857 			}
    858 		}
    859 	}
    860 
    861 	putiobuf(mbp);
    862 #if 0
    863 }
    864 
    865 #endif
    866 #if 1
    867 genfs_getpages_biodone_done:
    868 	{}
    869 #endif
    870     }
    871 
    872 	if (!glocked) {
    873 		genfs_node_unlock(vp);
    874 	}
    875 
    876 #if 1
    877 	if ((ap->a_vp->v_vflag & VV_XIP) != 0) {
    878 		error = genfs_do_getpages_xip_io_done(
    879 			ap->a_vp,
    880 			ap->a_offset,
    881 			pgs,
    882 			ap->a_count,
    883 			ap->a_centeridx,
    884 			ap->a_access_type,
    885 			ap->a_advice,
    886 			ap->a_flags,
    887 			orignmempages);
    888 		goto genfs_getpages_generic_io_done_done;
    889 	}
    890 #endif
    891 #if 0
    892 	else {
    893 		error = genfs_getpages_generic_io_done();
    894 	}
    895 }
    896 
    897 int
    898 genfs_getpages_generic_io_done()
    899 {
    900 #endif
    901 
    902 	mutex_enter(&uobj->vmobjlock);
    903 
    904 	/*
    905 	 * we're almost done!  release the pages...
    906 	 * for errors, we free the pages.
    907 	 * otherwise we activate them and mark them as valid and clean.
    908 	 * also, unbusy pages that were not actually requested.
    909 	 */
    910 
    911 	if (error) {
    912 		for (i = 0; i < npages; i++) {
    913 			struct vm_page *pg = pgs[i];
    914 
    915 			if (pg == NULL) {
    916 				continue;
    917 			}
    918 			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    919 			    pg, pg->flags, 0,0);
    920 			if (pg->flags & PG_FAKE) {
    921 				pg->flags |= PG_RELEASED;
    922 			}
    923 		}
    924 		mutex_enter(&uvm_pageqlock);
    925 		uvm_page_unbusy(pgs, npages);
    926 		mutex_exit(&uvm_pageqlock);
    927 		mutex_exit(&uobj->vmobjlock);
    928 		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
    929 		goto out_err_free;
    930 	}
    931 
    932 out:
    933 	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
    934 	error = 0;
    935 	mutex_enter(&uvm_pageqlock);
    936 	for (i = 0; i < npages; i++) {
    937 		struct vm_page *pg = pgs[i];
    938 		if (pg == NULL) {
    939 			continue;
    940 		}
    941 		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    942 		    pg, pg->flags, 0,0);
    943 		if (pg->flags & PG_FAKE && !overwrite) {
    944 			pg->flags &= ~(PG_FAKE);
    945 			pmap_clear_modify(pgs[i]);
    946 		}
    947 		KASSERT(!memwrite || !blockalloc || (pg->flags & PG_RDONLY) == 0);
    948 		if (i < ridx || i >= ridx + orignmempages || async) {
    949 			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
    950 			    pg, pg->offset,0,0);
    951 			if (pg->flags & PG_WANTED) {
    952 				wakeup(pg);
    953 			}
    954 			if (pg->flags & PG_FAKE) {
    955 				KASSERT(overwrite);
    956 				uvm_pagezero(pg);
    957 			}
    958 			if (pg->flags & PG_RELEASED) {
    959 				uvm_pagefree(pg);
    960 				continue;
    961 			}
    962 			uvm_pageenqueue(pg);
    963 			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
    964 			UVM_PAGE_OWN(pg, NULL);
    965 		}
    966 	}
    967 	mutex_exit(&uvm_pageqlock);
    968 
    969 	if (memwrite) {
    970 		genfs_markdirty(vp);
    971 	}
    972 	mutex_exit(&uobj->vmobjlock);
    973 
    974 #if 1
    975 genfs_getpages_generic_io_done_done:
    976 	{}
    977 #endif
    978 	if (ap->a_m != NULL) {
    979 		memcpy(ap->a_m, &pgs[ridx],
    980 		    orignmempages * sizeof(struct vm_page *));
    981 	}
    982 #if 0
    983 }
    984 
    985 #endif
    986 
    987 out_err_free:
    988 	if (pgs != NULL && pgs != pgs_onstack)
    989 		kmem_free(pgs, pgs_size);
    990 out_err:
    991 	if (has_trans)
    992 		fstrans_done(vp->v_mount);
    993 	return error;
    994 }
    995 
    996 #ifdef XIP
    997 /*
    998  * genfs_do_getpages_xip_io
     999  *      Return the "direct pages" of an XIP vnode.  The block addresses
    1000  *      of the XIP vnode's pages are handed back to the VM fault handler
    1001  *	as the physical addresses to map.
   1002  */
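/*
 * Offset arithmetic below, with illustrative numbers: fs_bshift = 13 and
 * dev_bshift = 9, origoffset = 0x5000 gives lbn = 2; if VOP_BMAP maps
 * lbn 2 to blkno 0x100, then blk_off = 0x100 << 9 = 0x20000 and fs_off =
 * 0x5000 - (2 << 13) = 0x1000, so the first page is looked up at device
 * offset blk_off + fs_off = 0x21000, with pg_off advancing page by page.
 */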
   1003 static int
   1004 genfs_do_getpages_xip_io(
   1005 	struct vnode *vp,
   1006 	voff_t origoffset,
   1007 	struct vm_page **pps,
   1008 	int *npagesp,
   1009 	int centeridx,
   1010 	vm_prot_t access_type,
   1011 	int advice,
   1012 	int flags,
   1013 	const int orignmempages)
   1014 {
   1015 	const int fs_bshift = vp2fs_bshift(vp);
   1016 	const int dev_bshift = vp2dev_bshift(vp);
   1017 	const int fs_bsize = 1 << fs_bshift;
   1018 
   1019 	int error;
   1020 	off_t off;
   1021 	int i;
   1022 
   1023 	UVMHIST_FUNC("genfs_do_getpages_xip_io"); UVMHIST_CALLED(ubchist);
   1024 
   1025 	KASSERT(((flags & PGO_GLOCKHELD) != 0) || genfs_node_rdlocked(vp));
   1026 
   1027 #ifdef UVMHIST
   1028 	const off_t startoffset = trunc_blk(origoffset);
   1029 	const off_t endoffset = round_blk(origoffset + PAGE_SIZE * orignmempages);
   1030 #endif
   1031 
   1032 	UVMHIST_LOG(ubchist, "xip npages=%d startoffset=%lx endoffset=%lx",
   1033 	    orignmempages, (long)startoffset, (long)endoffset, 0);
   1034 
   1035 	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
   1036 
   1037 	off = origoffset;
   1038 	for (i = ridx; i < ridx + orignmempages; i++) {
   1039 		daddr_t blkno;
   1040 		int run;
   1041 		struct vnode *devvp;
   1042 
   1043 		KASSERT((off - origoffset) >> PAGE_SHIFT == i - ridx);
   1044 
   1045 		const daddr_t lbn = (off & ~(fs_bsize - 1)) >> fs_bshift;
   1046 
   1047 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1048 		KASSERT(error == 0);
   1049 		UVMHIST_LOG(ubchist, "xip VOP_BMAP: lbn=%ld blkno=%ld run=%d",
   1050 		    (long)lbn, (long)blkno, run, 0);
   1051 
   1052 		const daddr_t blk_off = blkno << dev_bshift;
   1053 		const daddr_t fs_off = origoffset - (lbn << fs_bshift);
   1054 
   1055 		/*
   1056 		 * XIP page metadata assignment
   1057 		 * - Unallocated block is redirected to the dedicated zero'ed
   1058 		 *   page.
   1059 		 */
   1060 		if (blkno < 0) {
   1061 			panic("XIP hole is not supported yet!");
   1062 		} else {
   1063 			KASSERT(off - origoffset == (i - ridx) << PAGE_SHIFT);
   1064 
   1065 			const daddr_t pg_off = (i - ridx) << PAGE_SHIFT;
   1066 
   1067 			UVMHIST_LOG(ubchist,
   1068 			    "xip blk_off=%lx fs_off=%lx pg_off=%lx",
   1069 			    (long)blk_off, (long)fs_off, (long)pg_off, 0);
   1070 
   1071 			pps[i] = uvn_findpage_xip(devvp, &vp->v_uobj,
   1072 			    blk_off + fs_off + pg_off);
   1073 			KASSERT(pps[i] != NULL);
   1074 		}
   1075 
   1076 		UVMHIST_LOG(ubchist, "xip pgs %d => phys_addr=0x%lx (%p)",
   1077 			i,
   1078 			(long)pps[i]->phys_addr,
   1079 			pps[i],
   1080 			0);
   1081 
   1082 		off += PAGE_SIZE;
   1083 	}
   1084 
   1085 	return 0;
   1086 }
   1087 
    1088 static int
   1089 genfs_do_getpages_xip_io_done(
   1090 	struct vnode *vp,
   1091 	voff_t origoffset,
   1092 	struct vm_page **pps,
   1093 	int *npagesp,
   1094 	int centeridx,
   1095 	vm_prot_t access_type,
   1096 	int advice,
   1097 	int flags,
   1098 	const int orignmempages)
   1099 {
   1100 	struct uvm_object * const uobj = &vp->v_uobj;
   1101 	int i;
   1102 
   1103 	const int fs_bshift = vp2fs_bshift(vp);
   1104 	const int fs_bsize = 1 << fs_bshift;
   1105 
   1106 	const off_t startoffset = trunc_blk(origoffset);
   1107 	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
   1108 
   1109 	mutex_enter(&uobj->vmobjlock);
   1110 
   1111 	for (i = ridx; i < ridx + orignmempages; i++) {
   1112 		struct vm_page *pg = pps[i];
   1113 
   1114 		KASSERT((pg->flags & PG_RDONLY) != 0);
   1115 		KASSERT((pg->flags & PG_BUSY) != 0);
   1116 		KASSERT((pg->flags & PG_CLEAN) != 0);
   1117 		KASSERT((pg->flags & PG_DEVICE) != 0);
   1118 		KASSERT((pg->flags & PG_FAKE) == 0);
   1119 		pg->uobject = &vp->v_uobj;
   1120 	}
   1121 
   1122 	mutex_exit(&uobj->vmobjlock);
   1123 
   1124 	*npagesp = orignmempages;
   1125 
   1126 	return 0;
   1127 }
   1128 #endif
   1129 
   1130 /*
   1131  * generic VM putpages routine.
   1132  * Write the given range of pages to backing store.
   1133  *
   1134  * => "offhi == 0" means flush all pages at or after "offlo".
   1135  * => object should be locked by caller.  we return with the
   1136  *      object unlocked.
   1137  * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
   1138  *	thus, a caller might want to unlock higher level resources
   1139  *	(e.g. vm_map) before calling flush.
   1140  * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
   1141  * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
   1142  * => NOTE: we rely on the fact that the object's memq is a TAILQ and
   1143  *	that new pages are inserted on the tail end of the list.   thus,
   1144  *	we can make a complete pass through the object in one go by starting
   1145  *	at the head and working towards the tail (new pages are put in
   1146  *	front of us).
   1147  * => NOTE: we are allowed to lock the page queues, so the caller
   1148  *	must not be holding the page queue lock.
   1149  *
   1150  * note on "cleaning" object and PG_BUSY pages:
   1151  *	this routine is holding the lock on the object.   the only time
   1152  *	that it can run into a PG_BUSY page that it does not own is if
   1153  *	some other process has started I/O on the page (e.g. either
   1154  *	a pagein, or a pageout).    if the PG_BUSY page is being paged
   1155  *	in, then it can not be dirty (!PG_CLEAN) because no one has
   1156  *	had a chance to modify it yet.    if the PG_BUSY page is being
   1157  *	paged out then it means that someone else has already started
   1158  *	cleaning the page for us (how nice!).    in this case, if we
   1159  *	have syncio specified, then after we make our pass through the
   1160  *	object we need to wait for the other PG_BUSY pages to clear
   1161  *	off (i.e. we need to do an iosync).   also note that once a
   1162  *	page is PG_BUSY it must stay in its object until it is un-busyed.
   1163  *
   1164  * note on page traversal:
   1165  *	we can traverse the pages in an object either by going down the
   1166  *	linked list in "uobj->memq", or we can go over the address range
   1167  *	by page doing hash table lookups for each address.    depending
   1168  *	on how many pages are in the object it may be cheaper to do one
   1169  *	or the other.   we set "by_list" to true if we are using memq.
   1170  *	if the cost of a hash lookup was equal to the cost of the list
   1171  *	traversal we could compare the number of pages in the start->stop
   1172  *	range to the total number of pages in the object.   however, it
   1173  *	seems that a hash table lookup is more expensive than the linked
   1174  *	list traversal, so we multiply the number of pages in the
   1175  *	range by an estimate of the relatively higher cost of the hash lookup.
   1176  */
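/*
 * Cost-model sketch for the by_list heuristic (illustrative): with
 * UVM_PAGE_TREE_PENALTY as the assumed relative cost of one lookup
 * versus one list step, flushing a 256-page range of an object holding
 * 10000 pages compares 10000 list steps against 256 * penalty lookups
 * and chooses the per-page lookups; flushing the whole object makes the
 * range effectively unbounded and chooses the list walk.
 */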
   1177 
   1178 int
   1179 genfs_putpages(void *v)
   1180 {
   1181 	struct vop_putpages_args /* {
   1182 		struct vnode *a_vp;
   1183 		voff_t a_offlo;
   1184 		voff_t a_offhi;
   1185 		int a_flags;
   1186 	} */ * const ap = v;
   1187 
   1188 #ifdef XIP
   1189 	if ((ap->a_vp->v_vflag & VV_XIP) != 0)
   1190 		return genfs_do_putpages_xip(ap->a_vp, ap->a_offlo, ap->a_offhi,
   1191 		    ap->a_flags, NULL);
   1192 	else
   1193 #endif
   1194 	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
   1195 	    ap->a_flags, NULL);
   1196 }
   1197 
   1198 int
   1199 genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
   1200     int origflags, struct vm_page **busypg)
   1201 {
   1202 	struct uvm_object * const uobj = &vp->v_uobj;
   1203 	kmutex_t * const slock = &uobj->vmobjlock;
   1204 	off_t off;
   1205 	/* Even for strange MAXPHYS, the shift rounds down to a page */
   1206 #define maxpages (MAXPHYS >> PAGE_SHIFT)
   1207 	int i, error, npages, nback;
   1208 	int freeflag;
   1209 	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
   1210 	bool wasclean, by_list, needs_clean, yld;
   1211 	bool async = (origflags & PGO_SYNCIO) == 0;
   1212 	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
   1213 	struct lwp * const l = curlwp ? curlwp : &lwp0;
   1214 	struct genfs_node * const gp = VTOG(vp);
   1215 	int flags;
   1216 	int dirtygen;
   1217 	bool modified;
   1218 	bool need_wapbl;
   1219 	bool has_trans;
   1220 	bool cleanall;
   1221 	bool onworklst;
   1222 
   1223 	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
   1224 
   1225 	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
   1226 	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
   1227 	KASSERT(startoff < endoff || endoff == 0);
   1228 
   1229 	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
   1230 	    vp, uobj->uo_npages, startoff, endoff - startoff);
   1231 
   1232 	has_trans = false;
   1233 	need_wapbl = (!pagedaemon && vp->v_mount && vp->v_mount->mnt_wapbl &&
   1234 	    (origflags & PGO_JOURNALLOCKED) == 0);
   1235 
   1236 retry:
   1237 	modified = false;
   1238 	flags = origflags;
   1239 	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
   1240 	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
   1241 	if (uobj->uo_npages == 0) {
   1242 		if (vp->v_iflag & VI_ONWORKLST) {
   1243 			vp->v_iflag &= ~VI_WRMAPDIRTY;
   1244 			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1245 				vn_syncer_remove_from_worklist(vp);
   1246 		}
   1247 		if (has_trans) {
   1248 			if (need_wapbl)
   1249 				WAPBL_END(vp->v_mount);
   1250 			fstrans_done(vp->v_mount);
   1251 		}
   1252 		mutex_exit(slock);
   1253 		return (0);
   1254 	}
   1255 
   1256 	/*
   1257 	 * the vnode has pages, set up to process the request.
   1258 	 */
   1259 
   1260 	if (!has_trans && (flags & PGO_CLEANIT) != 0) {
   1261 		mutex_exit(slock);
   1262 		if (pagedaemon) {
   1263 			error = fstrans_start_nowait(vp->v_mount, FSTRANS_LAZY);
   1264 			if (error)
   1265 				return error;
   1266 		} else
   1267 			fstrans_start(vp->v_mount, FSTRANS_LAZY);
   1268 		if (need_wapbl) {
   1269 			error = WAPBL_BEGIN(vp->v_mount);
   1270 			if (error) {
   1271 				fstrans_done(vp->v_mount);
   1272 				return error;
   1273 			}
   1274 		}
   1275 		has_trans = true;
   1276 		mutex_enter(slock);
   1277 		goto retry;
   1278 	}
   1279 
   1280 	error = 0;
   1281 	wasclean = (vp->v_numoutput == 0);
   1282 	off = startoff;
   1283 	if (endoff == 0 || flags & PGO_ALLPAGES) {
   1284 		endoff = trunc_page(LLONG_MAX);
   1285 	}
   1286 	by_list = (uobj->uo_npages <=
   1287 	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_TREE_PENALTY);
   1288 
   1289 #if !defined(DEBUG)
   1290 	/*
   1291 	 * if this vnode is known not to have dirty pages,
   1292 	 * don't bother to clean it out.
   1293 	 */
   1294 
   1295 	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
   1296 		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
   1297 			goto skip_scan;
   1298 		}
   1299 		flags &= ~PGO_CLEANIT;
   1300 	}
   1301 #endif /* !defined(DEBUG) */
   1302 
   1303 	/*
   1304 	 * start the loop.  when scanning by list, hold the last page
   1305 	 * in the list before we start.  pages allocated after we start
   1306 	 * will be added to the end of the list, so we can stop at the
   1307 	 * current last page.
   1308 	 */
   1309 
   1310 	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
   1311 	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
   1312 	    (vp->v_iflag & VI_ONWORKLST) != 0;
   1313 	dirtygen = gp->g_dirtygen;
   1314 	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
   1315 	if (by_list) {
   1316 		curmp.flags = PG_MARKER;
   1317 		endmp.flags = PG_MARKER;
   1318 		pg = TAILQ_FIRST(&uobj->memq);
   1319 		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
   1320 	} else {
   1321 		pg = uvm_pagelookup(uobj, off);
   1322 	}
   1323 	nextpg = NULL;
   1324 	while (by_list || off < endoff) {
   1325 
   1326 		/*
   1327 		 * if the current page is not interesting, move on to the next.
   1328 		 */
   1329 
   1330 		KASSERT(pg == NULL || pg->uobject == uobj ||
   1331 		    (pg->flags & PG_MARKER) != 0);
   1332 		KASSERT(pg == NULL ||
   1333 		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
   1334 		    (pg->flags & (PG_BUSY|PG_MARKER)) != 0);
   1335 		if (by_list) {
   1336 			if (pg == &endmp) {
   1337 				break;
   1338 			}
   1339 			if (pg->flags & PG_MARKER) {
   1340 				pg = TAILQ_NEXT(pg, listq.queue);
   1341 				continue;
   1342 			}
   1343 			if (pg->offset < startoff || pg->offset >= endoff ||
   1344 			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1345 				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1346 					wasclean = false;
   1347 				}
   1348 				pg = TAILQ_NEXT(pg, listq.queue);
   1349 				continue;
   1350 			}
   1351 			off = pg->offset;
   1352 		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1353 			if (pg != NULL) {
   1354 				wasclean = false;
   1355 			}
   1356 			off += PAGE_SIZE;
   1357 			if (off < endoff) {
   1358 				pg = uvm_pagelookup(uobj, off);
   1359 			}
   1360 			continue;
   1361 		}
   1362 
   1363 		/*
   1364 		 * if the current page needs to be cleaned and it's busy,
   1365 		 * wait for it to become unbusy.
   1366 		 */
   1367 
   1368 		yld = (l->l_cpu->ci_schedstate.spc_flags &
   1369 		    SPCF_SHOULDYIELD) && !pagedaemon;
   1370 		if (pg->flags & PG_BUSY || yld) {
   1371 			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
   1372 			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
   1373 				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
   1374 				error = EDEADLK;
   1375 				if (busypg != NULL)
   1376 					*busypg = pg;
   1377 				break;
   1378 			}
   1379 			if (pagedaemon) {
   1380 				/*
   1381 				 * someone has taken the page while we
   1382 				 * dropped the lock for fstrans_start.
   1383 				 */
   1384 				break;
   1385 			}
   1386 			if (by_list) {
   1387 				TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
   1388 				UVMHIST_LOG(ubchist, "curmp next %p",
   1389 				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
   1390 			}
   1391 			if (yld) {
   1392 				mutex_exit(slock);
   1393 				preempt();
   1394 				mutex_enter(slock);
   1395 			} else {
   1396 				pg->flags |= PG_WANTED;
   1397 				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
   1398 				mutex_enter(slock);
   1399 			}
   1400 			if (by_list) {
   1401 				UVMHIST_LOG(ubchist, "after next %p",
   1402 				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
   1403 				pg = TAILQ_NEXT(&curmp, listq.queue);
   1404 				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
   1405 			} else {
   1406 				pg = uvm_pagelookup(uobj, off);
   1407 			}
   1408 			continue;
   1409 		}
   1410 
   1411 		/*
   1412 		 * if we're freeing, remove all mappings of the page now.
    1413 		 * if we're cleaning, check if the page needs to be cleaned.
   1414 		 */
   1415 
   1416 		if (flags & PGO_FREE) {
   1417 			pmap_page_protect(pg, VM_PROT_NONE);
   1418 		} else if (flags & PGO_CLEANIT) {
   1419 
   1420 			/*
    1421 			 * if we still have some hope of pulling this vnode
    1422 			 * off the syncer queue, write-protect the page.
   1423 			 */
   1424 
   1425 			if (cleanall && wasclean &&
   1426 			    gp->g_dirtygen == dirtygen) {
   1427 
   1428 				/*
   1429 				 * uobj pages get wired only by uvm_fault
   1430 				 * where uobj is locked.
   1431 				 */
   1432 
   1433 				if (pg->wire_count == 0) {
   1434 					pmap_page_protect(pg,
   1435 					    VM_PROT_READ|VM_PROT_EXECUTE);
   1436 				} else {
   1437 					cleanall = false;
   1438 				}
   1439 			}
   1440 		}
   1441 
   1442 		if (flags & PGO_CLEANIT) {
   1443 			needs_clean = pmap_clear_modify(pg) ||
   1444 			    (pg->flags & PG_CLEAN) == 0;
   1445 			pg->flags |= PG_CLEAN;
   1446 		} else {
   1447 			needs_clean = false;
   1448 		}
   1449 
   1450 		/*
   1451 		 * if we're cleaning, build a cluster.
   1452 		 * the cluster will consist of pages which are currently dirty,
   1453 		 * but they will be returned to us marked clean.
   1454 		 * if not cleaning, just operate on the one page.
   1455 		 */
   1456 
   1457 		if (needs_clean) {
   1458 			KDASSERT((vp->v_iflag & VI_ONWORKLST));
   1459 			wasclean = false;
   1460 			memset(pgs, 0, sizeof(pgs));
   1461 			pg->flags |= PG_BUSY;
   1462 			UVM_PAGE_OWN(pg, "genfs_putpages");
   1463 
   1464 			/*
   1465 			 * first look backward.
   1466 			 */
   1467 
   1468 			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
   1469 			nback = npages;
   1470 			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
   1471 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
   1472 			if (nback) {
   1473 				memmove(&pgs[0], &pgs[npages - nback],
   1474 				    nback * sizeof(pgs[0]));
   1475 				if (npages - nback < nback)
   1476 					memset(&pgs[nback], 0,
   1477 					    (npages - nback) * sizeof(pgs[0]));
   1478 				else
   1479 					memset(&pgs[npages - nback], 0,
   1480 					    nback * sizeof(pgs[0]));
   1481 			}
   1482 
   1483 			/*
   1484 			 * then plug in our page of interest.
   1485 			 */
   1486 
   1487 			pgs[nback] = pg;
   1488 
   1489 			/*
   1490 			 * then look forward to fill in the remaining space in
   1491 			 * the array of pages.
   1492 			 */
   1493 
   1494 			npages = maxpages - nback - 1;
   1495 			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
   1496 			    &pgs[nback + 1],
   1497 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
   1498 			npages += nback + 1;
   1499 		} else {
   1500 			pgs[0] = pg;
   1501 			npages = 1;
   1502 			nback = 0;
   1503 		}
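		/*
		 * Cluster layout sketch (illustrative, assuming 64KB
		 * MAXPHYS and 4KB pages, i.e. maxpages = 16): if nback =
		 * 3 dirty pages are found behind us, the memmove leaves
		 * them in pgs[0..2], pg itself is plugged into pgs[3],
		 * and up to maxpages - nback - 1 = 12 dirty pages ahead
		 * fill pgs[4..15].
		 */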
   1504 
   1505 		/*
   1506 		 * apply FREE or DEACTIVATE options if requested.
   1507 		 */
   1508 
   1509 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1510 			mutex_enter(&uvm_pageqlock);
   1511 		}
   1512 		for (i = 0; i < npages; i++) {
   1513 			tpg = pgs[i];
   1514 			KASSERT(tpg->uobject == uobj);
   1515 			if (by_list && tpg == TAILQ_NEXT(pg, listq.queue))
   1516 				pg = tpg;
   1517 			if (tpg->offset < startoff || tpg->offset >= endoff)
   1518 				continue;
   1519 			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
   1520 				uvm_pagedeactivate(tpg);
   1521 			} else if (flags & PGO_FREE) {
   1522 				pmap_page_protect(tpg, VM_PROT_NONE);
   1523 				if (tpg->flags & PG_BUSY) {
   1524 					tpg->flags |= freeflag;
   1525 					if (pagedaemon) {
   1526 						uvm_pageout_start(1);
   1527 						uvm_pagedequeue(tpg);
   1528 					}
   1529 				} else {
   1530 
   1531 					/*
   1532 					 * ``page is not busy''
   1533 					 * implies that npages is 1
   1534 					 * and needs_clean is false.
   1535 					 */
   1536 
   1537 					nextpg = TAILQ_NEXT(tpg, listq.queue);
   1538 					uvm_pagefree(tpg);
   1539 					if (pagedaemon)
   1540 						uvmexp.pdfreed++;
   1541 				}
   1542 			}
   1543 		}
   1544 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1545 			mutex_exit(&uvm_pageqlock);
   1546 		}
   1547 		if (needs_clean) {
   1548 			modified = true;
   1549 
   1550 			/*
   1551 			 * start the i/o.  if we're traversing by list,
   1552 			 * keep our place in the list with a marker page.
   1553 			 */
   1554 
   1555 			if (by_list) {
   1556 				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
   1557 				    listq.queue);
   1558 			}
   1559 			mutex_exit(slock);
   1560 			error = GOP_WRITE(vp, pgs, npages, flags);
   1561 			mutex_enter(slock);
   1562 			if (by_list) {
   1563 				pg = TAILQ_NEXT(&curmp, listq.queue);
   1564 				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
   1565 			}
   1566 			if (error) {
   1567 				break;
   1568 			}
   1569 			if (by_list) {
   1570 				continue;
   1571 			}
   1572 		}
   1573 
   1574 		/*
   1575 		 * find the next page and continue if there was no error.
   1576 		 */
   1577 
   1578 		if (by_list) {
   1579 			if (nextpg) {
   1580 				pg = nextpg;
   1581 				nextpg = NULL;
   1582 			} else {
   1583 				pg = TAILQ_NEXT(pg, listq.queue);
   1584 			}
   1585 		} else {
   1586 			off += (npages - nback) << PAGE_SHIFT;
   1587 			if (off < endoff) {
   1588 				pg = uvm_pagelookup(uobj, off);
   1589 			}
   1590 		}
   1591 	}
   1592 	if (by_list) {
   1593 		TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
   1594 	}
   1595 
   1596 	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
   1597 	    (vp->v_type != VBLK ||
   1598 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
   1599 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
   1600 	}
   1601 
   1602 	/*
   1603 	 * if we're cleaning and there was nothing to clean,
   1604 	 * take us off the syncer list.  if we started any i/o
   1605 	 * and we're doing sync i/o, wait for all writes to finish.
   1606 	 */
   1607 
   1608 	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
   1609 	    (vp->v_iflag & VI_ONWORKLST) != 0) {
   1610 #if defined(DEBUG)
   1611 		TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
   1612 			if ((pg->flags & PG_MARKER) != 0) {
   1613 				continue;
   1614 			}
   1615 			if ((pg->flags & PG_CLEAN) == 0) {
   1616 				printf("%s: %p: !CLEAN\n", __func__, pg);
   1617 			}
   1618 			if (pmap_is_modified(pg)) {
   1619 				printf("%s: %p: modified\n", __func__, pg);
   1620 			}
   1621 		}
   1622 #endif /* defined(DEBUG) */
   1623 		vp->v_iflag &= ~VI_WRMAPDIRTY;
   1624 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1625 			vn_syncer_remove_from_worklist(vp);
   1626 	}
   1627 
   1628 #if !defined(DEBUG)
   1629 skip_scan:
   1630 #endif /* !defined(DEBUG) */
   1631 
   1632 	/* Wait for output to complete. */
   1633 	if (!wasclean && !async && vp->v_numoutput != 0) {
   1634 		while (vp->v_numoutput != 0)
   1635 			cv_wait(&vp->v_cv, slock);
   1636 	}
   1637 	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
   1638 	mutex_exit(slock);
   1639 
   1640 	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
   1641 		/*
    1642 		 * in the case of PGO_RECLAIM, make sure the vnode ends up clean.
   1643 		 * retrying is not a big deal because, in many cases,
   1644 		 * uobj->uo_npages is already 0 here.
   1645 		 */
   1646 		mutex_enter(slock);
   1647 		goto retry;
   1648 	}
   1649 
   1650 	if (has_trans) {
   1651 		if (need_wapbl)
   1652 			WAPBL_END(vp->v_mount);
   1653 		fstrans_done(vp->v_mount);
   1654 	}
   1655 
   1656 	return (error);
   1657 }
   1658 
   1659 #ifdef XIP
    1660 static int
   1661 genfs_do_putpages_xip(struct vnode *vp, off_t startoff, off_t endoff,
   1662     int flags, struct vm_page **busypg)
   1663 {
   1664 	struct uvm_object *uobj = &vp->v_uobj;
   1665 #ifdef DIAGNOSTIC
   1666 	struct genfs_node * const gp = VTOG(vp);
   1667 #endif
   1668 
   1669 	UVMHIST_FUNC("genfs_do_putpages_xip"); UVMHIST_CALLED(ubchist);
   1670 
   1671 	KASSERT(mutex_owned(&uobj->vmobjlock));
   1672 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1673 	KASSERT(vp->v_numoutput == 0);
   1674 	KASSERT(gp->g_dirtygen == 0);
   1675 
   1676 	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
   1677 	    vp, uobj->uo_npages, startoff, endoff - startoff);
   1678 
   1679 	/*
   1680 	 * XIP pages are read-only, and never become dirty.  They're also never
   1681 	 * queued.  PGO_DEACTIVATE and PGO_CLEANIT are meaningless for XIP
   1682 	 * pages, so we ignore them.
   1683 	 */
   1684 	if ((flags & PGO_FREE) == 0)
   1685 		goto done;
   1686 
   1687 	/*
   1688 	 * For PGO_FREE (or (PGO_CLEANIT | PGO_FREE)), we invalidate MMU
   1689 	 * mappings of both XIP pages and XIP zero pages.
   1690 	 *
    1691 	 * The zero page is freed when any one of the offsets mapping it is
    1692 	 * freed, even if a single file (vnode) has many holes and maps its
    1693 	 * zero page at every one of those hole pages.
   1694 	 *
   1695 	 * We don't know which pages are currently mapped in the given vnode,
   1696 	 * because XIP pages are not added to vnode.  What we can do is to
    1697 	 * because XIP pages are not added to the vnode.  What we can do is
    1698 	 * locate the pages by querying the filesystem, as getpages does: call
   1699 	 */
   1700 
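	/*
	 * Call-pattern sketch (illustrative): a full invalidation such as
	 * VOP_PUTPAGES(vp, 0, 0, PGO_FREE | PGO_ALLPAGES) lands here, eof
	 * comes from GOP_SIZE(), and each pass of the loop below
	 * re-locates at most maxpages device pages via
	 * genfs_do_getpages_xip_io() before dropping their PG_BUSY.
	 */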
   1701 	off_t off, eof;
   1702 
   1703 	off = trunc_page(startoff);
   1704 	if (endoff == 0 || (flags & PGO_ALLPAGES))
   1705 		GOP_SIZE(vp, vp->v_size, &eof, GOP_SIZE_MEM);
   1706 	else
   1707 		eof = endoff;
   1708 
   1709 	while (off < eof) {
   1710 		int npages, orignpages, error, i;
   1711 		struct vm_page *pgs[maxpages], *pg;
   1712 
   1713 		npages = round_page(eof - off) >> PAGE_SHIFT;
   1714 		if (npages > maxpages)
   1715 			npages = maxpages;
   1716 
   1717 		orignpages = npages;
   1718 		KASSERT(mutex_owned(&uobj->vmobjlock));
   1719 		mutex_exit(&uobj->vmobjlock);
   1720 		error = genfs_do_getpages_xip_io(vp, off, pgs, &npages, 0,
   1721 		    VM_PROT_ALL, 0, PGO_GLOCKHELD, orignpages);
   1722 		KASSERT(error == 0);
   1723 		KASSERT(npages == orignpages);
   1724 		mutex_enter(&uobj->vmobjlock);
   1725 		for (i = 0; i < npages; i++) {
   1726 			pg = pgs[i];
   1727 			if (pg == NULL || pg == PGO_DONTCARE)
   1728 				continue;
   1729 			/*
   1730 			 * Freeing normal XIP pages; nothing to do.
   1731 			 */
   1732 			pmap_page_protect(pg, VM_PROT_NONE);
   1733 			KASSERT((pg->flags & PG_RDONLY) != 0);
   1734 			KASSERT((pg->flags & PG_CLEAN) != 0);
   1735 			KASSERT((pg->flags & PG_FAKE) == 0);
   1736 			KASSERT((pg->flags & PG_DEVICE) != 0);
   1737 			pg->flags &= ~PG_BUSY;
   1738 		}
   1739 		off += npages << PAGE_SHIFT;
   1740 	}
   1741 
   1742 	KASSERT(uobj->uo_npages == 0);
   1743 
   1744 done:
   1745 	KASSERT(mutex_owned(&uobj->vmobjlock));
   1746 	mutex_exit(&uobj->vmobjlock);
   1747 	return 0;
   1748 }
   1749 #endif
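        /*
         * A minimal dispatch sketch (assumed, not part of this file): the XIP
         * putpages path above would be reached from genfs_putpages() (or a
         * filesystem's own VOP_PUTPAGES) by testing the vnode for
         * execute-in-place backing before the normal page-list walk.  The
         * predicate below is hypothetical; the real test depends on how this
         * tree marks XIP vnodes.
         *
         *	if (vnode_is_xip(vp))		(hypothetical predicate)
         *		return genfs_do_putpages_xip(vp, startoff, endoff,
         *		    flags, busypg);
         */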
   1750 
   1751 int
   1752 genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1753 {
   1754 	off_t off;
   1755 	vaddr_t kva;
   1756 	size_t len;
   1757 	int error;
   1758 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1759 
   1760 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1761 	    vp, pgs, npages, flags);
   1762 
   1763 	off = pgs[0]->offset;
   1764 	kva = uvm_pagermapin(pgs, npages,
   1765 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1766 	len = npages << PAGE_SHIFT;
   1767 
   1768 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1769 			    uvm_aio_biodone);
   1770 
   1771 	return error;
   1772 }
   1773 
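        /*
         * genfs_gop_write_rwmap: a variant of genfs_gop_write.  The only
         * difference is the pager-window flag: UVMPAGER_MAPIN_READ leaves the
         * kernel mapping of the pages writable, so callers whose strategy
         * routine may legitimately modify the pages while the write is in
         * flight (UDF's directory buffers, for example) can use this entry
         * point instead.
         */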
   1774 int
   1775 genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1776 {
   1777 	off_t off;
   1778 	vaddr_t kva;
   1779 	size_t len;
   1780 	int error;
   1781 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1782 
   1783 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1784 	    vp, pgs, npages, flags);
   1785 
   1786 	off = pgs[0]->offset;
   1787 	kva = uvm_pagermapin(pgs, npages,
   1788 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1789 	len = npages << PAGE_SHIFT;
   1790 
   1791 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1792 			    uvm_aio_biodone);
   1793 
   1794 	return error;
   1795 }
   1796 
   1797 /*
   1798  * Backend routine for doing I/O to vnode pages.  Pages are already locked
   1799  * and mapped into kernel memory.  Here we just look up the underlying
   1800  * device block addresses and call the strategy routine.
   1801  */
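        /*
         * The transfer is issued as one master buffer (mbp) plus zero or more
         * nested sub-buffers (bp), one per physically contiguous extent that
         * VOP_BMAP reports.  nestiobuf_setup() ties each sub-buffer's
         * completion back to mbp, and nestiobuf_done() accounts for any bytes
         * skipped over holes or bmap errors, so mbp completes only once every
         * piece of the request has been resolved.
         */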
   1802 
   1803 static int
   1804 genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
   1805     enum uio_rw rw, void (*iodone)(struct buf *))
   1806 {
   1807 	int s, error;
   1808 	int fs_bshift, dev_bshift;
   1809 	off_t eof, offset, startoffset;
   1810 	size_t bytes, iobytes, skipbytes;
   1811 	struct buf *mbp, *bp;
   1812 	const bool async = (flags & PGO_SYNCIO) == 0;
   1813 	const bool iowrite = rw == UIO_WRITE;
   1814 	const int brw = iowrite ? B_WRITE : B_READ;
   1815 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1816 
   1817 	UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
   1818 	    vp, kva, len, flags);
   1819 
   1820 	KASSERT(vp->v_size <= vp->v_writesize);
   1821 	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
   1822 	if (vp->v_type != VBLK) {
   1823 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1824 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1825 	} else {
   1826 		fs_bshift = DEV_BSHIFT;
   1827 		dev_bshift = DEV_BSHIFT;
   1828 	}
   1829 	error = 0;
   1830 	startoffset = off;
   1831 	bytes = MIN(len, eof - startoffset);
   1832 	skipbytes = 0;
   1833 	KASSERT(bytes != 0);
   1834 
   1835 	if (iowrite) {
   1836 		mutex_enter(&vp->v_interlock);
   1837 		vp->v_numoutput += 2;
   1838 		mutex_exit(&vp->v_interlock);
   1839 	}
   1840 	mbp = getiobuf(vp, true);
   1841 	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
   1842 	    vp, mbp, vp->v_numoutput, bytes);
   1843 	mbp->b_bufsize = len;
   1844 	mbp->b_data = (void *)kva;
   1845 	mbp->b_resid = mbp->b_bcount = bytes;
   1846 	mbp->b_cflags = BC_BUSY | BC_AGE;
   1847 	if (async) {
   1848 		mbp->b_flags = brw | B_ASYNC;
   1849 		mbp->b_iodone = iodone;
   1850 	} else {
   1851 		mbp->b_flags = brw;
   1852 		mbp->b_iodone = NULL;
   1853 	}
   1854 	if (curlwp == uvm.pagedaemon_lwp)
   1855 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
   1856 	else if (async)
   1857 		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
   1858 	else
   1859 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
   1860 
   1861 	bp = NULL;
   1862 	for (offset = startoffset;
   1863 	    bytes > 0;
   1864 	    offset += iobytes, bytes -= iobytes) {
   1865 		int run;
   1866 		daddr_t lbn, blkno;
   1867 		struct vnode *devvp;
   1868 
   1869 		/*
   1870 		 * bmap the file to find out the blkno to read from and
   1871 		 * how much we can read in one i/o.  if bmap returns an error,
   1872 		 * skip the rest of the top-level i/o.
   1873 		 */
   1874 
   1875 		lbn = offset >> fs_bshift;
   1876 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1877 		if (error) {
   1878 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
   1879 			    lbn,error,0,0);
   1880 			skipbytes += bytes;
   1881 			bytes = 0;
   1882 			goto loopdone;
   1883 		}
   1884 
   1885 		/*
   1886 		 * see how much of this i/o can be done in one transfer:
   1887 		 * limit it to the contiguous run of blocks that VOP_BMAP
   1888 		 * reported, and to the bytes remaining in the request.
   1889 		 */
   1890 
   1891 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1892 		    bytes);
   1893 
   1894 		/*
   1895 		 * if this block isn't allocated, skip it: for a read, zero
   1896 		 * the corresponding part of the buffer instead of reading it;
   1897 		 * for a write, a hole means there is nothing to push out.
   1898 		 */
   1899 
   1900 		if (blkno == (daddr_t)-1) {
   1901 			if (!iowrite) {
   1902 				memset((char *)kva + (offset - startoffset), 0,
   1903 				    iobytes);
   1904 			}
   1905 			skipbytes += iobytes;
   1906 			continue;
   1907 		}
   1908 
   1909 		/*
   1910 		 * allocate a sub-buf for this piece of the i/o
   1911 		 * (or just use mbp if there's only 1 piece),
   1912 		 * and start it going.
   1913 		 */
   1914 
   1915 		if (offset == startoffset && iobytes == bytes) {
   1916 			bp = mbp;
   1917 		} else {
   1918 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
   1919 			    vp, bp, vp->v_numoutput, 0);
   1920 			bp = getiobuf(vp, true);
   1921 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
   1922 		}
   1923 		bp->b_lblkno = 0;
   1924 
   1925 		/* adjust physical blkno for partial blocks */
   1926 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1927 		    dev_bshift);
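        		/*
        		 * Worked example (values assumed for illustration): with
        		 * fs_bshift = 13 (8 KiB blocks) and dev_bshift = 9
        		 * (512-byte sectors), offset 0x2200 falls in lbn 1, so
        		 * b_blkno = blkno + ((0x2200 - 0x2000) >> 9) = blkno + 1.
        		 */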
   1928 
   1929 		UVMHIST_LOG(ubchist,
   1930 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
   1931 		    bp, offset, bp->b_bcount, bp->b_blkno);
   1932 
   1933 		VOP_STRATEGY(devvp, bp);
   1934 	}
   1935 
   1936 loopdone:
   1937 	if (skipbytes) {
   1938 		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
   1939 	}
   1940 	nestiobuf_done(mbp, skipbytes, error);
   1941 	if (async) {
   1942 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1943 		return (0);
   1944 	}
   1945 	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
   1946 	error = biowait(mbp);
   1947 	s = splbio();
   1948 	(*iodone)(mbp);
   1949 	splx(s);
   1950 	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
   1951 	return (error);
   1952 }
   1953 
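        /*
         * genfs_compat_getpages: a getpages implementation for filesystems
         * that cannot support the VOP_BMAP/VOP_STRATEGY fast path.  Pages are
         * looked up (or allocated) in the vnode's object, temporarily mapped
         * into the pager window, and filled one page at a time with plain
         * VOP_READ calls.
         */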
   1954 int
   1955 genfs_compat_getpages(void *v)
   1956 {
   1957 	struct vop_getpages_args /* {
   1958 		struct vnode *a_vp;
   1959 		voff_t a_offset;
   1960 		struct vm_page **a_m;
   1961 		int *a_count;
   1962 		int a_centeridx;
   1963 		vm_prot_t a_access_type;
   1964 		int a_advice;
   1965 		int a_flags;
   1966 	} */ *ap = v;
   1967 
   1968 	off_t origoffset;
   1969 	struct vnode *vp = ap->a_vp;
   1970 	struct uvm_object *uobj = &vp->v_uobj;
   1971 	struct vm_page *pg, **pgs;
   1972 	vaddr_t kva;
   1973 	int i, error, orignpages, npages;
   1974 	struct iovec iov;
   1975 	struct uio uio;
   1976 	kauth_cred_t cred = curlwp->l_cred;
   1977 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1978 
   1979 	error = 0;
   1980 	origoffset = ap->a_offset;
   1981 	orignpages = *ap->a_count;
   1982 	pgs = ap->a_m;
   1983 
   1984 	if (ap->a_flags & PGO_LOCKED) {
   1985 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
   1986 		    UFP_NOWAIT|UFP_NOALLOC| (memwrite ? UFP_NORDONLY : 0));
   1987 
   1988 		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
   1989 		if (error == 0 && memwrite) {
   1990 			genfs_markdirty(vp);
   1991 		}
   1992 		return error;
   1993 	}
   1994 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1995 		mutex_exit(&uobj->vmobjlock);
   1996 		return EINVAL;
   1997 	}
   1998 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
   1999 		mutex_exit(&uobj->vmobjlock);
   2000 		return 0;
   2001 	}
   2002 	npages = orignpages;
   2003 	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
   2004 	mutex_exit(&uobj->vmobjlock);
   2005 	kva = uvm_pagermapin(pgs, npages,
   2006 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   2007 	for (i = 0; i < npages; i++) {
   2008 		pg = pgs[i];
   2009 		if ((pg->flags & PG_FAKE) == 0) {
   2010 			continue;
   2011 		}
   2012 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   2013 		iov.iov_len = PAGE_SIZE;
   2014 		uio.uio_iov = &iov;
   2015 		uio.uio_iovcnt = 1;
   2016 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   2017 		uio.uio_rw = UIO_READ;
   2018 		uio.uio_resid = PAGE_SIZE;
   2019 		UIO_SETUP_SYSSPACE(&uio);
   2020 		/* XXX vn_lock */
   2021 		error = VOP_READ(vp, &uio, 0, cred);
   2022 		if (error) {
   2023 			break;
   2024 		}
   2025 		if (uio.uio_resid) {
   2026 			memset(iov.iov_base, 0, uio.uio_resid);
   2027 		}
   2028 	}
   2029 	uvm_pagermapout(kva, npages);
   2030 	mutex_enter(&uobj->vmobjlock);
   2031 	mutex_enter(&uvm_pageqlock);
   2032 	for (i = 0; i < npages; i++) {
   2033 		pg = pgs[i];
   2034 		if (error && (pg->flags & PG_FAKE) != 0) {
   2035 			pg->flags |= PG_RELEASED;
   2036 		} else {
   2037 			pmap_clear_modify(pg);
   2038 			uvm_pageactivate(pg);
   2039 		}
   2040 	}
   2041 	if (error) {
   2042 		uvm_page_unbusy(pgs, npages);
   2043 	}
   2044 	mutex_exit(&uvm_pageqlock);
   2045 	if (error == 0 && memwrite) {
   2046 		genfs_markdirty(vp);
   2047 	}
   2048 	mutex_exit(&uobj->vmobjlock);
   2049 	return error;
   2050 }
   2051 
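        /*
         * genfs_compat_gop_write: the pageout counterpart of
         * genfs_compat_getpages.  The pages are pushed out with a plain
         * VOP_WRITE, after which a buf is synthesized and handed to
         * uvm_aio_aiodone() so that the usual page unbusying and write
         * accounting still happen.
         */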
   2052 int
   2053 genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   2054     int flags)
   2055 {
   2056 	off_t offset;
   2057 	struct iovec iov;
   2058 	struct uio uio;
   2059 	kauth_cred_t cred = curlwp->l_cred;
   2060 	struct buf *bp;
   2061 	vaddr_t kva;
   2062 	int error;
   2063 
   2064 	offset = pgs[0]->offset;
   2065 	kva = uvm_pagermapin(pgs, npages,
   2066 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   2067 
   2068 	iov.iov_base = (void *)kva;
   2069 	iov.iov_len = npages << PAGE_SHIFT;
   2070 	uio.uio_iov = &iov;
   2071 	uio.uio_iovcnt = 1;
   2072 	uio.uio_offset = offset;
   2073 	uio.uio_rw = UIO_WRITE;
   2074 	uio.uio_resid = npages << PAGE_SHIFT;
   2075 	UIO_SETUP_SYSSPACE(&uio);
   2076 	/* XXX vn_lock */
   2077 	error = VOP_WRITE(vp, &uio, 0, cred);
   2078 
   2079 	mutex_enter(&vp->v_interlock);
   2080 	vp->v_numoutput++;
   2081 	mutex_exit(&vp->v_interlock);
   2082 
   2083 	bp = getiobuf(vp, true);
   2084 	bp->b_cflags = BC_BUSY | BC_AGE;
   2085 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   2086 	bp->b_data = (char *)kva;
   2087 	bp->b_bcount = npages << PAGE_SHIFT;
   2088 	bp->b_bufsize = npages << PAGE_SHIFT;
   2089 	bp->b_resid = 0;
   2090 	bp->b_error = error;
   2091 	uvm_aio_aiodone(bp);
   2092 	return (error);
   2093 }
   2094 
   2095 /*
   2096  * Process a uio using direct I/O.  If we reach a part of the request
   2097  * which cannot be processed in this fashion for some reason, just return.
   2098  * The caller must handle some additional part of the request using
   2099  * buffered I/O before trying direct I/O again.
   2100  */
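        /*
         * A minimal caller sketch (assumed, not part of this file): a
         * filesystem's read/write routine typically tries the direct path
         * first and lets its ordinary buffered loop consume whatever
         * uio_resid the direct path could not handle:
         *
         *	if ((ioflag & IO_DIRECT) != 0)
         *		genfs_directio(vp, uio, ioflag);
         *	... then fall through to the buffered loop ...
         */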
   2101 
   2102 void
   2103 genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
   2104 {
   2105 	struct vmspace *vs;
   2106 	struct iovec *iov;
   2107 	vaddr_t va;
   2108 	size_t len;
   2109 	const int mask = DEV_BSIZE - 1;
   2110 	int error;
   2111 	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
   2112 	    (ioflag & IO_JOURNALLOCKED) == 0);
   2113 
   2114 	/*
   2115 	 * We only support direct I/O to user space for now.
   2116 	 */
   2117 
   2118 	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
   2119 		return;
   2120 	}
   2121 
   2122 	/*
   2123 	 * If the vnode is mapped, we would need to get the getpages lock
   2124 	 * to stabilize the bmap, but then we would get into trouble while
   2125 	 * locking the pages if the pages belong to this same vnode (or a
   2126 	 * multi-vnode cascade to the same effect).  Just fall back to
   2127 	 * buffered I/O if the vnode is mapped to avoid this mess.
   2128 	 */
   2129 
   2130 	if (vp->v_vflag & VV_MAPPED) {
   2131 		return;
   2132 	}
   2133 
   2134 	if (need_wapbl) {
   2135 		error = WAPBL_BEGIN(vp->v_mount);
   2136 		if (error)
   2137 			return;
   2138 	}
   2139 
   2140 	/*
   2141 	 * Do as much of the uio as possible with direct I/O.
   2142 	 */
   2143 
   2144 	vs = uio->uio_vmspace;
   2145 	while (uio->uio_resid) {
   2146 		iov = uio->uio_iov;
   2147 		if (iov->iov_len == 0) {
   2148 			uio->uio_iov++;
   2149 			uio->uio_iovcnt--;
   2150 			continue;
   2151 		}
   2152 		va = (vaddr_t)iov->iov_base;
   2153 		len = MIN(iov->iov_len, genfs_maxdio);
   2154 		len &= ~mask;
   2155 
   2156 		/*
   2157 		 * If the next chunk is smaller than DEV_BSIZE or extends past
   2158 		 * the current EOF, then fall back to buffered I/O.
   2159 		 */
   2160 
   2161 		if (len == 0 || uio->uio_offset + len > vp->v_size) {
   2162 			break;
   2163 		}
   2164 
   2165 		/*
   2166 		 * Check alignment.  The file offset must be at least
   2167 		 * sector-aligned.  The exact constraint on memory alignment
   2168 		 * is very hardware-dependent, but requiring sector-aligned
   2169 		 * addresses there too is safe.
   2170 		 */
   2171 
   2172 		if (uio->uio_offset & mask || va & mask) {
   2173 			break;
   2174 		}
   2175 		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
   2176 					  uio->uio_rw);
   2177 		if (error) {
   2178 			break;
   2179 		}
   2180 		iov->iov_base = (char *)iov->iov_base + len;
   2181 		iov->iov_len -= len;
   2182 		uio->uio_offset += len;
   2183 		uio->uio_resid -= len;
   2184 	}
   2185 
   2186 	if (need_wapbl)
   2187 		WAPBL_END(vp->v_mount);
   2188 }
   2189 
   2190 /*
   2191  * Iodone routine for direct I/O.  We don't do much here since the request is
   2192  * always synchronous, so the caller will do most of the work after biowait().
   2193  */
   2194 
   2195 static void
   2196 genfs_dio_iodone(struct buf *bp)
   2197 {
   2198 
   2199 	KASSERT((bp->b_flags & B_ASYNC) == 0);
   2200 	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
   2201 		mutex_enter(bp->b_objlock);
   2202 		vwakeup(bp);
   2203 		mutex_exit(bp->b_objlock);
   2204 	}
   2205 	putiobuf(bp);
   2206 }
   2207 
   2208 /*
   2209  * Process one chunk of a direct I/O request.
   2210  */
   2211 
   2212 static int
   2213 genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
   2214     off_t off, enum uio_rw rw)
   2215 {
   2216 	struct vm_map *map;
   2217 	struct pmap *upm, *kpm;
   2218 	size_t klen = round_page(uva + len) - trunc_page(uva);
   2219 	off_t spoff, epoff;
   2220 	vaddr_t kva, puva;
   2221 	paddr_t pa;
   2222 	vm_prot_t prot;
   2223 	int error, rv, poff, koff;
   2224 	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
   2225 		(rw == UIO_WRITE ? PGO_FREE : 0);
   2226 
   2227 	/*
   2228 	 * For writes, verify that this range of the file already has fully
   2229 	 * allocated backing store.  If there are any holes, just punt and
   2230 	 * make the caller take the buffered write path.
   2231 	 */
   2232 
   2233 	if (rw == UIO_WRITE) {
   2234 		daddr_t lbn, elbn, blkno;
   2235 		int bsize, bshift, run;
   2236 
   2237 		bshift = vp->v_mount->mnt_fs_bshift;
   2238 		bsize = 1 << bshift;
   2239 		lbn = off >> bshift;
   2240 		elbn = (off + len + bsize - 1) >> bshift;
   2241 		while (lbn < elbn) {
   2242 			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
   2243 			if (error) {
   2244 				return error;
   2245 			}
   2246 			if (blkno == (daddr_t)-1) {
   2247 				return ENOSPC;
   2248 			}
   2249 			lbn += 1 + run;
   2250 		}
   2251 	}
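        	/*
        	 * Worked example of the scan above (values assumed): with 8 KiB
        	 * blocks (bshift = 13), off = 4096 and len = 16384 give lbn = 0
        	 * and elbn = (4096 + 16384 + 8191) >> 13 = 3, so logical blocks
        	 * 0-2 are checked; a contiguous run from VOP_BMAP advances lbn
        	 * past all of them in a single query.
        	 */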
   2252 
   2253 	/*
   2254 	 * Flush any cached pages for parts of the file that we're about to
   2255 	 * access.  If we're writing, invalidate pages as well.
   2256 	 */
   2257 
   2258 	spoff = trunc_page(off);
   2259 	epoff = round_page(off + len);
   2260 	mutex_enter(&vp->v_interlock);
   2261 	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
   2262 	if (error) {
   2263 		return error;
   2264 	}
   2265 
   2266 	/*
   2267 	 * Wire the user pages and remap them into kernel memory.
   2268 	 */
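        	/*
        	 * Note the direction inversion: UIO_READ fills user memory, so
        	 * the user pages must be wired with write permission; UIO_WRITE
        	 * only reads from them.
        	 */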
   2269 
   2270 	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
   2271 	error = uvm_vslock(vs, (void *)uva, len, prot);
   2272 	if (error) {
   2273 		return error;
   2274 	}
   2275 
   2276 	map = &vs->vm_map;
   2277 	upm = vm_map_pmap(map);
   2278 	kpm = vm_map_pmap(kernel_map);
   2279 	kva = uvm_km_alloc(kernel_map, klen, 0,
   2280 			   UVM_KMF_VAONLY | UVM_KMF_WAITVA);
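        	/*
        	 * klen spans whole pages covering [uva, uva + len).  Example
        	 * (values assumed, 4 KiB pages): uva = 0x20001200, len = 0x2000
        	 * gives trunc_page(uva) = 0x20001000 and round_page(uva + len) =
        	 * 0x20004000, so klen = 0x3000 -- three pages for an unaligned
        	 * two-page-sized request.
        	 */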
   2281 	puva = trunc_page(uva);
   2282 	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
   2283 		rv = pmap_extract(upm, puva + poff, &pa);
   2284 		KASSERT(rv);
   2285 		pmap_enter(kpm, kva + poff, pa, prot, prot | PMAP_WIRED);
   2286 	}
   2287 	pmap_update(kpm);
   2288 
   2289 	/*
   2290 	 * Do the I/O.
   2291 	 */
   2292 
   2293 	koff = uva - trunc_page(uva);
   2294 	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
   2295 			    genfs_dio_iodone);
   2296 
   2297 	/*
   2298 	 * Tear down the kernel mapping.
   2299 	 */
   2300 
   2301 	pmap_remove(kpm, kva, kva + klen);
   2302 	pmap_update(kpm);
   2303 	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
   2304 
   2305 	/*
   2306 	 * Unwire the user pages.
   2307 	 */
   2308 
   2309 	uvm_vsunlock(vs, (void *)uva, len);
   2310 	return error;
   2311 }
   2312 
   2313