genfs_io.c revision 1.36.2.47
      1 /*	$NetBSD: genfs_io.c,v 1.36.2.47 2010/11/19 15:25:37 uebayasi Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1982, 1986, 1989, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. Neither the name of the University nor the names of its contributors
     16  *    may be used to endorse or promote products derived from this software
     17  *    without specific prior written permission.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  *
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.36.2.47 2010/11/19 15:25:37 uebayasi Exp $");
     35 
     36 #include "opt_xip.h"
     37 
     38 #include <sys/param.h>
     39 #include <sys/systm.h>
     40 #include <sys/proc.h>
     41 #include <sys/kernel.h>
     42 #include <sys/mount.h>
     43 #include <sys/namei.h>
     44 #include <sys/vnode.h>
     45 #include <sys/fcntl.h>
     46 #include <sys/kmem.h>
     47 #include <sys/poll.h>
     48 #include <sys/mman.h>
     49 #include <sys/file.h>
     50 #include <sys/kauth.h>
     51 #include <sys/fstrans.h>
     52 #include <sys/buf.h>
     53 #include <sys/once.h>
     54 
     55 #include <miscfs/genfs/genfs.h>
     56 #include <miscfs/genfs/genfs_node.h>
     57 #include <miscfs/specfs/specdev.h>
     58 
     59 #include <uvm/uvm.h>
     60 #include <uvm/uvm_pager.h>
     61 
     62 #ifdef XIP
     63 static int genfs_do_getpages_xip_io(struct vnode *, voff_t, struct vm_page **,
     64     int *, int, vm_prot_t, int, int, const int);
     65 static int genfs_do_getpages_xip_io_done(struct vnode *, voff_t, struct vm_page **,
     66     int *, int, vm_prot_t, int, int, const int);
     67 static int genfs_do_putpages_xip(struct vnode *, off_t, off_t, int,
     68     struct vm_page **);
     69 #endif
     70 static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
     71     off_t, enum uio_rw);
     72 static void genfs_dio_iodone(struct buf *);
     73 
     74 static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
     75     void (*)(struct buf *));
     76 static void genfs_rel_pages(struct vm_page **, int);
     77 static void genfs_markdirty(struct vnode *);
     78 
     79 int genfs_maxdio = MAXPHYS;
     80 
     81 static void
     82 genfs_rel_pages(struct vm_page **pgs, int npages)
     83 {
     84 	int i;
     85 
     86 	for (i = 0; i < npages; i++) {
     87 		struct vm_page *pg = pgs[i];
     88 
     89 		if (pg == NULL || pg == PGO_DONTCARE)
     90 			continue;
     91 		if (pg->flags & PG_FAKE) {
     92 			pg->flags |= PG_RELEASED;
     93 		}
     94 	}
     95 	mutex_enter(&uvm_pageqlock);
     96 	uvm_page_unbusy(pgs, npages);
     97 	mutex_exit(&uvm_pageqlock);
     98 }
     99 
    100 static void
    101 genfs_markdirty(struct vnode *vp)
    102 {
    103 	struct genfs_node * const gp = VTOG(vp);
    104 
    105 	KASSERT(mutex_owned(&vp->v_interlock));
    106 	gp->g_dirtygen++;
    107 	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
    108 		vn_syncer_add_to_worklist(vp, filedelay);
    109 	}
    110 	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
    111 		vp->v_iflag |= VI_WRMAPDIRTY;
    112 	}
    113 }
    114 
    115 /*
    116  * generic VM getpages routine.
    117  * Return PG_BUSY pages for the given range,
    118  * reading from backing store if necessary.
    119  */
    120 
    121 int
    122 genfs_getpages(void *v)
    123 {
    124 	struct vop_getpages_args /* {
    125 		struct vnode *a_vp;
    126 		voff_t a_offset;
    127 		struct vm_page **a_m;
    128 		int *a_count;
    129 		int a_centeridx;
    130 		vm_prot_t a_access_type;
    131 		int a_advice;
    132 		int a_flags;
    133 	} */ * const ap = v;
    134 
    135 	off_t diskeof, memeof;
    136 	int i, error, npages;
    137 	const int flags = ap->a_flags;
    138 	struct vnode * const vp = ap->a_vp;
    139 	struct uvm_object * const uobj = &vp->v_uobj;
    140 	kauth_cred_t const cred = curlwp->l_cred;		/* XXXUBC curlwp */
    141 	const bool async = (flags & PGO_SYNCIO) == 0;
    142 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
    143 	bool has_trans = false;
    144 	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
    145 	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
    146 	const bool glocked = (flags & PGO_GLOCKHELD) != 0;
    147 	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
    148 
    149 	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
    150 	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
    151 
    152 	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
    153 	    vp->v_type == VLNK || vp->v_type == VBLK);
    154 
    155 startover:
    156 	error = 0;
    157 	const voff_t origvsize = vp->v_size;
    158 	const off_t origoffset = ap->a_offset;
    159 	const int orignpages = *ap->a_count;
    160 
    161 	GOP_SIZE(vp, origvsize, &diskeof, 0);
    162 	if (flags & PGO_PASTEOF) {
    163 		off_t newsize;
    164 #if defined(DIAGNOSTIC)
    165 		off_t writeeof;
    166 #endif /* defined(DIAGNOSTIC) */
    167 
    168 		newsize = MAX(origvsize,
    169 		    origoffset + (orignpages << PAGE_SHIFT));
    170 		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
    171 #if defined(DIAGNOSTIC)
    172 		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
    173 		if (newsize > round_page(writeeof)) {
    174 			panic("%s: past eof: %" PRId64 " vs. %" PRId64,
    175 			    __func__, newsize, round_page(writeeof));
    176 		}
    177 #endif /* defined(DIAGNOSTIC) */
    178 	} else {
    179 		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
    180 	}
     181 	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
    182 	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
    183 	KASSERT(orignpages > 0);
    184 
    185 	/*
    186 	 * Bounds-check the request.
    187 	 */
    188 
    189 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
    190 		if ((flags & PGO_LOCKED) == 0) {
    191 			mutex_exit(&uobj->vmobjlock);
    192 		}
    193 		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
    194 		    origoffset, *ap->a_count, memeof,0);
    195 		error = EINVAL;
    196 		goto out_err;
    197 	}
    198 
    199 	/* uobj is locked */
    200 
    201 	if ((flags & PGO_NOTIMESTAMP) == 0 &&
    202 	    (vp->v_type != VBLK ||
    203 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
    204 		int updflags = 0;
    205 
    206 		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
    207 			updflags = GOP_UPDATE_ACCESSED;
    208 		}
    209 		if (memwrite) {
    210 			updflags |= GOP_UPDATE_MODIFIED;
    211 		}
    212 		if (updflags != 0) {
    213 			GOP_MARKUPDATE(vp, updflags);
    214 		}
    215 	}
    216 
    217 	/*
    218 	 * For PGO_LOCKED requests, just return whatever's in memory.
    219 	 */
    220 
    221 	if (flags & PGO_LOCKED) {
    222 #if 0
    223 		genfs_getpages_mem();
    224 	} else {
    225 		genfs_getpages_io();
    226 	}
    227 }
    228 
    229 int
    230 genfs_getpages_mem()
    231 {
    232 #endif
    233 		int nfound;
    234 		struct vm_page *pg;
    235 
    236 #ifdef XIP
    237 		if ((ap->a_vp->v_vflag & VV_XIP) != 0) {
    238 			*ap->a_count = 0;
    239 			return 0;
    240 		}
    241 #endif
    242 
    243 		KASSERT(!glocked);
    244 		npages = *ap->a_count;
    245 #if defined(DEBUG)
    246 		for (i = 0; i < npages; i++) {
    247 			pg = ap->a_m[i];
    248 			KASSERT(pg == NULL || pg == PGO_DONTCARE);
    249 		}
    250 #endif /* defined(DEBUG) */
    251 		nfound = uvn_findpages(uobj, origoffset, &npages,
    252 		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
    253 		KASSERT(npages == *ap->a_count);
    254 		if (nfound == 0) {
    255 			error = EBUSY;
    256 			goto out_err;
    257 		}
    258 		if (!genfs_node_rdtrylock(vp)) {
    259 			genfs_rel_pages(ap->a_m, npages);
    260 
    261 			/*
    262 			 * restore the array.
    263 			 */
    264 
    265 			for (i = 0; i < npages; i++) {
    266 				pg = ap->a_m[i];
    267 
    268 				if (pg != NULL && pg != PGO_DONTCARE) {
    269 					ap->a_m[i] = NULL;
    270 				}
     271 				KASSERT(ap->a_m[i] == NULL || ap->a_m[i] == PGO_DONTCARE);
    272 			}
    273 		} else {
    274 			genfs_node_unlock(vp);
    275 		}
    276 		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
    277 		if (error == 0 && memwrite) {
    278 			genfs_markdirty(vp);
    279 		}
    280 		goto out_err;
    281 	}
    282 	mutex_exit(&uobj->vmobjlock);
    283 #if 0
    284 }
    285 
    286 int
    287 genfs_getpages_io()
    288 {
    289 #endif
    290 	/*
    291 	 * find the requested pages and make some simple checks.
    292 	 * leave space in the page array for a whole block.
    293 	 */
    294 
    295 #define	vp2fs_bshift(vp) \
    296 	(((vp)->v_type != VBLK) ? (vp)->v_mount->mnt_fs_bshift : DEV_BSHIFT)
    297 #define	vp2dev_bshift(vp) \
    298 	(((vp)->v_type != VBLK) ? (vp)->v_mount->mnt_dev_bshift : DEV_BSHIFT)
    299 
    300 	const int fs_bshift = vp2fs_bshift(vp);
    301 	const int dev_bshift = vp2dev_bshift(vp);
    302 	const int fs_bsize = 1 << fs_bshift;
    303 #define	blk_mask	(fs_bsize - 1)
    304 #define	trunc_blk(x)	((x) & ~blk_mask)
    305 #define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)
    306 
    307 	const int orignmempages = MIN(orignpages,
    308 	    round_page(memeof - origoffset) >> PAGE_SHIFT);
    309 	npages = orignmempages;
    310 	const off_t startoffset = trunc_blk(origoffset);
    311 	const off_t endoffset = MIN(
    312 	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
    313 	    round_page(memeof));
    314 	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
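
	/*
	 * A worked example with assumed sizes (PAGE_SIZE 4096, fs_bshift 13,
	 * hence fs_bsize 8192 and blk_mask 0x1fff), ignoring the memeof
	 * clamp: for origoffset 0x5000 and a single requested page,
	 *	startoffset = trunc_blk(0x5000)          = 0x4000
	 *	endoffset   = round_blk(0x5000 + 0x1000) = 0x6000
	 *	ridx        = (0x5000 - 0x4000) >> 12    = 1
	 * i.e. pgs[] spans the whole 8 KB block (2 pages) and the requested
	 * page sits at index 1.
	 */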
    315 
    316 	const int pgs_size = sizeof(struct vm_page *) *
    317 	    ((endoffset - startoffset) >> PAGE_SHIFT);
    318 	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];
    319 
    320 	if (pgs_size > sizeof(pgs_onstack)) {
    321 		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
    322 		if (pgs == NULL) {
    323 			pgs = pgs_onstack;
    324 			error = ENOMEM;
    325 			goto out_err;
    326 		}
    327 	} else {
    328 		pgs = pgs_onstack;
    329 		(void)memset(pgs, 0, pgs_size);
    330 	}
    331 
    332 	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
    333 	    ridx, npages, startoffset, endoffset);
    334 #if 0
    335 }
    336 
    337 int
    338 genfs_getpages_io_relock()
    339 {
    340 #endif
    341 	if (!has_trans) {
    342 		fstrans_start(vp->v_mount, FSTRANS_SHARED);
    343 		has_trans = true;
    344 	}
    345 
    346 	/*
    347 	 * hold g_glock to prevent a race with truncate.
    348 	 *
    349 	 * check if our idea of v_size is still valid.
    350 	 */
    351 
    352 	KASSERT(!glocked || genfs_node_wrlocked(vp));
    353 	if (!glocked) {
    354 		if (blockalloc) {
    355 			genfs_node_wrlock(vp);
    356 		} else {
    357 			genfs_node_rdlock(vp);
    358 		}
    359 	}
    360 	mutex_enter(&uobj->vmobjlock);
    361 	if (vp->v_size < origvsize) {
    362 		if (!glocked) {
    363 			genfs_node_unlock(vp);
    364 		}
    365 		if (pgs != pgs_onstack)
    366 			kmem_free(pgs, pgs_size);
    367 		goto startover;
    368 	}
    369 #if 0
    370 }
    371 
    372 int
    373 genfs_getpages_io_findpages()
    374 {
    375 #endif
    376 #ifdef XIP
    377 	if ((ap->a_vp->v_vflag & VV_XIP) != 0)
    378 		goto genfs_getpages_io_read_allocpages_done;
    379 #endif
    380 
    381 	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
    382 	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
    383 		if (!glocked) {
    384 			genfs_node_unlock(vp);
    385 		}
    386 		KASSERT(async != 0);
    387 		genfs_rel_pages(&pgs[ridx], orignmempages);
    388 		mutex_exit(&uobj->vmobjlock);
    389 		error = EBUSY;
    390 		goto out_err_free;
    391 	}
    392 
    393 	/*
    394 	 * if the pages are already resident, just return them.
    395 	 */
    396 
    397 	for (i = 0; i < npages; i++) {
    398 		struct vm_page *pg = pgs[ridx + i];
    399 
    400 		if ((pg->flags & PG_FAKE) ||
    401 		    (blockalloc && (pg->flags & PG_RDONLY))) {
    402 			break;
    403 		}
    404 	}
    405 	if (i == npages) {
    406 		if (!glocked) {
    407 			genfs_node_unlock(vp);
    408 		}
    409 		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
    410 		npages += ridx;
    411 		goto out;
    412 	}
    413 
    414 	/*
    415 	 * if PGO_OVERWRITE is set, don't bother reading the pages.
    416 	 */
    417 
    418 	if (overwrite) {
    419 #if 0
    420 		genfs_getpages_io_overwrite();
    421 	} else {
    422 		genfs_getpages_io_read();
    423 	}
    424 }
    425 
    426 int
    427 genfs_getpages_io_overwrite()
    428 {
    429 	{
    430 #endif
    431 		if (!glocked) {
    432 			genfs_node_unlock(vp);
    433 		}
    434 		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
    435 
    436 		for (i = 0; i < npages; i++) {
    437 			struct vm_page *pg = pgs[ridx + i];
    438 
    439 			pg->flags &= ~(PG_RDONLY|PG_CLEAN);
    440 		}
    441 		npages += ridx;
    442 		goto out;
    443 	}
    444 #if 0
    445 }
    446 
    447 int
    448 genfs_getpages_io_read()
    449 {
    450 #endif
    451 	/*
    452 	 * the page wasn't resident and we're not overwriting,
    453 	 * so we're going to have to do some i/o.
    454 	 * find any additional pages needed to cover the expanded range.
    455 	 */
    456 #if 0
    457 }
    458 
    459 int
    460 genfs_getpages_io_read_allocpages()
    461 {
    462 #endif
    463 	npages = (endoffset - startoffset) >> PAGE_SHIFT;
    464 	if (startoffset != origoffset || npages != orignmempages) {
    465 		int npgs;
    466 
    467 		/*
    468 		 * we need to avoid deadlocks caused by locking
    469 		 * additional pages at lower offsets than pages we
    470 		 * already have locked.  unlock them all and start over.
    471 		 */
    472 
    473 		genfs_rel_pages(&pgs[ridx], orignmempages);
    474 		memset(pgs, 0, pgs_size);
    475 
    476 		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
    477 		    startoffset, endoffset, 0,0);
    478 		npgs = npages;
    479 		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
    480 		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
    481 			if (!glocked) {
    482 				genfs_node_unlock(vp);
    483 			}
    484 			KASSERT(async != 0);
    485 			genfs_rel_pages(pgs, npages);
    486 			mutex_exit(&uobj->vmobjlock);
    487 			error = EBUSY;
    488 			goto out_err_free;
    489 		}
    490 	}
    491 #ifdef XIP
    492 genfs_getpages_io_read_allocpages_done:
    493 #endif
    494 #if 0
    495 }
    496 
    497 int
    498 genfs_getpages_io_read_bio()
    499 {
    500 #endif
    501 	mutex_exit(&uobj->vmobjlock);
    502 
    503     {
    504 	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
    505 	vaddr_t kva = 0;
    506 	struct buf *bp = NULL, *mbp = NULL;
    507 	bool sawhole = false;
    508 
    509 	/*
    510 	 * read the desired page(s).
    511 	 */
    512 
    513 	totalbytes = npages << PAGE_SHIFT;
    514 	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
    515 	tailbytes = totalbytes - bytes;
    516 	skipbytes = 0;
    517 
    518 #if 1
    519 	if ((ap->a_vp->v_vflag & VV_XIP) != 0)
    520 		goto genfs_getpages_bio_prepare_done;
    521 #endif
    522 #if 0
    523 }
    524 
    525 int
    526 genfs_getpages_io_read_bio_prepare()
    527 {
    528 #endif
    529 	kva = uvm_pagermapin(pgs, npages,
    530 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
    531 
    532 	mbp = getiobuf(vp, true);
    533 	mbp->b_bufsize = totalbytes;
    534 	mbp->b_data = (void *)kva;
    535 	mbp->b_resid = mbp->b_bcount = bytes;
    536 	mbp->b_cflags = BC_BUSY;
    537 	if (async) {
    538 		mbp->b_flags = B_READ | B_ASYNC;
    539 		mbp->b_iodone = uvm_aio_biodone;
    540 	} else {
    541 		mbp->b_flags = B_READ;
    542 		mbp->b_iodone = NULL;
    543 	}
    544 	if (async)
    545 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
    546 	else
    547 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
    548 #if 0
    549 }
    550 
    551 #endif
    552 #if 1
    553 genfs_getpages_bio_prepare_done:
    554 #endif
    555 
    556 	/*
    557 	 * if EOF is in the middle of the range, zero the part past EOF.
    558 	 * skip over pages which are not PG_FAKE since in that case they have
    559 	 * valid data that we need to preserve.
    560 	 */
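
	/*
	 * A worked example with assumed numbers: for npages 4 (totalbytes
	 * 0x4000) and diskeof - startoffset = 0x2400, bytes = 0x2400 and
	 * tailbytes = 0x1c00.  The loop below first zeroes the last 0xc00
	 * bytes of the page at 0x2000, then the whole page at 0x3000,
	 * skipping any page that is not PG_FAKE.
	 */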
    561 
    562 	tailstart = bytes;
    563 	while (tailbytes > 0) {
    564 		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);
    565 
    566 		KASSERT(len <= tailbytes);
    567 		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
    568 			memset((void *)(kva + tailstart), 0, len);
    569 			UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
    570 			    kva, tailstart, len, 0);
    571 		}
    572 		tailstart += len;
    573 		tailbytes -= len;
    574 	}
    575 
    576 #if 1
    577 	if ((ap->a_vp->v_vflag & VV_XIP) != 0) {
    578 		error = genfs_do_getpages_xip_io(
    579 			ap->a_vp,
    580 			ap->a_offset,
    581 			pgs,
    582 			ap->a_count,
    583 			ap->a_centeridx,
    584 			ap->a_access_type,
    585 			ap->a_advice,
    586 			ap->a_flags,
    587 			orignmempages);
    588 		goto loopdone;
    589 	}
    590 #endif
    591 #if 0
    592 }
    593 
    594 int
    595 genfs_getpages_io_read_bio_loop()
    596 {
    597 #endif
    598 	/*
    599 	 * now loop over the pages, reading as needed.
    600 	 */
    601 
    602 	bp = NULL;
    603 	off_t offset;
    604 	for (offset = startoffset;
    605 	    bytes > 0;
    606 	    offset += iobytes, bytes -= iobytes) {
    607 		int run;
    608 		daddr_t lbn, blkno;
    609 		int pidx;
    610 		struct vnode *devvp;
    611 
    612 		/*
    613 		 * skip pages which don't need to be read.
    614 		 */
    615 
    616 		pidx = (offset - startoffset) >> PAGE_SHIFT;
    617 		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
    618 			size_t b;
    619 
    620 			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
    621 			if ((pgs[pidx]->flags & PG_RDONLY)) {
    622 				sawhole = true;
    623 			}
    624 			b = MIN(PAGE_SIZE, bytes);
    625 			offset += b;
    626 			bytes -= b;
    627 			skipbytes += b;
    628 			pidx++;
    629 			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
    630 			    offset, 0,0,0);
    631 			if (bytes == 0) {
    632 				goto loopdone;
    633 			}
    634 		}
    635 
    636 		/*
    637 		 * bmap the file to find out the blkno to read from and
    638 		 * how much we can read in one i/o.  if bmap returns an error,
    639 		 * skip the rest of the top-level i/o.
    640 		 */
    641 
    642 		lbn = offset >> fs_bshift;
    643 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    644 		if (error) {
    645 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
    646 			    lbn,error,0,0);
    647 			skipbytes += bytes;
    648 			bytes = 0;
    649 			goto loopdone;
    650 		}
    651 
    652 		/*
    653 		 * see how many pages can be read with this i/o.
    654 		 * reduce the i/o size if necessary to avoid
    655 		 * overwriting pages with valid data.
    656 		 */
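
		/*
		 * A worked example with assumed numbers: with fs_bshift 13,
		 * lbn 2, run 1 and offset 0x5000, the mapped extent ends at
		 * ((2 + 1 + 1) << 13) = 0x8000, so iobytes is at most
		 * 0x8000 - 0x5000 = 0x3000 before the PG_FAKE clamp below.
		 */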
    657 
    658 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
    659 		    bytes);
    660 		if (offset + iobytes > round_page(offset)) {
    661 			int pcount;
    662 
    663 			pcount = 1;
    664 			while (pidx + pcount < npages &&
    665 			    pgs[pidx + pcount]->flags & PG_FAKE) {
    666 				pcount++;
    667 			}
    668 			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
    669 			    (offset - trunc_page(offset)));
    670 		}
    671 
    672 		/*
    673 		 * if this block isn't allocated, zero it instead of
    674 		 * reading it.  unless we are going to allocate blocks,
    675 		 * mark the pages we zeroed PG_RDONLY.
    676 		 */
    677 
    678 		if (blkno == (daddr_t)-1) {
    679 			int holepages = (round_page(offset + iobytes) -
    680 			    trunc_page(offset)) >> PAGE_SHIFT;
    681 			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
    682 
    683 			sawhole = true;
    684 			memset((char *)kva + (offset - startoffset), 0,
    685 			    iobytes);
    686 			skipbytes += iobytes;
    687 
    688 			for (i = 0; i < holepages; i++) {
    689 				if (memwrite) {
    690 					pgs[pidx + i]->flags &= ~PG_CLEAN;
    691 				}
    692 				if (!blockalloc) {
    693 					pgs[pidx + i]->flags |= PG_RDONLY;
    694 				}
    695 			}
    696 			continue;
    697 		}
    698 
    699 		/*
    700 		 * allocate a sub-buf for this piece of the i/o
    701 		 * (or just use mbp if there's only 1 piece),
    702 		 * and start it going.
    703 		 */
    704 
    705 		if (offset == startoffset && iobytes == bytes) {
    706 			bp = mbp;
    707 		} else {
    708 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
    709 			    vp, bp, vp->v_numoutput, 0);
    710 			bp = getiobuf(vp, true);
    711 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
    712 		}
    713 		bp->b_lblkno = 0;
    714 
    715 		/* adjust physical blkno for partial blocks */
    716 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
    717 		    dev_bshift);
    718 
    719 		UVMHIST_LOG(ubchist,
    720 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
    721 		    bp, offset, bp->b_bcount, bp->b_blkno);
    722 
    723 		VOP_STRATEGY(devvp, bp);
    724 	}
    725 
    726 loopdone:
    727 #if 1
    728 	if ((ap->a_vp->v_vflag & VV_XIP) != 0)
    729 		goto genfs_getpages_biodone_done;
    730 #endif
    731 #if 0
    732 
    733 int
    734 genfs_getpages_biodone()
    735 {
    736 #endif
    737 	nestiobuf_done(mbp, skipbytes, error);
    738 	if (async) {
    739 		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
    740 		if (!glocked) {
    741 			genfs_node_unlock(vp);
    742 		}
    743 		error = 0;
    744 		goto out_err_free;
    745 	}
    746 	if (bp != NULL) {
    747 		error = biowait(mbp);
    748 	}
    749 
    750 	/* Remove the mapping (make KVA available as soon as possible) */
    751 	uvm_pagermapout(kva, npages);
    752 
    753 	/*
    754 	 * if we encountered a hole then we have to do a little more work.
    755 	 * for read faults, we marked the page PG_RDONLY so that future
    756 	 * write accesses to the page will fault again.
    757 	 * for write faults, we must make sure that the backing store for
    758 	 * the page is completely allocated while the pages are locked.
    759 	 */
    760 
    761 	if (!error && sawhole && blockalloc) {
    762 		/*
    763 		 * XXX: This assumes that we come here only via
    764 		 * the mmio path
    765 		 */
    766 		if (vp->v_mount->mnt_wapbl) {
    767 			error = WAPBL_BEGIN(vp->v_mount);
    768 		}
    769 
    770 		if (!error) {
    771 			error = GOP_ALLOC(vp, startoffset,
    772 			    npages << PAGE_SHIFT, 0, cred);
    773 			if (vp->v_mount->mnt_wapbl) {
    774 				WAPBL_END(vp->v_mount);
    775 			}
    776 		}
    777 
    778 		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
    779 		    startoffset, npages << PAGE_SHIFT, error,0);
    780 		if (!error) {
    781 			for (i = 0; i < npages; i++) {
    782 				struct vm_page *pg = pgs[i];
    783 
    784 				if (pg == NULL) {
    785 					continue;
    786 				}
    787 				pg->flags &= ~(PG_CLEAN|PG_RDONLY);
    788 				UVMHIST_LOG(ubchist, "mark dirty pg %p",
    789 				    pg,0,0,0);
    790 			}
    791 		}
    792 	}
    793 
    794 	putiobuf(mbp);
    795 #if 0
    796 }
    797 
    798 #endif
    799 #if 1
    800 genfs_getpages_biodone_done:
    801 	{}
    802 #endif
    803     }
    804 
    805 	if (!glocked) {
    806 		genfs_node_unlock(vp);
    807 	}
    808 
    809 #if 1
    810 	if ((ap->a_vp->v_vflag & VV_XIP) != 0) {
    811 		error = genfs_do_getpages_xip_io_done(
    812 			ap->a_vp,
    813 			ap->a_offset,
    814 			pgs,
    815 			ap->a_count,
    816 			ap->a_centeridx,
    817 			ap->a_access_type,
    818 			ap->a_advice,
    819 			ap->a_flags,
    820 			orignmempages);
    821 		goto genfs_getpages_generic_io_done_done;
    822 	}
    823 #endif
    824 #if 0
    825 	else {
    826 		error = genfs_getpages_generic_io_done();
    827 	}
    828 }
    829 
    830 int
    831 genfs_getpages_generic_io_done()
    832 {
    833 #endif
    834 
    835 	mutex_enter(&uobj->vmobjlock);
    836 
    837 	/*
    838 	 * we're almost done!  release the pages...
    839 	 * for errors, we free the pages.
    840 	 * otherwise we activate them and mark them as valid and clean.
    841 	 * also, unbusy pages that were not actually requested.
    842 	 */
    843 
    844 	if (error) {
    845 		for (i = 0; i < npages; i++) {
    846 			struct vm_page *pg = pgs[i];
    847 
    848 			if (pg == NULL) {
    849 				continue;
    850 			}
    851 			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    852 			    pg, pg->flags, 0,0);
    853 			if (pg->flags & PG_FAKE) {
    854 				pg->flags |= PG_RELEASED;
    855 			}
    856 		}
    857 		mutex_enter(&uvm_pageqlock);
    858 		uvm_page_unbusy(pgs, npages);
    859 		mutex_exit(&uvm_pageqlock);
    860 		mutex_exit(&uobj->vmobjlock);
    861 		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
    862 		goto out_err_free;
    863 	}
    864 
    865 out:
    866 	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
    867 	error = 0;
    868 	mutex_enter(&uvm_pageqlock);
    869 	for (i = 0; i < npages; i++) {
    870 		struct vm_page *pg = pgs[i];
    871 		if (pg == NULL) {
    872 			continue;
    873 		}
    874 		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    875 		    pg, pg->flags, 0,0);
    876 		if (pg->flags & PG_FAKE && !overwrite) {
    877 			pg->flags &= ~(PG_FAKE);
    878 			pmap_clear_modify(pgs[i]);
    879 		}
    880 		KASSERT(!memwrite || !blockalloc || (pg->flags & PG_RDONLY) == 0);
    881 		if (i < ridx || i >= ridx + orignmempages || async) {
    882 			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
    883 			    pg, pg->offset,0,0);
    884 			if (pg->flags & PG_WANTED) {
    885 				wakeup(pg);
    886 			}
    887 			if (pg->flags & PG_FAKE) {
    888 				KASSERT(overwrite);
    889 				uvm_pagezero(pg);
    890 			}
    891 			if (pg->flags & PG_RELEASED) {
    892 				uvm_pagefree(pg);
    893 				continue;
    894 			}
    895 			uvm_pageenqueue(pg);
    896 			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
    897 			UVM_PAGE_OWN(pg, NULL);
    898 		}
    899 	}
    900 	mutex_exit(&uvm_pageqlock);
    901 
    902 	if (memwrite) {
    903 		genfs_markdirty(vp);
    904 	}
    905 	mutex_exit(&uobj->vmobjlock);
    906 #if 1
    907 genfs_getpages_generic_io_done_done:
    908 	{}
    909 #endif
    910 	if (ap->a_m != NULL) {
    911 		memcpy(ap->a_m, &pgs[ridx],
    912 		    orignmempages * sizeof(struct vm_page *));
    913 	}
    914 #if 0
    915 }
    916 
    917 #endif
    918 
    919 out_err_free:
    920 	if (pgs != NULL && pgs != pgs_onstack)
    921 		kmem_free(pgs, pgs_size);
    922 out_err:
    923 	if (has_trans)
    924 		fstrans_done(vp->v_mount);
    925 	return error;
    926 }
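
/*
 * A minimal, hypothetical usage sketch (example_getpage() is not part of
 * this file): how a caller might pull one page through VOP_GETPAGES and
 * unbusy it again.  Per the comment above genfs_getpages(), the object is
 * locked on entry, the call returns with it unlocked, and the pages come
 * back PG_BUSY.  "off" must be page-aligned.
 */
#if 0
static int
example_getpage(struct vnode *vp, voff_t off)
{
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg = NULL;
	int npages = 1;
	int error;

	mutex_enter(&uobj->vmobjlock);
	/* genfs_getpages() drops vmobjlock before returning. */
	error = VOP_GETPAGES(vp, off, &pg, &npages, 0, VM_PROT_READ,
	    UVM_ADV_NORMAL, PGO_SYNCIO);
	if (error)
		return error;

	/* ... consume the PG_BUSY page here ... */

	/* Unbusy under the object lock, as genfs_rel_pages() does. */
	mutex_enter(&uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	uvm_page_unbusy(&pg, 1);
	mutex_exit(&uvm_pageqlock);
	mutex_exit(&uobj->vmobjlock);
	return 0;
}
#endif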
    927 
    928 #ifdef XIP
    929 /*
    930  * genfs_do_getpages_xip_io
    931  *      Return the "direct pages" of an XIP vnode.  The block addresses
    932  *      of XIP vnode pages are returned to the VM fault handler as the
    933  *	physical addresses that are actually mapped.
    934  */
    935 static int
    936 genfs_do_getpages_xip_io(
    937 	struct vnode *vp,
    938 	voff_t origoffset,
    939 	struct vm_page **pps,
    940 	int *npagesp,
    941 	int centeridx,
    942 	vm_prot_t access_type,
    943 	int advice,
    944 	int flags,
    945 	const int orignmempages)
    946 {
    947 	const int fs_bshift = vp2fs_bshift(vp);
    948 	const int dev_bshift = vp2dev_bshift(vp);
    949 	const int fs_bsize = 1 << fs_bshift;
    950 
    951 	int error;
    952 	off_t off;
    953 	int i;
    954 
    955 	UVMHIST_FUNC("genfs_do_getpages_xip_io"); UVMHIST_CALLED(ubchist);
    956 
    957 	KASSERT(((flags & PGO_GLOCKHELD) != 0) || genfs_node_rdlocked(vp));
    958 
    959 	const off_t startoffset = trunc_blk(origoffset);
    960 #ifdef UVMHIST
    961 	const off_t endoffset = round_blk(origoffset + PAGE_SIZE * orignmempages);
    962 #endif
    963 
    964 	UVMHIST_LOG(ubchist, "xip npages=%d startoffset=%lx endoffset=%lx",
    965 	    orignmempages, (long)startoffset, (long)endoffset, 0);
    966 
    967 	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
    968 
    969 	off = origoffset;
    970 	for (i = ridx; i < ridx + orignmempages; i++) {
    971 		daddr_t lbn, blkno;
    972 		int run;
    973 		struct vnode *devvp;
    974 
    975 		lbn = (off & ~(fs_bsize - 1)) >> fs_bshift;
    976 
    977 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    978 		KASSERT(error == 0);
    979 		UVMHIST_LOG(ubchist, "xip VOP_BMAP: lbn=%ld blkno=%ld run=%d",
    980 		    (long)lbn, (long)blkno, run, 0);
    981 
    982 		/*
    983 		 * XIP page metadata assignment
    984 		 * - Unallocated block is redirected to the dedicated zero'ed
    985 		 *   page.
    986 		 */
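
		/*
		 * A worked example with assumed numbers: with fs_bshift 13,
		 * dev_bshift 9, off 0x5000 and VOP_BMAP mapping lbn 2 to
		 * blkno 0x100, the page below is looked up at device offset
		 * (0x100 << 9) + (0x5000 - (2 << 13)) = 0x21000.
		 */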
    987 		if (blkno < 0) {
    988 			panic("XIP hole is not supported yet!");
    989 		} else {
    990 			daddr_t blk_off, fs_off;
    991 
    992 			blk_off = blkno << dev_bshift;
    993 			fs_off = off - (lbn << fs_bshift);
    994 
    995 			pps[i] = uvn_findpage_xip(devvp, &vp->v_uobj,
    996 			    blk_off + fs_off);
    997 			KASSERT(pps[i] != NULL);
    998 		}
    999 
   1000 		UVMHIST_LOG(ubchist, "xip pgs %d => phys_addr=0x%lx (%p)",
   1001 			i,
   1002 			(long)pps[i]->phys_addr,
   1003 			pps[i],
   1004 			0);
   1005 
   1006 		off += PAGE_SIZE;
   1007 	}
   1008 
   1009 	return 0;
   1010 }
   1011 
   1012 static int
   1013 genfs_do_getpages_xip_io_done(
   1014 	struct vnode *vp,
   1015 	voff_t origoffset,
   1016 	struct vm_page **pps,
   1017 	int *npagesp,
   1018 	int centeridx,
   1019 	vm_prot_t access_type,
   1020 	int advice,
   1021 	int flags,
   1022 	const int orignmempages)
   1023 {
   1024 	struct uvm_object * const uobj = &vp->v_uobj;
   1025 	int i;
   1026 
   1027 	const int fs_bshift = vp2fs_bshift(vp);
   1028 	const int fs_bsize = 1 << fs_bshift;
   1029 
   1030 	const off_t startoffset = trunc_blk(origoffset);
   1031 	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
   1032 
   1033 	mutex_enter(&uobj->vmobjlock);
   1034 
   1035 	for (i = ridx; i < ridx + orignmempages; i++) {
   1036 		struct vm_page *pg = pps[i];
   1037 
   1038 		KASSERT((pg->flags & PG_RDONLY) != 0);
   1039 		KASSERT((pg->flags & PG_BUSY) == 0);
   1040 		KASSERT((pg->flags & PG_CLEAN) != 0);
   1041 		KASSERT((pg->flags & PG_DEVICE) != 0);
   1042 		pg->flags |= PG_BUSY;
   1043 		pg->flags &= ~PG_FAKE;
   1044 		pg->uobject = &vp->v_uobj;
   1045 	}
   1046 
   1047 	mutex_exit(&uobj->vmobjlock);
   1048 
   1049 	*npagesp = orignmempages;
   1050 
   1051 	return 0;
   1052 }
   1053 #endif
   1054 
   1055 /*
   1056  * generic VM putpages routine.
   1057  * Write the given range of pages to backing store.
   1058  *
   1059  * => "offhi == 0" means flush all pages at or after "offlo".
   1060  * => object should be locked by caller.  we return with the
   1061  *      object unlocked.
   1062  * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
   1063  *	thus, a caller might want to unlock higher level resources
   1064  *	(e.g. vm_map) before calling flush.
   1065  * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
   1066  * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
   1067  * => NOTE: we rely on the fact that the object's memq is a TAILQ and
   1068  *	that new pages are inserted on the tail end of the list.   thus,
   1069  *	we can make a complete pass through the object in one go by starting
   1070  *	at the head and working towards the tail (new pages are put in
   1071  *	front of us).
   1072  * => NOTE: we are allowed to lock the page queues, so the caller
   1073  *	must not be holding the page queue lock.
   1074  *
   1075  * note on "cleaning" object and PG_BUSY pages:
   1076  *	this routine is holding the lock on the object.   the only time
   1077  *	that it can run into a PG_BUSY page that it does not own is if
   1078  *	some other process has started I/O on the page (e.g. either
   1079  *	a pagein, or a pageout).    if the PG_BUSY page is being paged
   1080  *	in, then it cannot be dirty (!PG_CLEAN) because no one has
   1081  *	had a chance to modify it yet.    if the PG_BUSY page is being
   1082  *	paged out then it means that someone else has already started
   1083  *	cleaning the page for us (how nice!).    in this case, if we
   1084  *	have syncio specified, then after we make our pass through the
   1085  *	object we need to wait for the other PG_BUSY pages to clear
   1086  *	off (i.e. we need to do an iosync).   also note that once a
   1087  *	page is PG_BUSY it must stay in its object until it is un-busied.
   1088  *
   1089  * note on page traversal:
   1090  *	we can traverse the pages in an object either by going down the
   1091  *	linked list in "uobj->memq", or we can go over the address range
   1092  *	by page doing hash table lookups for each address.    depending
   1093  *	on how many pages are in the object it may be cheaper to do one
   1094  *	or the other.   we set "by_list" to true if we are using memq.
   1095  *	if the cost of a hash lookup was equal to the cost of the list
   1096  *	traversal we could compare the number of pages in the start->stop
   1097  *	range to the total number of pages in the object.   however, it
   1098  *	seems that a hash table lookup is more expensive than the linked
   1099  *	list traversal, so we multiply the number of pages in the
   1100  *	range by an estimate of the relatively higher cost of the hash lookup.
   1101  */
   1102 
   1103 int
   1104 genfs_putpages(void *v)
   1105 {
   1106 	struct vop_putpages_args /* {
   1107 		struct vnode *a_vp;
   1108 		voff_t a_offlo;
   1109 		voff_t a_offhi;
   1110 		int a_flags;
   1111 	} */ * const ap = v;
   1112 
   1113 #ifdef XIP
   1114 	if ((ap->a_vp->v_vflag & VV_XIP) != 0)
   1115 		return genfs_do_putpages_xip(ap->a_vp, ap->a_offlo, ap->a_offhi,
   1116 		    ap->a_flags, NULL);
   1117 	else
   1118 #endif
   1119 	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
   1120 	    ap->a_flags, NULL);
   1121 }
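
/*
 * A minimal, hypothetical usage sketch (example_flush_range() is not part
 * of this file): flushing a page-aligned range synchronously through
 * VOP_PUTPAGES.  Per the contract above, the object is locked on entry
 * and unlocked on return.
 */
#if 0
static int
example_flush_range(struct vnode *vp, off_t lo, off_t hi)
{

	mutex_enter(&vp->v_uobj.vmobjlock);
	/* genfs_putpages() releases vmobjlock before returning. */
	return VOP_PUTPAGES(vp, trunc_page(lo), round_page(hi),
	    PGO_CLEANIT | PGO_SYNCIO);
}
#endif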
   1122 
   1123 int
   1124 genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
   1125     int origflags, struct vm_page **busypg)
   1126 {
   1127 	struct uvm_object * const uobj = &vp->v_uobj;
   1128 	kmutex_t * const slock = &uobj->vmobjlock;
   1129 	off_t off;
   1130 	/* Even for strange MAXPHYS, the shift rounds down to a page */
   1131 #define maxpages (MAXPHYS >> PAGE_SHIFT)
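	/* e.g. with a typical MAXPHYS of 64 KB and 4 KB pages, maxpages is 16 */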
   1132 	int i, error, npages, nback;
   1133 	int freeflag;
   1134 	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
   1135 	bool wasclean, by_list, needs_clean, yld;
   1136 	bool async = (origflags & PGO_SYNCIO) == 0;
   1137 	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
   1138 	struct lwp * const l = curlwp ? curlwp : &lwp0;
   1139 	struct genfs_node * const gp = VTOG(vp);
   1140 	int flags;
   1141 	int dirtygen;
   1142 	bool modified;
   1143 	bool need_wapbl;
   1144 	bool has_trans;
   1145 	bool cleanall;
   1146 	bool onworklst;
   1147 
   1148 	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
   1149 
   1150 	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
   1151 	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
   1152 	KASSERT(startoff < endoff || endoff == 0);
   1153 
   1154 	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
   1155 	    vp, uobj->uo_npages, startoff, endoff - startoff);
   1156 
   1157 	has_trans = false;
   1158 	need_wapbl = (!pagedaemon && vp->v_mount && vp->v_mount->mnt_wapbl &&
   1159 	    (origflags & PGO_JOURNALLOCKED) == 0);
   1160 
   1161 retry:
   1162 	modified = false;
   1163 	flags = origflags;
   1164 	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
   1165 	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
   1166 	if (uobj->uo_npages == 0) {
   1167 		if (vp->v_iflag & VI_ONWORKLST) {
   1168 			vp->v_iflag &= ~VI_WRMAPDIRTY;
   1169 			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1170 				vn_syncer_remove_from_worklist(vp);
   1171 		}
   1172 		if (has_trans) {
   1173 			if (need_wapbl)
   1174 				WAPBL_END(vp->v_mount);
   1175 			fstrans_done(vp->v_mount);
   1176 		}
   1177 		mutex_exit(slock);
   1178 		return (0);
   1179 	}
   1180 
   1181 	/*
   1182 	 * the vnode has pages, set up to process the request.
   1183 	 */
   1184 
   1185 	if (!has_trans && (flags & PGO_CLEANIT) != 0) {
   1186 		mutex_exit(slock);
   1187 		if (pagedaemon) {
   1188 			error = fstrans_start_nowait(vp->v_mount, FSTRANS_LAZY);
   1189 			if (error)
   1190 				return error;
   1191 		} else
   1192 			fstrans_start(vp->v_mount, FSTRANS_LAZY);
   1193 		if (need_wapbl) {
   1194 			error = WAPBL_BEGIN(vp->v_mount);
   1195 			if (error) {
   1196 				fstrans_done(vp->v_mount);
   1197 				return error;
   1198 			}
   1199 		}
   1200 		has_trans = true;
   1201 		mutex_enter(slock);
   1202 		goto retry;
   1203 	}
   1204 
   1205 	error = 0;
   1206 	wasclean = (vp->v_numoutput == 0);
   1207 	off = startoff;
   1208 	if (endoff == 0 || flags & PGO_ALLPAGES) {
   1209 		endoff = trunc_page(LLONG_MAX);
   1210 	}
   1211 	by_list = (uobj->uo_npages <=
   1212 	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_TREE_PENALTY);
   1213 
   1214 #if !defined(DEBUG)
   1215 	/*
   1216 	 * if this vnode is known not to have dirty pages,
   1217 	 * don't bother to clean it out.
   1218 	 */
   1219 
   1220 	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
   1221 		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
   1222 			goto skip_scan;
   1223 		}
   1224 		flags &= ~PGO_CLEANIT;
   1225 	}
   1226 #endif /* !defined(DEBUG) */
   1227 
   1228 	/*
   1229 	 * start the loop.  when scanning by list, hold the last page
   1230 	 * in the list before we start.  pages allocated after we start
   1231 	 * will be added to the end of the list, so we can stop at the
   1232 	 * current last page.
   1233 	 */
   1234 
   1235 	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
   1236 	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
   1237 	    (vp->v_iflag & VI_ONWORKLST) != 0;
   1238 	dirtygen = gp->g_dirtygen;
   1239 	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
   1240 	if (by_list) {
   1241 		curmp.flags = PG_MARKER;
   1242 		endmp.flags = PG_MARKER;
   1243 		pg = TAILQ_FIRST(&uobj->memq);
   1244 		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
   1245 	} else {
   1246 		pg = uvm_pagelookup(uobj, off);
   1247 	}
   1248 	nextpg = NULL;
   1249 	while (by_list || off < endoff) {
   1250 
   1251 		/*
   1252 		 * if the current page is not interesting, move on to the next.
   1253 		 */
   1254 
   1255 		KASSERT(pg == NULL || pg->uobject == uobj ||
   1256 		    (pg->flags & PG_MARKER) != 0);
   1257 		KASSERT(pg == NULL ||
   1258 		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
   1259 		    (pg->flags & (PG_BUSY|PG_MARKER)) != 0);
   1260 		if (by_list) {
   1261 			if (pg == &endmp) {
   1262 				break;
   1263 			}
   1264 			if (pg->flags & PG_MARKER) {
   1265 				pg = TAILQ_NEXT(pg, listq.queue);
   1266 				continue;
   1267 			}
   1268 			if (pg->offset < startoff || pg->offset >= endoff ||
   1269 			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1270 				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1271 					wasclean = false;
   1272 				}
   1273 				pg = TAILQ_NEXT(pg, listq.queue);
   1274 				continue;
   1275 			}
   1276 			off = pg->offset;
   1277 		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1278 			if (pg != NULL) {
   1279 				wasclean = false;
   1280 			}
   1281 			off += PAGE_SIZE;
   1282 			if (off < endoff) {
   1283 				pg = uvm_pagelookup(uobj, off);
   1284 			}
   1285 			continue;
   1286 		}
   1287 
   1288 		/*
   1289 		 * if the current page needs to be cleaned and it's busy,
   1290 		 * wait for it to become unbusy.
   1291 		 */
   1292 
   1293 		yld = (l->l_cpu->ci_schedstate.spc_flags &
   1294 		    SPCF_SHOULDYIELD) && !pagedaemon;
   1295 		if (pg->flags & PG_BUSY || yld) {
   1296 			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
   1297 			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
   1298 				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
   1299 				error = EDEADLK;
   1300 				if (busypg != NULL)
   1301 					*busypg = pg;
   1302 				break;
   1303 			}
   1304 			if (pagedaemon) {
   1305 				/*
   1306 				 * someone has taken the page while we
   1307 				 * dropped the lock for fstrans_start.
   1308 				 */
   1309 				break;
   1310 			}
   1311 			if (by_list) {
   1312 				TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
   1313 				UVMHIST_LOG(ubchist, "curmp next %p",
   1314 				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
   1315 			}
   1316 			if (yld) {
   1317 				mutex_exit(slock);
   1318 				preempt();
   1319 				mutex_enter(slock);
   1320 			} else {
   1321 				pg->flags |= PG_WANTED;
   1322 				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
   1323 				mutex_enter(slock);
   1324 			}
   1325 			if (by_list) {
   1326 				UVMHIST_LOG(ubchist, "after next %p",
   1327 				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
   1328 				pg = TAILQ_NEXT(&curmp, listq.queue);
   1329 				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
   1330 			} else {
   1331 				pg = uvm_pagelookup(uobj, off);
   1332 			}
   1333 			continue;
   1334 		}
   1335 
   1336 		/*
   1337 		 * if we're freeing, remove all mappings of the page now.
   1338 		 * if we're cleaning, check if the page needs to be cleaned.
   1339 		 */
   1340 
   1341 		if (flags & PGO_FREE) {
   1342 			pmap_page_protect(pg, VM_PROT_NONE);
   1343 		} else if (flags & PGO_CLEANIT) {
   1344 
   1345 			/*
   1346 			 * if we still have some hope to pull this vnode off
   1347 			 * from the syncer queue, write-protect the page.
   1348 			 */
   1349 
   1350 			if (cleanall && wasclean &&
   1351 			    gp->g_dirtygen == dirtygen) {
   1352 
   1353 				/*
   1354 				 * uobj pages get wired only by uvm_fault
   1355 				 * where uobj is locked.
   1356 				 */
   1357 
   1358 				if (pg->wire_count == 0) {
   1359 					pmap_page_protect(pg,
   1360 					    VM_PROT_READ|VM_PROT_EXECUTE);
   1361 				} else {
   1362 					cleanall = false;
   1363 				}
   1364 			}
   1365 		}
   1366 
   1367 		if (flags & PGO_CLEANIT) {
   1368 			needs_clean = pmap_clear_modify(pg) ||
   1369 			    (pg->flags & PG_CLEAN) == 0;
   1370 			pg->flags |= PG_CLEAN;
   1371 		} else {
   1372 			needs_clean = false;
   1373 		}
   1374 
   1375 		/*
   1376 		 * if we're cleaning, build a cluster.
   1377 		 * the cluster will consist of pages which are currently dirty,
   1378 		 * but they will be returned to us marked clean.
   1379 		 * if not cleaning, just operate on the one page.
   1380 		 */
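
		/*
		 * A worked example with assumed numbers (maxpages 16, off at
		 * page index 5): the backward scan below may find up to
		 * MIN(8, 5) = 5 dirty pages.  If it finds 2, they are moved
		 * to pgs[0..1], pg itself becomes pgs[2], and the forward
		 * scan may fill pgs[3..15]; with 4 more found, the cluster
		 * is 7 pages in total.
		 */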
   1381 
   1382 		if (needs_clean) {
   1383 			KDASSERT((vp->v_iflag & VI_ONWORKLST));
   1384 			wasclean = false;
   1385 			memset(pgs, 0, sizeof(pgs));
   1386 			pg->flags |= PG_BUSY;
   1387 			UVM_PAGE_OWN(pg, "genfs_putpages");
   1388 
   1389 			/*
   1390 			 * first look backward.
   1391 			 */
   1392 
   1393 			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
   1394 			nback = npages;
   1395 			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
   1396 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
   1397 			if (nback) {
   1398 				memmove(&pgs[0], &pgs[npages - nback],
   1399 				    nback * sizeof(pgs[0]));
   1400 				if (npages - nback < nback)
   1401 					memset(&pgs[nback], 0,
   1402 					    (npages - nback) * sizeof(pgs[0]));
   1403 				else
   1404 					memset(&pgs[npages - nback], 0,
   1405 					    nback * sizeof(pgs[0]));
   1406 			}
   1407 
   1408 			/*
   1409 			 * then plug in our page of interest.
   1410 			 */
   1411 
   1412 			pgs[nback] = pg;
   1413 
   1414 			/*
   1415 			 * then look forward to fill in the remaining space in
   1416 			 * the array of pages.
   1417 			 */
   1418 
   1419 			npages = maxpages - nback - 1;
   1420 			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
   1421 			    &pgs[nback + 1],
   1422 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
   1423 			npages += nback + 1;
   1424 		} else {
   1425 			pgs[0] = pg;
   1426 			npages = 1;
   1427 			nback = 0;
   1428 		}
   1429 
   1430 		/*
   1431 		 * apply FREE or DEACTIVATE options if requested.
   1432 		 */
   1433 
   1434 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1435 			mutex_enter(&uvm_pageqlock);
   1436 		}
   1437 		for (i = 0; i < npages; i++) {
   1438 			tpg = pgs[i];
   1439 			KASSERT(tpg->uobject == uobj);
   1440 			if (by_list && tpg == TAILQ_NEXT(pg, listq.queue))
   1441 				pg = tpg;
   1442 			if (tpg->offset < startoff || tpg->offset >= endoff)
   1443 				continue;
   1444 			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
   1445 				uvm_pagedeactivate(tpg);
   1446 			} else if (flags & PGO_FREE) {
   1447 				pmap_page_protect(tpg, VM_PROT_NONE);
   1448 				if (tpg->flags & PG_BUSY) {
   1449 					tpg->flags |= freeflag;
   1450 					if (pagedaemon) {
   1451 						uvm_pageout_start(1);
   1452 						uvm_pagedequeue(tpg);
   1453 					}
   1454 				} else {
   1455 
   1456 					/*
   1457 					 * ``page is not busy''
   1458 					 * implies that npages is 1
   1459 					 * and needs_clean is false.
   1460 					 */
   1461 
   1462 					nextpg = TAILQ_NEXT(tpg, listq.queue);
   1463 					uvm_pagefree(tpg);
   1464 					if (pagedaemon)
   1465 						uvmexp.pdfreed++;
   1466 				}
   1467 			}
   1468 		}
   1469 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1470 			mutex_exit(&uvm_pageqlock);
   1471 		}
   1472 		if (needs_clean) {
   1473 			modified = true;
   1474 
   1475 			/*
   1476 			 * start the i/o.  if we're traversing by list,
   1477 			 * keep our place in the list with a marker page.
   1478 			 */
   1479 
   1480 			if (by_list) {
   1481 				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
   1482 				    listq.queue);
   1483 			}
   1484 			mutex_exit(slock);
   1485 			error = GOP_WRITE(vp, pgs, npages, flags);
   1486 			mutex_enter(slock);
   1487 			if (by_list) {
   1488 				pg = TAILQ_NEXT(&curmp, listq.queue);
   1489 				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
   1490 			}
   1491 			if (error) {
   1492 				break;
   1493 			}
   1494 			if (by_list) {
   1495 				continue;
   1496 			}
   1497 		}
   1498 
   1499 		/*
   1500 		 * find the next page and continue if there was no error.
   1501 		 */
   1502 
   1503 		if (by_list) {
   1504 			if (nextpg) {
   1505 				pg = nextpg;
   1506 				nextpg = NULL;
   1507 			} else {
   1508 				pg = TAILQ_NEXT(pg, listq.queue);
   1509 			}
   1510 		} else {
   1511 			off += (npages - nback) << PAGE_SHIFT;
   1512 			if (off < endoff) {
   1513 				pg = uvm_pagelookup(uobj, off);
   1514 			}
   1515 		}
   1516 	}
   1517 	if (by_list) {
   1518 		TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
   1519 	}
   1520 
   1521 	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
   1522 	    (vp->v_type != VBLK ||
   1523 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
   1524 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
   1525 	}
   1526 
   1527 	/*
   1528 	 * if we're cleaning and there was nothing to clean,
   1529 	 * take us off the syncer list.  if we started any i/o
   1530 	 * and we're doing sync i/o, wait for all writes to finish.
   1531 	 */
   1532 
   1533 	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
   1534 	    (vp->v_iflag & VI_ONWORKLST) != 0) {
   1535 #if defined(DEBUG)
   1536 		TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
   1537 			if ((pg->flags & PG_MARKER) != 0) {
   1538 				continue;
   1539 			}
   1540 			if ((pg->flags & PG_CLEAN) == 0) {
   1541 				printf("%s: %p: !CLEAN\n", __func__, pg);
   1542 			}
   1543 			if (pmap_is_modified(pg)) {
   1544 				printf("%s: %p: modified\n", __func__, pg);
   1545 			}
   1546 		}
   1547 #endif /* defined(DEBUG) */
   1548 		vp->v_iflag &= ~VI_WRMAPDIRTY;
   1549 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1550 			vn_syncer_remove_from_worklist(vp);
   1551 	}
   1552 
   1553 #if !defined(DEBUG)
   1554 skip_scan:
   1555 #endif /* !defined(DEBUG) */
   1556 
   1557 	/* Wait for output to complete. */
   1558 	if (!wasclean && !async && vp->v_numoutput != 0) {
   1559 		while (vp->v_numoutput != 0)
   1560 			cv_wait(&vp->v_cv, slock);
   1561 	}
   1562 	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
   1563 	mutex_exit(slock);
   1564 
   1565 	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
   1566 		/*
   1567 		 * in the case of PGO_RECLAIM, make sure the vnode is clean.
   1568 		 * retrying is not a big deal because, in many cases,
   1569 		 * uobj->uo_npages is already 0 here.
   1570 		 */
   1571 		mutex_enter(slock);
   1572 		goto retry;
   1573 	}
   1574 
   1575 	if (has_trans) {
   1576 		if (need_wapbl)
   1577 			WAPBL_END(vp->v_mount);
   1578 		fstrans_done(vp->v_mount);
   1579 	}
   1580 
   1581 	return (error);
   1582 }
   1583 
   1584 #ifdef XIP
   1585 int
   1586 genfs_do_putpages_xip(struct vnode *vp, off_t startoff, off_t endoff,
   1587     int flags, struct vm_page **busypg)
   1588 {
   1589 	struct uvm_object *uobj = &vp->v_uobj;
   1590 #ifdef DIAGNOSTIC
   1591 	struct genfs_node * const gp = VTOG(vp);
   1592 #endif
   1593 
   1594 	UVMHIST_FUNC("genfs_do_putpages_xip"); UVMHIST_CALLED(ubchist);
   1595 
   1596 	KASSERT(mutex_owned(&uobj->vmobjlock));
   1597 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1598 	KASSERT(vp->v_numoutput == 0);
   1599 	KASSERT(gp->g_dirtygen == 0);
   1600 
   1601 	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
   1602 	    vp, uobj->uo_npages, startoff, endoff - startoff);
   1603 
   1604 	/*
   1605 	 * XIP pages are read-only, and never become dirty.  They're also never
   1606 	 * queued.  PGO_DEACTIVATE and PGO_CLEANIT are meaningless for XIP
   1607 	 * pages, so we ignore them.
   1608 	 */
   1609 	if ((flags & PGO_FREE) == 0)
   1610 		goto done;
   1611 
   1612 	/*
   1613 	 * For PGO_FREE (or (PGO_CLEANIT | PGO_FREE)), we invalidate MMU
   1614 	 * mappings of both XIP pages and XIP zero pages.
   1615 	 *
   1616 	 * The zero page is freed when any offset mapped to it is freed, even
   1617 	 * if one file (vnode) has many holes and maps its zero page to all
   1618 	 * of those hole pages.
   1619 	 *
   1620 	 * We don't know which pages are currently mapped in the given vnode,
   1621 	 * because XIP pages are not added to the vnode.  What we can do is to
   1622 	 * locate pages by querying the filesystem as done in getpages.  Call
   1623 	 * genfs_do_getpages_xip_io().
   1624 	 */
   1625 
   1626 	off_t off, eof;
   1627 
   1628 	off = trunc_page(startoff);
   1629 	if (endoff == 0 || (flags & PGO_ALLPAGES))
   1630 		GOP_SIZE(vp, vp->v_size, &eof, GOP_SIZE_MEM);
   1631 	else
   1632 		eof = endoff;
   1633 
   1634 	while (off < eof) {
   1635 		int npages, orignpages, error, i;
   1636 		struct vm_page *pgs[maxpages], *pg;
   1637 
   1638 		npages = round_page(eof - off) >> PAGE_SHIFT;
   1639 		if (npages > maxpages)
   1640 			npages = maxpages;
   1641 
   1642 		orignpages = npages;
   1643 		KASSERT(mutex_owned(&uobj->vmobjlock));
   1644 		mutex_exit(&uobj->vmobjlock);
   1645 		error = genfs_do_getpages_xip_io(vp, off, pgs, &npages, 0,
   1646 		    VM_PROT_ALL, 0, PGO_GLOCKHELD, orignpages);
   1647 		KASSERT(error == 0);
   1648 		KASSERT(npages == orignpages);
   1649 		mutex_enter(&uobj->vmobjlock);
   1650 		for (i = 0; i < npages; i++) {
   1651 			pg = pgs[i];
   1652 			if (pg == NULL || pg == PGO_DONTCARE)
   1653 				continue;
   1654 			/*
   1655 			 * Freeing normal XIP pages: just remove the MMU mappings.
   1656 			 */
   1657 			pmap_page_protect(pg, VM_PROT_NONE);
   1658 			KASSERT((pg->flags & PG_RDONLY) != 0);
   1659 			KASSERT((pg->flags & PG_CLEAN) != 0);
   1660 			KASSERT((pg->flags & PG_FAKE) == 0);
   1661 			KASSERT((pg->flags & PG_DEVICE) != 0);
   1662 			pg->flags &= ~PG_BUSY;
   1663 		}
   1664 		off += npages << PAGE_SHIFT;
   1665 	}
   1666 
   1667 	KASSERT(uobj->uo_npages == 0);
   1668 
   1669 done:
   1670 	KASSERT(mutex_owned(&uobj->vmobjlock));
   1671 	mutex_exit(&uobj->vmobjlock);
   1672 	return 0;
   1673 }
   1674 #endif
   1675 
   1676 int
   1677 genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1678 {
   1679 	off_t off;
   1680 	vaddr_t kva;
   1681 	size_t len;
   1682 	int error;
   1683 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1684 
   1685 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1686 	    vp, pgs, npages, flags);
   1687 
   1688 	off = pgs[0]->offset;
   1689 	kva = uvm_pagermapin(pgs, npages,
   1690 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1691 	len = npages << PAGE_SHIFT;
   1692 
   1693 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1694 			    uvm_aio_biodone);
   1695 
   1696 	return error;
   1697 }
   1698 
   1699 int
   1700 genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1701 {
   1702 	off_t off;
   1703 	vaddr_t kva;
   1704 	size_t len;
   1705 	int error;
   1706 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1707 
   1708 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1709 	    vp, pgs, npages, flags);
   1710 
   1711 	off = pgs[0]->offset;
   1712 	kva = uvm_pagermapin(pgs, npages,
   1713 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1714 	len = npages << PAGE_SHIFT;
   1715 
   1716 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1717 			    uvm_aio_biodone);
   1718 
   1719 	return error;
   1720 }
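
/*
 * A hypothetical sketch (example_genfsops is not from this file) of how a
 * filesystem typically plugs these routines in: GOP_WRITE() resolves to
 * the gop_write member of the vnode's genfs_ops, which is commonly set to
 * genfs_gop_write().  The gop_alloc and gop_markupdate members shown here
 * are stand-ins for filesystem-specific implementations.
 */
#if 0
static const struct genfs_ops example_genfsops = {
	.gop_size = genfs_size,
	.gop_alloc = example_gop_alloc,			/* hypothetical */
	.gop_write = genfs_gop_write,
	.gop_markupdate = example_gop_markupdate,	/* hypothetical */
};
#endif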
   1721 
   1722 /*
   1723  * Backend routine for doing I/O to vnode pages.  Pages are already locked
   1724  * and mapped into kernel memory.  Here we just look up the underlying
   1725  * device block addresses and call the strategy routine.
   1726  */
   1727 
   1728 static int
   1729 genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
   1730     enum uio_rw rw, void (*iodone)(struct buf *))
   1731 {
   1732 	int s, error;
   1733 	int fs_bshift, dev_bshift;
   1734 	off_t eof, offset, startoffset;
   1735 	size_t bytes, iobytes, skipbytes;
   1736 	struct buf *mbp, *bp;
   1737 	const bool async = (flags & PGO_SYNCIO) == 0;
   1738 	const bool iowrite = rw == UIO_WRITE;
   1739 	const int brw = iowrite ? B_WRITE : B_READ;
   1740 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1741 
   1742 	UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
   1743 	    vp, kva, len, flags);
   1744 
   1745 	KASSERT(vp->v_size <= vp->v_writesize);
   1746 	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
   1747 	if (vp->v_type != VBLK) {
   1748 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1749 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1750 	} else {
   1751 		fs_bshift = DEV_BSHIFT;
   1752 		dev_bshift = DEV_BSHIFT;
   1753 	}
   1754 	error = 0;
   1755 	startoffset = off;
   1756 	bytes = MIN(len, eof - startoffset);
   1757 	skipbytes = 0;
   1758 	KASSERT(bytes != 0);
   1759 
   1760 	if (iowrite) {
   1761 		mutex_enter(&vp->v_interlock);
   1762 		vp->v_numoutput += 2;
   1763 		mutex_exit(&vp->v_interlock);
   1764 	}
   1765 	mbp = getiobuf(vp, true);
   1766 	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
   1767 	    vp, mbp, vp->v_numoutput, bytes);
   1768 	mbp->b_bufsize = len;
   1769 	mbp->b_data = (void *)kva;
   1770 	mbp->b_resid = mbp->b_bcount = bytes;
   1771 	mbp->b_cflags = BC_BUSY | BC_AGE;
   1772 	if (async) {
   1773 		mbp->b_flags = brw | B_ASYNC;
   1774 		mbp->b_iodone = iodone;
   1775 	} else {
   1776 		mbp->b_flags = brw;
   1777 		mbp->b_iodone = NULL;
   1778 	}
   1779 	if (curlwp == uvm.pagedaemon_lwp)
   1780 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
   1781 	else if (async)
   1782 		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
   1783 	else
   1784 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
   1785 
   1786 	bp = NULL;
   1787 	for (offset = startoffset;
   1788 	    bytes > 0;
   1789 	    offset += iobytes, bytes -= iobytes) {
   1790 		int run;
   1791 		daddr_t lbn, blkno;
   1792 		struct vnode *devvp;
   1793 
   1794 		/*
   1795 		 * bmap the file to find out the blkno to read from and
   1796 		 * how much we can read in one i/o.  if bmap returns an error,
   1797 		 * skip the rest of the top-level i/o.
   1798 		 */
   1799 
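		/*
		 * VOP_BMAP returns the disk address in DEV_BSIZE units and,
		 * via "run", the number of additional contiguous filesystem
		 * blocks; blkno == -1 means the block is unallocated (a hole).
		 */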
   1800 		lbn = offset >> fs_bshift;
   1801 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1802 		if (error) {
    1803 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d",
    1804 			    lbn, error, 0, 0);
   1805 			skipbytes += bytes;
   1806 			bytes = 0;
   1807 			goto loopdone;
   1808 		}
   1809 
   1810 		/*
   1811 		 * see how many pages can be read with this i/o.
   1812 		 * reduce the i/o size if necessary to avoid
   1813 		 * overwriting pages with valid data.
   1814 		 */
   1815 
   1816 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1817 		    bytes);
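		/*
		 * For example, with fs_bshift 13 (8 KiB blocks), offset
		 * 0x3000, lbn 1 and run 0, the contiguous extent ends at
		 * (1 + 1 + 0) << 13 = 0x4000, so iobytes is capped at
		 * 0x1000 no matter how many bytes remain.
		 */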
   1818 
   1819 		/*
   1820 		 * if this block isn't allocated, zero it instead of
   1821 		 * reading it.  unless we are going to allocate blocks,
   1822 		 * mark the pages we zeroed PG_RDONLY.
   1823 		 */
   1824 
   1825 		if (blkno == (daddr_t)-1) {
   1826 			if (!iowrite) {
   1827 				memset((char *)kva + (offset - startoffset), 0,
   1828 				    iobytes);
   1829 			}
   1830 			skipbytes += iobytes;
   1831 			continue;
   1832 		}
   1833 
   1834 		/*
   1835 		 * allocate a sub-buf for this piece of the i/o
   1836 		 * (or just use mbp if there's only 1 piece),
   1837 		 * and start it going.
   1838 		 */
   1839 
   1840 		if (offset == startoffset && iobytes == bytes) {
   1841 			bp = mbp;
   1842 		} else {
   1843 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
   1844 			    vp, bp, vp->v_numoutput, 0);
   1845 			bp = getiobuf(vp, true);
   1846 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
   1847 		}
   1848 		bp->b_lblkno = 0;
   1849 
   1850 		/* adjust physical blkno for partial blocks */
   1851 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1852 		    dev_bshift);
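		/*
		 * Continuing the example above with dev_bshift 9: offset
		 * 0x3000 lies 0x1000 bytes into fs block 1, so b_blkno is
		 * advanced by 0x1000 >> 9 = 8 sectors past blkno.
		 */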
   1853 
   1854 		UVMHIST_LOG(ubchist,
   1855 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
   1856 		    bp, offset, bp->b_bcount, bp->b_blkno);
   1857 
   1858 		VOP_STRATEGY(devvp, bp);
   1859 	}
   1860 
   1861 loopdone:
   1862 	if (skipbytes) {
   1863 		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
   1864 	}
   1865 	nestiobuf_done(mbp, skipbytes, error);
   1866 	if (async) {
   1867 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1868 		return (0);
   1869 	}
   1870 	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
   1871 	error = biowait(mbp);
   1872 	s = splbio();
   1873 	(*iodone)(mbp);
   1874 	splx(s);
   1875 	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
   1876 	return (error);
   1877 }
   1878 
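/*
 * genfs_compat_getpages: a fallback VOP_GETPAGES implementation for
 * filesystems without a native one; it allocates the pages and fills any
 * PG_FAKE ones by issuing plain VOP_READ calls through a pager mapping.
 */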
   1879 int
   1880 genfs_compat_getpages(void *v)
   1881 {
   1882 	struct vop_getpages_args /* {
   1883 		struct vnode *a_vp;
   1884 		voff_t a_offset;
   1885 		struct vm_page **a_m;
   1886 		int *a_count;
   1887 		int a_centeridx;
   1888 		vm_prot_t a_access_type;
   1889 		int a_advice;
   1890 		int a_flags;
   1891 	} */ *ap = v;
   1892 
   1893 	off_t origoffset;
   1894 	struct vnode *vp = ap->a_vp;
   1895 	struct uvm_object *uobj = &vp->v_uobj;
   1896 	struct vm_page *pg, **pgs;
   1897 	vaddr_t kva;
   1898 	int i, error, orignpages, npages;
   1899 	struct iovec iov;
   1900 	struct uio uio;
   1901 	kauth_cred_t cred = curlwp->l_cred;
   1902 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1903 
   1904 	error = 0;
   1905 	origoffset = ap->a_offset;
   1906 	orignpages = *ap->a_count;
   1907 	pgs = ap->a_m;
   1908 
   1909 	if (ap->a_flags & PGO_LOCKED) {
   1910 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
    1911 		    UFP_NOWAIT | UFP_NOALLOC | (memwrite ? UFP_NORDONLY : 0));
   1912 
   1913 		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
   1914 		if (error == 0 && memwrite) {
   1915 			genfs_markdirty(vp);
   1916 		}
   1917 		return error;
   1918 	}
   1919 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1920 		mutex_exit(&uobj->vmobjlock);
   1921 		return EINVAL;
   1922 	}
   1923 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
   1924 		mutex_exit(&uobj->vmobjlock);
   1925 		return 0;
   1926 	}
   1927 	npages = orignpages;
   1928 	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
   1929 	mutex_exit(&uobj->vmobjlock);
   1930 	kva = uvm_pagermapin(pgs, npages,
   1931 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1932 	for (i = 0; i < npages; i++) {
   1933 		pg = pgs[i];
   1934 		if ((pg->flags & PG_FAKE) == 0) {
   1935 			continue;
   1936 		}
   1937 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   1938 		iov.iov_len = PAGE_SIZE;
   1939 		uio.uio_iov = &iov;
   1940 		uio.uio_iovcnt = 1;
   1941 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   1942 		uio.uio_rw = UIO_READ;
   1943 		uio.uio_resid = PAGE_SIZE;
   1944 		UIO_SETUP_SYSSPACE(&uio);
   1945 		/* XXX vn_lock */
   1946 		error = VOP_READ(vp, &uio, 0, cred);
   1947 		if (error) {
   1948 			break;
   1949 		}
   1950 		if (uio.uio_resid) {
   1951 			memset(iov.iov_base, 0, uio.uio_resid);
   1952 		}
   1953 	}
   1954 	uvm_pagermapout(kva, npages);
   1955 	mutex_enter(&uobj->vmobjlock);
   1956 	mutex_enter(&uvm_pageqlock);
   1957 	for (i = 0; i < npages; i++) {
   1958 		pg = pgs[i];
   1959 		if (error && (pg->flags & PG_FAKE) != 0) {
   1960 			pg->flags |= PG_RELEASED;
   1961 		} else {
   1962 			pmap_clear_modify(pg);
   1963 			uvm_pageactivate(pg);
   1964 		}
   1965 	}
   1966 	if (error) {
   1967 		uvm_page_unbusy(pgs, npages);
   1968 	}
   1969 	mutex_exit(&uvm_pageqlock);
   1970 	if (error == 0 && memwrite) {
   1971 		genfs_markdirty(vp);
   1972 	}
   1973 	mutex_exit(&uobj->vmobjlock);
   1974 	return error;
   1975 }
   1976 
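/*
 * genfs_compat_gop_write: the write-side counterpart of the above; it
 * pushes the pages out with a single VOP_WRITE, then builds a buf
 * describing the transfer so uvm_aio_aiodone() can do the usual page
 * cleanup.
 */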
   1977 int
   1978 genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1979     int flags)
   1980 {
   1981 	off_t offset;
   1982 	struct iovec iov;
   1983 	struct uio uio;
   1984 	kauth_cred_t cred = curlwp->l_cred;
   1985 	struct buf *bp;
   1986 	vaddr_t kva;
   1987 	int error;
   1988 
   1989 	offset = pgs[0]->offset;
   1990 	kva = uvm_pagermapin(pgs, npages,
   1991 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1992 
   1993 	iov.iov_base = (void *)kva;
   1994 	iov.iov_len = npages << PAGE_SHIFT;
   1995 	uio.uio_iov = &iov;
   1996 	uio.uio_iovcnt = 1;
   1997 	uio.uio_offset = offset;
   1998 	uio.uio_rw = UIO_WRITE;
   1999 	uio.uio_resid = npages << PAGE_SHIFT;
   2000 	UIO_SETUP_SYSSPACE(&uio);
   2001 	/* XXX vn_lock */
   2002 	error = VOP_WRITE(vp, &uio, 0, cred);
   2003 
   2004 	mutex_enter(&vp->v_interlock);
   2005 	vp->v_numoutput++;
   2006 	mutex_exit(&vp->v_interlock);
   2007 
   2008 	bp = getiobuf(vp, true);
   2009 	bp->b_cflags = BC_BUSY | BC_AGE;
   2010 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   2011 	bp->b_data = (char *)kva;
   2012 	bp->b_bcount = npages << PAGE_SHIFT;
   2013 	bp->b_bufsize = npages << PAGE_SHIFT;
   2014 	bp->b_resid = 0;
   2015 	bp->b_error = error;
   2016 	uvm_aio_aiodone(bp);
   2017 	return (error);
   2018 }
   2019 
   2020 /*
   2021  * Process a uio using direct I/O.  If we reach a part of the request
   2022  * which cannot be processed in this fashion for some reason, just return.
   2023  * The caller must handle some additional part of the request using
   2024  * buffered I/O before trying direct I/O again.
   2025  */
   2026 
   2027 void
   2028 genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
   2029 {
   2030 	struct vmspace *vs;
   2031 	struct iovec *iov;
   2032 	vaddr_t va;
   2033 	size_t len;
   2034 	const int mask = DEV_BSIZE - 1;
   2035 	int error;
   2036 	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
   2037 	    (ioflag & IO_JOURNALLOCKED) == 0);
   2038 
   2039 	/*
   2040 	 * We only support direct I/O to user space for now.
   2041 	 */
   2042 
   2043 	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
   2044 		return;
   2045 	}
   2046 
   2047 	/*
   2048 	 * If the vnode is mapped, we would need to get the getpages lock
    2049 	 * to stabilize the bmap, but then we would get into trouble while
   2050 	 * locking the pages if the pages belong to this same vnode (or a
   2051 	 * multi-vnode cascade to the same effect).  Just fall back to
   2052 	 * buffered I/O if the vnode is mapped to avoid this mess.
   2053 	 */
   2054 
   2055 	if (vp->v_vflag & VV_MAPPED) {
   2056 		return;
   2057 	}
   2058 
   2059 	if (need_wapbl) {
   2060 		error = WAPBL_BEGIN(vp->v_mount);
   2061 		if (error)
   2062 			return;
   2063 	}
   2064 
   2065 	/*
   2066 	 * Do as much of the uio as possible with direct I/O.
   2067 	 */
   2068 
   2069 	vs = uio->uio_vmspace;
   2070 	while (uio->uio_resid) {
   2071 		iov = uio->uio_iov;
   2072 		if (iov->iov_len == 0) {
   2073 			uio->uio_iov++;
   2074 			uio->uio_iovcnt--;
   2075 			continue;
   2076 		}
   2077 		va = (vaddr_t)iov->iov_base;
   2078 		len = MIN(iov->iov_len, genfs_maxdio);
   2079 		len &= ~mask;
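		/*
		 * E.g. with DEV_BSIZE 512 and a 10000-byte iovec (and
		 * genfs_maxdio at least that large), len becomes
		 * 10000 & ~511 = 9728: the chunk is trimmed to whole sectors.
		 */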
   2080 
   2081 		/*
   2082 		 * If the next chunk is smaller than DEV_BSIZE or extends past
   2083 		 * the current EOF, then fall back to buffered I/O.
   2084 		 */
   2085 
   2086 		if (len == 0 || uio->uio_offset + len > vp->v_size) {
   2087 			break;
   2088 		}
   2089 
   2090 		/*
   2091 		 * Check alignment.  The file offset must be at least
   2092 		 * sector-aligned.  The exact constraint on memory alignment
   2093 		 * is very hardware-dependent, but requiring sector-aligned
   2094 		 * addresses there too is safe.
   2095 		 */
   2096 
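		/*
		 * E.g. uio_offset 0x10200 (a multiple of 512) passes, while
		 * 0x10201, or a user buffer at an odd address, sends the
		 * whole remainder down the buffered path.
		 */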
   2097 		if (uio->uio_offset & mask || va & mask) {
   2098 			break;
   2099 		}
   2100 		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
   2101 					  uio->uio_rw);
   2102 		if (error) {
   2103 			break;
   2104 		}
   2105 		iov->iov_base = (char *)iov->iov_base + len;
   2106 		iov->iov_len -= len;
   2107 		uio->uio_offset += len;
   2108 		uio->uio_resid -= len;
   2109 	}
   2110 
   2111 	if (need_wapbl)
   2112 		WAPBL_END(vp->v_mount);
   2113 }
   2114 
   2115 /*
   2116  * Iodone routine for direct I/O.  We don't do much here since the request is
   2117  * always synchronous, so the caller will do most of the work after biowait().
   2118  */
   2119 
   2120 static void
   2121 genfs_dio_iodone(struct buf *bp)
   2122 {
   2123 
   2124 	KASSERT((bp->b_flags & B_ASYNC) == 0);
   2125 	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
   2126 		mutex_enter(bp->b_objlock);
   2127 		vwakeup(bp);
   2128 		mutex_exit(bp->b_objlock);
   2129 	}
   2130 	putiobuf(bp);
   2131 }
   2132 
   2133 /*
   2134  * Process one chunk of a direct I/O request.
   2135  */
   2136 
   2137 static int
   2138 genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
   2139     off_t off, enum uio_rw rw)
   2140 {
   2141 	struct vm_map *map;
   2142 	struct pmap *upm, *kpm;
   2143 	size_t klen = round_page(uva + len) - trunc_page(uva);
   2144 	off_t spoff, epoff;
   2145 	vaddr_t kva, puva;
   2146 	paddr_t pa;
   2147 	vm_prot_t prot;
   2148 	int error, rv, poff, koff;
   2149 	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
   2150 		(rw == UIO_WRITE ? PGO_FREE : 0);
   2151 
   2152 	/*
   2153 	 * For writes, verify that this range of the file already has fully
   2154 	 * allocated backing store.  If there are any holes, just punt and
   2155 	 * make the caller take the buffered write path.
   2156 	 */
   2157 
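	/*
	 * For example, with bshift 13 (8 KiB blocks), a write of 0x10000
	 * bytes at off 0x1000 computes elbn = (0x1000 + 0x10000 + 0x1fff)
	 * >> 13 = 9 and so probes blocks 0 through 8, stepping by 1 + run,
	 * i.e. every block the request touches.
	 */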
   2158 	if (rw == UIO_WRITE) {
   2159 		daddr_t lbn, elbn, blkno;
   2160 		int bsize, bshift, run;
   2161 
   2162 		bshift = vp->v_mount->mnt_fs_bshift;
   2163 		bsize = 1 << bshift;
   2164 		lbn = off >> bshift;
   2165 		elbn = (off + len + bsize - 1) >> bshift;
   2166 		while (lbn < elbn) {
   2167 			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
   2168 			if (error) {
   2169 				return error;
   2170 			}
   2171 			if (blkno == (daddr_t)-1) {
   2172 				return ENOSPC;
   2173 			}
   2174 			lbn += 1 + run;
   2175 		}
   2176 	}
   2177 
   2178 	/*
   2179 	 * Flush any cached pages for parts of the file that we're about to
   2180 	 * access.  If we're writing, invalidate pages as well.
   2181 	 */
   2182 
   2183 	spoff = trunc_page(off);
   2184 	epoff = round_page(off + len);
   2185 	mutex_enter(&vp->v_interlock);
   2186 	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
   2187 	if (error) {
   2188 		return error;
   2189 	}
   2190 
   2191 	/*
   2192 	 * Wire the user pages and remap them into kernel memory.
   2193 	 */
   2194 
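	/*
	 * For example, assuming 4 KiB pages, uva 0x20200 and len 0x1000:
	 * klen = round_page(0x21200) - trunc_page(0x20200) = 0x2000 (two
	 * pages, since the range straddles a page boundary), and the I/O
	 * below starts koff = 0x200 bytes into the kernel window.
	 */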
   2195 	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
   2196 	error = uvm_vslock(vs, (void *)uva, len, prot);
   2197 	if (error) {
   2198 		return error;
   2199 	}
   2200 
   2201 	map = &vs->vm_map;
   2202 	upm = vm_map_pmap(map);
   2203 	kpm = vm_map_pmap(kernel_map);
   2204 	kva = uvm_km_alloc(kernel_map, klen, 0,
   2205 			   UVM_KMF_VAONLY | UVM_KMF_WAITVA);
   2206 	puva = trunc_page(uva);
   2207 	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
   2208 		rv = pmap_extract(upm, puva + poff, &pa);
   2209 		KASSERT(rv);
   2210 		pmap_enter(kpm, kva + poff, pa, prot, prot | PMAP_WIRED);
   2211 	}
   2212 	pmap_update(kpm);
   2213 
   2214 	/*
   2215 	 * Do the I/O.
   2216 	 */
   2217 
   2218 	koff = uva - trunc_page(uva);
   2219 	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
   2220 			    genfs_dio_iodone);
   2221 
   2222 	/*
   2223 	 * Tear down the kernel mapping.
   2224 	 */
   2225 
   2226 	pmap_remove(kpm, kva, kva + klen);
   2227 	pmap_update(kpm);
   2228 	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
   2229 
   2230 	/*
   2231 	 * Unwire the user pages.
   2232 	 */
   2233 
   2234 	uvm_vsunlock(vs, (void *)uva, len);
   2235 	return error;
   2236 }
   2237 
   2238