/*	$NetBSD: genfs_io.c,v 1.72 2018/05/28 21:04:38 chs Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.72 2018/05/28 21:04:38 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/kmem.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>
#include <sys/buf.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>

static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
    off_t, enum uio_rw);
static void genfs_dio_iodone(struct buf *);

static int genfs_getpages_read(struct vnode *, struct vm_page **, int, off_t,
    off_t, bool, bool, bool, bool);
static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
    void (*)(struct buf *));
static void genfs_rel_pages(struct vm_page **, unsigned int);
static void genfs_markdirty(struct vnode *);

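/*
 * Largest chunk, in bytes, that genfs_directio() will hand to a single
 * direct I/O transfer.
 */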
int genfs_maxdio = MAXPHYS;

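/*
 * genfs_rel_pages: unbusy the given pages.  Pages still marked PG_FAKE
 * (i.e. never filled with valid data) are flagged PG_RELEASED so that
 * uvm_page_unbusy() frees them.
 */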
static void
genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
{
	unsigned int i;

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE)
			continue;
		KASSERT(uvm_page_locked_p(pg));
		if (pg->flags & PG_FAKE) {
			pg->flags |= PG_RELEASED;
		}
	}
	mutex_enter(&uvm_pageqlock);
	uvm_page_unbusy(pgs, npages);
	mutex_exit(&uvm_pageqlock);
}

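/*
 * genfs_markdirty: note that a vnode has (potentially) dirty pages:
 * bump the dirty generation number, put the vnode on the syncer
 * worklist and, if it is writably mmapped, mark the mapping dirty.
 */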
static void
genfs_markdirty(struct vnode *vp)
{
	struct genfs_node * const gp = VTOG(vp);

	KASSERT(mutex_owned(vp->v_interlock));
	gp->g_dirtygen++;
	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
		vp->v_iflag |= VI_WRMAPDIRTY;
	}
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */

int
genfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ * const ap = v;

	off_t diskeof, memeof;
	int i, error, npages;
	const int flags = ap->a_flags;
	struct vnode * const vp = ap->a_vp;
	struct uvm_object * const uobj = &vp->v_uobj;
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
	const bool glocked = (flags & PGO_GLOCKHELD) != 0;
	bool holds_wapbl = false;
	struct mount *trans_mount = NULL;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx/%jx count %jd",
	    (uintptr_t)vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
	    vp->v_type == VLNK || vp->v_type == VBLK);

	error = vdead_check(vp, VDEAD_NOWAIT);
	if (error) {
		if ((flags & PGO_LOCKED) == 0)
			mutex_exit(uobj->vmobjlock);
		return error;
	}

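	/*
	 * Restart point: we come back here if the file was truncated
	 * while we slept, so that all of the size-derived state below
	 * is recomputed.
	 */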
startover:
	error = 0;
	const voff_t origvsize = vp->v_size;
	const off_t origoffset = ap->a_offset;
	const int orignpages = *ap->a_count;

	GOP_SIZE(vp, origvsize, &diskeof, 0);
	if (flags & PGO_PASTEOF) {
		off_t newsize;
#if defined(DIAGNOSTIC)
		off_t writeeof;
#endif /* defined(DIAGNOSTIC) */

		newsize = MAX(origvsize,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
#if defined(DIAGNOSTIC)
		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
		if (newsize > round_page(writeeof)) {
			panic("%s: past eof: %" PRId64 " vs. %" PRId64,
			    __func__, newsize, round_page(writeeof));
		}
#endif /* defined(DIAGNOSTIC) */
	} else {
		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			mutex_exit(uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%jx count %jd goes past EOF 0x%jx",
		    origoffset, *ap->a_count, memeof,0);
		error = EINVAL;
		goto out_err;
	}

	/* uobj is locked */

	if ((flags & PGO_NOTIMESTAMP) == 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		int updflags = 0;

		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
			updflags = GOP_UPDATE_ACCESSED;
		}
		if (memwrite) {
			updflags |= GOP_UPDATE_MODIFIED;
		}
		if (updflags != 0) {
			GOP_MARKUPDATE(vp, updflags);
		}
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		int nfound;
		struct vm_page *pg;

		KASSERT(!glocked);
		npages = *ap->a_count;
#if defined(DEBUG)
		for (i = 0; i < npages; i++) {
			pg = ap->a_m[i];
			KASSERT(pg == NULL || pg == PGO_DONTCARE);
		}
#endif /* defined(DEBUG) */
		nfound = uvn_findpages(uobj, origoffset, &npages,
		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
		KASSERT(npages == *ap->a_count);
		if (nfound == 0) {
			error = EBUSY;
			goto out_err;
		}
		if (!genfs_node_rdtrylock(vp)) {
			genfs_rel_pages(ap->a_m, npages);

			/*
			 * restore the array.
			 */

			for (i = 0; i < npages; i++) {
				pg = ap->a_m[i];

				if (pg != NULL && pg != PGO_DONTCARE) {
					ap->a_m[i] = NULL;
				}
				KASSERT(ap->a_m[i] == NULL ||
				    ap->a_m[i] == PGO_DONTCARE);
			}
		} else {
			genfs_node_unlock(vp);
		}
		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
		if (error == 0 && memwrite) {
			genfs_markdirty(vp);
		}
		goto out_err;
	}
	mutex_exit(uobj->vmobjlock);

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	const int fs_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
	const int fs_bsize = 1 << fs_bshift;
#define	blk_mask	(fs_bsize - 1)
#define	trunc_blk(x)	((x) & ~blk_mask)
#define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)

	const int orignmempages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignmempages;
	const off_t startoffset = trunc_blk(origoffset);
	const off_t endoffset = MIN(
	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
	    round_page(memeof));
	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;

	const int pgs_size = sizeof(struct vm_page *) *
	    ((endoffset - startoffset) >> PAGE_SHIFT);
	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];

	if (pgs_size > sizeof(pgs_onstack)) {
		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
		if (pgs == NULL) {
			pgs = pgs_onstack;
			error = ENOMEM;
			goto out_err;
		}
	} else {
		pgs = pgs_onstack;
		(void)memset(pgs, 0, pgs_size);
	}

	UVMHIST_LOG(ubchist, "ridx %jd npages %jd startoff %jd endoff %jd",
	    ridx, npages, startoffset, endoffset);

	if (trans_mount == NULL) {
		trans_mount = vp->v_mount;
		fstrans_start(trans_mount);
		/*
		 * check if this vnode is still valid.
		 */
		mutex_enter(vp->v_interlock);
		error = vdead_check(vp, 0);
		mutex_exit(vp->v_interlock);
		if (error)
			goto out_err_free;
		/*
		 * XXX: This assumes that we come here only via
		 * the mmio path
		 */
		if (blockalloc && vp->v_mount->mnt_wapbl) {
			error = WAPBL_BEGIN(trans_mount);
			if (error)
				goto out_err_free;
			holds_wapbl = true;
		}
	}

	/*
	 * hold g_glock to prevent a race with truncate.
	 *
	 * check if our idea of v_size is still valid.
	 */

	KASSERT(!glocked || genfs_node_wrlocked(vp));
	if (!glocked) {
		if (blockalloc) {
			genfs_node_wrlock(vp);
		} else {
			genfs_node_rdlock(vp);
		}
	}
	mutex_enter(uobj->vmobjlock);
	if (vp->v_size < origvsize) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		if (pgs != pgs_onstack)
			kmem_free(pgs, pgs_size);
		goto startover;
	}

	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		KASSERT(async != 0);
		genfs_rel_pages(&pgs[ridx], orignmempages);
		mutex_exit(uobj->vmobjlock);
		error = EBUSY;
		goto out_err_free;
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[ridx + i];

		if ((pg->flags & PG_FAKE) ||
		    (blockalloc && (pg->flags & PG_RDONLY))) {
			break;
		}
	}
	if (i == npages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		npages += ridx;
		goto out;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (overwrite) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			pg->flags &= ~(PG_RDONLY|PG_CLEAN);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignmempages) {
		int npgs;

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		genfs_rel_pages(&pgs[ridx], orignmempages);
		memset(pgs, 0, pgs_size);

		UVMHIST_LOG(ubchist, "reset npages start 0x%jx end 0x%jx",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
			if (!glocked) {
				genfs_node_unlock(vp);
			}
			KASSERT(async != 0);
			genfs_rel_pages(pgs, npages);
			mutex_exit(uobj->vmobjlock);
			error = EBUSY;
			goto out_err_free;
		}
	}

	mutex_exit(uobj->vmobjlock);
	error = genfs_getpages_read(vp, pgs, npages, startoffset, diskeof,
	    async, memwrite, blockalloc, glocked);
	if (!glocked) {
		genfs_node_unlock(vp);
	}
	if (error == 0 && async)
		goto out_err_free;
	mutex_enter(uobj->vmobjlock);

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		genfs_rel_pages(pgs, npages);
		mutex_exit(uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %jd", error,0,0,0);
		goto out_err_free;
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %jd", npages,0,0,0);
	error = 0;
	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %#jx flags 0x%jx",
		    (uintptr_t)pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			pg->flags &= ~(PG_FAKE);
			pmap_clear_modify(pgs[i]);
		}
		KASSERT(!memwrite || !blockalloc || (pg->flags & PG_RDONLY) == 0);
		if (i < ridx || i >= ridx + orignmempages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %#jx offset 0x%jx",
			    (uintptr_t)pg, pg->offset,0,0);
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_FAKE) {
				KASSERT(overwrite);
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pageenqueue(pg);
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
	mutex_exit(&uvm_pageqlock);
	if (memwrite) {
		genfs_markdirty(vp);
	}
	mutex_exit(uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignmempages * sizeof(struct vm_page *));
	}

out_err_free:
	if (pgs != NULL && pgs != pgs_onstack)
		kmem_free(pgs, pgs_size);
out_err:
	if (trans_mount != NULL) {
		if (holds_wapbl)
			WAPBL_END(trans_mount);
		fstrans_done(trans_mount);
	}
	return error;
}

/*
 * genfs_getpages_read: Read the pages in with VOP_BMAP/VOP_STRATEGY.
 *
 * "glocked" (which is currently not actually used) tells us not whether
 * the genfs_node is locked on entry (it always is) but whether it was
 * locked on entry to genfs_getpages.
 */
static int
genfs_getpages_read(struct vnode *vp, struct vm_page **pgs, int npages,
    off_t startoffset, off_t diskeof,
    bool async, bool memwrite, bool blockalloc, bool glocked)
{
	struct uvm_object * const uobj = &vp->v_uobj;
	const int fs_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
	const int dev_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
	kauth_cred_t const cred = curlwp->l_cred;		/* XXXUBC curlwp */
	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	bool sawhole = false;
	int i;
	int error = 0;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | (async ? 0 : UVMPAGER_MAPIN_WAITOK));
	if (kva == 0)
		return EBUSY;

	mbp = getiobuf(vp, true);
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_cflags = BC_BUSY;
	if (async) {
		mbp->b_flags = B_READ | B_ASYNC;
		mbp->b_iodone = uvm_aio_biodone;
	} else {
		mbp->b_flags = B_READ;
		mbp->b_iodone = NULL;
	}
	if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * skip over pages which are not PG_FAKE since in that case they have
	 * valid data that we need to preserve.
	 */

	tailstart = bytes;
	while (tailbytes > 0) {
		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);

		KASSERT(len <= tailbytes);
		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
			memset((void *)(kva + tailstart), 0, len);
			UVMHIST_LOG(ubchist, "tailbytes %#jx 0x%jx 0x%jx",
			    (uintptr_t)kva, tailstart, len, 0);
		}
		tailstart += len;
		tailbytes -= len;
	}

	/*
	 * now loop over the pages, reading as needed.
	 */

	bp = NULL;
	off_t offset;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		int run;
		daddr_t lbn, blkno;
		int pidx;
		struct vnode *devvp;

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			if ((pgs[pidx]->flags & PG_RDONLY)) {
				sawhole = true;
			}
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%jx",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%jx -> %jd\n",
			    lbn,error,0,0);
			skipbytes += bytes;
			bytes = 0;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			int pcount;

			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno == (daddr_t)-1) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%jx -> HOLE", lbn,0,0,0);

			sawhole = true;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			mutex_enter(uobj->vmobjlock);
			for (i = 0; i < holepages; i++) {
				if (memwrite) {
					pgs[pidx + i]->flags &= ~PG_CLEAN;
				}
				if (!blockalloc) {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
			}
			mutex_exit(uobj->vmobjlock);
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %#jx bp %#jx num now %jd",
			    (uintptr_t)vp, (uintptr_t)bp, vp->v_numoutput, 0);
			bp = getiobuf(vp, true);
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);

		UVMHIST_LOG(ubchist,
		    "bp %#jx offset 0x%x bcount 0x%x blkno 0x%x",
		    (uintptr_t)bp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

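	/*
	 * All sub-I/Os have been issued (or skipped).  Account for the
	 * skipped bytes on the master buffer and, for synchronous I/O,
	 * wait for the whole chain to complete.
	 */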
loopdone:
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		return 0;
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}

	/* Remove the mapping (make KVA available as soon as possible) */
	uvm_pagermapout(kva, npages);

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && blockalloc) {
		error = GOP_ALLOC(vp, startoffset,
		    npages << PAGE_SHIFT, 0, cred);
		UVMHIST_LOG(ubchist, "gop_alloc off 0x%jx/0x%jx -> %jd",
		    startoffset, npages << PAGE_SHIFT, error,0);
		if (!error) {
			mutex_enter(uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				struct vm_page *pg = pgs[i];

				if (pg == NULL) {
					continue;
				}
				pg->flags &= ~(PG_CLEAN|PG_RDONLY);
				UVMHIST_LOG(ubchist, "mark dirty pg %#jx",
				    (uintptr_t)pg, 0, 0, 0);
			}
			mutex_exit(uobj->vmobjlock);
		}
	}

	putiobuf(mbp);
	return error;
}

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.  we return with the
 *      object unlocked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *	thus, a caller might want to unlock higher level resources
 *	(e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.   thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the page queue lock.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.   the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).    if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.    if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).    in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).   also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.    depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.   we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.   however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	range by an estimate of the relatively higher cost of the hash lookup.
 */

int
genfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ * const ap = v;

	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
	    ap->a_flags, NULL);
}

int
genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
    int origflags, struct vm_page **busypg)
{
	struct uvm_object * const uobj = &vp->v_uobj;
	kmutex_t * const slock = uobj->vmobjlock;
	off_t off;
	int i, error, npages, nback;
	int freeflag;
	/*
	 * This array is larger than it should be so that its size is constant.
	 * The right size is MAXPAGES.
	 */
	struct vm_page *pgs[MAXPHYS / MIN_PAGE_SIZE];
#define MAXPAGES (MAXPHYS / PAGE_SIZE)
	struct vm_page *pg, *nextpg, *tpg, curmp, endmp;
	bool wasclean, by_list, needs_clean, yld;
	bool async = (origflags & PGO_SYNCIO) == 0;
	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
	struct lwp * const l = curlwp ? curlwp : &lwp0;
	struct genfs_node * const gp = VTOG(vp);
	struct mount *trans_mp;
	int flags;
	int dirtygen;
	bool modified;
	bool holds_wapbl;
	bool cleanall;
	bool onworklst;

	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
	KASSERT(startoff < endoff || endoff == 0);

	UVMHIST_LOG(ubchist, "vp %#jx pages %jd off 0x%jx len 0x%jx",
	    (uintptr_t)vp, uobj->uo_npages, startoff, endoff - startoff);

	trans_mp = NULL;
	holds_wapbl = false;

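	/*
	 * Restart point: we come back here after dropping the object lock
	 * to start a transaction, and when PGO_RECLAIM finds the vnode
	 * still on the syncer worklist at the end of a pass.
	 */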
retry:
	modified = false;
	flags = origflags;
	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
	if (uobj->uo_npages == 0) {
		if (vp->v_iflag & VI_ONWORKLST) {
			vp->v_iflag &= ~VI_WRMAPDIRTY;
			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
				vn_syncer_remove_from_worklist(vp);
		}
		if (trans_mp) {
			if (holds_wapbl)
				WAPBL_END(trans_mp);
			fstrans_done(trans_mp);
		}
		mutex_exit(slock);
		return (0);
	}

	/*
	 * the vnode has pages, set up to process the request.
	 */

	if (trans_mp == NULL && (flags & PGO_CLEANIT) != 0) {
		if (pagedaemon) {
			/* Pagedaemon must not sleep here. */
			trans_mp = vp->v_mount;
			error = fstrans_start_nowait(trans_mp);
			if (error) {
				mutex_exit(slock);
				return error;
			}
		} else {
			/*
			 * Cannot use vdeadcheck() here as this operation
			 * usually gets used from VOP_RECLAIM().  Test for
			 * change of v_mount instead and retry on change.
			 */
			mutex_exit(slock);
			trans_mp = vp->v_mount;
			fstrans_start(trans_mp);
			if (vp->v_mount != trans_mp) {
				fstrans_done(trans_mp);
				trans_mp = NULL;
			} else {
				holds_wapbl = (trans_mp->mnt_wapbl &&
				    (origflags & PGO_JOURNALLOCKED) == 0);
				if (holds_wapbl) {
					error = WAPBL_BEGIN(trans_mp);
					if (error) {
						fstrans_done(trans_mp);
						return error;
					}
				}
			}
			mutex_enter(slock);
			goto retry;
		}
	}

	error = 0;
	wasclean = (vp->v_numoutput == 0);
	off = startoff;
	if (endoff == 0 || flags & PGO_ALLPAGES) {
		endoff = trunc_page(LLONG_MAX);
	}
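	/*
	 * Decide how to traverse: walk the page list when the object has
	 * few pages relative to the range, otherwise look pages up by
	 * offset (see the cost discussion in the comment above).
	 */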
	by_list = (uobj->uo_npages <=
	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_TREE_PENALTY);

	/*
	 * if this vnode is known not to have dirty pages,
	 * don't bother to clean it out.
	 */

	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
#if !defined(DEBUG)
		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
			goto skip_scan;
		}
#endif /* !defined(DEBUG) */
		flags &= ~PGO_CLEANIT;
	}

	/*
	 * start the loop.  when scanning by list, hold the last page
	 * in the list before we start.  pages allocated after we start
	 * will be added to the end of the list, so we can stop at the
	 * current last page.
	 */

	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
	    (vp->v_iflag & VI_ONWORKLST) != 0;
	dirtygen = gp->g_dirtygen;
	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
	if (by_list) {
		curmp.flags = PG_MARKER;
		endmp.flags = PG_MARKER;
		pg = TAILQ_FIRST(&uobj->memq);
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
	} else {
		pg = uvm_pagelookup(uobj, off);
	}
	nextpg = NULL;
	while (by_list || off < endoff) {

		/*
		 * if the current page is not interesting, move on to the next.
		 */

		KASSERT(pg == NULL || pg->uobject == uobj ||
		    (pg->flags & PG_MARKER) != 0);
		KASSERT(pg == NULL ||
		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		    (pg->flags & (PG_BUSY|PG_MARKER)) != 0);
		if (by_list) {
			if (pg == &endmp) {
				break;
			}
			if (pg->flags & PG_MARKER) {
				pg = TAILQ_NEXT(pg, listq.queue);
				continue;
			}
			if (pg->offset < startoff || pg->offset >= endoff ||
			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
					wasclean = false;
				}
				pg = TAILQ_NEXT(pg, listq.queue);
				continue;
			}
			off = pg->offset;
		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
			if (pg != NULL) {
				wasclean = false;
			}
			off += PAGE_SIZE;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if the current page needs to be cleaned and it's busy,
		 * wait for it to become unbusy.
		 */

		yld = (l->l_cpu->ci_schedstate.spc_flags &
		    SPCF_SHOULDYIELD) && !pagedaemon;
		if (pg->flags & PG_BUSY || yld) {
			UVMHIST_LOG(ubchist, "busy %#jx", (uintptr_t)pg,
			   0, 0, 0);
			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
				UVMHIST_LOG(ubchist, "busyfail %#jx",
				    (uintptr_t)pg, 0, 0, 0);
				error = EDEADLK;
				if (busypg != NULL)
					*busypg = pg;
				break;
			}
			if (pagedaemon) {
				/*
				 * someone has taken the page while we
				 * dropped the lock for fstrans_start.
				 */
				break;
			}
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
				UVMHIST_LOG(ubchist, "curmp next %#jx",
				    (uintptr_t)TAILQ_NEXT(&curmp, listq.queue),
				    0, 0, 0);
			}
			if (yld) {
				mutex_exit(slock);
				preempt();
				mutex_enter(slock);
			} else {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
				mutex_enter(slock);
			}
			if (by_list) {
				UVMHIST_LOG(ubchist, "after next %#jx",
				    (uintptr_t)TAILQ_NEXT(&curmp, listq.queue),
				    0, 0, 0);
				pg = TAILQ_NEXT(&curmp, listq.queue);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
			} else {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if we're freeing, remove all mappings of the page now.
		 * if we're cleaning, check whether the page needs to be cleaned.
		 */

		if (flags & PGO_FREE) {
			pmap_page_protect(pg, VM_PROT_NONE);
		} else if (flags & PGO_CLEANIT) {

			/*
			 * if we still have some hope to pull this vnode off
			 * from the syncer queue, write-protect the page.
			 */

			if (cleanall && wasclean &&
			    gp->g_dirtygen == dirtygen) {

				/*
				 * uobj pages get wired only by uvm_fault
				 * where uobj is locked.
				 */

				if (pg->wire_count == 0) {
					pmap_page_protect(pg,
					    VM_PROT_READ|VM_PROT_EXECUTE);
				} else {
					cleanall = false;
				}
			}
		}

		if (flags & PGO_CLEANIT) {
			needs_clean = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
		} else {
			needs_clean = false;
		}

		/*
		 * if we're cleaning, build a cluster.
		 * the cluster will consist of pages which are currently dirty,
		 * but they will be returned to us marked clean.
		 * if not cleaning, just operate on the one page.
		 */

		if (needs_clean) {
			KDASSERT((vp->v_iflag & VI_ONWORKLST));
			wasclean = false;
			memset(pgs, 0, sizeof(pgs));
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "genfs_putpages");

			/*
			 * let the fs constrain the offset range of the cluster.
			 * we additionally constrain the range here such that
			 * it fits in the "pgs" pages array.
			 */

			off_t fslo, fshi, genlo, lo;
			GOP_PUTRANGE(vp, off, &fslo, &fshi);
			KASSERT(fslo == trunc_page(fslo));
			KASSERT(fslo <= off);
			KASSERT(fshi == trunc_page(fshi));
			KASSERT(fshi == 0 || off < fshi);

			if (off > MAXPHYS / 2)
				genlo = trunc_page(off - (MAXPHYS / 2));
			else
				genlo = 0;
			lo = MAX(fslo, genlo);

			/*
			 * first look backward.
			 */

			npages = (off - lo) >> PAGE_SHIFT;
			nback = npages;
			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
			if (nback) {
				memmove(&pgs[0], &pgs[npages - nback],
				    nback * sizeof(pgs[0]));
				if (npages - nback < nback)
					memset(&pgs[nback], 0,
					    (npages - nback) * sizeof(pgs[0]));
				else
					memset(&pgs[npages - nback], 0,
					    nback * sizeof(pgs[0]));
			}

			/*
			 * then plug in our page of interest.
			 */

			pgs[nback] = pg;

			/*
			 * then look forward to fill in the remaining space in
			 * the array of pages.
			 */

			npages = MAXPAGES - nback - 1;
			if (fshi)
				npages = MIN(npages,
					     (fshi - off - 1) >> PAGE_SHIFT);
			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
			    &pgs[nback + 1],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
			npages += nback + 1;
		} else {
			pgs[0] = pg;
			npages = 1;
			nback = 0;
		}

		/*
		 * apply FREE or DEACTIVATE options if requested.
		 */

		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			mutex_enter(&uvm_pageqlock);
		}
		for (i = 0; i < npages; i++) {
			tpg = pgs[i];
			KASSERT(tpg->uobject == uobj);
			if (by_list && tpg == TAILQ_NEXT(pg, listq.queue))
				pg = tpg;
			if (tpg->offset < startoff || tpg->offset >= endoff)
				continue;
			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
				uvm_pagedeactivate(tpg);
			} else if (flags & PGO_FREE) {
				pmap_page_protect(tpg, VM_PROT_NONE);
				if (tpg->flags & PG_BUSY) {
					tpg->flags |= freeflag;
					if (pagedaemon) {
						uvm_pageout_start(1);
						uvm_pagedequeue(tpg);
					}
				} else {

					/*
					 * ``page is not busy''
					 * implies that npages is 1
					 * and needs_clean is false.
					 */

					nextpg = TAILQ_NEXT(tpg, listq.queue);
					uvm_pagefree(tpg);
					if (pagedaemon)
						uvmexp.pdfreed++;
				}
			}
		}
		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			mutex_exit(&uvm_pageqlock);
		}
		if (needs_clean) {
			modified = true;

			/*
			 * start the i/o.  if we're traversing by list,
			 * keep our place in the list with a marker page.
			 */

			if (by_list) {
				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
				    listq.queue);
			}
			mutex_exit(slock);
			error = GOP_WRITE(vp, pgs, npages, flags);
			mutex_enter(slock);
			if (by_list) {
				pg = TAILQ_NEXT(&curmp, listq.queue);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
			}
			if (error) {
				break;
			}
			if (by_list) {
				continue;
			}
		}

		/*
		 * find the next page and continue if there was no error.
		 */

		if (by_list) {
			if (nextpg) {
				pg = nextpg;
				nextpg = NULL;
			} else {
				pg = TAILQ_NEXT(pg, listq.queue);
			}
		} else {
			off += (npages - nback) << PAGE_SHIFT;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
		}
	}
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
	}

	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
	}

	/*
	 * if we're cleaning and there was nothing to clean,
	 * take us off the syncer list.  if we started any i/o
	 * and we're doing sync i/o, wait for all writes to finish.
	 */

	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
	    (vp->v_iflag & VI_ONWORKLST) != 0) {
#if defined(DEBUG)
		TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
			if ((pg->flags & (PG_FAKE | PG_MARKER)) != 0) {
				continue;
			}
			if ((pg->flags & PG_CLEAN) == 0) {
				printf("%s: %p: !CLEAN\n", __func__, pg);
			}
			if (pmap_is_modified(pg)) {
				printf("%s: %p: modified\n", __func__, pg);
			}
		}
#endif /* defined(DEBUG) */
		vp->v_iflag &= ~VI_WRMAPDIRTY;
		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
			vn_syncer_remove_from_worklist(vp);
	}

#if !defined(DEBUG)
skip_scan:
#endif /* !defined(DEBUG) */

	/* Wait for output to complete. */
	if (!wasclean && !async && vp->v_numoutput != 0) {
		while (vp->v_numoutput != 0)
			cv_wait(&vp->v_cv, slock);
	}
	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
	mutex_exit(slock);

	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
		/*
		 * in the case of PGO_RECLAIM, make sure the vnode ends up clean.
		 * retrying is not a big deal because, in many cases,
		 * uobj->uo_npages is already 0 here.
		 */
		mutex_enter(slock);
		goto retry;
	}

	if (trans_mp) {
		if (holds_wapbl)
			WAPBL_END(trans_mp);
		fstrans_done(trans_mp);
	}

	return (error);
}

/*
 * Default putrange method for file systems that do not care
 * how many pages are given to one GOP_WRITE() call.
 */
void
genfs_gop_putrange(struct vnode *vp, off_t off, off_t *lop, off_t *hip)
{

	*lop = 0;
	*hip = 0;
}

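/*
 * genfs_gop_write: map the pages into kernel virtual space and write
 * them to backing store with genfs_do_io().
 */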
int
genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	off_t off;
	vaddr_t kva;
	size_t len;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx pgs %#jx npages %jd flags 0x%jx",
	    (uintptr_t)vp, (uintptr_t)pgs, npages, flags);

	off = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
	len = npages << PAGE_SHIFT;

	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
			    uvm_aio_biodone);

	return error;
}

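/*
 * genfs_gop_write_rwmap: like genfs_gop_write(), but map the pages with
 * UVMPAGER_MAPIN_READ, which yields a writable kernel mapping, for
 * callers that modify the page contents during the write.
 */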
int
genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	off_t off;
	vaddr_t kva;
	size_t len;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx pgs %#jx npages %jd flags 0x%jx",
	    (uintptr_t)vp, (uintptr_t)pgs, npages, flags);

	off = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	len = npages << PAGE_SHIFT;

	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
			    uvm_aio_biodone);

	return error;
}

/*
 * Backend routine for doing I/O to vnode pages.  Pages are already locked
 * and mapped into kernel memory.  Here we just look up the underlying
 * device block addresses and call the strategy routine.
 */

static int
genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
    enum uio_rw rw, void (*iodone)(struct buf *))
{
	int s, error;
	int fs_bshift, dev_bshift;
	off_t eof, offset, startoffset;
	size_t bytes, iobytes, skipbytes;
	struct buf *mbp, *bp;
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool lazy = (flags & PGO_LAZY) != 0;
	const bool iowrite = rw == UIO_WRITE;
	const int brw = iowrite ? B_WRITE : B_READ;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx kva %#jx len 0x%jx flags 0x%jx",
	    (uintptr_t)vp, (uintptr_t)kva, len, flags);

	KASSERT(vp->v_size <= vp->v_writesize);
	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
	if (vp->v_type != VBLK) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	error = 0;
	startoffset = off;
	bytes = MIN(len, eof - startoffset);
	skipbytes = 0;
	KASSERT(bytes != 0);

	if (iowrite) {
		mutex_enter(vp->v_interlock);
		vp->v_numoutput += 2;
		mutex_exit(vp->v_interlock);
	}
	mbp = getiobuf(vp, true);
	UVMHIST_LOG(ubchist, "vp %#jx mbp %#jx num now %jd bytes 0x%jx",
	    (uintptr_t)vp, (uintptr_t)mbp, vp->v_numoutput, bytes);
	mbp->b_bufsize = len;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_cflags = BC_BUSY | BC_AGE;
	if (async) {
		mbp->b_flags = brw | B_ASYNC;
		mbp->b_iodone = iodone;
	} else {
		mbp->b_flags = brw;
		mbp->b_iodone = NULL;
	}
	if (curlwp == uvm.pagedaemon_lwp)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else if (async || lazy)
		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		int run;
		daddr_t lbn, blkno;
		struct vnode *devvp;

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%jx -> %jd\n",
			    lbn, error, 0, 0);
			skipbytes += bytes;
			bytes = 0;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno == (daddr_t)-1) {
			if (!iowrite) {
				memset((char *)kva + (offset - startoffset), 0,
				    iobytes);
			}
			skipbytes += iobytes;
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %#jx bp %#jx num now %jd",
			    (uintptr_t)vp, (uintptr_t)bp, vp->v_numoutput, 0);
			bp = getiobuf(vp, true);
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);

		UVMHIST_LOG(ubchist,
		    "bp %#jx offset 0x%jx bcount 0x%jx blkno 0x%jx",
		    (uintptr_t)bp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	if (skipbytes) {
		UVMHIST_LOG(ubchist, "skipbytes %jd", skipbytes, 0,0,0);
	}
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
		return (0);
	}
	UVMHIST_LOG(ubchist, "waiting for mbp %#jx", (uintptr_t)mbp, 0, 0, 0);
	error = biowait(mbp);
	s = splbio();
	(*iodone)(mbp);
	splx(s);
	UVMHIST_LOG(ubchist, "returning, error %jd", error, 0, 0, 0);
	return (error);
}

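/*
 * genfs_compat_getpages: getpages for file systems that cannot use the
 * VOP_BMAP/VOP_STRATEGY fast path; missing pages are filled one at a
 * time with VOP_READ.
 */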
int
genfs_compat_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t origoffset;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs;
	vaddr_t kva;
	int i, error, orignpages, npages;
	struct iovec iov;
	struct uio uio;
	kauth_cred_t cred = curlwp->l_cred;
	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	pgs = ap->a_m;

	if (ap->a_flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT|UFP_NOALLOC| (memwrite ? UFP_NORDONLY : 0));

		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
		if (error == 0 && memwrite) {
			genfs_markdirty(vp);
		}
		return error;
	}
	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
		mutex_exit(uobj->vmobjlock);
		return EINVAL;
	}
	if ((ap->a_flags & PGO_SYNCIO) == 0) {
		mutex_exit(uobj->vmobjlock);
		return 0;
	}
	npages = orignpages;
	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
	mutex_exit(uobj->vmobjlock);
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if ((pg->flags & PG_FAKE) == 0) {
			continue;
		}
		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
		iov.iov_len = PAGE_SIZE;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
		uio.uio_rw = UIO_READ;
		uio.uio_resid = PAGE_SIZE;
		UIO_SETUP_SYSSPACE(&uio);
		/* XXX vn_lock */
		error = VOP_READ(vp, &uio, 0, cred);
		if (error) {
			break;
		}
		if (uio.uio_resid) {
			memset(iov.iov_base, 0, uio.uio_resid);
		}
	}
	uvm_pagermapout(kva, npages);
	mutex_enter(uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (error && (pg->flags & PG_FAKE) != 0) {
			pg->flags |= PG_RELEASED;
		} else {
			pmap_clear_modify(pg);
			uvm_pageactivate(pg);
		}
	}
	if (error) {
		uvm_page_unbusy(pgs, npages);
	}
	mutex_exit(&uvm_pageqlock);
	if (error == 0 && memwrite) {
		genfs_markdirty(vp);
	}
	mutex_exit(uobj->vmobjlock);
	return error;
}

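/*
 * genfs_compat_gop_write: flush pages with VOP_WRITE, then synthesize
 * a struct buf and run the normal aiodone path so that the pages are
 * unbusied and the write is accounted for.
 */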
   1653 int
   1654 genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1655     int flags)
   1656 {
   1657 	off_t offset;
   1658 	struct iovec iov;
   1659 	struct uio uio;
   1660 	kauth_cred_t cred = curlwp->l_cred;
   1661 	struct buf *bp;
   1662 	vaddr_t kva;
   1663 	int error;
   1664 
   1665 	offset = pgs[0]->offset;
   1666 	kva = uvm_pagermapin(pgs, npages,
   1667 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1668 
   1669 	iov.iov_base = (void *)kva;
   1670 	iov.iov_len = npages << PAGE_SHIFT;
   1671 	uio.uio_iov = &iov;
   1672 	uio.uio_iovcnt = 1;
   1673 	uio.uio_offset = offset;
   1674 	uio.uio_rw = UIO_WRITE;
   1675 	uio.uio_resid = npages << PAGE_SHIFT;
   1676 	UIO_SETUP_SYSSPACE(&uio);
   1677 	/* XXX vn_lock */
   1678 	error = VOP_WRITE(vp, &uio, 0, cred);
   1679 
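         	/*
         	 * Fake up a buf describing the range and hand it to
         	 * uvm_aio_aiodone(), which does the normal pager completion
         	 * work (unmapping the window and unbusying the pages).
         	 * v_numoutput is bumped here so that the decrement done by the
         	 * completion path stays balanced.
         	 */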
   1680 	mutex_enter(vp->v_interlock);
   1681 	vp->v_numoutput++;
   1682 	mutex_exit(vp->v_interlock);
   1683 
   1684 	bp = getiobuf(vp, true);
   1685 	bp->b_cflags = BC_BUSY | BC_AGE;
   1686 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   1687 	bp->b_data = (char *)kva;
   1688 	bp->b_bcount = npages << PAGE_SHIFT;
   1689 	bp->b_bufsize = npages << PAGE_SHIFT;
   1690 	bp->b_resid = 0;
   1691 	bp->b_error = error;
   1692 	uvm_aio_aiodone(bp);
    1693 	return error;
   1694 }
   1695 
   1696 /*
    1697  * Process a uio using direct I/O.  If we reach a part of the request
    1698  * that cannot be handled this way, just return.  The caller must then
    1699  * process at least part of the remaining request using buffered I/O
    1700  * before trying direct I/O again.
   1701  */
   1702 
   1703 void
   1704 genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
   1705 {
   1706 	struct vmspace *vs;
   1707 	struct iovec *iov;
   1708 	vaddr_t va;
   1709 	size_t len;
   1710 	const int mask = DEV_BSIZE - 1;
   1711 	int error;
   1712 	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
   1713 	    (ioflag & IO_JOURNALLOCKED) == 0);
   1714 
   1715 	/*
   1716 	 * We only support direct I/O to user space for now.
   1717 	 */
   1718 
   1719 	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
   1720 		return;
   1721 	}
   1722 
   1723 	/*
    1724 	 * If the vnode is mapped, we would need to take the getpages lock
    1725 	 * to stabilize the bmap, but then we would risk deadlocking while
    1726 	 * locking the pages if they belong to this same vnode (or to other
    1727 	 * vnodes in a cascade with the same effect).  Just fall back to
    1728 	 * buffered I/O for mapped vnodes to avoid this.
   1729 	 */
   1730 
   1731 	if (vp->v_vflag & VV_MAPPED) {
   1732 		return;
   1733 	}
   1734 
   1735 	if (need_wapbl) {
   1736 		error = WAPBL_BEGIN(vp->v_mount);
   1737 		if (error)
   1738 			return;
   1739 	}
   1740 
   1741 	/*
   1742 	 * Do as much of the uio as possible with direct I/O.
   1743 	 */
   1744 
   1745 	vs = uio->uio_vmspace;
   1746 	while (uio->uio_resid) {
   1747 		iov = uio->uio_iov;
   1748 		if (iov->iov_len == 0) {
   1749 			uio->uio_iov++;
   1750 			uio->uio_iovcnt--;
   1751 			continue;
   1752 		}
   1753 		va = (vaddr_t)iov->iov_base;
   1754 		len = MIN(iov->iov_len, genfs_maxdio);
   1755 		len &= ~mask;
   1756 
   1757 		/*
   1758 		 * If the next chunk is smaller than DEV_BSIZE or extends past
   1759 		 * the current EOF, then fall back to buffered I/O.
   1760 		 */
   1761 
   1762 		if (len == 0 || uio->uio_offset + len > vp->v_size) {
   1763 			break;
   1764 		}
   1765 
   1766 		/*
   1767 		 * Check alignment.  The file offset must be at least
   1768 		 * sector-aligned.  The exact constraint on memory alignment
   1769 		 * is very hardware-dependent, but requiring sector-aligned
   1770 		 * addresses there too is safe.
   1771 		 */
   1772 
   1773 		if (uio->uio_offset & mask || va & mask) {
   1774 			break;
   1775 		}
   1776 		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
   1777 					  uio->uio_rw);
   1778 		if (error) {
   1779 			break;
   1780 		}
   1781 		iov->iov_base = (char *)iov->iov_base + len;
   1782 		iov->iov_len -= len;
   1783 		uio->uio_offset += len;
   1784 		uio->uio_resid -= len;
   1785 	}
   1786 
   1787 	if (need_wapbl)
   1788 		WAPBL_END(vp->v_mount);
   1789 }
   1790 
   1791 /*
   1792  * Iodone routine for direct I/O.  We don't do much here since the request is
   1793  * always synchronous, so the caller will do most of the work after biowait().
   1794  */
   1795 
   1796 static void
   1797 genfs_dio_iodone(struct buf *bp)
   1798 {
   1799 
   1800 	KASSERT((bp->b_flags & B_ASYNC) == 0);
   1801 	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
   1802 		mutex_enter(bp->b_objlock);
   1803 		vwakeup(bp);
   1804 		mutex_exit(bp->b_objlock);
   1805 	}
   1806 	putiobuf(bp);
   1807 }
   1808 
   1809 /*
   1810  * Process one chunk of a direct I/O request.
   1811  */
   1812 
   1813 static int
   1814 genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
   1815     off_t off, enum uio_rw rw)
   1816 {
   1817 	struct vm_map *map;
   1818 	struct pmap *upm, *kpm __unused;
   1819 	size_t klen = round_page(uva + len) - trunc_page(uva);
   1820 	off_t spoff, epoff;
   1821 	vaddr_t kva, puva;
   1822 	paddr_t pa;
   1823 	vm_prot_t prot;
   1824 	int error, rv __diagused, poff, koff;
   1825 	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
   1826 		(rw == UIO_WRITE ? PGO_FREE : 0);
   1827 
   1828 	/*
   1829 	 * For writes, verify that this range of the file already has fully
   1830 	 * allocated backing store.  If there are any holes, just punt and
   1831 	 * make the caller take the buffered write path.
   1832 	 */
   1833 
   1834 	if (rw == UIO_WRITE) {
   1835 		daddr_t lbn, elbn, blkno;
   1836 		int bsize, bshift, run;
   1837 
   1838 		bshift = vp->v_mount->mnt_fs_bshift;
   1839 		bsize = 1 << bshift;
   1840 		lbn = off >> bshift;
   1841 		elbn = (off + len + bsize - 1) >> bshift;
   1842 		while (lbn < elbn) {
   1843 			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
   1844 			if (error) {
   1845 				return error;
   1846 			}
   1847 			if (blkno == (daddr_t)-1) {
   1848 				return ENOSPC;
   1849 			}
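         			/* Skip the whole contiguous run that VOP_BMAP reported. */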
   1850 			lbn += 1 + run;
   1851 		}
   1852 	}
   1853 
   1854 	/*
   1855 	 * Flush any cached pages for parts of the file that we're about to
   1856 	 * access.  If we're writing, invalidate pages as well.
   1857 	 */
   1858 
   1859 	spoff = trunc_page(off);
   1860 	epoff = round_page(off + len);
   1861 	mutex_enter(vp->v_interlock);
   1862 	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
   1863 	if (error) {
   1864 		return error;
   1865 	}
   1866 
   1867 	/*
   1868 	 * Wire the user pages and remap them into kernel memory.
   1869 	 */
   1870 
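         	/*
         	 * A file read stores into the user pages, so they must be mapped
         	 * writable; a file write only reads from them.
         	 */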
   1871 	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
   1872 	error = uvm_vslock(vs, (void *)uva, len, prot);
   1873 	if (error) {
   1874 		return error;
   1875 	}
   1876 
   1877 	map = &vs->vm_map;
   1878 	upm = vm_map_pmap(map);
   1879 	kpm = vm_map_pmap(kernel_map);
   1880 	puva = trunc_page(uva);
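         	/*
         	 * Allocate KVA with the same virtual color as the user address
         	 * so that the double mapping does not create cache aliases.
         	 */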
   1881 	kva = uvm_km_alloc(kernel_map, klen, atop(puva) & uvmexp.colormask,
   1882 	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
   1883 	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
   1884 		rv = pmap_extract(upm, puva + poff, &pa);
   1885 		KASSERT(rv);
   1886 		pmap_kenter_pa(kva + poff, pa, prot, PMAP_WIRED);
   1887 	}
   1888 	pmap_update(kpm);
   1889 
   1890 	/*
   1891 	 * Do the I/O.
   1892 	 */
   1893 
   1894 	koff = uva - trunc_page(uva);
   1895 	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
   1896 			    genfs_dio_iodone);
   1897 
   1898 	/*
   1899 	 * Tear down the kernel mapping.
   1900 	 */
   1901 
   1902 	pmap_kremove(kva, klen);
   1903 	pmap_update(kpm);
   1904 	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
   1905 
   1906 	/*
   1907 	 * Unwire the user pages.
   1908 	 */
   1909 
   1910 	uvm_vsunlock(vs, (void *)uva, len);
   1911 	return error;
   1912 }
   1913