/*	$NetBSD: genfs_io.c,v 1.77 2019/12/13 20:10:21 ad Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.77 2019/12/13 20:10:21 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/kmem.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>
#include <sys/buf.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>

static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
    off_t, enum uio_rw);
static void genfs_dio_iodone(struct buf *);

static int genfs_getpages_read(struct vnode *, struct vm_page **, int, off_t,
    off_t, bool, bool, bool, bool);
static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
    void (*)(struct buf *));
static void genfs_rel_pages(struct vm_page **, unsigned int);
static void genfs_markdirty(struct vnode *);

int genfs_maxdio = MAXPHYS;

static void
genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
{
	unsigned int i;

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE)
			continue;
		KASSERT(uvm_page_locked_p(pg));
		if (pg->flags & PG_FAKE) {
			pg->flags |= PG_RELEASED;
		}
	}
	uvm_page_unbusy(pgs, npages);
}

static void
genfs_markdirty(struct vnode *vp)
{
	struct genfs_node * const gp = VTOG(vp);

	KASSERT(mutex_owned(vp->v_interlock));
	gp->g_dirtygen++;
	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
		vp->v_iflag |= VI_WRMAPDIRTY;
	}
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */
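
/*
 * Illustrative sketch (not part of the original source): callers reach
 * this routine through VOP_GETPAGES().  The vnode "vp", offset "off"
 * and the single-page request below are hypothetical.
 *
 *	struct vm_page *pgs[1] = { NULL };
 *	int npages = 1;
 *	int error;
 *
 *	mutex_enter(vp->v_uobj.vmobjlock);
 *	error = VOP_GETPAGES(vp, trunc_page(off), pgs, &npages, 0,
 *	    VM_PROT_READ, UVM_ADV_NORMAL, PGO_SYNCIO);
 *
 * On success the returned page is PG_BUSY and the object lock has been
 * released; the caller must unbusy the page once it is done with it.
 */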

int
genfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ * const ap = v;

	off_t diskeof, memeof;
	int i, error, npages;
	const int flags = ap->a_flags;
	struct vnode * const vp = ap->a_vp;
	struct uvm_object * const uobj = &vp->v_uobj;
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
	const bool need_wapbl = (vp->v_mount->mnt_wapbl &&
			(flags & PGO_JOURNALLOCKED) == 0);
	const bool glocked = (flags & PGO_GLOCKHELD) != 0;
	bool holds_wapbl = false;
	struct mount *trans_mount = NULL;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx off 0x%jx/%jx count %jd",
	    (uintptr_t)vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
	    vp->v_type == VLNK || vp->v_type == VBLK);

#ifdef DIAGNOSTIC
	if ((flags & PGO_JOURNALLOCKED) && vp->v_mount->mnt_wapbl)
		WAPBL_JLOCK_ASSERT(vp->v_mount);
#endif

	error = vdead_check(vp, VDEAD_NOWAIT);
	if (error) {
		if ((flags & PGO_LOCKED) == 0)
			mutex_exit(uobj->vmobjlock);
		return error;
	}

startover:
	error = 0;
	const voff_t origvsize = vp->v_size;
	const off_t origoffset = ap->a_offset;
	const int orignpages = *ap->a_count;

	GOP_SIZE(vp, origvsize, &diskeof, 0);
	if (flags & PGO_PASTEOF) {
		off_t newsize;
#if defined(DIAGNOSTIC)
		off_t writeeof;
#endif /* defined(DIAGNOSTIC) */

		newsize = MAX(origvsize,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
#if defined(DIAGNOSTIC)
		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
		if (newsize > round_page(writeeof)) {
			panic("%s: past eof: %" PRId64 " vs. %" PRId64,
			    __func__, newsize, round_page(writeeof));
		}
#endif /* defined(DIAGNOSTIC) */
	} else {
		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			mutex_exit(uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%jx count %jd goes past EOF 0x%jx",
		    origoffset, *ap->a_count, memeof, 0);
		error = EINVAL;
		goto out_err;
	}

	/* uobj is locked */

	if ((flags & PGO_NOTIMESTAMP) == 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		int updflags = 0;

		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
			updflags = GOP_UPDATE_ACCESSED;
		}
		if (memwrite) {
			updflags |= GOP_UPDATE_MODIFIED;
		}
		if (updflags != 0) {
			GOP_MARKUPDATE(vp, updflags);
		}
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		int nfound;
		struct vm_page *pg;

		KASSERT(!glocked);
		npages = *ap->a_count;
#if defined(DEBUG)
		for (i = 0; i < npages; i++) {
			pg = ap->a_m[i];
			KASSERT(pg == NULL || pg == PGO_DONTCARE);
		}
#endif /* defined(DEBUG) */
		nfound = uvn_findpages(uobj, origoffset, &npages,
		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
		KASSERT(npages == *ap->a_count);
		if (nfound == 0) {
			error = EBUSY;
			goto out_err;
		}
		if (!genfs_node_rdtrylock(vp)) {
			genfs_rel_pages(ap->a_m, npages);

			/*
			 * restore the array.
			 */

			for (i = 0; i < npages; i++) {
				pg = ap->a_m[i];

				if (pg != NULL && pg != PGO_DONTCARE) {
					ap->a_m[i] = NULL;
				}
				KASSERT(ap->a_m[i] == NULL ||
				    ap->a_m[i] == PGO_DONTCARE);
			}
		} else {
			genfs_node_unlock(vp);
		}
		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
		if (error == 0 && memwrite) {
			genfs_markdirty(vp);
		}
		goto out_err;
	}
	mutex_exit(uobj->vmobjlock);

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	const int fs_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
	const int fs_bsize = 1 << fs_bshift;
#define	blk_mask	(fs_bsize - 1)
#define	trunc_blk(x)	((x) & ~blk_mask)
#define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)
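
	/*
	 * Worked example (illustrative): with fs_bshift == 13
	 * (fs_bsize == 8192), trunc_blk(0x2400) == 0x2000 and
	 * round_blk(0x2400) == 0x4000, so a request at offset 0x2400 is
	 * expanded below to cover its whole enclosing 8k block.
	 */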

	const int orignmempages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignmempages;
	const off_t startoffset = trunc_blk(origoffset);
	const off_t endoffset = MIN(
	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
	    round_page(memeof));
	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;

	const int pgs_size = sizeof(struct vm_page *) *
	    ((endoffset - startoffset) >> PAGE_SHIFT);
	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];

	if (pgs_size > sizeof(pgs_onstack)) {
		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
		if (pgs == NULL) {
			pgs = pgs_onstack;
			error = ENOMEM;
			goto out_err;
		}
	} else {
		pgs = pgs_onstack;
		(void)memset(pgs, 0, pgs_size);
	}

	UVMHIST_LOG(ubchist, "ridx %jd npages %jd startoff %jd endoff %jd",
	    ridx, npages, startoffset, endoffset);

	if (trans_mount == NULL) {
		trans_mount = vp->v_mount;
		fstrans_start(trans_mount);
		/*
		 * check if this vnode is still valid.
		 */
		mutex_enter(vp->v_interlock);
		error = vdead_check(vp, 0);
		mutex_exit(vp->v_interlock);
		if (error)
			goto out_err_free;
		/*
		 * XXX: This assumes that we come here only via
		 * the mmio path
		 */
		if (blockalloc && need_wapbl) {
			error = WAPBL_BEGIN(trans_mount);
			if (error)
				goto out_err_free;
			holds_wapbl = true;
		}
	}

	/*
	 * hold g_glock to prevent a race with truncate.
	 *
	 * check if our idea of v_size is still valid.
	 */

	KASSERT(!glocked || genfs_node_wrlocked(vp));
	if (!glocked) {
		if (blockalloc) {
			genfs_node_wrlock(vp);
		} else {
			genfs_node_rdlock(vp);
		}
	}
	mutex_enter(uobj->vmobjlock);
	if (vp->v_size < origvsize) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		if (pgs != pgs_onstack)
			kmem_free(pgs, pgs_size);
		goto startover;
	}

	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		KASSERT(async != 0);
		genfs_rel_pages(&pgs[ridx], orignmempages);
		mutex_exit(uobj->vmobjlock);
		error = EBUSY;
		goto out_err_free;
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[ridx + i];

		if ((pg->flags & PG_FAKE) ||
		    (blockalloc && (pg->flags & PG_RDONLY))) {
			break;
		}
	}
	if (i == npages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		npages += ridx;
		goto out;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (overwrite) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			pg->flags &= ~(PG_RDONLY|PG_CLEAN);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * the pages weren't all resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignmempages) {
		int npgs;

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		genfs_rel_pages(&pgs[ridx], orignmempages);
		memset(pgs, 0, pgs_size);

		UVMHIST_LOG(ubchist, "reset npages start 0x%jx end 0x%jx",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
			if (!glocked) {
				genfs_node_unlock(vp);
			}
			KASSERT(async != 0);
			genfs_rel_pages(pgs, npages);
			mutex_exit(uobj->vmobjlock);
			error = EBUSY;
			goto out_err_free;
		}
	}

	mutex_exit(uobj->vmobjlock);
	error = genfs_getpages_read(vp, pgs, npages, startoffset, diskeof,
	    async, memwrite, blockalloc, glocked);
	if (!glocked) {
		genfs_node_unlock(vp);
	}
	if (error == 0 && async)
		goto out_err_free;
	mutex_enter(uobj->vmobjlock);

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		genfs_rel_pages(pgs, npages);
		mutex_exit(uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %jd", error,0,0,0);
		goto out_err_free;
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %jd", npages,0,0,0);
	error = 0;
	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %#jx flags 0x%jx",
		    (uintptr_t)pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			pg->flags &= ~(PG_FAKE);
			pmap_clear_modify(pgs[i]);
		}
		KASSERT(!memwrite || !blockalloc || (pg->flags & PG_RDONLY) == 0);
		if (i < ridx || i >= ridx + orignmempages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %#jx offset 0x%jx",
			    (uintptr_t)pg, pg->offset,0,0);
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_FAKE) {
				KASSERT(overwrite);
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pageenqueue(pg);
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
	if (memwrite) {
		genfs_markdirty(vp);
	}
	mutex_exit(uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignmempages * sizeof(struct vm_page *));
	}

out_err_free:
	if (pgs != NULL && pgs != pgs_onstack)
		kmem_free(pgs, pgs_size);
out_err:
	if (trans_mount != NULL) {
		if (holds_wapbl)
			WAPBL_END(trans_mount);
		fstrans_done(trans_mount);
	}
	return error;
}

/*
 * genfs_getpages_read: Read the pages in with VOP_BMAP/VOP_STRATEGY.
 *
 * "glocked" (currently unused here) indicates not whether the genfs_node
 * is locked on entry (it always is), but whether it was already locked
 * when genfs_getpages was entered.
 */
static int
genfs_getpages_read(struct vnode *vp, struct vm_page **pgs, int npages,
    off_t startoffset, off_t diskeof,
    bool async, bool memwrite, bool blockalloc, bool glocked)
{
	struct uvm_object * const uobj = &vp->v_uobj;
	const int fs_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
	const int dev_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
	kauth_cred_t const cred = curlwp->l_cred;		/* XXXUBC curlwp */
	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	bool sawhole = false;
	int i;
	int error = 0;

	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | (async ? 0 : UVMPAGER_MAPIN_WAITOK));
	if (kva == 0)
		return EBUSY;

	if (uvm.aiodone_queue == NULL)
		async = false;

	mbp = getiobuf(vp, true);
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_cflags = BC_BUSY;
	if (async) {
		mbp->b_flags = B_READ | B_ASYNC;
		mbp->b_iodone = uvm_aio_biodone;
	} else {
		mbp->b_flags = B_READ;
		mbp->b_iodone = NULL;
	}
	if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * skip over pages which are not PG_FAKE since in that case they have
	 * valid data that we need to preserve.
	 */
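
	/*
	 * Worked example (illustrative, assuming 4k pages): for a
	 * two-page request at startoffset 0 with diskeof == 0x1800,
	 * totalbytes is 0x2000, bytes 0x1800 and tailbytes 0x800; the
	 * loop below zeroes the final 0x800 bytes of the second page,
	 * but only if that page is PG_FAKE.
	 */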

	tailstart = bytes;
	while (tailbytes > 0) {
		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);

		KASSERT(len <= tailbytes);
		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
			memset((void *)(kva + tailstart), 0, len);
			UVMHIST_LOG(ubchist, "tailbytes %#jx 0x%jx 0x%jx",
			    (uintptr_t)kva, tailstart, len, 0);
		}
		tailstart += len;
		tailbytes -= len;
	}

	/*
	 * now loop over the pages, reading as needed.
	 */

	bp = NULL;
	off_t offset;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		int run;
		daddr_t lbn, blkno;
		int pidx;
		struct vnode *devvp;

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			if ((pgs[pidx]->flags & PG_RDONLY)) {
				sawhole = true;
			}
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%jx",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%jx -> %jd\n",
			    lbn,error,0,0);
			skipbytes += bytes;
			bytes = 0;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			int pcount;

			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno == (daddr_t)-1) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%jx -> HOLE", lbn,0,0,0);

			sawhole = true;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			mutex_enter(uobj->vmobjlock);
			for (i = 0; i < holepages; i++) {
				if (memwrite) {
					pgs[pidx + i]->flags &= ~PG_CLEAN;
				}
				if (!blockalloc) {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
			}
			mutex_exit(uobj->vmobjlock);
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %#jx bp %#jx num now %jd",
			    (uintptr_t)vp, (uintptr_t)bp, vp->v_numoutput, 0);
			bp = getiobuf(vp, true);
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);

		UVMHIST_LOG(ubchist,
		    "bp %#jx offset 0x%jx bcount 0x%jx blkno 0x%jx",
		    (uintptr_t)bp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		return 0;
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}

	/* Remove the mapping (make KVA available as soon as possible) */
	uvm_pagermapout(kva, npages);

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && blockalloc) {
		error = GOP_ALLOC(vp, startoffset,
		    npages << PAGE_SHIFT, 0, cred);
		UVMHIST_LOG(ubchist, "gop_alloc off 0x%jx/0x%jx -> %jd",
		    startoffset, npages << PAGE_SHIFT, error,0);
		if (!error) {
			mutex_enter(uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				struct vm_page *pg = pgs[i];

				if (pg == NULL) {
					continue;
				}
				pg->flags &= ~(PG_CLEAN|PG_RDONLY);
				UVMHIST_LOG(ubchist, "mark dirty pg %#jx",
				    (uintptr_t)pg, 0, 0, 0);
			}
			mutex_exit(uobj->vmobjlock);
		}
	}

	putiobuf(mbp);
	return error;
}

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.  we return with the
 *      object unlocked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *	thus, a caller might want to unlock higher level resources
 *	(e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.   thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the page queue lock.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.   the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).    if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.    if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).    in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).   also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.    depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.   we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.   however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	range by an estimate of the relatively higher cost of the hash lookup.
 */
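
/*
 * For example (illustrative, assuming UVM_PAGE_TREE_PENALTY == 4):
 * flushing a 16-page range of an object holding 100 pages compares
 * 100 against 16 * 4 == 64, so the scan is done by per-page lookup;
 * the same range on a 50-page object would be scanned by list.
 */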

int
genfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ * const ap = v;

	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
	    ap->a_flags, NULL);
}
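
/*
 * Illustrative sketch (not part of the original source): a typical
 * fsync-style caller flushes every page of a vnode synchronously with
 *
 *	mutex_enter(vp->v_uobj.vmobjlock);
 *	error = VOP_PUTPAGES(vp, 0, 0,
 *	    PGO_ALLPAGES | PGO_CLEANIT | PGO_SYNCIO);
 *
 * The object lock is taken by the caller and released by the routine,
 * as described in the big comment above.
 */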

int
genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
    int origflags, struct vm_page **busypg)
{
	struct uvm_object * const uobj = &vp->v_uobj;
	kmutex_t * const slock = uobj->vmobjlock;
	off_t off;
	int i, error, npages, nback;
	int freeflag;
	/*
	 * This array is larger than it should be so that its size is
	 * constant.  The right size is MAXPAGES.
	 */
	struct vm_page *pgs[MAXPHYS / MIN_PAGE_SIZE];
#define MAXPAGES (MAXPHYS / PAGE_SIZE)
	struct vm_page *pg, *nextpg, *tpg, curmp, endmp;
	bool wasclean, by_list, needs_clean, yld;
	bool async = (origflags & PGO_SYNCIO) == 0;
	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
	struct lwp * const l = curlwp ? curlwp : &lwp0;
	struct genfs_node * const gp = VTOG(vp);
	struct mount *trans_mp;
	int flags;
	int dirtygen;
	bool modified;
	bool holds_wapbl;
	bool cleanall;
	bool onworklst;

	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
	KASSERT(startoff < endoff || endoff == 0);

	UVMHIST_LOG(ubchist, "vp %#jx pages %jd off 0x%jx len 0x%jx",
	    (uintptr_t)vp, uobj->uo_npages, startoff, endoff - startoff);

#ifdef DIAGNOSTIC
	if ((origflags & PGO_JOURNALLOCKED) && vp->v_mount->mnt_wapbl)
		WAPBL_JLOCK_ASSERT(vp->v_mount);
#endif

	trans_mp = NULL;
	holds_wapbl = false;

retry:
	modified = false;
	flags = origflags;
	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
	if (uobj->uo_npages == 0) {
		if (vp->v_iflag & VI_ONWORKLST) {
			vp->v_iflag &= ~VI_WRMAPDIRTY;
			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
				vn_syncer_remove_from_worklist(vp);
		}
		if (trans_mp) {
			if (holds_wapbl)
				WAPBL_END(trans_mp);
			fstrans_done(trans_mp);
		}
		mutex_exit(slock);
		return (0);
	}

	/*
	 * the vnode has pages, set up to process the request.
	 */

	if (trans_mp == NULL && (flags & PGO_CLEANIT) != 0) {
		if (pagedaemon) {
			/* Pagedaemon must not sleep here. */
			trans_mp = vp->v_mount;
			error = fstrans_start_nowait(trans_mp);
			if (error) {
				mutex_exit(slock);
				return error;
			}
		} else {
			/*
			 * Cannot use vdeadcheck() here as this operation
			 * usually gets used from VOP_RECLAIM().  Test for
			 * change of v_mount instead and retry on change.
			 */
			mutex_exit(slock);
			trans_mp = vp->v_mount;
			fstrans_start(trans_mp);
			if (vp->v_mount != trans_mp) {
				fstrans_done(trans_mp);
				trans_mp = NULL;
			} else {
				holds_wapbl = (trans_mp->mnt_wapbl &&
				    (origflags & PGO_JOURNALLOCKED) == 0);
				if (holds_wapbl) {
					error = WAPBL_BEGIN(trans_mp);
					if (error) {
						fstrans_done(trans_mp);
						return error;
					}
				}
			}
			mutex_enter(slock);
			goto retry;
		}
	}

	error = 0;
	wasclean = (vp->v_numoutput == 0);
	off = startoff;
	if (endoff == 0 || flags & PGO_ALLPAGES) {
		endoff = trunc_page(LLONG_MAX);
	}
	by_list = (uobj->uo_npages <=
	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_TREE_PENALTY);

	/*
	 * if this vnode is known not to have dirty pages,
	 * don't bother to clean it out.
	 */

	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
#if !defined(DEBUG)
		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
			goto skip_scan;
		}
#endif /* !defined(DEBUG) */
		flags &= ~PGO_CLEANIT;
	}

	/*
	 * start the loop.  when scanning by list, hold the last page
	 * in the list before we start.  pages allocated after we start
	 * will be added to the end of the list, so we can stop at the
	 * current last page.
	 */

	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
	    (vp->v_iflag & VI_ONWORKLST) != 0;
	dirtygen = gp->g_dirtygen;
	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
	if (by_list) {
		curmp.flags = PG_MARKER;
		endmp.flags = PG_MARKER;
		pg = TAILQ_FIRST(&uobj->memq);
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
	} else {
		pg = uvm_pagelookup(uobj, off);
	}
	nextpg = NULL;
	while (by_list || off < endoff) {

		/*
		 * if the current page is not interesting, move on to the next.
		 */

		KASSERT(pg == NULL || (pg->flags & PG_MARKER) != 0 ||
		    pg->uobject == uobj);
		KASSERT(pg == NULL ||
		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		    (pg->flags & (PG_BUSY|PG_MARKER)) != 0);
		if (by_list) {
			if (pg == &endmp) {
				break;
			}
			if (pg->flags & PG_MARKER) {
				pg = TAILQ_NEXT(pg, listq.queue);
				continue;
			}
			if (pg->offset < startoff || pg->offset >= endoff ||
			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
					wasclean = false;
				}
				pg = TAILQ_NEXT(pg, listq.queue);
				continue;
			}
			off = pg->offset;
		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
			if (pg != NULL) {
				wasclean = false;
			}
			off += PAGE_SIZE;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if the current page needs to be cleaned and it's busy,
		 * wait for it to become unbusy.
		 */

		yld = (l->l_cpu->ci_schedstate.spc_flags &
		    SPCF_SHOULDYIELD) && !pagedaemon;
		if (pg->flags & PG_BUSY || yld) {
			UVMHIST_LOG(ubchist, "busy %#jx", (uintptr_t)pg,
			   0, 0, 0);
			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
				UVMHIST_LOG(ubchist, "busyfail %#jx",
				    (uintptr_t)pg, 0, 0, 0);
				error = EDEADLK;
				if (busypg != NULL)
					*busypg = pg;
				break;
			}
			if (pagedaemon) {
				/*
				 * someone has taken the page while we
				 * dropped the lock for fstrans_start.
				 */
				break;
			}
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
				UVMHIST_LOG(ubchist, "curmp next %#jx",
				    (uintptr_t)TAILQ_NEXT(&curmp, listq.queue),
				    0, 0, 0);
			}
			if (yld) {
				mutex_exit(slock);
				preempt();
				mutex_enter(slock);
			} else {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
				mutex_enter(slock);
			}
			if (by_list) {
				UVMHIST_LOG(ubchist, "after next %#jx",
				    (uintptr_t)TAILQ_NEXT(&curmp, listq.queue),
				    0, 0, 0);
				pg = TAILQ_NEXT(&curmp, listq.queue);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
			} else {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if we're freeing, remove all mappings of the page now.
		 * if we're cleaning, check whether the page needs to be
		 * cleaned.
		 */

		if (flags & PGO_FREE) {
			pmap_page_protect(pg, VM_PROT_NONE);
		} else if (flags & PGO_CLEANIT) {

			/*
			 * if we still have some hope to pull this vnode off
			 * from the syncer queue, write-protect the page.
			 */

			if (cleanall && wasclean &&
			    gp->g_dirtygen == dirtygen) {

				/*
				 * uobj pages get wired only by uvm_fault
				 * where uobj is locked.
				 */

				if (pg->wire_count == 0) {
					pmap_page_protect(pg,
					    VM_PROT_READ|VM_PROT_EXECUTE);
				} else {
					cleanall = false;
				}
			}
		}

		if (flags & PGO_CLEANIT) {
			needs_clean = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
		} else {
			needs_clean = false;
		}

		/*
		 * if we're cleaning, build a cluster.
		 * the cluster will consist of pages which are currently dirty,
		 * but they will be returned to us marked clean.
		 * if not cleaning, just operate on the one page.
		 */

		if (needs_clean) {
			KDASSERT((vp->v_iflag & VI_ONWORKLST));
			wasclean = false;
			memset(pgs, 0, sizeof(pgs));
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "genfs_putpages");

			/*
			 * let the fs constrain the offset range of the cluster.
			 * we additionally constrain the range here such that
			 * it fits in the "pgs" pages array.
			 */

			off_t fslo, fshi, genlo, lo;
			GOP_PUTRANGE(vp, off, &fslo, &fshi);
			KASSERT(fslo == trunc_page(fslo));
			KASSERT(fslo <= off);
			KASSERT(fshi == trunc_page(fshi));
			KASSERT(fshi == 0 || off < fshi);

			if (off > MAXPHYS / 2)
				genlo = trunc_page(off - (MAXPHYS / 2));
			else
				genlo = 0;
			lo = MAX(fslo, genlo);

			/*
			 * first look backward.
			 */

			npages = (off - lo) >> PAGE_SHIFT;
			nback = npages;
			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
			if (nback) {
				memmove(&pgs[0], &pgs[npages - nback],
				    nback * sizeof(pgs[0]));
				if (npages - nback < nback)
					memset(&pgs[nback], 0,
					    (npages - nback) * sizeof(pgs[0]));
				else
					memset(&pgs[npages - nback], 0,
					    nback * sizeof(pgs[0]));
			}

			/*
			 * then plug in our page of interest.
			 */

			pgs[nback] = pg;

			/*
			 * then look forward to fill in the remaining space in
			 * the array of pages.
			 */

			npages = MAXPAGES - nback - 1;
			if (fshi)
				npages = MIN(npages,
					     (fshi - off - 1) >> PAGE_SHIFT);
			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
			    &pgs[nback + 1],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
			npages += nback + 1;
		} else {
			pgs[0] = pg;
			npages = 1;
			nback = 0;
		}

		/*
		 * apply FREE or DEACTIVATE options if requested.
		 */

		for (i = 0; i < npages; i++) {
			tpg = pgs[i];
			KASSERT(tpg->uobject == uobj);
			if (by_list && tpg == TAILQ_NEXT(pg, listq.queue))
				pg = tpg;
			if (tpg->offset < startoff || tpg->offset >= endoff)
				continue;
			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
				uvm_pagedeactivate(tpg);
			} else if (flags & PGO_FREE) {
				pmap_page_protect(tpg, VM_PROT_NONE);
				if (tpg->flags & PG_BUSY) {
					tpg->flags |= freeflag;
					if (pagedaemon) {
						uvm_pageout_start(1);
						uvm_pagedequeue(tpg);
					}
				} else {

					/*
					 * ``page is not busy''
					 * implies that npages is 1
					 * and needs_clean is false.
					 */

					nextpg = TAILQ_NEXT(tpg, listq.queue);
					uvm_pagefree(tpg);
					if (pagedaemon)
						uvmexp.pdfreed++;
				}
			}
		}
		if (needs_clean) {
			modified = true;

			/*
			 * start the i/o.  if we're traversing by list,
			 * keep our place in the list with a marker page.
			 */

			if (by_list) {
				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
				    listq.queue);
			}
			mutex_exit(slock);
			error = GOP_WRITE(vp, pgs, npages, flags);
			mutex_enter(slock);
			if (by_list) {
				pg = TAILQ_NEXT(&curmp, listq.queue);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
			}
			if (error) {
				break;
			}
			if (by_list) {
				continue;
			}
		}

		/*
		 * find the next page and continue if there was no error.
		 */

		if (by_list) {
			if (nextpg) {
				pg = nextpg;
				nextpg = NULL;
			} else {
				pg = TAILQ_NEXT(pg, listq.queue);
			}
		} else {
			off += (npages - nback) << PAGE_SHIFT;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
		}
	}
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
	}

	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
	}

	/*
	 * if we're cleaning and there was nothing to clean,
	 * take us off the syncer list.  if we started any i/o
	 * and we're doing sync i/o, wait for all writes to finish.
	 */

	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
	    (vp->v_iflag & VI_ONWORKLST) != 0) {
#if defined(DEBUG)
		TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
			if ((pg->flags & (PG_FAKE | PG_MARKER)) != 0) {
				continue;
			}
			if ((pg->flags & PG_CLEAN) == 0) {
				printf("%s: %p: !CLEAN\n", __func__, pg);
			}
			if (pmap_is_modified(pg)) {
				printf("%s: %p: modified\n", __func__, pg);
			}
		}
#endif /* defined(DEBUG) */
		vp->v_iflag &= ~VI_WRMAPDIRTY;
		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
			vn_syncer_remove_from_worklist(vp);
	}

#if !defined(DEBUG)
skip_scan:
#endif /* !defined(DEBUG) */

	/* Wait for output to complete. */
	if (!wasclean && !async && vp->v_numoutput != 0) {
		while (vp->v_numoutput != 0)
			cv_wait(&vp->v_cv, slock);
	}
	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
	mutex_exit(slock);

	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
		/*
		 * in the case of PGO_RECLAIM, make sure the vnode ends up
		 * clean.  retrying is not a big deal because, in many cases,
		 * uobj->uo_npages is already 0 here.
		 */
		mutex_enter(slock);
		goto retry;
	}

	if (trans_mp) {
		if (holds_wapbl)
			WAPBL_END(trans_mp);
		fstrans_done(trans_mp);
	}

	return (error);
}

/*
 * Default putrange method for file systems that do not care
 * how many pages are given to one GOP_WRITE() call.
 */
void
genfs_gop_putrange(struct vnode *vp, off_t off, off_t *lop, off_t *hip)
{

	*lop = 0;
	*hip = 0;
}
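
/*
 * Illustrative sketch (hypothetical "examplefs"): a file system that
 * wants each GOP_WRITE() cluster confined to a single MAXPHYS-sized
 * window could supply its own method instead:
 *
 *	static void
 *	examplefs_gop_putrange(struct vnode *vp, off_t off,
 *	    off_t *lop, off_t *hip)
 *	{
 *
 *		*lop = trunc_page(off);
 *		*hip = *lop + MAXPHYS;
 *	}
 *
 * Any range returned here is further clipped by genfs_do_putpages() so
 * that the cluster fits in its on-stack page array.
 */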

int
genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	off_t off;
	vaddr_t kva;
	size_t len;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx pgs %#jx npages %jd flags 0x%jx",
	    (uintptr_t)vp, (uintptr_t)pgs, npages, flags);

	off = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
	len = npages << PAGE_SHIFT;

	KASSERT(uvm.aiodone_queue != NULL);
	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
			    uvm_aio_biodone);

	return error;
}

int
genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	off_t off;
	vaddr_t kva;
	size_t len;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %#jx pgs %#jx npages %jd flags 0x%jx",
	    (uintptr_t)vp, (uintptr_t)pgs, npages, flags);

	off = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	len = npages << PAGE_SHIFT;

	KASSERT(uvm.aiodone_queue != NULL);
	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
			    uvm_aio_biodone);

	return error;
}

/*
 * Backend routine for doing I/O to vnode pages.  Pages are already locked
 * and mapped into kernel memory.  Here we just look up the underlying
 * device block addresses and call the strategy routine.
 */
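
/*
 * Worked example (illustrative): with fs_bshift == 13 and
 * dev_bshift == 9, offset 0x3200 falls in lbn 1 (0x3200 >> 13).  If
 * VOP_BMAP() maps that lbn to device block B, the partial-block
 * adjustment below computes b_blkno = B + ((0x3200 - 0x2000) >> 9),
 * i.e. B + 9.
 */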

static int
genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
    enum uio_rw rw, void (*iodone)(struct buf *))
{
	int s, error;
	int fs_bshift, dev_bshift;
	off_t eof, offset, startoffset;
	size_t bytes, iobytes, skipbytes;
	struct buf *mbp, *bp;
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool lazy = (flags & PGO_LAZY) != 0;
   1421 	const bool iowrite = rw == UIO_WRITE;
   1422 	const int brw = iowrite ? B_WRITE : B_READ;
   1423 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1424 
   1425 	UVMHIST_LOG(ubchist, "vp %#jx kva %#jx len 0x%jx flags 0x%jx",
   1426 	    (uintptr_t)vp, (uintptr_t)kva, len, flags);
   1427 
   1428 	KASSERT(vp->v_size <= vp->v_writesize);
   1429 	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
   1430 	if (vp->v_type != VBLK) {
   1431 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1432 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1433 	} else {
   1434 		fs_bshift = DEV_BSHIFT;
   1435 		dev_bshift = DEV_BSHIFT;
   1436 	}
   1437 	error = 0;
   1438 	startoffset = off;
   1439 	bytes = MIN(len, eof - startoffset);
   1440 	skipbytes = 0;
   1441 	KASSERT(bytes != 0);
   1442 
   1443 	if (iowrite) {
   1444 		mutex_enter(vp->v_interlock);
   1445 		vp->v_numoutput += 2;
   1446 		mutex_exit(vp->v_interlock);
   1447 	}
   1448 	mbp = getiobuf(vp, true);
   1449 	UVMHIST_LOG(ubchist, "vp %#jx mbp %#jx num now %jd bytes 0x%jx",
   1450 	    (uintptr_t)vp, (uintptr_t)mbp, vp->v_numoutput, bytes);
   1451 	mbp->b_bufsize = len;
   1452 	mbp->b_data = (void *)kva;
   1453 	mbp->b_resid = mbp->b_bcount = bytes;
   1454 	mbp->b_cflags = BC_BUSY | BC_AGE;
   1455 	if (async) {
   1456 		mbp->b_flags = brw | B_ASYNC;
   1457 		mbp->b_iodone = iodone;
   1458 	} else {
   1459 		mbp->b_flags = brw;
   1460 		mbp->b_iodone = NULL;
   1461 	}
   1462 	if (curlwp == uvm.pagedaemon_lwp)
   1463 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
   1464 	else if (async || lazy)
   1465 		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
   1466 	else
   1467 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
   1468 
   1469 	bp = NULL;
   1470 	for (offset = startoffset;
   1471 	    bytes > 0;
   1472 	    offset += iobytes, bytes -= iobytes) {
   1473 		int run;
   1474 		daddr_t lbn, blkno;
   1475 		struct vnode *devvp;
   1476 
   1477 		/*
   1478 		 * bmap the file to find out the blkno to read from and
   1479 		 * how much we can read in one i/o.  if bmap returns an error,
   1480 		 * skip the rest of the top-level i/o.
   1481 		 */
   1482 
   1483 		lbn = offset >> fs_bshift;
   1484 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1485 		if (error) {
   1486 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%jx -> %jd\n",
   1487 			    lbn, error, 0, 0);
   1488 			skipbytes += bytes;
   1489 			bytes = 0;
   1490 			goto loopdone;
   1491 		}
   1492 
   1493 		/*
   1494 		 * see how many pages can be read with this i/o.
   1495 		 * reduce the i/o size if necessary to avoid
   1496 		 * overwriting pages with valid data.
   1497 		 */
   1498 
   1499 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1500 		    bytes);
   1501 
   1502 		/*
   1503 		 * if this block isn't allocated, zero it instead of
   1504 		 * reading it.  unless we are going to allocate blocks,
   1505 		 * mark the pages we zeroed PG_RDONLY.
   1506 		 */
   1507 
   1508 		if (blkno == (daddr_t)-1) {
   1509 			if (!iowrite) {
   1510 				memset((char *)kva + (offset - startoffset), 0,
   1511 				    iobytes);
   1512 			}
   1513 			skipbytes += iobytes;
   1514 			continue;
   1515 		}
   1516 
   1517 		/*
   1518 		 * allocate a sub-buf for this piece of the i/o
   1519 		 * (or just use mbp if there's only 1 piece),
   1520 		 * and start it going.
   1521 		 */
   1522 
   1523 		if (offset == startoffset && iobytes == bytes) {
   1524 			bp = mbp;
   1525 		} else {
   1526 			UVMHIST_LOG(ubchist, "vp %#jx bp %#jx num now %jd",
   1527 			    (uintptr_t)vp, (uintptr_t)bp, vp->v_numoutput, 0);
   1528 			bp = getiobuf(vp, true);
   1529 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
   1530 		}
   1531 		bp->b_lblkno = 0;
   1532 
   1533 		/* adjust physical blkno for partial blocks */
   1534 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1535 		    dev_bshift);
   1536 
   1537 		UVMHIST_LOG(ubchist,
   1538 		    "bp %#jx offset 0x%jx bcount 0x%jx blkno 0x%jx",
   1539 		    (uintptr_t)bp, offset, bp->b_bcount, bp->b_blkno);
   1540 
   1541 		VOP_STRATEGY(devvp, bp);
   1542 	}
   1543 
   1544 loopdone:
   1545 	if (skipbytes) {
   1546 		UVMHIST_LOG(ubchist, "skipbytes %jd", skipbytes, 0,0,0);
   1547 	}
   1548 	nestiobuf_done(mbp, skipbytes, error);
   1549 	if (async) {
   1550 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1551 		return (0);
   1552 	}
   1553 	UVMHIST_LOG(ubchist, "waiting for mbp %#jx", (uintptr_t)mbp, 0, 0, 0);
   1554 	error = biowait(mbp);
   1555 	s = splbio();
   1556 	(*iodone)(mbp);
   1557 	splx(s);
   1558 	UVMHIST_LOG(ubchist, "returning, error %jd", error, 0, 0, 0);
   1559 	return (error);
   1560 }
   1561 
   1562 int
   1563 genfs_compat_getpages(void *v)
   1564 {
   1565 	struct vop_getpages_args /* {
   1566 		struct vnode *a_vp;
   1567 		voff_t a_offset;
   1568 		struct vm_page **a_m;
   1569 		int *a_count;
   1570 		int a_centeridx;
   1571 		vm_prot_t a_access_type;
   1572 		int a_advice;
   1573 		int a_flags;
   1574 	} */ *ap = v;
   1575 
   1576 	off_t origoffset;
   1577 	struct vnode *vp = ap->a_vp;
   1578 	struct uvm_object *uobj = &vp->v_uobj;
   1579 	struct vm_page *pg, **pgs;
   1580 	vaddr_t kva;
   1581 	int i, error, orignpages, npages;
   1582 	struct iovec iov;
   1583 	struct uio uio;
   1584 	kauth_cred_t cred = curlwp->l_cred;
   1585 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1586 
   1587 	error = 0;
   1588 	origoffset = ap->a_offset;
   1589 	orignpages = *ap->a_count;
   1590 	pgs = ap->a_m;
   1591 
   1592 	if (ap->a_flags & PGO_LOCKED) {
   1593 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
   1594 		    UFP_NOWAIT|UFP_NOALLOC| (memwrite ? UFP_NORDONLY : 0));
   1595 
   1596 		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
   1597 		if (error == 0 && memwrite) {
   1598 			genfs_markdirty(vp);
   1599 		}
   1600 		return error;
   1601 	}
   1602 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1603 		mutex_exit(uobj->vmobjlock);
   1604 		return EINVAL;
   1605 	}
   1606 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
   1607 		mutex_exit(uobj->vmobjlock);
   1608 		return 0;
   1609 	}
   1610 	npages = orignpages;
   1611 	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
   1612 	mutex_exit(uobj->vmobjlock);
   1613 	kva = uvm_pagermapin(pgs, npages,
   1614 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1615 	for (i = 0; i < npages; i++) {
   1616 		pg = pgs[i];
   1617 		if ((pg->flags & PG_FAKE) == 0) {
   1618 			continue;
   1619 		}
   1620 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   1621 		iov.iov_len = PAGE_SIZE;
   1622 		uio.uio_iov = &iov;
   1623 		uio.uio_iovcnt = 1;
   1624 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   1625 		uio.uio_rw = UIO_READ;
   1626 		uio.uio_resid = PAGE_SIZE;
   1627 		UIO_SETUP_SYSSPACE(&uio);
   1628 		/* XXX vn_lock */
   1629 		error = VOP_READ(vp, &uio, 0, cred);
   1630 		if (error) {
   1631 			break;
   1632 		}
   1633 		if (uio.uio_resid) {
   1634 			memset(iov.iov_base, 0, uio.uio_resid);
   1635 		}
   1636 	}
   1637 	uvm_pagermapout(kva, npages);
   1638 	mutex_enter(uobj->vmobjlock);
   1639 	for (i = 0; i < npages; i++) {
   1640 		pg = pgs[i];
   1641 		if (error && (pg->flags & PG_FAKE) != 0) {
   1642 			pg->flags |= PG_RELEASED;
   1643 		} else {
   1644 			pmap_clear_modify(pg);
   1645 			uvm_pageactivate(pg);
   1646 		}
   1647 	}
   1648 	if (error) {
   1649 		uvm_page_unbusy(pgs, npages);
   1650 	}
   1651 	if (error == 0 && memwrite) {
   1652 		genfs_markdirty(vp);
   1653 	}
   1654 	mutex_exit(uobj->vmobjlock);
   1655 	return error;
   1656 }
   1657 
   1658 int
   1659 genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1660     int flags)
   1661 {
   1662 	off_t offset;
   1663 	struct iovec iov;
   1664 	struct uio uio;
   1665 	kauth_cred_t cred = curlwp->l_cred;
   1666 	struct buf *bp;
   1667 	vaddr_t kva;
   1668 	int error;
   1669 
   1670 	offset = pgs[0]->offset;
   1671 	kva = uvm_pagermapin(pgs, npages,
   1672 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1673 
   1674 	iov.iov_base = (void *)kva;
   1675 	iov.iov_len = npages << PAGE_SHIFT;
   1676 	uio.uio_iov = &iov;
   1677 	uio.uio_iovcnt = 1;
   1678 	uio.uio_offset = offset;
   1679 	uio.uio_rw = UIO_WRITE;
   1680 	uio.uio_resid = npages << PAGE_SHIFT;
   1681 	UIO_SETUP_SYSSPACE(&uio);
   1682 	/* XXX vn_lock */
   1683 	error = VOP_WRITE(vp, &uio, 0, cred);
   1684 
   1685 	mutex_enter(vp->v_interlock);
   1686 	vp->v_numoutput++;
   1687 	mutex_exit(vp->v_interlock);
   1688 
   1689 	bp = getiobuf(vp, true);
   1690 	bp->b_cflags = BC_BUSY | BC_AGE;
   1691 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   1692 	bp->b_data = (char *)kva;
   1693 	bp->b_bcount = npages << PAGE_SHIFT;
   1694 	bp->b_bufsize = npages << PAGE_SHIFT;
   1695 	bp->b_resid = 0;
   1696 	bp->b_error = error;
   1697 	uvm_aio_aiodone(bp);
   1698 	return (error);
   1699 }
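
/*
 * Illustrative sketch only: such a filesystem would typically select
 * genfs_compat_gop_write in its genfs_ops when initializing a vnode.
 * The "xyzfs" names are hypothetical.
 */
#if 0
static const struct genfs_ops xyzfs_genfsops = {
	.gop_size = genfs_size,
	.gop_write = genfs_compat_gop_write,
};

static void
xyzfs_init_vnode(struct vnode *vp)
{

	genfs_node_init(vp, &xyzfs_genfsops);
}
#endif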

/*
 * Process a uio using direct I/O.  If we reach a part of the request
 * that cannot be processed in this fashion for some reason, just return.
 * The caller must then handle the remaining part of the request with
 * buffered I/O before trying direct I/O again.
 */

void
genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct vmspace *vs;
	struct iovec *iov;
	vaddr_t va;
	size_t len;
	const int mask = DEV_BSIZE - 1;
	int error;
	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
	    (ioflag & IO_JOURNALLOCKED) == 0);

#ifdef DIAGNOSTIC
	if ((ioflag & IO_JOURNALLOCKED) && vp->v_mount->mnt_wapbl)
		WAPBL_JLOCK_ASSERT(vp->v_mount);
#endif

	/*
	 * We only support direct I/O to user space for now.
	 */

	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
		return;
	}

	/*
	 * If the vnode is mapped, we would need to get the getpages lock
	 * to stabilize the bmap, but then we would get into trouble while
	 * locking the pages if the pages belong to this same vnode (or a
	 * multi-vnode cascade to the same effect).  Just fall back to
	 * buffered I/O if the vnode is mapped to avoid this mess.
	 */

	if (vp->v_vflag & VV_MAPPED) {
		return;
	}

	if (need_wapbl) {
		error = WAPBL_BEGIN(vp->v_mount);
		if (error)
			return;
	}

	/*
	 * Do as much of the uio as possible with direct I/O.
	 */

	vs = uio->uio_vmspace;
	while (uio->uio_resid) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		va = (vaddr_t)iov->iov_base;
		len = MIN(iov->iov_len, genfs_maxdio);
		len &= ~mask;
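
		/*
		 * Worked example (illustrative): with DEV_BSIZE 512 and
		 * genfs_maxdio at its MAXPHYS default (typically 64KB),
		 * an iovec of 65535 bytes survives the MIN() unchanged
		 * and is then rounded down to 65024 (0xFE00) by clearing
		 * the low 9 bits.
		 */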

		/*
		 * If the next chunk is smaller than DEV_BSIZE or extends past
		 * the current EOF, then fall back to buffered I/O.
		 */

		if (len == 0 || uio->uio_offset + len > vp->v_size) {
			break;
		}

		/*
		 * Check alignment.  The file offset must be at least
		 * sector-aligned.  The exact constraint on memory alignment
		 * is very hardware-dependent, but requiring sector-aligned
		 * addresses there too is safe.
		 */

		if (uio->uio_offset & mask || va & mask) {
			break;
		}
		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
					  uio->uio_rw);
		if (error) {
			break;
		}
		iov->iov_base = (char *)iov->iov_base + len;
		iov->iov_len -= len;
		uio->uio_offset += len;
		uio->uio_resid -= len;
	}

	if (need_wapbl)
		WAPBL_END(vp->v_mount);
}
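
/*
 * Illustrative sketch only: how a filesystem's read path might invoke
 * genfs_directio and then let its normal buffered loop consume whatever
 * the direct path could not handle.  "xyzfs_read" and
 * "xyzfs_buffered_read" are hypothetical.
 */
#if 0
int
xyzfs_read(void *v)
{
	struct vop_read_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;

	if ((ap->a_ioflag & IO_DIRECT) != 0)
		genfs_directio(vp, uio, ap->a_ioflag);
	if (uio->uio_resid == 0)
		return 0;

	/* buffered path picks up any unaligned or trailing remainder */
	return xyzfs_buffered_read(vp, uio, ap->a_ioflag);
}
#endif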

/*
 * Iodone routine for direct I/O.  We don't do much here since the request is
 * always synchronous, so the caller will do most of the work after biowait().
 */

static void
genfs_dio_iodone(struct buf *bp)
{

	KASSERT((bp->b_flags & B_ASYNC) == 0);
	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}

/*
 * Process one chunk of a direct I/O request.
 */

static int
genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
    off_t off, enum uio_rw rw)
{
	struct vm_map *map;
	struct pmap *upm, *kpm __unused;
	size_t klen = round_page(uva + len) - trunc_page(uva);
	off_t spoff, epoff;
	vaddr_t kva, puva;
	paddr_t pa;
	vm_prot_t prot;
	int error, rv __diagused, poff, koff;
	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
		(rw == UIO_WRITE ? PGO_FREE : 0);

	/*
	 * For writes, verify that this range of the file already has fully
	 * allocated backing store.  If there are any holes, just punt and
	 * make the caller take the buffered write path.
	 */

	if (rw == UIO_WRITE) {
		daddr_t lbn, elbn, blkno;
		int bsize, bshift, run;

		bshift = vp->v_mount->mnt_fs_bshift;
		bsize = 1 << bshift;
		lbn = off >> bshift;
		elbn = (off + len + bsize - 1) >> bshift;
		while (lbn < elbn) {
			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
			if (error) {
				return error;
			}
			if (blkno == (daddr_t)-1) {
				return ENOSPC;
			}
			lbn += 1 + run;
		}
	}
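
	/*
	 * Worked example of the walk above (illustrative): with 8KB
	 * filesystem blocks (bshift 13), off 0x10000 and len 0x8000 give
	 * lbn 8 and elbn 12.  If VOP_BMAP maps lbn 8 with run 3 (blocks
	 * 8-11 contiguous and allocated), a single iteration covers the
	 * whole range and the loop exits with lbn 12.
	 */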

	/*
	 * Flush any cached pages for parts of the file that we're about to
	 * access.  If we're writing, invalidate pages as well.
	 */

	spoff = trunc_page(off);
	epoff = round_page(off + len);
	mutex_enter(vp->v_interlock);
	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
	if (error) {
		return error;
	}

	/*
	 * Wire the user pages and remap them into kernel memory.
	 */

	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
	error = uvm_vslock(vs, (void *)uva, len, prot);
	if (error) {
		return error;
	}

	map = &vs->vm_map;
	upm = vm_map_pmap(map);
	kpm = vm_map_pmap(kernel_map);
	puva = trunc_page(uva);
	kva = uvm_km_alloc(kernel_map, klen, atop(puva) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
		rv = pmap_extract(upm, puva + poff, &pa);
		KASSERT(rv);
		pmap_kenter_pa(kva + poff, pa, prot, PMAP_WIRED);
	}
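
	/*
	 * Illustrative address arithmetic: with 4KB pages, a user buffer
	 * at uva 0x20001234 with len 0x1800 gives puva 0x20001000 and
	 * klen = round_page(0x20002a34) - 0x20001000 = 0x2000, so two
	 * pages are mapped, and the I/O below starts at kva + 0x234.
	 */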
	pmap_update(kpm);

	/*
	 * Do the I/O.
	 */

	koff = uva - trunc_page(uva);
	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
			    genfs_dio_iodone);

	/*
	 * Tear down the kernel mapping.
	 */

	pmap_kremove(kva, klen);
	pmap_update(kpm);
	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);

	/*
	 * Unwire the user pages.
	 */

	uvm_vsunlock(vs, (void *)uva, len);
	return error;
}