/*	$NetBSD: genfs_io.c,v 1.47 2011/04/18 15:53:04 rmind Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.47 2011/04/18 15:53:04 rmind Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/kmem.h>
#include <sys/poll.h>
#include <sys/mman.h>
#include <sys/file.h>
#include <sys/kauth.h>
#include <sys/fstrans.h>
#include <sys/buf.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>
#include <miscfs/syncfs/syncfs.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>

static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
    off_t, enum uio_rw);
static void genfs_dio_iodone(struct buf *);

static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
    void (*)(struct buf *));
static void genfs_rel_pages(struct vm_page **, int);
static void genfs_markdirty(struct vnode *);

int genfs_maxdio = MAXPHYS;
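
/*
 * Editorial note: unbusy the pages in pgs[]; pages still marked PG_FAKE
 * never became valid, so flag them PG_RELEASED first and let
 * uvm_page_unbusy() free them.
 */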
static void
genfs_rel_pages(struct vm_page **pgs, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		if (pg == NULL || pg == PGO_DONTCARE)
			continue;
		if (pg->flags & PG_FAKE) {
			pg->flags |= PG_RELEASED;
		}
	}
	mutex_enter(&uvm_pageqlock);
	uvm_page_unbusy(pgs, npages);
	mutex_exit(&uvm_pageqlock);
}
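
/*
 * Editorial note: record that this vnode may now have dirty pages: bump
 * the dirty generation, make sure the vnode is on the syncer worklist,
 * and mark writably-mapped vnodes dirty.
 */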
static void
genfs_markdirty(struct vnode *vp)
{
	struct genfs_node * const gp = VTOG(vp);

	KASSERT(mutex_owned(&vp->v_interlock));
	gp->g_dirtygen++;
	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
		vp->v_iflag |= VI_WRMAPDIRTY;
	}
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */

int
genfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ * const ap = v;

	off_t diskeof, memeof;
	int i, error, npages;
	const int flags = ap->a_flags;
	struct vnode * const vp = ap->a_vp;
	struct uvm_object * const uobj = &vp->v_uobj;
	kauth_cred_t const cred = curlwp->l_cred;		/* XXXUBC curlwp */
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
	const bool glocked = (flags & PGO_GLOCKHELD) != 0;
	const bool need_wapbl = blockalloc && vp->v_mount->mnt_wapbl;
	bool has_trans_wapbl = false;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
	    vp->v_type == VLNK || vp->v_type == VBLK);

startover:
	error = 0;
	const voff_t origvsize = vp->v_size;
	const off_t origoffset = ap->a_offset;
	const int orignpages = *ap->a_count;

	GOP_SIZE(vp, origvsize, &diskeof, 0);
	if (flags & PGO_PASTEOF) {
		off_t newsize;
#if defined(DIAGNOSTIC)
		off_t writeeof;
#endif /* defined(DIAGNOSTIC) */

		newsize = MAX(origvsize,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
#if defined(DIAGNOSTIC)
		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
		if (newsize > round_page(writeeof)) {
			panic("%s: past eof: %" PRId64 " vs. %" PRId64,
			    __func__, newsize, round_page(writeeof));
		}
#endif /* defined(DIAGNOSTIC) */
	} else {
		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			mutex_exit(&uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
		    origoffset, *ap->a_count, memeof,0);
		error = EINVAL;
		goto out_err;
	}

	/* uobj is locked */

	if ((flags & PGO_NOTIMESTAMP) == 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		int updflags = 0;

		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
			updflags = GOP_UPDATE_ACCESSED;
		}
		if (memwrite) {
			updflags |= GOP_UPDATE_MODIFIED;
		}
		if (updflags != 0) {
			GOP_MARKUPDATE(vp, updflags);
		}
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		int nfound;
		struct vm_page *pg;

		KASSERT(!glocked);
		npages = *ap->a_count;
#if defined(DEBUG)
		for (i = 0; i < npages; i++) {
			pg = ap->a_m[i];
			KASSERT(pg == NULL || pg == PGO_DONTCARE);
		}
#endif /* defined(DEBUG) */
		nfound = uvn_findpages(uobj, origoffset, &npages,
		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
		KASSERT(npages == *ap->a_count);
		if (nfound == 0) {
			error = EBUSY;
			goto out_err;
		}
		if (!genfs_node_rdtrylock(vp)) {
			genfs_rel_pages(ap->a_m, npages);

			/*
			 * restore the array.
			 */

			for (i = 0; i < npages; i++) {
				pg = ap->a_m[i];

				if (pg != NULL && pg != PGO_DONTCARE) {
					ap->a_m[i] = NULL;
				}
				KASSERT(ap->a_m[i] == NULL ||
				    ap->a_m[i] == PGO_DONTCARE);
			}
		} else {
			genfs_node_unlock(vp);
		}
		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
		if (error == 0 && memwrite) {
			genfs_markdirty(vp);
		}
		goto out_err;
	}
	mutex_exit(&uobj->vmobjlock);

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	const int fs_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
	const int dev_bshift = (vp->v_type != VBLK) ?
	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
	const int fs_bsize = 1 << fs_bshift;
#define	blk_mask	(fs_bsize - 1)
#define	trunc_blk(x)	((x) & ~blk_mask)
#define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)
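
/*
 * Worked example (editorial): with an 8 KB file system block
 * (fs_bshift == 13), fs_bsize is 0x2000 and blk_mask is 0x1fff, so
 * trunc_blk(0x2345) == 0x2000 and round_blk(0x2345) == 0x4000.
 */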

	const int orignmempages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignmempages;
	const off_t startoffset = trunc_blk(origoffset);
	const off_t endoffset = MIN(
	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
	    round_page(memeof));
	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;

	const int pgs_size = sizeof(struct vm_page *) *
	    ((endoffset - startoffset) >> PAGE_SHIFT);
	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];

	if (pgs_size > sizeof(pgs_onstack)) {
		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
		if (pgs == NULL) {
			pgs = pgs_onstack;
			error = ENOMEM;
			goto out_err;
		}
	} else {
		pgs = pgs_onstack;
		(void)memset(pgs, 0, pgs_size);
	}

	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
	    ridx, npages, startoffset, endoffset);

	if (!has_trans_wapbl) {
		fstrans_start(vp->v_mount, FSTRANS_SHARED);
		/*
		 * XXX: This assumes that we come here only via
		 * the mmio path
		 */
		if (need_wapbl) {
			error = WAPBL_BEGIN(vp->v_mount);
			if (error) {
				fstrans_done(vp->v_mount);
				goto out_err_free;
			}
		}
		has_trans_wapbl = true;
	}

	/*
	 * hold g_glock to prevent a race with truncate.
	 *
	 * check if our idea of v_size is still valid.
	 */

	KASSERT(!glocked || genfs_node_wrlocked(vp));
	if (!glocked) {
		if (blockalloc) {
			genfs_node_wrlock(vp);
		} else {
			genfs_node_rdlock(vp);
		}
	}
	mutex_enter(&uobj->vmobjlock);
	if (vp->v_size < origvsize) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		if (pgs != pgs_onstack)
			kmem_free(pgs, pgs_size);
		goto startover;
	}

	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		KASSERT(async != 0);
		genfs_rel_pages(&pgs[ridx], orignmempages);
		mutex_exit(&uobj->vmobjlock);
		error = EBUSY;
		goto out_err_free;
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[ridx + i];

		if ((pg->flags & PG_FAKE) ||
		    (blockalloc && (pg->flags & PG_RDONLY))) {
			break;
		}
	}
	if (i == npages) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		npages += ridx;
		goto out;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (overwrite) {
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			pg->flags &= ~(PG_RDONLY|PG_CLEAN);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignmempages) {
		int npgs;

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		genfs_rel_pages(&pgs[ridx], orignmempages);
		memset(pgs, 0, pgs_size);

		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
			if (!glocked) {
				genfs_node_unlock(vp);
			}
			KASSERT(async != 0);
			genfs_rel_pages(pgs, npages);
			mutex_exit(&uobj->vmobjlock);
			error = EBUSY;
			goto out_err_free;
		}
	}

	mutex_exit(&uobj->vmobjlock);

    {
	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	bool sawhole = false;

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);

	mbp = getiobuf(vp, true);
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_cflags = BC_BUSY;
	if (async) {
		mbp->b_flags = B_READ | B_ASYNC;
		mbp->b_iodone = uvm_aio_biodone;
	} else {
		mbp->b_flags = B_READ;
		mbp->b_iodone = NULL;
	}
	if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * skip over pages which are not PG_FAKE since in that case they have
	 * valid data that we need to preserve.
	 */
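
	/*
	 * Worked example (editorial, assuming 4 KB pages): with
	 * totalbytes 0x2000 and bytes 0x1388, tailbytes is 0xc78;
	 * the first iteration zeroes from kva + 0x1388 up to the page
	 * boundary at 0x2000, which here is the whole tail.
	 */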

	tailstart = bytes;
	while (tailbytes > 0) {
		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);

		KASSERT(len <= tailbytes);
		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
			memset((void *)(kva + tailstart), 0, len);
			UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
			    kva, tailstart, len, 0);
		}
		tailstart += len;
		tailbytes -= len;
	}

	/*
	 * now loop over the pages, reading as needed.
	 */

	bp = NULL;
	off_t offset;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		int run;
		daddr_t lbn, blkno;
		int pidx;
		struct vnode *devvp;

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			if ((pgs[pidx]->flags & PG_RDONLY)) {
				sawhole = true;
			}
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
			    lbn,error,0,0);
			skipbytes += bytes;
			bytes = 0;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			int pcount;

			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno == (daddr_t)-1) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);

			sawhole = true;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			mutex_enter(&uobj->vmobjlock);
			for (i = 0; i < holepages; i++) {
				if (memwrite) {
					pgs[pidx + i]->flags &= ~PG_CLEAN;
				}
				if (!blockalloc) {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
			}
			mutex_exit(&uobj->vmobjlock);
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
			    vp, bp, vp->v_numoutput, 0);
			bp = getiobuf(vp, true);
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);

		UVMHIST_LOG(ubchist,
		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    bp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		if (!glocked) {
			genfs_node_unlock(vp);
		}
		error = 0;
		goto out_err_free;
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}

	/* Remove the mapping (make KVA available as soon as possible) */
	uvm_pagermapout(kva, npages);

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && blockalloc) {
		error = GOP_ALLOC(vp, startoffset,
		    npages << PAGE_SHIFT, 0, cred);
		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
		    startoffset, npages << PAGE_SHIFT, error,0);
		if (!error) {
			mutex_enter(&uobj->vmobjlock);
			for (i = 0; i < npages; i++) {
				struct vm_page *pg = pgs[i];

				if (pg == NULL) {
					continue;
				}
				pg->flags &= ~(PG_CLEAN|PG_RDONLY);
				UVMHIST_LOG(ubchist, "mark dirty pg %p",
				    pg,0,0,0);
			}
			mutex_exit(&uobj->vmobjlock);
		}
	}
	if (!glocked) {
		genfs_node_unlock(vp);
	}

	putiobuf(mbp);
    }

	mutex_enter(&uobj->vmobjlock);

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[i];

			if (pg == NULL) {
				continue;
			}
			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
			    pg, pg->flags, 0,0);
			if (pg->flags & PG_FAKE) {
				pg->flags |= PG_RELEASED;
			}
		}
		mutex_enter(&uvm_pageqlock);
		uvm_page_unbusy(pgs, npages);
		mutex_exit(&uvm_pageqlock);
		mutex_exit(&uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
		goto out_err_free;
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
	error = 0;
	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
		    pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			pg->flags &= ~(PG_FAKE);
			pmap_clear_modify(pgs[i]);
		}
		KASSERT(!memwrite || !blockalloc || (pg->flags & PG_RDONLY) == 0);
		if (i < ridx || i >= ridx + orignmempages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
			    pg, pg->offset,0,0);
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_FAKE) {
				KASSERT(overwrite);
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pageenqueue(pg);
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
	mutex_exit(&uvm_pageqlock);
	if (memwrite) {
		genfs_markdirty(vp);
	}
	mutex_exit(&uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignmempages * sizeof(struct vm_page *));
	}

out_err_free:
	if (pgs != NULL && pgs != pgs_onstack)
		kmem_free(pgs, pgs_size);
out_err:
	if (has_trans_wapbl) {
		if (need_wapbl)
			WAPBL_END(vp->v_mount);
		fstrans_done(vp->v_mount);
	}
	return error;
}
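
/*
 * Illustrative sketch (editorial, not part of the original code): how a
 * hypothetical caller might fetch a single busy page via VOP_GETPAGES().
 * The helper name and flag choices are assumptions; the locking shown
 * (enter v_interlock, which genfs_getpages() releases before returning)
 * matches the routine above.
 */
#if 0
static int
example_get_one_page(struct vnode *vp, voff_t off, struct vm_page **pgp)
{
	struct vm_page *pg = NULL;
	int npages = 1;
	int error;

	mutex_enter(&vp->v_interlock);
	error = VOP_GETPAGES(vp, trunc_page(off), &pg, &npages, 0,
	    VM_PROT_READ, 0, PGO_SYNCIO);
	if (error)
		return error;
	*pgp = pg;	/* returned PG_BUSY; the caller must unbusy it */
	return 0;
}
#endif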

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.  we return with the
 *      object unlocked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *	thus, a caller might want to unlock higher level resources
 *	(e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.   thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the page queue lock.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.   the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).    if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.    if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).    in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).   also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	page by page, doing hash table lookups for each address.   depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.   we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.   however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	range by an estimate of the relatively higher cost of the hash lookup.
 */

int
genfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ * const ap = v;

	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
	    ap->a_flags, NULL);
}
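
/*
 * Illustrative sketch (editorial assumption, not original code): a typical
 * synchronous whole-vnode flush through the wrapper above.  VOP_PUTPAGES()
 * expects v_interlock held on entry and always returns with it released.
 */
#if 0
static int
example_flush_vnode(struct vnode *vp)
{

	mutex_enter(&vp->v_interlock);
	return VOP_PUTPAGES(vp, 0, 0, PGO_ALLPAGES | PGO_CLEANIT | PGO_SYNCIO);
}
#endif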

int
genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
    int origflags, struct vm_page **busypg)
{
	struct uvm_object * const uobj = &vp->v_uobj;
	kmutex_t * const slock = &uobj->vmobjlock;
	off_t off;
	/* Even for strange MAXPHYS, the shift rounds down to a page */
#define maxpages (MAXPHYS >> PAGE_SHIFT)
	int i, error, npages, nback;
	int freeflag;
	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
	bool wasclean, by_list, needs_clean, yld;
	bool async = (origflags & PGO_SYNCIO) == 0;
	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
	struct lwp * const l = curlwp ? curlwp : &lwp0;
	struct genfs_node * const gp = VTOG(vp);
	int flags;
	int dirtygen;
	bool modified;
	bool need_wapbl;
	bool has_trans;
	bool cleanall;
	bool onworklst;

	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
	KASSERT(startoff < endoff || endoff == 0);

	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
	    vp, uobj->uo_npages, startoff, endoff - startoff);

	has_trans = false;
	need_wapbl = (!pagedaemon && vp->v_mount && vp->v_mount->mnt_wapbl &&
	    (origflags & PGO_JOURNALLOCKED) == 0);

retry:
	modified = false;
	flags = origflags;
	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
	if (uobj->uo_npages == 0) {
		if (vp->v_iflag & VI_ONWORKLST) {
			vp->v_iflag &= ~VI_WRMAPDIRTY;
			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
				vn_syncer_remove_from_worklist(vp);
		}
		if (has_trans) {
			if (need_wapbl)
				WAPBL_END(vp->v_mount);
			fstrans_done(vp->v_mount);
		}
		mutex_exit(slock);
		return (0);
	}

	/*
	 * the vnode has pages, set up to process the request.
	 */

	if (!has_trans && (flags & PGO_CLEANIT) != 0) {
		mutex_exit(slock);
		if (pagedaemon) {
			error = fstrans_start_nowait(vp->v_mount, FSTRANS_LAZY);
			if (error)
				return error;
		} else
			fstrans_start(vp->v_mount, FSTRANS_LAZY);
		if (need_wapbl) {
			error = WAPBL_BEGIN(vp->v_mount);
			if (error) {
				fstrans_done(vp->v_mount);
				return error;
			}
		}
		has_trans = true;
		mutex_enter(slock);
		goto retry;
	}

	error = 0;
	wasclean = (vp->v_numoutput == 0);
	off = startoff;
	if (endoff == 0 || flags & PGO_ALLPAGES) {
		endoff = trunc_page(LLONG_MAX);
	}
	by_list = (uobj->uo_npages <=
	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_TREE_PENALTY);

#if !defined(DEBUG)
	/*
	 * if this vnode is known not to have dirty pages,
	 * don't bother to clean it out.
	 */

	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
			goto skip_scan;
		}
		flags &= ~PGO_CLEANIT;
	}
#endif /* !defined(DEBUG) */

	/*
	 * start the loop.  when scanning by list, hold the last page
	 * in the list before we start.  pages allocated after we start
	 * will be added to the end of the list, so we can stop at the
	 * current last page.
	 */

	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
	    (vp->v_iflag & VI_ONWORKLST) != 0;
	dirtygen = gp->g_dirtygen;
	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
	if (by_list) {
		curmp.flags = PG_MARKER;
		endmp.flags = PG_MARKER;
		pg = TAILQ_FIRST(&uobj->memq);
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
	} else {
		pg = uvm_pagelookup(uobj, off);
	}
	nextpg = NULL;
	while (by_list || off < endoff) {

		/*
		 * if the current page is not interesting, move on to the next.
		 */

		KASSERT(pg == NULL || pg->uobject == uobj ||
		    (pg->flags & PG_MARKER) != 0);
		KASSERT(pg == NULL ||
		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		    (pg->flags & (PG_BUSY|PG_MARKER)) != 0);
		if (by_list) {
			if (pg == &endmp) {
				break;
			}
			if (pg->flags & PG_MARKER) {
				pg = TAILQ_NEXT(pg, listq.queue);
				continue;
			}
			if (pg->offset < startoff || pg->offset >= endoff ||
			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
					wasclean = false;
				}
				pg = TAILQ_NEXT(pg, listq.queue);
				continue;
			}
			off = pg->offset;
		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
			if (pg != NULL) {
				wasclean = false;
			}
			off += PAGE_SIZE;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if the current page needs to be cleaned and it's busy,
		 * wait for it to become unbusy.
		 */

		yld = (l->l_cpu->ci_schedstate.spc_flags &
		    SPCF_SHOULDYIELD) && !pagedaemon;
		if (pg->flags & PG_BUSY || yld) {
			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
				error = EDEADLK;
				if (busypg != NULL)
					*busypg = pg;
				break;
			}
			if (pagedaemon) {
				/*
				 * someone has taken the page while we
				 * dropped the lock for fstrans_start.
				 */
				break;
			}
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
				UVMHIST_LOG(ubchist, "curmp next %p",
				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
			}
			if (yld) {
				mutex_exit(slock);
				preempt();
				mutex_enter(slock);
			} else {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
				mutex_enter(slock);
			}
			if (by_list) {
				UVMHIST_LOG(ubchist, "after next %p",
				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
				pg = TAILQ_NEXT(&curmp, listq.queue);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
			} else {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if we're freeing, remove all mappings of the page now.
		 * if we're cleaning, check if the page needs to be cleaned.
		 */

		if (flags & PGO_FREE) {
			pmap_page_protect(pg, VM_PROT_NONE);
		} else if (flags & PGO_CLEANIT) {

			/*
			 * if we still have some hope to pull this vnode off
			 * from the syncer queue, write-protect the page.
			 */

			if (cleanall && wasclean &&
			    gp->g_dirtygen == dirtygen) {

				/*
				 * uobj pages get wired only by uvm_fault
				 * where uobj is locked.
				 */

				if (pg->wire_count == 0) {
					pmap_page_protect(pg,
					    VM_PROT_READ|VM_PROT_EXECUTE);
				} else {
					cleanall = false;
				}
			}
		}

		if (flags & PGO_CLEANIT) {
			needs_clean = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
		} else {
			needs_clean = false;
		}

		/*
		 * if we're cleaning, build a cluster.
		 * the cluster will consist of pages which are currently dirty,
		 * but they will be returned to us marked clean.
		 * if not cleaning, just operate on the one page.
		 */

		if (needs_clean) {
			KDASSERT((vp->v_iflag & VI_ONWORKLST));
			wasclean = false;
			memset(pgs, 0, sizeof(pgs));
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "genfs_putpages");

			/*
			 * first look backward.
			 */

			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
			nback = npages;
			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
			if (nback) {
				memmove(&pgs[0], &pgs[npages - nback],
				    nback * sizeof(pgs[0]));
				if (npages - nback < nback)
					memset(&pgs[nback], 0,
					    (npages - nback) * sizeof(pgs[0]));
				else
					memset(&pgs[npages - nback], 0,
					    nback * sizeof(pgs[0]));
			}

			/*
			 * then plug in our page of interest.
			 */

			pgs[nback] = pg;

			/*
			 * then look forward to fill in the remaining space in
			 * the array of pages.
			 */

			npages = maxpages - nback - 1;
			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
			    &pgs[nback + 1],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
			npages += nback + 1;
		} else {
			pgs[0] = pg;
			npages = 1;
			nback = 0;
		}

		/*
		 * apply FREE or DEACTIVATE options if requested.
		 */

		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			mutex_enter(&uvm_pageqlock);
		}
		for (i = 0; i < npages; i++) {
			tpg = pgs[i];
			KASSERT(tpg->uobject == uobj);
			if (by_list && tpg == TAILQ_NEXT(pg, listq.queue))
				pg = tpg;
			if (tpg->offset < startoff || tpg->offset >= endoff)
				continue;
			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
				uvm_pagedeactivate(tpg);
			} else if (flags & PGO_FREE) {
				pmap_page_protect(tpg, VM_PROT_NONE);
				if (tpg->flags & PG_BUSY) {
					tpg->flags |= freeflag;
					if (pagedaemon) {
						uvm_pageout_start(1);
						uvm_pagedequeue(tpg);
					}
				} else {

					/*
					 * ``page is not busy''
					 * implies that npages is 1
					 * and needs_clean is false.
					 */

					nextpg = TAILQ_NEXT(tpg, listq.queue);
					uvm_pagefree(tpg);
					if (pagedaemon)
						uvmexp.pdfreed++;
				}
			}
		}
		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			mutex_exit(&uvm_pageqlock);
		}
		if (needs_clean) {
			modified = true;

			/*
			 * start the i/o.  if we're traversing by list,
			 * keep our place in the list with a marker page.
			 */

			if (by_list) {
				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
				    listq.queue);
			}
			mutex_exit(slock);
			error = GOP_WRITE(vp, pgs, npages, flags);
			mutex_enter(slock);
			if (by_list) {
				pg = TAILQ_NEXT(&curmp, listq.queue);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
			}
			if (error) {
				break;
			}
			if (by_list) {
				continue;
			}
		}

		/*
		 * find the next page and continue if there was no error.
		 */

		if (by_list) {
			if (nextpg) {
				pg = nextpg;
				nextpg = NULL;
			} else {
				pg = TAILQ_NEXT(pg, listq.queue);
			}
		} else {
			off += (npages - nback) << PAGE_SHIFT;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
		}
	}
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
	}

	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
	}

	/*
	 * if we're cleaning and there was nothing to clean,
	 * take us off the syncer list.  if we started any i/o
	 * and we're doing sync i/o, wait for all writes to finish.
	 */

	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
	    (vp->v_iflag & VI_ONWORKLST) != 0) {
#if defined(DEBUG)
		TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
			if ((pg->flags & (PG_FAKE | PG_MARKER)) != 0) {
				continue;
			}
			if ((pg->flags & PG_CLEAN) == 0) {
				printf("%s: %p: !CLEAN\n", __func__, pg);
			}
			if (pmap_is_modified(pg)) {
				printf("%s: %p: modified\n", __func__, pg);
			}
		}
#endif /* defined(DEBUG) */
		vp->v_iflag &= ~VI_WRMAPDIRTY;
		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
			vn_syncer_remove_from_worklist(vp);
	}

#if !defined(DEBUG)
skip_scan:
#endif /* !defined(DEBUG) */

	/* Wait for output to complete. */
	if (!wasclean && !async && vp->v_numoutput != 0) {
		while (vp->v_numoutput != 0)
			cv_wait(&vp->v_cv, slock);
	}
	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
	mutex_exit(slock);

	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
		/*
		 * in the case of PGO_RECLAIM, ensure to make the vnode clean.
		 * retrying is not a big deal because, in many cases,
		 * uobj->uo_npages is already 0 here.
		 */
		mutex_enter(slock);
		goto retry;
	}

	if (has_trans) {
		if (need_wapbl)
			WAPBL_END(vp->v_mount);
		fstrans_done(vp->v_mount);
	}

	return (error);
}

int
genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	off_t off;
	vaddr_t kva;
	size_t len;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
	    vp, pgs, npages, flags);

	off = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
	len = npages << PAGE_SHIFT;

	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
			    uvm_aio_biodone);

	return error;
}
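
/*
 * Editorial note: same as genfs_gop_write() except that the pages are
 * mapped with UVMPAGER_MAPIN_READ, i.e. the kernel window is left
 * writable; presumably for file systems whose page contents may still
 * be modified while the write is in flight.
 */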
int
genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	off_t off;
	vaddr_t kva;
	size_t len;
	int error;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
	    vp, pgs, npages, flags);

	off = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	len = npages << PAGE_SHIFT;

	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
			    uvm_aio_biodone);

	return error;
}

/*
 * Backend routine for doing I/O to vnode pages.  Pages are already locked
 * and mapped into kernel memory.  Here we just look up the underlying
 * device block addresses and call the strategy routine.
 */

static int
genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
    enum uio_rw rw, void (*iodone)(struct buf *))
{
	int s, error;
	int fs_bshift, dev_bshift;
	off_t eof, offset, startoffset;
	size_t bytes, iobytes, skipbytes;
	struct buf *mbp, *bp;
	const bool async = (flags & PGO_SYNCIO) == 0;
	const bool iowrite = rw == UIO_WRITE;
	const int brw = iowrite ? B_WRITE : B_READ;
	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
	    vp, kva, len, flags);

	KASSERT(vp->v_size <= vp->v_writesize);
	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
	if (vp->v_type != VBLK) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	error = 0;
	startoffset = off;
	bytes = MIN(len, eof - startoffset);
	skipbytes = 0;
	KASSERT(bytes != 0);

	if (iowrite) {
		mutex_enter(&vp->v_interlock);
		vp->v_numoutput += 2;
		mutex_exit(&vp->v_interlock);
	}
	mbp = getiobuf(vp, true);
	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
	    vp, mbp, vp->v_numoutput, bytes);
	mbp->b_bufsize = len;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_cflags = BC_BUSY | BC_AGE;
	if (async) {
		mbp->b_flags = brw | B_ASYNC;
		mbp->b_iodone = iodone;
	} else {
		mbp->b_flags = brw;
		mbp->b_iodone = NULL;
	}
	if (curlwp == uvm.pagedaemon_lwp)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		int run;
		daddr_t lbn, blkno;
		struct vnode *devvp;

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
			    lbn,error,0,0);
			skipbytes += bytes;
			bytes = 0;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno == (daddr_t)-1) {
			if (!iowrite) {
				memset((char *)kva + (offset - startoffset), 0,
				    iobytes);
			}
			skipbytes += iobytes;
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
			    vp, bp, vp->v_numoutput, 0);
			bp = getiobuf(vp, true);
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);

		UVMHIST_LOG(ubchist,
		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    bp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	if (skipbytes) {
		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
	}
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
		return (0);
	}
	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
	error = biowait(mbp);
	s = splbio();
	(*iodone)(mbp);
	splx(s);
	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
	return (error);
}
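
/*
 * Editorial note: compatibility getpages for file systems that do not do
 * real page-cache i/o: any PG_FAKE pages are filled by calling VOP_READ()
 * one page at a time through a kernel mapping.
 */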
int
genfs_compat_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t origoffset;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs;
	vaddr_t kva;
	int i, error, orignpages, npages;
	struct iovec iov;
	struct uio uio;
	kauth_cred_t cred = curlwp->l_cred;
	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	pgs = ap->a_m;

	if (ap->a_flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));

		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
		if (error == 0 && memwrite) {
			genfs_markdirty(vp);
		}
		return error;
	}
	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
		mutex_exit(&uobj->vmobjlock);
		return EINVAL;
	}
	if ((ap->a_flags & PGO_SYNCIO) == 0) {
		mutex_exit(&uobj->vmobjlock);
		return 0;
	}
	npages = orignpages;
	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
	mutex_exit(&uobj->vmobjlock);
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if ((pg->flags & PG_FAKE) == 0) {
			continue;
		}
		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
		iov.iov_len = PAGE_SIZE;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
		uio.uio_rw = UIO_READ;
		uio.uio_resid = PAGE_SIZE;
		UIO_SETUP_SYSSPACE(&uio);
		/* XXX vn_lock */
		error = VOP_READ(vp, &uio, 0, cred);
		if (error) {
			break;
		}
		if (uio.uio_resid) {
			memset(iov.iov_base, 0, uio.uio_resid);
		}
	}
	uvm_pagermapout(kva, npages);
	mutex_enter(&uobj->vmobjlock);
	mutex_enter(&uvm_pageqlock);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (error && (pg->flags & PG_FAKE) != 0) {
			pg->flags |= PG_RELEASED;
		} else {
			pmap_clear_modify(pg);
			uvm_pageactivate(pg);
		}
	}
	if (error) {
		uvm_page_unbusy(pgs, npages);
	}
	mutex_exit(&uvm_pageqlock);
	if (error == 0 && memwrite) {
		genfs_markdirty(vp);
	}
	mutex_exit(&uobj->vmobjlock);
	return error;
}
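
/*
 * Editorial note: the compatibility write path pushes the pages out with
 * VOP_WRITE() and then synthesizes a buf so that uvm_aio_aiodone() can
 * perform the usual pager cleanup.
 */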
int
genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	off_t offset;
	struct iovec iov;
	struct uio uio;
	kauth_cred_t cred = curlwp->l_cred;
	struct buf *bp;
	vaddr_t kva;
	int error;

	offset = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

	iov.iov_base = (void *)kva;
	iov.iov_len = npages << PAGE_SHIFT;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_rw = UIO_WRITE;
	uio.uio_resid = npages << PAGE_SHIFT;
	UIO_SETUP_SYSSPACE(&uio);
	/* XXX vn_lock */
	error = VOP_WRITE(vp, &uio, 0, cred);

	mutex_enter(&vp->v_interlock);
	vp->v_numoutput++;
	mutex_exit(&vp->v_interlock);

	bp = getiobuf(vp, true);
	bp->b_cflags = BC_BUSY | BC_AGE;
	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
	bp->b_data = (char *)kva;
	bp->b_bcount = npages << PAGE_SHIFT;
	bp->b_bufsize = npages << PAGE_SHIFT;
	bp->b_resid = 0;
	bp->b_error = error;
	uvm_aio_aiodone(bp);
	return (error);
}

/*
 * Process a uio using direct I/O.  If we reach a part of the request
 * which cannot be processed in this fashion for some reason, just return.
 * The caller must handle some additional part of the request using
 * buffered I/O before trying direct I/O again.
 */

void
genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
{
	struct vmspace *vs;
	struct iovec *iov;
	vaddr_t va;
	size_t len;
	const int mask = DEV_BSIZE - 1;
	int error;
	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
	    (ioflag & IO_JOURNALLOCKED) == 0);

	/*
	 * We only support direct I/O to user space for now.
	 */

	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
		return;
	}

	/*
	 * If the vnode is mapped, we would need to get the getpages lock
	 * to stabilize the bmap, but then we would get into trouble while
	 * locking the pages if the pages belong to this same vnode (or a
	 * multi-vnode cascade to the same effect).  Just fall back to
	 * buffered I/O if the vnode is mapped to avoid this mess.
	 */

	if (vp->v_vflag & VV_MAPPED) {
		return;
	}

	if (need_wapbl) {
		error = WAPBL_BEGIN(vp->v_mount);
		if (error)
			return;
	}

	/*
	 * Do as much of the uio as possible with direct I/O.
	 */

	vs = uio->uio_vmspace;
	while (uio->uio_resid) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		va = (vaddr_t)iov->iov_base;
		len = MIN(iov->iov_len, genfs_maxdio);
		len &= ~mask;

		/*
		 * If the next chunk is smaller than DEV_BSIZE or extends past
		 * the current EOF, then fall back to buffered I/O.
		 */

		if (len == 0 || uio->uio_offset + len > vp->v_size) {
			break;
		}

		/*
		 * Check alignment.  The file offset must be at least
		 * sector-aligned.  The exact constraint on memory alignment
		 * is very hardware-dependent, but requiring sector-aligned
		 * addresses there too is safe.
		 */
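
		/*
		 * Example (editorial): with DEV_BSIZE 512, mask is 0x1ff,
		 * so offset 0x1200 passes but 0x1201 falls back to
		 * buffered I/O.
		 */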

		if (uio->uio_offset & mask || va & mask) {
			break;
		}
		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
					  uio->uio_rw);
		if (error) {
			break;
		}
		iov->iov_base = (char *)iov->iov_base + len;
		iov->iov_len -= len;
		uio->uio_offset += len;
		uio->uio_resid -= len;
	}

	if (need_wapbl)
		WAPBL_END(vp->v_mount);
}
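
/*
 * Illustrative sketch (editorial assumption, not original code): how a
 * file system read path might invoke genfs_directio() for IO_DIRECT
 * requests, falling back to buffered I/O for whatever genfs_directio()
 * leaves in the uio.  example_buffered_read() is hypothetical.
 */
#if 0
static int
example_read(struct vnode *vp, struct uio *uio, int ioflag)
{

	if ((ioflag & IO_DIRECT) != 0)
		genfs_directio(vp, uio, ioflag);
	if (uio->uio_resid == 0)
		return 0;
	/* fall back to the buffered read path for the remainder */
	return example_buffered_read(vp, uio, ioflag);
}
#endif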

/*
 * Iodone routine for direct I/O.  We don't do much here since the request is
 * always synchronous, so the caller will do most of the work after biowait().
 */

static void
genfs_dio_iodone(struct buf *bp)
{

	KASSERT((bp->b_flags & B_ASYNC) == 0);
	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
		mutex_enter(bp->b_objlock);
		vwakeup(bp);
		mutex_exit(bp->b_objlock);
	}
	putiobuf(bp);
}

/*
 * Process one chunk of a direct I/O request.
 */

static int
genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
    off_t off, enum uio_rw rw)
{
	struct vm_map *map;
	struct pmap *upm, *kpm;
	size_t klen = round_page(uva + len) - trunc_page(uva);
	off_t spoff, epoff;
	vaddr_t kva, puva;
	paddr_t pa;
	vm_prot_t prot;
	int error, rv, poff, koff;
	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
		(rw == UIO_WRITE ? PGO_FREE : 0);
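
	/*
	 * Editorial note: the user buffer need not be page-aligned, so
	 * klen rounds [uva, uva + len) out to whole pages; koff below is
	 * uva's offset within its first page.
	 */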

	/*
	 * For writes, verify that this range of the file already has fully
	 * allocated backing store.  If there are any holes, just punt and
	 * make the caller take the buffered write path.
	 */

	if (rw == UIO_WRITE) {
		daddr_t lbn, elbn, blkno;
		int bsize, bshift, run;

		bshift = vp->v_mount->mnt_fs_bshift;
		bsize = 1 << bshift;
		lbn = off >> bshift;
		elbn = (off + len + bsize - 1) >> bshift;
		while (lbn < elbn) {
			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
			if (error) {
				return error;
			}
			if (blkno == (daddr_t)-1) {
				return ENOSPC;
			}
			lbn += 1 + run;
		}
	}

	/*
	 * Flush any cached pages for parts of the file that we're about to
	 * access.  If we're writing, invalidate pages as well.
	 */

	spoff = trunc_page(off);
	epoff = round_page(off + len);
	mutex_enter(&vp->v_interlock);
	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
	if (error) {
		return error;
	}

	/*
	 * Wire the user pages and remap them into kernel memory.
	 */

	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
	error = uvm_vslock(vs, (void *)uva, len, prot);
	if (error) {
		return error;
	}

	map = &vs->vm_map;
	upm = vm_map_pmap(map);
	kpm = vm_map_pmap(kernel_map);
	kva = uvm_km_alloc(kernel_map, klen, 0,
			   UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	puva = trunc_page(uva);
	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
		rv = pmap_extract(upm, puva + poff, &pa);
		KASSERT(rv);
		pmap_enter(kpm, kva + poff, pa, prot, prot | PMAP_WIRED);
	}
	pmap_update(kpm);

	/*
	 * Do the I/O.
	 */

	koff = uva - trunc_page(uva);
	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
			    genfs_dio_iodone);

	/*
	 * Tear down the kernel mapping.
	 */

	pmap_remove(kpm, kva, kva + klen);
	pmap_update(kpm);
	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);

	/*
	 * Unwire the user pages.
	 */

	uvm_vsunlock(vs, (void *)uva, len);
	return error;
}
   1838