genfs_io.c revision 1.36.2.14
      1 /*	$NetBSD: genfs_io.c,v 1.36.2.14 2010/07/09 12:57:42 uebayasi Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1982, 1986, 1989, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. Neither the name of the University nor the names of its contributors
     16  *    may be used to endorse or promote products derived from this software
     17  *    without specific prior written permission.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  *
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.36.2.14 2010/07/09 12:57:42 uebayasi Exp $");
     35 
     36 #include "opt_xip.h"
     37 
     38 #include <sys/param.h>
     39 #include <sys/systm.h>
     40 #include <sys/proc.h>
     41 #include <sys/kernel.h>
     42 #include <sys/mount.h>
     43 #include <sys/namei.h>
     44 #include <sys/vnode.h>
     45 #include <sys/fcntl.h>
     46 #include <sys/kmem.h>
     47 #include <sys/poll.h>
     48 #include <sys/mman.h>
     49 #include <sys/file.h>
     50 #include <sys/kauth.h>
     51 #include <sys/fstrans.h>
     52 #include <sys/buf.h>
     53 #include <sys/once.h>
     54 
     55 #include <miscfs/genfs/genfs.h>
     56 #include <miscfs/genfs/genfs_node.h>
     57 #include <miscfs/specfs/specdev.h>
     58 
     59 #include <uvm/uvm.h>
     60 #include <uvm/uvm_pager.h>
     61 
     62 static int genfs_do_getpages(void *);
     63 #ifdef XIP
     64 static int genfs_do_getpages_xip(void *);
     65 #endif
     66 static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
     67     off_t, enum uio_rw);
     68 static void genfs_dio_iodone(struct buf *);
     69 
     70 static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
     71     void (*)(struct buf *));
     72 static inline void genfs_rel_pages(struct vm_page **, int);
     73 
     74 int genfs_maxdio = MAXPHYS;
     75 
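         /*
          * genfs_rel_pages: unbusy the given pages.  pages which were
          * freshly allocated but never filled with valid data (PG_FAKE)
          * are marked PG_RELEASED so that uvm_page_unbusy() frees them
          * instead of leaving stale pages in the object.
          */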
     76 static inline void
     77 genfs_rel_pages(struct vm_page **pgs, int npages)
     78 {
     79 	int i;
     80 
     81 	for (i = 0; i < npages; i++) {
     82 		struct vm_page *pg = pgs[i];
     83 
     84 		if (pg == NULL || pg == PGO_DONTCARE)
     85 			continue;
     86 		if (pg->flags & PG_FAKE) {
     87 			pg->flags |= PG_RELEASED;
     88 		}
     89 	}
     90 	mutex_enter(&uvm_pageqlock);
     91 	uvm_page_unbusy(pgs, npages);
     92 	mutex_exit(&uvm_pageqlock);
     93 }
     94 
     95 /*
     96  * generic VM getpages routine.
     97  * Return PG_BUSY pages for the given range,
     98  * reading from backing store if necessary.
     99  */
    100 
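         /*
          * A filesystem normally uses these routines by pointing its
          * VOP_GETPAGES (and VOP_PUTPAGES, below) entries straight at
          * genfs in its vnodeop table, e.g.:
          *
          *	{ &vop_getpages_desc, genfs_getpages },
          *	{ &vop_putpages_desc, genfs_putpages },
          */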
    101 int
    102 genfs_getpages(void *v)
    103 {
    104 #ifdef XIP
    105 	struct vop_getpages_args /* {
    106 		struct vnode *a_vp;
    107 		voff_t a_offset;
    108 		struct vm_page **a_m;
    109 		int *a_count;
    110 		int a_centeridx;
    111 		vm_prot_t a_access_type;
    112 		int a_advice;
    113 		int a_flags;
    114 	} */ * const ap = v;
    115 	struct vnode * const vp = ap->a_vp;
    116 
    117 	if ((vp->v_vflag & VV_XIP) != 0)
    118 		return genfs_do_getpages_xip(v);
    119 	else
    120 #endif
    121 		return genfs_do_getpages(v);
    122 }
    123 
    124 static int
    125 genfs_do_getpages(void *v)
    126 {
    127 	struct vop_getpages_args /* {
    128 		struct vnode *a_vp;
    129 		voff_t a_offset;
    130 		struct vm_page **a_m;
    131 		int *a_count;
    132 		int a_centeridx;
    133 		vm_prot_t a_access_type;
    134 		int a_advice;
    135 		int a_flags;
    136 	} */ * const ap = v;
    137 
    138 	off_t diskeof, memeof;
    139 	int i, error, npages;
    140 	const int flags = ap->a_flags;
    141 	struct vnode * const vp = ap->a_vp;
    142 	struct genfs_node * const gp = VTOG(vp);
    143 	struct uvm_object * const uobj = &vp->v_uobj;
    144 	kauth_cred_t const cred = curlwp->l_cred;		/* XXXUBC curlwp */
    145 	const bool async = (flags & PGO_SYNCIO) == 0;
    146 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
    147 	bool has_trans = false;
    148 	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
    149 	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
    150 	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
    151 
    152 	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
    153 	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
    154 
    155 	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
    156 	    vp->v_type == VLNK || vp->v_type == VBLK);
    157 
    158 startover:
    159 	error = 0;
    160 	const voff_t origvsize = vp->v_size;
    161 	const off_t origoffset = ap->a_offset;
    162 	const int orignpages = *ap->a_count;
    163 
    164 	GOP_SIZE(vp, origvsize, &diskeof, 0);
    165 	if (flags & PGO_PASTEOF) {
    166 		off_t newsize;
    167 #if defined(DIAGNOSTIC)
    168 		off_t writeeof;
    169 #endif /* defined(DIAGNOSTIC) */
    170 
    171 		newsize = MAX(origvsize,
    172 		    origoffset + (orignpages << PAGE_SHIFT));
    173 		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
    174 #if defined(DIAGNOSTIC)
    175 		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
    176 		if (newsize > round_page(writeeof)) {
    177 			panic("%s: past eof", __func__);
    178 		}
    179 #endif /* defined(DIAGNOSTIC) */
    180 	} else {
    181 		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
    182 	}
     183 	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
    184 	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
    185 	KASSERT(orignpages > 0);
    186 
    187 	/*
    188 	 * Bounds-check the request.
    189 	 */
    190 
    191 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
    192 		if ((flags & PGO_LOCKED) == 0) {
    193 			mutex_exit(&uobj->vmobjlock);
    194 		}
    195 		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
    196 		    origoffset, *ap->a_count, memeof,0);
    197 		error = EINVAL;
    198 		goto out_err;
    199 	}
    200 
    201 	/* uobj is locked */
    202 
    203 	if ((flags & PGO_NOTIMESTAMP) == 0 &&
    204 	    (vp->v_type != VBLK ||
    205 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
    206 		int updflags = 0;
    207 
    208 		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
    209 			updflags = GOP_UPDATE_ACCESSED;
    210 		}
    211 		if (memwrite) {
    212 			updflags |= GOP_UPDATE_MODIFIED;
    213 		}
    214 		if (updflags != 0) {
    215 			GOP_MARKUPDATE(vp, updflags);
    216 		}
    217 	}
    218 
    219 	if (memwrite) {
    220 		gp->g_dirtygen++;
    221 		if ((vp->v_iflag & VI_ONWORKLST) == 0) {
    222 			vn_syncer_add_to_worklist(vp, filedelay);
    223 		}
    224 		if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
    225 			vp->v_iflag |= VI_WRMAPDIRTY;
    226 		}
    227 	}
    228 
    229 	/*
    230 	 * For PGO_LOCKED requests, just return whatever's in memory.
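         	 * PGO_LOCKED means the caller already holds fault-time locks
         	 * and we may not sleep, so we cannot wait for busy pages,
         	 * allocate new pages or start I/O here (hence the
         	 * UFP_NOWAIT|UFP_NOALLOC below).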
    231 	 */
    232 
    233 	if (flags & PGO_LOCKED) {
    234 		int nfound;
    235 		struct vm_page *pg;
    236 
    237 		npages = *ap->a_count;
    238 #if defined(DEBUG)
    239 		for (i = 0; i < npages; i++) {
    240 			pg = ap->a_m[i];
    241 			KASSERT(pg == NULL || pg == PGO_DONTCARE);
    242 		}
    243 #endif /* defined(DEBUG) */
    244 		nfound = uvn_findpages(uobj, origoffset, &npages,
    245 		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
    246 		KASSERT(npages == *ap->a_count);
    247 		if (nfound == 0) {
    248 			error = EBUSY;
    249 			goto out_err;
    250 		}
    251 		if (!genfs_node_rdtrylock(vp)) {
    252 			genfs_rel_pages(ap->a_m, npages);
    253 
    254 			/*
    255 			 * restore the array.
    256 			 */
    257 
    258 			for (i = 0; i < npages; i++) {
    259 				pg = ap->a_m[i];
    260 
     261 				if (pg != NULL && pg != PGO_DONTCARE) {
    262 					ap->a_m[i] = NULL;
    263 				}
    264 			}
    265 		} else {
    266 			genfs_node_unlock(vp);
    267 		}
    268 		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
    269 		goto out_err;
    270 	}
    271 	mutex_exit(&uobj->vmobjlock);
    272 
    273 	/*
    274 	 * find the requested pages and make some simple checks.
    275 	 * leave space in the page array for a whole block.
    276 	 */
    277 
    278 	const int fs_bshift = (vp->v_type != VBLK) ?
    279 	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
    280 	const int dev_bshift = (vp->v_type != VBLK) ?
    281 	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
    282 	const int fs_bsize = 1 << fs_bshift;
    283 #define	blk_mask	(fs_bsize - 1)
    284 #define	trunc_blk(x)	((x) & ~blk_mask)
    285 #define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)
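         /*
          * e.g. assuming fs_bsize == 8192 (fs_bshift == 13):
          *	trunc_blk(0x2345) == 0x2000, round_blk(0x2345) == 0x4000.
          */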
    286 
    287 	const int orignmempages = MIN(orignpages,
    288 	    round_page(memeof - origoffset) >> PAGE_SHIFT);
    289 	npages = orignmempages;
    290 	const off_t startoffset = trunc_blk(origoffset);
    291 	const off_t endoffset = MIN(
    292 	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
    293 	    round_page(memeof));
    294 	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
    295 
    296 	const int pgs_size = sizeof(struct vm_page *) *
    297 	    ((endoffset - startoffset) >> PAGE_SHIFT);
    298 	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];
    299 
    300 	if (pgs_size > sizeof(pgs_onstack)) {
    301 		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
    302 		if (pgs == NULL) {
    303 			pgs = pgs_onstack;
    304 			error = ENOMEM;
    305 			goto out_err;
    306 		}
    307 	} else {
    308 		pgs = pgs_onstack;
    309 		(void)memset(pgs, 0, pgs_size);
    310 	}
    311 
    312 	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
    313 	    ridx, npages, startoffset, endoffset);
    314 
    315 	if (!has_trans) {
    316 		fstrans_start(vp->v_mount, FSTRANS_SHARED);
    317 		has_trans = true;
    318 	}
    319 
    320 	/*
    321 	 * hold g_glock to prevent a race with truncate.
    322 	 *
    323 	 * check if our idea of v_size is still valid.
    324 	 */
    325 
    326 	if (blockalloc) {
    327 		rw_enter(&gp->g_glock, RW_WRITER);
    328 	} else {
    329 		rw_enter(&gp->g_glock, RW_READER);
    330 	}
    331 	mutex_enter(&uobj->vmobjlock);
    332 	if (vp->v_size < origvsize) {
    333 		genfs_node_unlock(vp);
    334 		if (pgs != pgs_onstack)
    335 			kmem_free(pgs, pgs_size);
    336 		goto startover;
    337 	}
    338 
    339 	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
    340 	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
    341 		genfs_node_unlock(vp);
    342 		KASSERT(async != 0);
    343 		genfs_rel_pages(&pgs[ridx], orignmempages);
    344 		mutex_exit(&uobj->vmobjlock);
    345 		error = EBUSY;
    346 		goto out_err_free;
    347 	}
    348 
    349 	/*
    350 	 * if the pages are already resident, just return them.
    351 	 */
    352 
    353 	for (i = 0; i < npages; i++) {
    354 		struct vm_page *pg = pgs[ridx + i];
    355 
    356 		if ((pg->flags & PG_FAKE) ||
    357 		    (blockalloc && (pg->flags & PG_RDONLY))) {
    358 			break;
    359 		}
    360 	}
    361 	if (i == npages) {
    362 		genfs_node_unlock(vp);
    363 		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
    364 		npages += ridx;
    365 		goto out;
    366 	}
    367 
    368 	/*
    369 	 * if PGO_OVERWRITE is set, don't bother reading the pages.
    370 	 */
    371 
    372 	if (overwrite) {
    373 		genfs_node_unlock(vp);
    374 		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
    375 
    376 		for (i = 0; i < npages; i++) {
    377 			struct vm_page *pg = pgs[ridx + i];
    378 
    379 			pg->flags &= ~(PG_RDONLY|PG_CLEAN);
    380 		}
    381 		npages += ridx;
    382 		goto out;
    383 	}
    384 
    385 	/*
    386 	 * the page wasn't resident and we're not overwriting,
    387 	 * so we're going to have to do some i/o.
    388 	 * find any additional pages needed to cover the expanded range.
    389 	 */
    390 
    391 	npages = (endoffset - startoffset) >> PAGE_SHIFT;
    392 	if (startoffset != origoffset || npages != orignmempages) {
    393 		int npgs;
    394 
    395 		/*
    396 		 * we need to avoid deadlocks caused by locking
    397 		 * additional pages at lower offsets than pages we
    398 		 * already have locked.  unlock them all and start over.
    399 		 */
    400 
    401 		genfs_rel_pages(&pgs[ridx], orignmempages);
    402 		memset(pgs, 0, pgs_size);
    403 
    404 		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
    405 		    startoffset, endoffset, 0,0);
    406 		npgs = npages;
    407 		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
    408 		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
    409 			genfs_node_unlock(vp);
    410 			KASSERT(async != 0);
    411 			genfs_rel_pages(pgs, npages);
    412 			mutex_exit(&uobj->vmobjlock);
    413 			error = EBUSY;
    414 			goto out_err_free;
    415 		}
    416 	}
    417 
    418 	mutex_exit(&uobj->vmobjlock);
    419 
    420     {
    421 	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
    422 	vaddr_t kva;
    423 	struct buf *bp, *mbp;
    424 	bool sawhole = false;
    425 
    426 	/*
    427 	 * read the desired page(s).
    428 	 */
    429 
    430 	totalbytes = npages << PAGE_SHIFT;
    431 	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
    432 	tailbytes = totalbytes - bytes;
    433 	skipbytes = 0;
    434 
    435 	kva = uvm_pagermapin(pgs, npages,
    436 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
    437 
    438 	mbp = getiobuf(vp, true);
    439 	mbp->b_bufsize = totalbytes;
    440 	mbp->b_data = (void *)kva;
    441 	mbp->b_resid = mbp->b_bcount = bytes;
    442 	mbp->b_cflags = BC_BUSY;
    443 	if (async) {
    444 		mbp->b_flags = B_READ | B_ASYNC;
    445 		mbp->b_iodone = uvm_aio_biodone;
    446 	} else {
    447 		mbp->b_flags = B_READ;
    448 		mbp->b_iodone = NULL;
    449 	}
    450 	if (async)
    451 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
    452 	else
    453 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
    454 
    455 	/*
    456 	 * if EOF is in the middle of the range, zero the part past EOF.
    457 	 * skip over pages which are not PG_FAKE since in that case they have
    458 	 * valid data that we need to preserve.
    459 	 */
    460 
    461 	tailstart = bytes;
    462 	while (tailbytes > 0) {
    463 		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);
    464 
    465 		KASSERT(len <= tailbytes);
    466 		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
    467 			memset((void *)(kva + tailstart), 0, len);
    468 			UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
    469 			    kva, tailstart, len, 0);
    470 		}
    471 		tailstart += len;
    472 		tailbytes -= len;
    473 	}
    474 
    475 	/*
    476 	 * now loop over the pages, reading as needed.
    477 	 */
    478 
    479 	bp = NULL;
    480 	off_t offset;
    481 	for (offset = startoffset;
    482 	    bytes > 0;
    483 	    offset += iobytes, bytes -= iobytes) {
    484 		int run;
    485 		daddr_t lbn, blkno;
    486 		int pidx;
    487 		struct vnode *devvp;
    488 
    489 		/*
    490 		 * skip pages which don't need to be read.
    491 		 */
    492 
    493 		pidx = (offset - startoffset) >> PAGE_SHIFT;
    494 		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
    495 			size_t b;
    496 
    497 			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
    498 			if ((pgs[pidx]->flags & PG_RDONLY)) {
    499 				sawhole = true;
    500 			}
    501 			b = MIN(PAGE_SIZE, bytes);
    502 			offset += b;
    503 			bytes -= b;
    504 			skipbytes += b;
    505 			pidx++;
    506 			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
    507 			    offset, 0,0,0);
    508 			if (bytes == 0) {
    509 				goto loopdone;
    510 			}
    511 		}
    512 
    513 		/*
    514 		 * bmap the file to find out the blkno to read from and
    515 		 * how much we can read in one i/o.  if bmap returns an error,
    516 		 * skip the rest of the top-level i/o.
    517 		 */
    518 
    519 		lbn = offset >> fs_bshift;
    520 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    521 		if (error) {
    522 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
    523 			    lbn,error,0,0);
    524 			skipbytes += bytes;
    525 			bytes = 0;
    526 			goto loopdone;
    527 		}
    528 
    529 		/*
    530 		 * see how many pages can be read with this i/o.
    531 		 * reduce the i/o size if necessary to avoid
    532 		 * overwriting pages with valid data.
    533 		 */
    534 
    535 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
    536 		    bytes);
    537 		if (offset + iobytes > round_page(offset)) {
    538 			int pcount;
    539 
    540 			pcount = 1;
    541 			while (pidx + pcount < npages &&
    542 			    pgs[pidx + pcount]->flags & PG_FAKE) {
    543 				pcount++;
    544 			}
    545 			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
    546 			    (offset - trunc_page(offset)));
    547 		}
    548 
    549 		/*
    550 		 * if this block isn't allocated, zero it instead of
    551 		 * reading it.  unless we are going to allocate blocks,
    552 		 * mark the pages we zeroed PG_RDONLY.
    553 		 */
    554 
    555 		if (blkno == (daddr_t)-1) {
    556 			int holepages = (round_page(offset + iobytes) -
    557 			    trunc_page(offset)) >> PAGE_SHIFT;
    558 			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
    559 
    560 			sawhole = true;
    561 			memset((char *)kva + (offset - startoffset), 0,
    562 			    iobytes);
    563 			skipbytes += iobytes;
    564 
    565 			for (i = 0; i < holepages; i++) {
    566 				if (memwrite) {
    567 					pgs[pidx + i]->flags &= ~PG_CLEAN;
    568 				}
    569 				if (!blockalloc) {
    570 					pgs[pidx + i]->flags |= PG_RDONLY;
    571 				}
    572 			}
    573 			continue;
    574 		}
    575 
    576 		/*
    577 		 * allocate a sub-buf for this piece of the i/o
    578 		 * (or just use mbp if there's only 1 piece),
    579 		 * and start it going.
    580 		 */
    581 
    582 		if (offset == startoffset && iobytes == bytes) {
    583 			bp = mbp;
    584 		} else {
    585 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
    586 			    vp, bp, vp->v_numoutput, 0);
    587 			bp = getiobuf(vp, true);
    588 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
    589 		}
    590 		bp->b_lblkno = 0;
    591 
    592 		/* adjust physical blkno for partial blocks */
    593 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
    594 		    dev_bshift);
    595 
    596 		UVMHIST_LOG(ubchist,
    597 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
    598 		    bp, offset, bp->b_bcount, bp->b_blkno);
    599 
    600 		VOP_STRATEGY(devvp, bp);
    601 	}
    602 
    603 loopdone:
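         	/*
         	 * tell the master buffer about the bytes we skipped; once all
         	 * sub-buffers have completed, mbp as a whole is treated as done
         	 * (its iodone runs in the async case, biowait() returns below
         	 * in the sync case).
         	 */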
    604 	nestiobuf_done(mbp, skipbytes, error);
    605 	if (async) {
    606 		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
    607 		genfs_node_unlock(vp);
    608 		error = 0;
    609 		goto out_err_free;
    610 	}
    611 	if (bp != NULL) {
    612 		error = biowait(mbp);
    613 	}
    614 
    615 	/* Remove the mapping (make KVA available as soon as possible) */
    616 	uvm_pagermapout(kva, npages);
    617 
    618 	/*
     619 	 * if we encountered a hole then we have to do a little more work.
    620 	 * for read faults, we marked the page PG_RDONLY so that future
    621 	 * write accesses to the page will fault again.
    622 	 * for write faults, we must make sure that the backing store for
    623 	 * the page is completely allocated while the pages are locked.
    624 	 */
    625 
    626 	if (!error && sawhole && blockalloc) {
    627 		/*
    628 		 * XXX: This assumes that we come here only via
    629 		 * the mmio path
    630 		 */
    631 		if (vp->v_mount->mnt_wapbl) {
    632 			error = WAPBL_BEGIN(vp->v_mount);
    633 		}
    634 
    635 		if (!error) {
    636 			error = GOP_ALLOC(vp, startoffset,
    637 			    npages << PAGE_SHIFT, 0, cred);
    638 			if (vp->v_mount->mnt_wapbl) {
    639 				WAPBL_END(vp->v_mount);
    640 			}
    641 		}
    642 
    643 		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
    644 		    startoffset, npages << PAGE_SHIFT, error,0);
    645 		if (!error) {
    646 			for (i = 0; i < npages; i++) {
    647 				struct vm_page *pg = pgs[i];
    648 
    649 				if (pg == NULL) {
    650 					continue;
    651 				}
    652 				pg->flags &= ~(PG_CLEAN|PG_RDONLY);
    653 				UVMHIST_LOG(ubchist, "mark dirty pg %p",
    654 				    pg,0,0,0);
    655 			}
    656 		}
    657 	}
    658 	genfs_node_unlock(vp);
    659 
    660 	putiobuf(mbp);
    661     }
    662 
    663 	mutex_enter(&uobj->vmobjlock);
    664 
    665 	/*
    666 	 * we're almost done!  release the pages...
    667 	 * for errors, we free the pages.
    668 	 * otherwise we activate them and mark them as valid and clean.
    669 	 * also, unbusy pages that were not actually requested.
    670 	 */
    671 
    672 	if (error) {
    673 		for (i = 0; i < npages; i++) {
    674 			struct vm_page *pg = pgs[i];
    675 
    676 			if (pg == NULL) {
    677 				continue;
    678 			}
    679 			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    680 			    pg, pg->flags, 0,0);
    681 			if (pg->flags & PG_FAKE) {
    682 				pg->flags |= PG_RELEASED;
    683 			}
    684 		}
    685 		mutex_enter(&uvm_pageqlock);
    686 		uvm_page_unbusy(pgs, npages);
    687 		mutex_exit(&uvm_pageqlock);
    688 		mutex_exit(&uobj->vmobjlock);
    689 		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
    690 		goto out_err_free;
    691 	}
    692 
    693 out:
    694 	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
    695 	error = 0;
    696 	mutex_enter(&uvm_pageqlock);
    697 	for (i = 0; i < npages; i++) {
    698 		struct vm_page *pg = pgs[i];
    699 		if (pg == NULL) {
    700 			continue;
    701 		}
    702 		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    703 		    pg, pg->flags, 0,0);
    704 		if (pg->flags & PG_FAKE && !overwrite) {
    705 			pg->flags &= ~(PG_FAKE);
    706 			pmap_clear_modify(pgs[i]);
    707 		}
    708 		KASSERT(!memwrite || !blockalloc || (pg->flags & PG_RDONLY) == 0);
    709 		if (i < ridx || i >= ridx + orignmempages || async) {
    710 			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
    711 			    pg, pg->offset,0,0);
    712 			if (pg->flags & PG_WANTED) {
    713 				wakeup(pg);
    714 			}
    715 			if (pg->flags & PG_FAKE) {
    716 				KASSERT(overwrite);
    717 				uvm_pagezero(pg);
    718 			}
    719 			if (pg->flags & PG_RELEASED) {
    720 				uvm_pagefree(pg);
    721 				continue;
    722 			}
    723 			uvm_pageenqueue(pg);
    724 			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
    725 			UVM_PAGE_OWN(pg, NULL);
    726 		}
    727 	}
    728 	mutex_exit(&uvm_pageqlock);
    729 	mutex_exit(&uobj->vmobjlock);
    730 	if (ap->a_m != NULL) {
    731 		memcpy(ap->a_m, &pgs[ridx],
    732 		    orignmempages * sizeof(struct vm_page *));
    733 	}
    734 
    735 out_err_free:
    736 	if (pgs != NULL && pgs != pgs_onstack)
    737 		kmem_free(pgs, pgs_size);
    738 out_err:
    739 	if (has_trans)
    740 		fstrans_done(vp->v_mount);
    741 	return (error);
    742 }
    743 
    744 #ifdef XIP
    745 static struct uvm_object xip_zero_obj;
    746 static struct vm_page *xip_zero_page;
    747 
    748 static int
    749 xip_zero_page_init(void)
    750 {
    751 
    752 	UVM_OBJ_INIT(&xip_zero_obj, NULL, 0);
    753 	xip_zero_page = uvm_pagealloc(&xip_zero_obj, 0, NULL, UVM_PGA_ZERO);
    754 	KASSERT(xip_zero_page != NULL);
    755 	uvm_pagewire(xip_zero_page);
    756 	return 0;
    757 }
    758 
    759 /*
    760  * genfs_do_getpages_xip
    761  *      Return "direct pages" of XIP vnode.  The block addresses of XIP
    762  *      vnode pages are returned back to the VM fault handler as the
    763  *	actually mapped physical addresses.
    764  *
    765  * XXX Should be merged into genfs_do_getpages() after
    766  * XXX genfs_do_getpages() and genfs_do_io() are merged.
    767  */
    768 static int
    769 genfs_do_getpages_xip(void *v)
    770 {
    771 	struct vop_getpages_args /* {
    772 		struct vnode *a_vp;
    773 		voff_t a_offset;
    774 		struct vm_page **a_m;
    775 		int *a_count;
    776 		int a_centeridx;
    777 		vm_prot_t a_access_type;
    778 		int a_advice;
    779 		int a_flags;
    780 	} */ * const ap = v;
    781 
    782 	struct vnode * const vp = ap->a_vp;
    783 	int *npagesp = ap->a_count;
    784 	const off_t offset = ap->a_offset;
    785 	struct vm_page **pps = ap->a_m;
    786 	struct uvm_object * const uobj = &vp->v_uobj;
    787 	const int flags = ap->a_flags;
    788 
    789 	int error;
    790 	off_t eof, sbkoff, ebkoff, off;
    791 	int npages;
    792 	int fs_bshift, fs_bsize, dev_bshift, dev_bsize;
    793 	int i;
     794 	paddr_t phys_addr = 0;
    795 
    796 	UVMHIST_FUNC("genfs_do_getpages_xip"); UVMHIST_CALLED(ubchist);
    797 
    798 	KASSERT((vp->v_vflag & VV_XIP) != 0);
    799 
    800 	GOP_SIZE(vp, vp->v_size, &eof, GOP_SIZE_MEM);
    801 	npages = MIN(*npagesp, round_page(eof - offset) >> PAGE_SHIFT);
    802 
    803 	fs_bshift = vp->v_mount->mnt_fs_bshift;
    804 	fs_bsize = 1 << fs_bshift;
    805 	dev_bshift = vp->v_mount->mnt_dev_bshift;
    806 	dev_bsize = 1 << dev_bshift;
    807 
    808 	sbkoff = offset & ~(fs_bsize - 1);
    809 	ebkoff = ((offset + PAGE_SIZE * npages) + (fs_bsize - 1)) & ~(fs_bsize - 1);
    810 
    811 	UVMHIST_LOG(ubchist, "xip npages=%d sbkoff=%lx ebkoff=%lx", npages, (long)sbkoff, (long)ebkoff, 0);
    812 
    813 	if ((flags & PGO_LOCKED) == 0)
    814 		mutex_exit(&uobj->vmobjlock);
    815 
    816 	/* XXX optimize */
    817 	off = offset;
    818 	for (i = 0; i < npages; i++) {
    819 		daddr_t lbn, blkno;
    820 		int run;
    821 		struct vnode *devvp;
    822 
    823 		lbn = (off & ~(fs_bsize - 1)) >> fs_bshift;
    824 
    825 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    826 		KASSERT(error == 0);
    827 		UVMHIST_LOG(ubchist, "xip VOP_BMAP: lbn=%ld blkno=%ld run=%d", (long)lbn, (long)blkno, run, 0);
    828 
    829 		/*
    830 		 * XIP page metadata assignment
    831 		 * - Unallocated block is redirected to the dedicated zero'ed
    832 		 *   page.
    833 		 * - Assume that struct vm_page *[] array of this segment is
    834 		 *   allocated and linearly ordered by physical address.
    835 		 */
    836 		if (blkno < 0) {
    837 			static ONCE_DECL(xip_zero_page_inited);
    838 
    839 			RUN_ONCE(&xip_zero_page_inited, xip_zero_page_init);
    840 			pps[i] = xip_zero_page;
    841 		} else {
    842 			struct vm_physseg *seg;
    843 			struct vm_page *pg;
    844 
    845 			seg = devvp->v_physseg;
    846 			KASSERT(seg != NULL);
    847 			/* bus_space_mmap cookie -> paddr_t */
    848 			phys_addr = pmap_phys_address(seg->start) +
    849 			    (blkno << dev_bshift) +
    850 			    (off - (lbn << fs_bshift));
    851 			pg = seg->pgs +
    852 			    ((phys_addr >> PAGE_SHIFT) - seg->start);
    853 			KASSERT(pg->phys_addr == phys_addr);
    854 
    855 			pps[i] = pg;
    856 		}
    857 
    858 		UVMHIST_LOG(ubchist, "xip pgs %d => phys_addr=0x%lx (%p)",
    859 			i,
    860 			(long)phys_addr,
    861 			pps[i],
    862 			0);
    863 
    864 		off += PAGE_SIZE;
    865 	}
    866 
    867 	if ((flags & PGO_LOCKED) == 0)
    868 		mutex_enter(&uobj->vmobjlock);
    869 	KASSERT(mutex_owned(&uobj->vmobjlock));
    870 
    871 	for (i = 0; i < npages; i++) {
    872 		struct vm_page *pg = pps[i];
    873 
    874 		if (pg == xip_zero_page) {
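         			/*
         			 * the shared zero page is returned as is;
         			 * it is never busied and is only ever
         			 * mapped read-only.
         			 */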
    875 		} else {
    876 			KASSERT((pg->flags & PG_BUSY) == 0);
    877 			KASSERT((pg->flags & PG_RDONLY) != 0);
    878 			KASSERT((pg->flags & PG_CLEAN) != 0);
    879 			KASSERT((pg->flags & PG_DIRECT) != 0);
    880 			pg->flags |= PG_BUSY;
    881 			pg->uobject = &vp->v_uobj;
    882 		}
    883 	}
    884 
    885 	if ((flags & PGO_LOCKED) == 0)
    886 		mutex_exit(&uobj->vmobjlock);
    887 
    888 	*npagesp = npages;
    889 
    890 	return 0;
    891 }
    892 #endif
    893 
    894 /*
    895  * generic VM putpages routine.
    896  * Write the given range of pages to backing store.
    897  *
    898  * => "offhi == 0" means flush all pages at or after "offlo".
    899  * => object should be locked by caller.  we return with the
    900  *      object unlocked.
    901  * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
    902  *	thus, a caller might want to unlock higher level resources
    903  *	(e.g. vm_map) before calling flush.
    904  * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
    905  * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
    906  * => NOTE: we rely on the fact that the object's memq is a TAILQ and
    907  *	that new pages are inserted on the tail end of the list.   thus,
    908  *	we can make a complete pass through the object in one go by starting
    909  *	at the head and working towards the tail (new pages are put in
    910  *	front of us).
    911  * => NOTE: we are allowed to lock the page queues, so the caller
    912  *	must not be holding the page queue lock.
    913  *
    914  * note on "cleaning" object and PG_BUSY pages:
    915  *	this routine is holding the lock on the object.   the only time
    916  *	that it can run into a PG_BUSY page that it does not own is if
    917  *	some other process has started I/O on the page (e.g. either
    918  *	a pagein, or a pageout).    if the PG_BUSY page is being paged
    919  *	in, then it can not be dirty (!PG_CLEAN) because no one has
    920  *	had a chance to modify it yet.    if the PG_BUSY page is being
    921  *	paged out then it means that someone else has already started
    922  *	cleaning the page for us (how nice!).    in this case, if we
    923  *	have syncio specified, then after we make our pass through the
    924  *	object we need to wait for the other PG_BUSY pages to clear
    925  *	off (i.e. we need to do an iosync).   also note that once a
    926  *	page is PG_BUSY it must stay in its object until it is un-busyed.
    927  *
    928  * note on page traversal:
    929  *	we can traverse the pages in an object either by going down the
    930  *	linked list in "uobj->memq", or we can go over the address range
    931  *	by page doing hash table lookups for each address.    depending
    932  *	on how many pages are in the object it may be cheaper to do one
    933  *	or the other.   we set "by_list" to true if we are using memq.
    934  *	if the cost of a hash lookup was equal to the cost of the list
    935  *	traversal we could compare the number of pages in the start->stop
    936  *	range to the total number of pages in the object.   however, it
    937  *	seems that a hash table lookup is more expensive than the linked
    938  *	list traversal, so we multiply the number of pages in the
    939  *	range by an estimate of the relatively higher cost of the hash lookup.
    940  */
    941 
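         /*
          * e.g. a sketch of a whole-file synchronous flush; the object
          * lock is passed in held and is released on return:
          *
          *	mutex_enter(&vp->v_interlock);
          *	error = VOP_PUTPAGES(vp, 0, 0,
          *	    PGO_ALLPAGES | PGO_CLEANIT | PGO_SYNCIO);
          */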
    942 int
    943 genfs_putpages(void *v)
    944 {
    945 	struct vop_putpages_args /* {
    946 		struct vnode *a_vp;
    947 		voff_t a_offlo;
    948 		voff_t a_offhi;
    949 		int a_flags;
    950 	} */ * const ap = v;
    951 
    952 	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
    953 	    ap->a_flags, NULL);
    954 }
    955 
    956 int
    957 genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
    958     int origflags, struct vm_page **busypg)
    959 {
    960 	struct uvm_object * const uobj = &vp->v_uobj;
    961 	kmutex_t * const slock = &uobj->vmobjlock;
    962 	off_t off;
    963 	/* Even for strange MAXPHYS, the shift rounds down to a page */
    964 #define maxpages (MAXPHYS >> PAGE_SHIFT)
    965 	int i, error, npages, nback;
    966 	int freeflag;
    967 	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
    968 	bool wasclean, by_list, needs_clean, yld;
    969 	bool async = (origflags & PGO_SYNCIO) == 0;
    970 	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
    971 	struct lwp * const l = curlwp ? curlwp : &lwp0;
    972 	struct genfs_node * const gp = VTOG(vp);
    973 	int flags;
    974 	int dirtygen;
    975 	bool modified;
    976 	bool need_wapbl;
    977 	bool has_trans;
    978 	bool cleanall;
    979 	bool onworklst;
    980 
    981 	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
    982 
    983 	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
    984 	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
    985 	KASSERT(startoff < endoff || endoff == 0);
    986 
    987 	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
    988 	    vp, uobj->uo_npages, startoff, endoff - startoff);
    989 
    990 	has_trans = false;
    991 	need_wapbl = (!pagedaemon && vp->v_mount && vp->v_mount->mnt_wapbl &&
    992 	    (origflags & PGO_JOURNALLOCKED) == 0);
    993 
    994 retry:
    995 	modified = false;
    996 	flags = origflags;
    997 	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
    998 	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
    999 	if (uobj->uo_npages == 0) {
   1000 		if (vp->v_iflag & VI_ONWORKLST) {
   1001 			vp->v_iflag &= ~VI_WRMAPDIRTY;
   1002 			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1003 				vn_syncer_remove_from_worklist(vp);
   1004 		}
   1005 		if (has_trans) {
   1006 			if (need_wapbl)
   1007 				WAPBL_END(vp->v_mount);
   1008 			fstrans_done(vp->v_mount);
   1009 		}
   1010 		mutex_exit(slock);
   1011 		return (0);
   1012 	}
   1013 
   1014 	/*
   1015 	 * the vnode has pages, set up to process the request.
   1016 	 */
   1017 
   1018 	if (!has_trans && (flags & PGO_CLEANIT) != 0) {
   1019 		mutex_exit(slock);
   1020 		if (pagedaemon) {
   1021 			error = fstrans_start_nowait(vp->v_mount, FSTRANS_LAZY);
   1022 			if (error)
   1023 				return error;
   1024 		} else
   1025 			fstrans_start(vp->v_mount, FSTRANS_LAZY);
   1026 		if (need_wapbl) {
   1027 			error = WAPBL_BEGIN(vp->v_mount);
   1028 			if (error) {
   1029 				fstrans_done(vp->v_mount);
   1030 				return error;
   1031 			}
   1032 		}
   1033 		has_trans = true;
   1034 		mutex_enter(slock);
   1035 		goto retry;
   1036 	}
   1037 
   1038 	error = 0;
   1039 	wasclean = (vp->v_numoutput == 0);
   1040 	off = startoff;
   1041 	if (endoff == 0 || flags & PGO_ALLPAGES) {
   1042 		endoff = trunc_page(LLONG_MAX);
   1043 	}
   1044 	by_list = (uobj->uo_npages <=
   1045 	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_TREE_PENALTY);
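         	/*
         	 * e.g. assuming UVM_PAGE_TREE_PENALTY is 4, a 16-page range
         	 * is scanned via the memq list only if the object holds no
         	 * more than 64 pages in total.
         	 */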
   1046 
   1047 #if !defined(DEBUG)
   1048 	/*
   1049 	 * if this vnode is known not to have dirty pages,
   1050 	 * don't bother to clean it out.
   1051 	 */
   1052 
   1053 	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
   1054 		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
   1055 			goto skip_scan;
   1056 		}
   1057 		flags &= ~PGO_CLEANIT;
   1058 	}
   1059 #endif /* !defined(DEBUG) */
   1060 
   1061 	/*
   1062 	 * start the loop.  when scanning by list, hold the last page
   1063 	 * in the list before we start.  pages allocated after we start
   1064 	 * will be added to the end of the list, so we can stop at the
   1065 	 * current last page.
   1066 	 */
   1067 
   1068 	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
   1069 	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
   1070 	    (vp->v_iflag & VI_ONWORKLST) != 0;
   1071 	dirtygen = gp->g_dirtygen;
   1072 	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
   1073 	if (by_list) {
   1074 		curmp.uobject = uobj;
   1075 		curmp.offset = (voff_t)-1;
   1076 		curmp.flags = PG_BUSY;
   1077 		endmp.uobject = uobj;
   1078 		endmp.offset = (voff_t)-1;
   1079 		endmp.flags = PG_BUSY;
   1080 		pg = TAILQ_FIRST(&uobj->memq);
   1081 		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
   1082 	} else {
   1083 		pg = uvm_pagelookup(uobj, off);
   1084 	}
   1085 	nextpg = NULL;
   1086 	while (by_list || off < endoff) {
   1087 
   1088 		/*
   1089 		 * if the current page is not interesting, move on to the next.
   1090 		 */
   1091 
   1092 		KASSERT(pg == NULL || pg->uobject == uobj);
   1093 		KASSERT(pg == NULL ||
   1094 		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
   1095 		    (pg->flags & PG_BUSY) != 0);
   1096 		if (by_list) {
   1097 			if (pg == &endmp) {
   1098 				break;
   1099 			}
   1100 			if (pg->offset < startoff || pg->offset >= endoff ||
   1101 			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1102 				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1103 					wasclean = false;
   1104 				}
   1105 				pg = TAILQ_NEXT(pg, listq.queue);
   1106 				continue;
   1107 			}
   1108 			off = pg->offset;
   1109 		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1110 			if (pg != NULL) {
   1111 				wasclean = false;
   1112 			}
   1113 			off += PAGE_SIZE;
   1114 			if (off < endoff) {
   1115 				pg = uvm_pagelookup(uobj, off);
   1116 			}
   1117 			continue;
   1118 		}
   1119 
   1120 		/*
   1121 		 * if the current page needs to be cleaned and it's busy,
   1122 		 * wait for it to become unbusy.
   1123 		 */
   1124 
   1125 		yld = (l->l_cpu->ci_schedstate.spc_flags &
   1126 		    SPCF_SHOULDYIELD) && !pagedaemon;
   1127 		if (pg->flags & PG_BUSY || yld) {
   1128 			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
   1129 			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
   1130 				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
   1131 				error = EDEADLK;
   1132 				if (busypg != NULL)
   1133 					*busypg = pg;
   1134 				break;
   1135 			}
   1136 			if (pagedaemon) {
   1137 				/*
   1138 				 * someone has taken the page while we
   1139 				 * dropped the lock for fstrans_start.
   1140 				 */
   1141 				break;
   1142 			}
   1143 			if (by_list) {
   1144 				TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
   1145 				UVMHIST_LOG(ubchist, "curmp next %p",
   1146 				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
   1147 			}
   1148 			if (yld) {
   1149 				mutex_exit(slock);
   1150 				preempt();
   1151 				mutex_enter(slock);
   1152 			} else {
   1153 				pg->flags |= PG_WANTED;
   1154 				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
   1155 				mutex_enter(slock);
   1156 			}
   1157 			if (by_list) {
   1158 				UVMHIST_LOG(ubchist, "after next %p",
   1159 				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
   1160 				pg = TAILQ_NEXT(&curmp, listq.queue);
   1161 				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
   1162 			} else {
   1163 				pg = uvm_pagelookup(uobj, off);
   1164 			}
   1165 			continue;
   1166 		}
   1167 
   1168 		/*
   1169 		 * if we're freeing, remove all mappings of the page now.
    1170 		 * if we're cleaning, check if the page needs to be cleaned.
   1171 		 */
   1172 
   1173 		if (flags & PGO_FREE) {
   1174 			pmap_page_protect(pg, VM_PROT_NONE);
   1175 		} else if (flags & PGO_CLEANIT) {
   1176 
   1177 			/*
   1178 			 * if we still have some hope to pull this vnode off
   1179 			 * from the syncer queue, write-protect the page.
   1180 			 */
   1181 
   1182 			if (cleanall && wasclean &&
   1183 			    gp->g_dirtygen == dirtygen) {
   1184 
   1185 				/*
   1186 				 * uobj pages get wired only by uvm_fault
   1187 				 * where uobj is locked.
   1188 				 */
   1189 
   1190 				if (pg->wire_count == 0) {
   1191 					pmap_page_protect(pg,
   1192 					    VM_PROT_READ|VM_PROT_EXECUTE);
   1193 				} else {
   1194 					cleanall = false;
   1195 				}
   1196 			}
   1197 		}
   1198 
   1199 		if (flags & PGO_CLEANIT) {
   1200 			needs_clean = pmap_clear_modify(pg) ||
   1201 			    (pg->flags & PG_CLEAN) == 0;
   1202 			pg->flags |= PG_CLEAN;
   1203 		} else {
   1204 			needs_clean = false;
   1205 		}
   1206 
   1207 		/*
   1208 		 * if we're cleaning, build a cluster.
   1209 		 * the cluster will consist of pages which are currently dirty,
   1210 		 * but they will be returned to us marked clean.
   1211 		 * if not cleaning, just operate on the one page.
   1212 		 */
   1213 
   1214 		if (needs_clean) {
   1215 			KDASSERT((vp->v_iflag & VI_ONWORKLST));
   1216 			wasclean = false;
   1217 			memset(pgs, 0, sizeof(pgs));
   1218 			pg->flags |= PG_BUSY;
   1219 			UVM_PAGE_OWN(pg, "genfs_putpages");
   1220 
   1221 			/*
   1222 			 * first look backward.
   1223 			 */
   1224 
   1225 			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
   1226 			nback = npages;
   1227 			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
   1228 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
   1229 			if (nback) {
   1230 				memmove(&pgs[0], &pgs[npages - nback],
   1231 				    nback * sizeof(pgs[0]));
   1232 				if (npages - nback < nback)
   1233 					memset(&pgs[nback], 0,
   1234 					    (npages - nback) * sizeof(pgs[0]));
   1235 				else
   1236 					memset(&pgs[npages - nback], 0,
   1237 					    nback * sizeof(pgs[0]));
   1238 			}
   1239 
   1240 			/*
   1241 			 * then plug in our page of interest.
   1242 			 */
   1243 
   1244 			pgs[nback] = pg;
   1245 
   1246 			/*
   1247 			 * then look forward to fill in the remaining space in
   1248 			 * the array of pages.
   1249 			 */
   1250 
   1251 			npages = maxpages - nback - 1;
   1252 			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
   1253 			    &pgs[nback + 1],
   1254 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
   1255 			npages += nback + 1;
   1256 		} else {
   1257 			pgs[0] = pg;
   1258 			npages = 1;
   1259 			nback = 0;
   1260 		}
   1261 
   1262 		/*
   1263 		 * apply FREE or DEACTIVATE options if requested.
   1264 		 */
   1265 
   1266 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1267 			mutex_enter(&uvm_pageqlock);
   1268 		}
   1269 		for (i = 0; i < npages; i++) {
   1270 			tpg = pgs[i];
   1271 			KASSERT(tpg->uobject == uobj);
   1272 			if (by_list && tpg == TAILQ_NEXT(pg, listq.queue))
   1273 				pg = tpg;
   1274 			if (tpg->offset < startoff || tpg->offset >= endoff)
   1275 				continue;
   1276 			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
   1277 				uvm_pagedeactivate(tpg);
   1278 			} else if (flags & PGO_FREE) {
   1279 				pmap_page_protect(tpg, VM_PROT_NONE);
   1280 				if (tpg->flags & PG_BUSY) {
   1281 					tpg->flags |= freeflag;
   1282 					if (pagedaemon) {
   1283 						uvm_pageout_start(1);
   1284 						uvm_pagedequeue(tpg);
   1285 					}
   1286 				} else {
   1287 
   1288 					/*
   1289 					 * ``page is not busy''
   1290 					 * implies that npages is 1
   1291 					 * and needs_clean is false.
   1292 					 */
   1293 
   1294 					nextpg = TAILQ_NEXT(tpg, listq.queue);
   1295 					uvm_pagefree(tpg);
   1296 					if (pagedaemon)
   1297 						uvmexp.pdfreed++;
   1298 				}
   1299 			}
   1300 		}
   1301 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1302 			mutex_exit(&uvm_pageqlock);
   1303 		}
   1304 		if (needs_clean) {
   1305 			modified = true;
   1306 
   1307 			/*
   1308 			 * start the i/o.  if we're traversing by list,
   1309 			 * keep our place in the list with a marker page.
   1310 			 */
   1311 
   1312 			if (by_list) {
   1313 				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
   1314 				    listq.queue);
   1315 			}
   1316 			mutex_exit(slock);
   1317 			error = GOP_WRITE(vp, pgs, npages, flags);
   1318 			mutex_enter(slock);
   1319 			if (by_list) {
   1320 				pg = TAILQ_NEXT(&curmp, listq.queue);
   1321 				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
   1322 			}
   1323 			if (error) {
   1324 				break;
   1325 			}
   1326 			if (by_list) {
   1327 				continue;
   1328 			}
   1329 		}
   1330 
   1331 		/*
   1332 		 * find the next page and continue if there was no error.
   1333 		 */
   1334 
   1335 		if (by_list) {
   1336 			if (nextpg) {
   1337 				pg = nextpg;
   1338 				nextpg = NULL;
   1339 			} else {
   1340 				pg = TAILQ_NEXT(pg, listq.queue);
   1341 			}
   1342 		} else {
   1343 			off += (npages - nback) << PAGE_SHIFT;
   1344 			if (off < endoff) {
   1345 				pg = uvm_pagelookup(uobj, off);
   1346 			}
   1347 		}
   1348 	}
   1349 	if (by_list) {
   1350 		TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
   1351 	}
   1352 
   1353 	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
   1354 	    (vp->v_type != VBLK ||
   1355 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
   1356 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
   1357 	}
   1358 
   1359 	/*
   1360 	 * if we're cleaning and there was nothing to clean,
   1361 	 * take us off the syncer list.  if we started any i/o
   1362 	 * and we're doing sync i/o, wait for all writes to finish.
   1363 	 */
   1364 
   1365 	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
   1366 	    (vp->v_iflag & VI_ONWORKLST) != 0) {
   1367 #if defined(DEBUG)
   1368 		TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
   1369 			if ((pg->flags & PG_CLEAN) == 0) {
   1370 				printf("%s: %p: !CLEAN\n", __func__, pg);
   1371 			}
   1372 			if (pmap_is_modified(pg)) {
   1373 				printf("%s: %p: modified\n", __func__, pg);
   1374 			}
   1375 		}
   1376 #endif /* defined(DEBUG) */
   1377 		vp->v_iflag &= ~VI_WRMAPDIRTY;
   1378 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1379 			vn_syncer_remove_from_worklist(vp);
   1380 	}
   1381 
   1382 #if !defined(DEBUG)
   1383 skip_scan:
   1384 #endif /* !defined(DEBUG) */
   1385 
   1386 	/* Wait for output to complete. */
   1387 	if (!wasclean && !async && vp->v_numoutput != 0) {
   1388 		while (vp->v_numoutput != 0)
   1389 			cv_wait(&vp->v_cv, slock);
   1390 	}
   1391 	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
   1392 	mutex_exit(slock);
   1393 
   1394 	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
   1395 		/*
   1396 		 * in the case of PGO_RECLAIM, ensure to make the vnode clean.
   1397 		 * retrying is not a big deal because, in many cases,
   1398 		 * uobj->uo_npages is already 0 here.
   1399 		 */
   1400 		mutex_enter(slock);
   1401 		goto retry;
   1402 	}
   1403 
   1404 	if (has_trans) {
   1405 		if (need_wapbl)
   1406 			WAPBL_END(vp->v_mount);
   1407 		fstrans_done(vp->v_mount);
   1408 	}
   1409 
   1410 	return (error);
   1411 }
   1412 
   1413 int
   1414 genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1415 {
   1416 	off_t off;
   1417 	vaddr_t kva;
   1418 	size_t len;
   1419 	int error;
   1420 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1421 
   1422 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1423 	    vp, pgs, npages, flags);
   1424 
   1425 	off = pgs[0]->offset;
   1426 	kva = uvm_pagermapin(pgs, npages,
   1427 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1428 	len = npages << PAGE_SHIFT;
   1429 
   1430 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1431 			    uvm_aio_biodone);
   1432 
   1433 	return error;
   1434 }
   1435 
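         /*
          * genfs_gop_write_rwmap: a variant of genfs_gop_write that maps
          * the pages read/write (UVMPAGER_MAPIN_READ in pager-mapin terms)
          * instead of read-only, so that the contents may still be modified
          * (e.g. by the strategy routine) while the write is in flight.
          */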
   1436 int
   1437 genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1438 {
   1439 	off_t off;
   1440 	vaddr_t kva;
   1441 	size_t len;
   1442 	int error;
   1443 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1444 
   1445 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1446 	    vp, pgs, npages, flags);
   1447 
   1448 	off = pgs[0]->offset;
   1449 	kva = uvm_pagermapin(pgs, npages,
   1450 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1451 	len = npages << PAGE_SHIFT;
   1452 
   1453 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1454 			    uvm_aio_biodone);
   1455 
   1456 	return error;
   1457 }
   1458 
   1459 /*
   1460  * Backend routine for doing I/O to vnode pages.  Pages are already locked
   1461  * and mapped into kernel memory.  Here we just look up the underlying
   1462  * device block addresses and call the strategy routine.
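          * Both genfs_gop_write() above and the direct I/O path use this
          * as their common backend.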
   1463  */
   1464 
   1465 static int
   1466 genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
   1467     enum uio_rw rw, void (*iodone)(struct buf *))
   1468 {
   1469 	int s, error;
   1470 	int fs_bshift, dev_bshift;
   1471 	off_t eof, offset, startoffset;
   1472 	size_t bytes, iobytes, skipbytes;
   1473 	struct buf *mbp, *bp;
   1474 	const bool async = (flags & PGO_SYNCIO) == 0;
   1475 	const bool iowrite = rw == UIO_WRITE;
   1476 	const int brw = iowrite ? B_WRITE : B_READ;
   1477 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1478 
   1479 	UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
   1480 	    vp, kva, len, flags);
   1481 
   1482 	KASSERT(vp->v_size <= vp->v_writesize);
   1483 	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
   1484 	if (vp->v_type != VBLK) {
   1485 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1486 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1487 	} else {
   1488 		fs_bshift = DEV_BSHIFT;
   1489 		dev_bshift = DEV_BSHIFT;
   1490 	}
   1491 	error = 0;
   1492 	startoffset = off;
   1493 	bytes = MIN(len, eof - startoffset);
   1494 	skipbytes = 0;
   1495 	KASSERT(bytes != 0);
   1496 
   1497 	if (iowrite) {
   1498 		mutex_enter(&vp->v_interlock);
   1499 		vp->v_numoutput += 2;
   1500 		mutex_exit(&vp->v_interlock);
   1501 	}
   1502 	mbp = getiobuf(vp, true);
   1503 	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
   1504 	    vp, mbp, vp->v_numoutput, bytes);
   1505 	mbp->b_bufsize = len;
   1506 	mbp->b_data = (void *)kva;
   1507 	mbp->b_resid = mbp->b_bcount = bytes;
   1508 	mbp->b_cflags = BC_BUSY | BC_AGE;
   1509 	if (async) {
   1510 		mbp->b_flags = brw | B_ASYNC;
   1511 		mbp->b_iodone = iodone;
   1512 	} else {
   1513 		mbp->b_flags = brw;
   1514 		mbp->b_iodone = NULL;
   1515 	}
   1516 	if (curlwp == uvm.pagedaemon_lwp)
   1517 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
   1518 	else if (async)
   1519 		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
   1520 	else
   1521 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
   1522 
   1523 	bp = NULL;
   1524 	for (offset = startoffset;
   1525 	    bytes > 0;
   1526 	    offset += iobytes, bytes -= iobytes) {
   1527 		int run;
   1528 		daddr_t lbn, blkno;
   1529 		struct vnode *devvp;
   1530 
   1531 		/*
   1532 		 * bmap the file to find out the blkno to read from and
   1533 		 * how much we can read in one i/o.  if bmap returns an error,
   1534 		 * skip the rest of the top-level i/o.
   1535 		 */
   1536 
   1537 		lbn = offset >> fs_bshift;
   1538 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1539 		if (error) {
   1540 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
   1541 			    lbn,error,0,0);
   1542 			skipbytes += bytes;
   1543 			bytes = 0;
   1544 			goto loopdone;
   1545 		}
   1546 
   1547 		/*
   1548 		 * see how many pages can be read with this i/o.
   1549 		 * reduce the i/o size if necessary to avoid
   1550 		 * overwriting pages with valid data.
   1551 		 */
   1552 
   1553 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1554 		    bytes);
   1555 
   1556 		/*
   1557 		 * if this block isn't allocated, zero it instead of
   1558 		 * reading it.  unless we are going to allocate blocks,
   1559 		 * mark the pages we zeroed PG_RDONLY.
   1560 		 */
   1561 
   1562 		if (blkno == (daddr_t)-1) {
   1563 			if (!iowrite) {
   1564 				memset((char *)kva + (offset - startoffset), 0,
   1565 				    iobytes);
   1566 			}
   1567 			skipbytes += iobytes;
   1568 			continue;
   1569 		}
   1570 
   1571 		/*
   1572 		 * allocate a sub-buf for this piece of the i/o
   1573 		 * (or just use mbp if there's only 1 piece),
   1574 		 * and start it going.
   1575 		 */
   1576 
   1577 		if (offset == startoffset && iobytes == bytes) {
   1578 			bp = mbp;
   1579 		} else {
   1580 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
   1581 			    vp, bp, vp->v_numoutput, 0);
   1582 			bp = getiobuf(vp, true);
   1583 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
   1584 		}
   1585 		bp->b_lblkno = 0;
   1586 
   1587 		/* adjust physical blkno for partial blocks */
   1588 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1589 		    dev_bshift);
   1590 
   1591 		UVMHIST_LOG(ubchist,
   1592 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
   1593 		    bp, offset, bp->b_bcount, bp->b_blkno);
   1594 
   1595 		VOP_STRATEGY(devvp, bp);
   1596 	}
   1597 
   1598 loopdone:
   1599 	if (skipbytes) {
   1600 		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
   1601 	}
   1602 	nestiobuf_done(mbp, skipbytes, error);
   1603 	if (async) {
   1604 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1605 		return (0);
   1606 	}
   1607 	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
   1608 	error = biowait(mbp);
   1609 	s = splbio();
   1610 	(*iodone)(mbp);
   1611 	splx(s);
   1612 	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
   1613 	return (error);
   1614 }
   1615 
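         /*
          * compatibility getpages for filesystems which do not support
          * bmap/strategy-based paging: fill the pages with plain VOP_READ
          * calls instead.
          */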
   1616 int
   1617 genfs_compat_getpages(void *v)
   1618 {
   1619 	struct vop_getpages_args /* {
   1620 		struct vnode *a_vp;
   1621 		voff_t a_offset;
   1622 		struct vm_page **a_m;
   1623 		int *a_count;
   1624 		int a_centeridx;
   1625 		vm_prot_t a_access_type;
   1626 		int a_advice;
   1627 		int a_flags;
   1628 	} */ *ap = v;
   1629 
   1630 	off_t origoffset;
   1631 	struct vnode *vp = ap->a_vp;
   1632 	struct uvm_object *uobj = &vp->v_uobj;
   1633 	struct vm_page *pg, **pgs;
   1634 	vaddr_t kva;
   1635 	int i, error, orignpages, npages;
   1636 	struct iovec iov;
   1637 	struct uio uio;
   1638 	kauth_cred_t cred = curlwp->l_cred;
   1639 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1640 
   1641 	error = 0;
   1642 	origoffset = ap->a_offset;
   1643 	orignpages = *ap->a_count;
   1644 	pgs = ap->a_m;
   1645 
   1646 	if (memwrite && (vp->v_iflag & VI_ONWORKLST) == 0) {
   1647 		vn_syncer_add_to_worklist(vp, filedelay);
   1648 	}
   1649 	if (ap->a_flags & PGO_LOCKED) {
   1650 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
    1651 		    UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
   1652 
   1653 		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
   1654 	}
   1655 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1656 		mutex_exit(&uobj->vmobjlock);
   1657 		return (EINVAL);
   1658 	}
   1659 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
   1660 		mutex_exit(&uobj->vmobjlock);
   1661 		return 0;
   1662 	}
   1663 	npages = orignpages;
   1664 	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
   1665 	mutex_exit(&uobj->vmobjlock);
   1666 	kva = uvm_pagermapin(pgs, npages,
   1667 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1668 	for (i = 0; i < npages; i++) {
   1669 		pg = pgs[i];
   1670 		if ((pg->flags & PG_FAKE) == 0) {
   1671 			continue;
   1672 		}
   1673 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   1674 		iov.iov_len = PAGE_SIZE;
   1675 		uio.uio_iov = &iov;
   1676 		uio.uio_iovcnt = 1;
   1677 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   1678 		uio.uio_rw = UIO_READ;
   1679 		uio.uio_resid = PAGE_SIZE;
   1680 		UIO_SETUP_SYSSPACE(&uio);
   1681 		/* XXX vn_lock */
   1682 		error = VOP_READ(vp, &uio, 0, cred);
   1683 		if (error) {
   1684 			break;
   1685 		}
   1686 		if (uio.uio_resid) {
   1687 			memset(iov.iov_base, 0, uio.uio_resid);
   1688 		}
   1689 	}
   1690 	uvm_pagermapout(kva, npages);
   1691 	mutex_enter(&uobj->vmobjlock);
   1692 	mutex_enter(&uvm_pageqlock);
   1693 	for (i = 0; i < npages; i++) {
   1694 		pg = pgs[i];
   1695 		if (error && (pg->flags & PG_FAKE) != 0) {
   1696 			pg->flags |= PG_RELEASED;
   1697 		} else {
   1698 			pmap_clear_modify(pg);
   1699 			uvm_pageactivate(pg);
   1700 		}
   1701 	}
   1702 	if (error) {
   1703 		uvm_page_unbusy(pgs, npages);
   1704 	}
   1705 	mutex_exit(&uvm_pageqlock);
   1706 	mutex_exit(&uobj->vmobjlock);
   1707 	return (error);
   1708 }
   1709 
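         /*
          * genfs_compat_gop_write() implements GOP_WRITE in terms of VOP_WRITE.
          * The pages are pushed out through the vnode, and a buf describing the
          * completed transfer is handed to uvm_aio_aiodone() so the usual pager
          * completion path unbusies the pages.
          */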
   1710 int
   1711 genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1712     int flags)
   1713 {
   1714 	off_t offset;
   1715 	struct iovec iov;
   1716 	struct uio uio;
   1717 	kauth_cred_t cred = curlwp->l_cred;
   1718 	struct buf *bp;
   1719 	vaddr_t kva;
   1720 	int error;
   1721 
   1722 	offset = pgs[0]->offset;
   1723 	kva = uvm_pagermapin(pgs, npages,
   1724 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1725 
   1726 	iov.iov_base = (void *)kva;
   1727 	iov.iov_len = npages << PAGE_SHIFT;
   1728 	uio.uio_iov = &iov;
   1729 	uio.uio_iovcnt = 1;
   1730 	uio.uio_offset = offset;
   1731 	uio.uio_rw = UIO_WRITE;
   1732 	uio.uio_resid = npages << PAGE_SHIFT;
   1733 	UIO_SETUP_SYSSPACE(&uio);
   1734 	/* XXX vn_lock */
   1735 	error = VOP_WRITE(vp, &uio, 0, cred);
   1736 
   1737 	mutex_enter(&vp->v_interlock);
   1738 	vp->v_numoutput++;
   1739 	mutex_exit(&vp->v_interlock);
   1740 
   1741 	bp = getiobuf(vp, true);
   1742 	bp->b_cflags = BC_BUSY | BC_AGE;
   1743 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   1744 	bp->b_data = (char *)kva;
   1745 	bp->b_bcount = npages << PAGE_SHIFT;
   1746 	bp->b_bufsize = npages << PAGE_SHIFT;
   1747 	bp->b_resid = 0;
   1748 	bp->b_error = error;
   1749 	uvm_aio_aiodone(bp);
   1750 	return (error);
   1751 }
   1752 
   1753 /*
    1754  * Process a uio using direct I/O.  If we reach a part of the request
    1755  * which cannot be handled this way (e.g. misaligned or extending past
    1756  * the current EOF), just return; the caller must handle that part of
    1757  * the request using buffered I/O before trying direct I/O again.
   1758  */
   1759 
   1760 void
   1761 genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
   1762 {
   1763 	struct vmspace *vs;
   1764 	struct iovec *iov;
   1765 	vaddr_t va;
   1766 	size_t len;
   1767 	const int mask = DEV_BSIZE - 1;
   1768 	int error;
   1769 	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
   1770 	    (ioflag & IO_JOURNALLOCKED) == 0);
   1771 
   1772 	/*
   1773 	 * We only support direct I/O to user space for now.
   1774 	 */
   1775 
   1776 	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
   1777 		return;
   1778 	}
   1779 
   1780 	/*
   1781 	 * If the vnode is mapped, we would need to get the getpages lock
    1782 	 * to stabilize the bmap, but then we would get into trouble while
   1783 	 * locking the pages if the pages belong to this same vnode (or a
   1784 	 * multi-vnode cascade to the same effect).  Just fall back to
   1785 	 * buffered I/O if the vnode is mapped to avoid this mess.
   1786 	 */
   1787 
   1788 	if (vp->v_vflag & VV_MAPPED) {
   1789 		return;
   1790 	}
   1791 
   1792 	if (need_wapbl) {
   1793 		error = WAPBL_BEGIN(vp->v_mount);
   1794 		if (error)
   1795 			return;
   1796 	}
   1797 
   1798 	/*
   1799 	 * Do as much of the uio as possible with direct I/O.
   1800 	 */
   1801 
   1802 	vs = uio->uio_vmspace;
   1803 	while (uio->uio_resid) {
   1804 		iov = uio->uio_iov;
   1805 		if (iov->iov_len == 0) {
   1806 			uio->uio_iov++;
   1807 			uio->uio_iovcnt--;
   1808 			continue;
   1809 		}
   1810 		va = (vaddr_t)iov->iov_base;
   1811 		len = MIN(iov->iov_len, genfs_maxdio);
   1812 		len &= ~mask;
   1813 
   1814 		/*
   1815 		 * If the next chunk is smaller than DEV_BSIZE or extends past
   1816 		 * the current EOF, then fall back to buffered I/O.
   1817 		 */
   1818 
   1819 		if (len == 0 || uio->uio_offset + len > vp->v_size) {
   1820 			break;
   1821 		}
   1822 
   1823 		/*
   1824 		 * Check alignment.  The file offset must be at least
   1825 		 * sector-aligned.  The exact constraint on memory alignment
   1826 		 * is very hardware-dependent, but requiring sector-aligned
   1827 		 * addresses there too is safe.
   1828 		 */
   1829 
    1830 		if ((uio->uio_offset & mask) != 0 || (va & mask) != 0) {
   1831 			break;
   1832 		}
   1833 		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
   1834 					  uio->uio_rw);
   1835 		if (error) {
   1836 			break;
   1837 		}
   1838 		iov->iov_base = (char *)iov->iov_base + len;
   1839 		iov->iov_len -= len;
   1840 		uio->uio_offset += len;
   1841 		uio->uio_resid -= len;
   1842 	}
   1843 
   1844 	if (need_wapbl)
   1845 		WAPBL_END(vp->v_mount);
   1846 }
   1847 
   1848 /*
   1849  * Iodone routine for direct I/O.  We don't do much here since the request is
   1850  * always synchronous, so the caller will do most of the work after biowait().
   1851  */
   1852 
   1853 static void
   1854 genfs_dio_iodone(struct buf *bp)
   1855 {
   1856 
   1857 	KASSERT((bp->b_flags & B_ASYNC) == 0);
   1858 	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
   1859 		mutex_enter(bp->b_objlock);
   1860 		vwakeup(bp);
   1861 		mutex_exit(bp->b_objlock);
   1862 	}
   1863 	putiobuf(bp);
   1864 }
   1865 
   1866 /*
   1867  * Process one chunk of a direct I/O request.
   1868  */
   1869 
   1870 static int
   1871 genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
   1872     off_t off, enum uio_rw rw)
   1873 {
   1874 	struct vm_map *map;
   1875 	struct pmap *upm, *kpm;
   1876 	size_t klen = round_page(uva + len) - trunc_page(uva);
   1877 	off_t spoff, epoff;
   1878 	vaddr_t kva, puva;
   1879 	paddr_t pa;
   1880 	vm_prot_t prot;
   1881 	int error, rv, poff, koff;
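         	/*
         	 * For writes, free the flushed pages as well: the data goes
         	 * directly to disk, so cached copies would otherwise go stale.
         	 */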
   1882 	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
   1883 		(rw == UIO_WRITE ? PGO_FREE : 0);
   1884 
   1885 	/*
   1886 	 * For writes, verify that this range of the file already has fully
   1887 	 * allocated backing store.  If there are any holes, just punt and
   1888 	 * make the caller take the buffered write path.
   1889 	 */
   1890 
   1891 	if (rw == UIO_WRITE) {
   1892 		daddr_t lbn, elbn, blkno;
   1893 		int bsize, bshift, run;
   1894 
   1895 		bshift = vp->v_mount->mnt_fs_bshift;
   1896 		bsize = 1 << bshift;
   1897 		lbn = off >> bshift;
   1898 		elbn = (off + len + bsize - 1) >> bshift;
   1899 		while (lbn < elbn) {
   1900 			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
   1901 			if (error) {
   1902 				return error;
   1903 			}
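         			/*
         			 * VOP_BMAP() returns -1 for an unallocated
         			 * block, i.e. a hole in the file.
         			 */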
   1904 			if (blkno == (daddr_t)-1) {
   1905 				return ENOSPC;
   1906 			}
   1907 			lbn += 1 + run;
   1908 		}
   1909 	}
   1910 
   1911 	/*
   1912 	 * Flush any cached pages for parts of the file that we're about to
   1913 	 * access.  If we're writing, invalidate pages as well.
   1914 	 */
   1915 
   1916 	spoff = trunc_page(off);
   1917 	epoff = round_page(off + len);
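         	/*
         	 * VOP_PUTPAGES() is entered with v_interlock held and returns
         	 * with it released, so the error path needs no mutex_exit().
         	 */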
   1918 	mutex_enter(&vp->v_interlock);
   1919 	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
   1920 	if (error) {
   1921 		return error;
   1922 	}
   1923 
   1924 	/*
   1925 	 * Wire the user pages and remap them into kernel memory.
   1926 	 */
   1927 
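         	/*
         	 * A file read stores into the user buffer, so its pages must be
         	 * writable; a file write only needs to read them.
         	 */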
   1928 	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
   1929 	error = uvm_vslock(vs, (void *)uva, len, prot);
   1930 	if (error) {
   1931 		return error;
   1932 	}
   1933 
   1934 	map = &vs->vm_map;
   1935 	upm = vm_map_pmap(map);
   1936 	kpm = vm_map_pmap(kernel_map);
   1937 	kva = uvm_km_alloc(kernel_map, klen, 0,
   1938 			   UVM_KMF_VAONLY | UVM_KMF_WAITVA);
   1939 	puva = trunc_page(uva);
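         	/*
         	 * Alias each wired user page at a kernel virtual address so that
         	 * the device I/O can be done entirely with kernel addresses.
         	 */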
   1940 	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
   1941 		rv = pmap_extract(upm, puva + poff, &pa);
   1942 		KASSERT(rv);
   1943 		pmap_enter(kpm, kva + poff, pa, prot, prot | PMAP_WIRED);
   1944 	}
   1945 	pmap_update(kpm);
   1946 
   1947 	/*
   1948 	 * Do the I/O.
   1949 	 */
   1950 
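         	/* kva maps whole pages; restore uva's offset within its page. */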
   1951 	koff = uva - trunc_page(uva);
   1952 	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
   1953 			    genfs_dio_iodone);
   1954 
   1955 	/*
   1956 	 * Tear down the kernel mapping.
   1957 	 */
   1958 
   1959 	pmap_remove(kpm, kva, kva + klen);
   1960 	pmap_update(kpm);
   1961 	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
   1962 
   1963 	/*
   1964 	 * Unwire the user pages.
   1965 	 */
   1966 
   1967 	uvm_vsunlock(vs, (void *)uva, len);
   1968 	return error;
   1969 }
   1970 
   1971