      1 /*	$NetBSD: genfs_io.c,v 1.36.2.6 2010/03/23 01:58:13 uebayasi Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1982, 1986, 1989, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. Neither the name of the University nor the names of its contributors
     16  *    may be used to endorse or promote products derived from this software
     17  *    without specific prior written permission.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  *
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.36.2.6 2010/03/23 01:58:13 uebayasi Exp $");
     35 
     36 #include "opt_device_page.h"
     37 #include "opt_xip.h"
     38 
     39 #include <sys/param.h>
     40 #include <sys/systm.h>
     41 #include <sys/proc.h>
     42 #include <sys/kernel.h>
     43 #include <sys/mount.h>
     44 #include <sys/namei.h>
     45 #include <sys/vnode.h>
     46 #include <sys/fcntl.h>
     47 #include <sys/kmem.h>
     48 #include <sys/poll.h>
     49 #include <sys/mman.h>
     50 #include <sys/file.h>
     51 #include <sys/kauth.h>
     52 #include <sys/fstrans.h>
     53 #include <sys/buf.h>
     54 
     55 #include <miscfs/genfs/genfs.h>
     56 #include <miscfs/genfs/genfs_node.h>
     57 #include <miscfs/specfs/specdev.h>
     58 
     59 #include <uvm/uvm.h>
     60 #include <uvm/uvm_pager.h>
     61 
     62 static int genfs_do_getpages(void *);
     63 #ifdef XIP
     64 static int genfs_do_getpages_xip(void *);
     65 #endif
     66 static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
     67     off_t, enum uio_rw);
     68 static void genfs_dio_iodone(struct buf *);
     69 
     70 static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
     71     void (*)(struct buf *));
     72 static inline void genfs_rel_pages(struct vm_page **, int);
     73 
     74 int genfs_maxdio = MAXPHYS;
     75 
     76 static inline void
     77 genfs_rel_pages(struct vm_page **pgs, int npages)
     78 {
     79 	int i;
     80 
     81 	for (i = 0; i < npages; i++) {
     82 		struct vm_page *pg = pgs[i];
     83 
     84 		if (pg == NULL || pg == PGO_DONTCARE)
     85 			continue;
     86 		if (pg->flags & PG_FAKE) {
     87 			pg->flags |= PG_RELEASED;
     88 		}
     89 	}
     90 	mutex_enter(&uvm_pageqlock);
     91 	uvm_page_unbusy(pgs, npages);
     92 	mutex_exit(&uvm_pageqlock);
     93 }
     94 
     95 /*
     96  * generic VM getpages routine.
     97  * Return PG_BUSY pages for the given range,
     98  * reading from backing store if necessary.
     99  */
    100 
    101 int
    102 genfs_getpages(void *v)
    103 {
    104 #ifdef XIP
    105 	struct vop_getpages_args /* {
    106 		struct vnode *a_vp;
    107 		voff_t a_offset;
    108 		struct vm_page **a_m;
    109 		int *a_count;
    110 		int a_centeridx;
    111 		vm_prot_t a_access_type;
    112 		int a_advice;
    113 		int a_flags;
    114 	} */ * const ap = v;
    115 	struct vnode * const vp = ap->a_vp;
    116 
    117 	/* XXX should be merged into genfs_do_getpages() */
    118 	if ((vp->v_vflag & VV_XIP) != 0)
    119 		return genfs_do_getpages_xip(v);
    120 	else
    121 #endif
    122 		return genfs_do_getpages(v);
    123 }
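        /*
         * Illustrative sketch (not part of this file): a file system
         * typically hooks up the genfs pager by pointing its getpages
         * and putpages operations at the genfs versions in its vnode
         * operations table, e.g.:
         *
         *	const struct vnodeopv_entry_desc example_vnodeop_entries[] = {
         *		...
         *		{ &vop_getpages_desc, genfs_getpages },
         *		{ &vop_putpages_desc, genfs_putpages },
         *		...
         *	};
         *
         * "example_vnodeop_entries" is a hypothetical name; see a real
         * file system such as ffs for an actual table.
         */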
    124 
    125 static int
    126 genfs_do_getpages(void *v)
    127 {
    128 	struct vop_getpages_args /* {
    129 		struct vnode *a_vp;
    130 		voff_t a_offset;
    131 		struct vm_page **a_m;
    132 		int *a_count;
    133 		int a_centeridx;
    134 		vm_prot_t a_access_type;
    135 		int a_advice;
    136 		int a_flags;
    137 	} */ * const ap = v;
    138 
    139 	off_t diskeof, memeof;
    140 	int i, error, npages;
    141 	const int flags = ap->a_flags;
    142 	struct vnode * const vp = ap->a_vp;
    143 	struct genfs_node * const gp = VTOG(vp);
    144 	struct uvm_object * const uobj = &vp->v_uobj;
    145 	kauth_cred_t const cred = curlwp->l_cred;		/* XXXUBC curlwp */
    146 	const bool async = (flags & PGO_SYNCIO) == 0;
    147 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
    148 	bool has_trans = false;
    149 	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
    150 	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
    151 	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
    152 
    153 	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
    154 	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
    155 
    156 	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
    157 	    vp->v_type == VLNK || vp->v_type == VBLK);
    158 
    159 startover:
    160 	error = 0;
    161 	const voff_t origvsize = vp->v_size;
    162 	const off_t origoffset = ap->a_offset;
    163 	const int orignpages = *ap->a_count;
    164 
    165 	GOP_SIZE(vp, origvsize, &diskeof, 0);
    166 	if (flags & PGO_PASTEOF) {
    167 		off_t newsize;
    168 #if defined(DIAGNOSTIC)
    169 		off_t writeeof;
    170 #endif /* defined(DIAGNOSTIC) */
    171 
    172 		newsize = MAX(origvsize,
    173 		    origoffset + (orignpages << PAGE_SHIFT));
    174 		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
    175 #if defined(DIAGNOSTIC)
    176 		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
    177 		if (newsize > round_page(writeeof)) {
    178 			panic("%s: past eof", __func__);
    179 		}
    180 #endif /* defined(DIAGNOSTIC) */
    181 	} else {
    182 		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
    183 	}
    184 	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
    185 	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
    186 	KASSERT(orignpages > 0);
    187 
    188 	/*
    189 	 * Bounds-check the request.
    190 	 */
    191 
    192 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
    193 		if ((flags & PGO_LOCKED) == 0) {
    194 			mutex_exit(&uobj->vmobjlock);
    195 		}
    196 		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
    197 		    origoffset, *ap->a_count, memeof,0);
    198 		error = EINVAL;
    199 		goto out_err;
    200 	}
    201 
    202 	/* uobj is locked */
    203 
    204 	if ((flags & PGO_NOTIMESTAMP) == 0 &&
    205 	    (vp->v_type != VBLK ||
    206 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
    207 		int updflags = 0;
    208 
    209 		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
    210 			updflags = GOP_UPDATE_ACCESSED;
    211 		}
    212 		if (memwrite) {
    213 			updflags |= GOP_UPDATE_MODIFIED;
    214 		}
    215 		if (updflags != 0) {
    216 			GOP_MARKUPDATE(vp, updflags);
    217 		}
    218 	}
    219 
    220 	if (memwrite) {
    221 		gp->g_dirtygen++;
    222 		if ((vp->v_iflag & VI_ONWORKLST) == 0) {
    223 			vn_syncer_add_to_worklist(vp, filedelay);
    224 		}
    225 		if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
    226 			vp->v_iflag |= VI_WRMAPDIRTY;
    227 		}
    228 	}
    229 
    230 	/*
    231 	 * For PGO_LOCKED requests, just return whatever's in memory.
    232 	 */
    233 
    234 	if (flags & PGO_LOCKED) {
    235 		int nfound;
    236 		struct vm_page *pg;
    237 
    238 		npages = *ap->a_count;
    239 #if defined(DEBUG)
    240 		for (i = 0; i < npages; i++) {
    241 			pg = ap->a_m[i];
    242 			KASSERT(pg == NULL || pg == PGO_DONTCARE);
    243 		}
    244 #endif /* defined(DEBUG) */
    245 		nfound = uvn_findpages(uobj, origoffset, &npages,
    246 		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
    247 		KASSERT(npages == *ap->a_count);
    248 		if (nfound == 0) {
    249 			error = EBUSY;
    250 			goto out_err;
    251 		}
    252 		if (!genfs_node_rdtrylock(vp)) {
    253 			genfs_rel_pages(ap->a_m, npages);
    254 
    255 			/*
    256 			 * restore the array.
    257 			 */
    258 
    259 			for (i = 0; i < npages; i++) {
    260 				pg = ap->a_m[i];
    261 
    262 				if (pg != NULL && pg != PGO_DONTCARE) {
    263 					ap->a_m[i] = NULL;
    264 				}
    265 			}
    266 		} else {
    267 			genfs_node_unlock(vp);
    268 		}
    269 		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
    270 		goto out_err;
    271 	}
    272 	mutex_exit(&uobj->vmobjlock);
    273 
    274 	/*
    275 	 * find the requested pages and make some simple checks.
    276 	 * leave space in the page array for a whole block.
    277 	 */
    278 
    279 	const int fs_bshift = (vp->v_type != VBLK) ?
    280 	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
    281 	const int dev_bshift = (vp->v_type != VBLK) ?
    282 	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
    283 	const int fs_bsize = 1 << fs_bshift;
    284 #define	blk_mask	(fs_bsize - 1)
    285 #define	trunc_blk(x)	((x) & ~blk_mask)
    286 #define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)
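        /*
         * Worked example for the macros above, assuming an 8KB file
         * system block (fs_bshift == 13, so blk_mask == 0x1fff):
         *
         *	trunc_blk(0x2345) == 0x2000
         *	round_blk(0x2345) == 0x4000
         */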
    287 
    288 	const int orignmempages = MIN(orignpages,
    289 	    round_page(memeof - origoffset) >> PAGE_SHIFT);
    290 	npages = orignmempages;
    291 	const off_t startoffset = trunc_blk(origoffset);
    292 	const off_t endoffset = MIN(
    293 	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
    294 	    round_page(memeof));
    295 	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
    296 
    297 	const int pgs_size = sizeof(struct vm_page *) *
    298 	    ((endoffset - startoffset) >> PAGE_SHIFT);
    299 	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];
    300 
    301 	if (pgs_size > sizeof(pgs_onstack)) {
    302 		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
    303 		if (pgs == NULL) {
    304 			pgs = pgs_onstack;
    305 			error = ENOMEM;
    306 			goto out_err;
    307 		}
    308 	} else {
    309 		pgs = pgs_onstack;
    310 		(void)memset(pgs, 0, pgs_size);
    311 	}
    312 
    313 	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
    314 	    ridx, npages, startoffset, endoffset);
    315 
    316 	if (!has_trans) {
    317 		fstrans_start(vp->v_mount, FSTRANS_SHARED);
    318 		has_trans = true;
    319 	}
    320 
    321 	/*
    322 	 * hold g_glock to prevent a race with truncate.
    323 	 *
    324 	 * check if our idea of v_size is still valid.
    325 	 */
    326 
    327 	if (blockalloc) {
    328 		rw_enter(&gp->g_glock, RW_WRITER);
    329 	} else {
    330 		rw_enter(&gp->g_glock, RW_READER);
    331 	}
    332 	mutex_enter(&uobj->vmobjlock);
    333 	if (vp->v_size < origvsize) {
    334 		genfs_node_unlock(vp);
    335 		if (pgs != pgs_onstack)
    336 			kmem_free(pgs, pgs_size);
    337 		goto startover;
    338 	}
    339 
    340 	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
    341 	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
    342 		genfs_node_unlock(vp);
    343 		KASSERT(async != 0);
    344 		genfs_rel_pages(&pgs[ridx], orignmempages);
    345 		mutex_exit(&uobj->vmobjlock);
    346 		error = EBUSY;
    347 		goto out_err_free;
    348 	}
    349 
    350 	/*
    351 	 * if the pages are already resident, just return them.
    352 	 */
    353 
    354 	for (i = 0; i < npages; i++) {
    355 		struct vm_page *pg = pgs[ridx + i];
    356 
    357 		if ((pg->flags & PG_FAKE) ||
    358 		    (blockalloc && (pg->flags & PG_RDONLY))) {
    359 			break;
    360 		}
    361 	}
    362 	if (i == npages) {
    363 		genfs_node_unlock(vp);
    364 		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
    365 		npages += ridx;
    366 		goto out;
    367 	}
    368 
    369 	/*
    370 	 * if PGO_OVERWRITE is set, don't bother reading the pages.
    371 	 */
    372 
    373 	if (overwrite) {
    374 		genfs_node_unlock(vp);
    375 		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
    376 
    377 		for (i = 0; i < npages; i++) {
    378 			struct vm_page *pg = pgs[ridx + i];
    379 
    380 			pg->flags &= ~(PG_RDONLY|PG_CLEAN);
    381 		}
    382 		npages += ridx;
    383 		goto out;
    384 	}
    385 
    386 	/*
    387 	 * the page wasn't resident and we're not overwriting,
    388 	 * so we're going to have to do some i/o.
    389 	 * find any additional pages needed to cover the expanded range.
    390 	 */
    391 
    392 	npages = (endoffset - startoffset) >> PAGE_SHIFT;
    393 	if (startoffset != origoffset || npages != orignmempages) {
    394 		int npgs;
    395 
    396 		/*
    397 		 * we need to avoid deadlocks caused by locking
    398 		 * additional pages at lower offsets than pages we
    399 		 * already have locked.  unlock them all and start over.
    400 		 */
    401 
    402 		genfs_rel_pages(&pgs[ridx], orignmempages);
    403 		memset(pgs, 0, pgs_size);
    404 
    405 		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
    406 		    startoffset, endoffset, 0,0);
    407 		npgs = npages;
    408 		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
    409 		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
    410 			genfs_node_unlock(vp);
    411 			KASSERT(async != 0);
    412 			genfs_rel_pages(pgs, npages);
    413 			mutex_exit(&uobj->vmobjlock);
    414 			error = EBUSY;
    415 			goto out_err_free;
    416 		}
    417 	}
    418 
    419 	mutex_exit(&uobj->vmobjlock);
    420 
    421     {
    422 	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
    423 	vaddr_t kva;
    424 	struct buf *bp, *mbp;
    425 	bool sawhole = false;
    426 
    427 	/*
    428 	 * read the desired page(s).
    429 	 */
    430 
    431 	totalbytes = npages << PAGE_SHIFT;
    432 	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
    433 	tailbytes = totalbytes - bytes;
    434 	skipbytes = 0;
    435 
    436 	kva = uvm_pagermapin(pgs, npages,
    437 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
    438 
    439 	mbp = getiobuf(vp, true);
    440 	mbp->b_bufsize = totalbytes;
    441 	mbp->b_data = (void *)kva;
    442 	mbp->b_resid = mbp->b_bcount = bytes;
    443 	mbp->b_cflags = BC_BUSY;
    444 	if (async) {
    445 		mbp->b_flags = B_READ | B_ASYNC;
    446 		mbp->b_iodone = uvm_aio_biodone;
    447 	} else {
    448 		mbp->b_flags = B_READ;
    449 		mbp->b_iodone = NULL;
    450 	}
    451 	if (async)
    452 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
    453 	else
    454 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
    455 
    456 	/*
    457 	 * if EOF is in the middle of the range, zero the part past EOF.
    458 	 * skip over pages which are not PG_FAKE since in that case they have
    459 	 * valid data that we need to preserve.
    460 	 */
    461 
    462 	tailstart = bytes;
    463 	while (tailbytes > 0) {
    464 		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);
    465 
    466 		KASSERT(len <= tailbytes);
    467 		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
    468 			memset((void *)(kva + tailstart), 0, len);
    469 			UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
    470 			    kva, tailstart, len, 0);
    471 		}
    472 		tailstart += len;
    473 		tailbytes -= len;
    474 	}
    475 
    476 	/*
    477 	 * now loop over the pages, reading as needed.
    478 	 */
    479 
    480 	bp = NULL;
    481 	off_t offset;
    482 	for (offset = startoffset;
    483 	    bytes > 0;
    484 	    offset += iobytes, bytes -= iobytes) {
    485 		int run;
    486 		daddr_t lbn, blkno;
    487 		int pidx;
    488 		struct vnode *devvp;
    489 
    490 		/*
    491 		 * skip pages which don't need to be read.
    492 		 */
    493 
    494 		pidx = (offset - startoffset) >> PAGE_SHIFT;
    495 		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
    496 			size_t b;
    497 
    498 			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
    499 			if ((pgs[pidx]->flags & PG_RDONLY)) {
    500 				sawhole = true;
    501 			}
    502 			b = MIN(PAGE_SIZE, bytes);
    503 			offset += b;
    504 			bytes -= b;
    505 			skipbytes += b;
    506 			pidx++;
    507 			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
    508 			    offset, 0,0,0);
    509 			if (bytes == 0) {
    510 				goto loopdone;
    511 			}
    512 		}
    513 
    514 		/*
    515 		 * bmap the file to find out the blkno to read from and
    516 		 * how much we can read in one i/o.  if bmap returns an error,
    517 		 * skip the rest of the top-level i/o.
    518 		 */
    519 
    520 		lbn = offset >> fs_bshift;
    521 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    522 		if (error) {
    523 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
    524 			    lbn,error,0,0);
    525 			skipbytes += bytes;
    526 			bytes = 0;
    527 			goto loopdone;
    528 		}
    529 
    530 		/*
    531 		 * see how many pages can be read with this i/o.
    532 		 * reduce the i/o size if necessary to avoid
    533 		 * overwriting pages with valid data.
    534 		 */
    535 
    536 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
    537 		    bytes);
    538 		if (offset + iobytes > round_page(offset)) {
    539 			int pcount;
    540 
    541 			pcount = 1;
    542 			while (pidx + pcount < npages &&
    543 			    pgs[pidx + pcount]->flags & PG_FAKE) {
    544 				pcount++;
    545 			}
    546 			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
    547 			    (offset - trunc_page(offset)));
    548 		}
    549 
    550 		/*
    551 		 * if this block isn't allocated, zero it instead of
    552 		 * reading it.  unless we are going to allocate blocks,
    553 		 * mark the pages we zeroed PG_RDONLY.
    554 		 */
    555 
    556 		if (blkno == (daddr_t)-1) {
    557 			int holepages = (round_page(offset + iobytes) -
    558 			    trunc_page(offset)) >> PAGE_SHIFT;
    559 			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
    560 
    561 			sawhole = true;
    562 			memset((char *)kva + (offset - startoffset), 0,
    563 			    iobytes);
    564 			skipbytes += iobytes;
    565 
    566 			for (i = 0; i < holepages; i++) {
    567 				if (memwrite) {
    568 					pgs[pidx + i]->flags &= ~PG_CLEAN;
    569 				}
    570 				if (!blockalloc) {
    571 					pgs[pidx + i]->flags |= PG_RDONLY;
    572 				}
    573 			}
    574 			continue;
    575 		}
    576 
    577 		/*
    578 		 * allocate a sub-buf for this piece of the i/o
    579 		 * (or just use mbp if there's only 1 piece),
    580 		 * and start it going.
    581 		 */
    582 
    583 		if (offset == startoffset && iobytes == bytes) {
    584 			bp = mbp;
    585 		} else {
    586 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
    587 			    vp, bp, vp->v_numoutput, 0);
    588 			bp = getiobuf(vp, true);
    589 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
    590 		}
    591 		bp->b_lblkno = 0;
    592 
    593 		/* adjust physical blkno for partial blocks */
    594 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
    595 		    dev_bshift);
    596 
    597 		UVMHIST_LOG(ubchist,
    598 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
    599 		    bp, offset, bp->b_bcount, bp->b_blkno);
    600 
    601 		VOP_STRATEGY(devvp, bp);
    602 	}
    603 
    604 loopdone:
    605 	nestiobuf_done(mbp, skipbytes, error);
    606 	if (async) {
    607 		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
    608 		genfs_node_unlock(vp);
    609 		error = 0;
    610 		goto out_err_free;
    611 	}
    612 	if (bp != NULL) {
    613 		error = biowait(mbp);
    614 	}
    615 
    616 	/* Remove the mapping (make KVA available as soon as possible) */
    617 	uvm_pagermapout(kva, npages);
    618 
    619 	/*
    620 	 * if we encountered a hole then we have to do a little more work.
    621 	 * for read faults, we marked the page PG_RDONLY so that future
    622 	 * write accesses to the page will fault again.
    623 	 * for write faults, we must make sure that the backing store for
    624 	 * the page is completely allocated while the pages are locked.
    625 	 */
    626 
    627 	if (!error && sawhole && blockalloc) {
    628 		/*
    629 		 * XXX: This assumes that we come here only via
    630 		 * the mmio path
    631 		 */
    632 		if (vp->v_mount->mnt_wapbl) {
    633 			error = WAPBL_BEGIN(vp->v_mount);
    634 		}
    635 
    636 		if (!error) {
    637 			error = GOP_ALLOC(vp, startoffset,
    638 			    npages << PAGE_SHIFT, 0, cred);
    639 			if (vp->v_mount->mnt_wapbl) {
    640 				WAPBL_END(vp->v_mount);
    641 			}
    642 		}
    643 
    644 		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
    645 		    startoffset, npages << PAGE_SHIFT, error,0);
    646 		if (!error) {
    647 			for (i = 0; i < npages; i++) {
    648 				struct vm_page *pg = pgs[i];
    649 
    650 				if (pg == NULL) {
    651 					continue;
    652 				}
    653 				pg->flags &= ~(PG_CLEAN|PG_RDONLY);
    654 				UVMHIST_LOG(ubchist, "mark dirty pg %p",
    655 				    pg,0,0,0);
    656 			}
    657 		}
    658 	}
    659 	genfs_node_unlock(vp);
    660 
    661 	putiobuf(mbp);
    662     }
    663 
    664 	mutex_enter(&uobj->vmobjlock);
    665 
    666 	/*
    667 	 * we're almost done!  release the pages...
    668 	 * for errors, we free the pages.
    669 	 * otherwise we activate them and mark them as valid and clean.
    670 	 * also, unbusy pages that were not actually requested.
    671 	 */
    672 
    673 	if (error) {
    674 		for (i = 0; i < npages; i++) {
    675 			struct vm_page *pg = pgs[i];
    676 
    677 			if (pg == NULL) {
    678 				continue;
    679 			}
    680 			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    681 			    pg, pg->flags, 0,0);
    682 			if (pg->flags & PG_FAKE) {
    683 				pg->flags |= PG_RELEASED;
    684 			}
    685 		}
    686 		mutex_enter(&uvm_pageqlock);
    687 		uvm_page_unbusy(pgs, npages);
    688 		mutex_exit(&uvm_pageqlock);
    689 		mutex_exit(&uobj->vmobjlock);
    690 		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
    691 		goto out_err_free;
    692 	}
    693 
    694 out:
    695 	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
    696 	error = 0;
    697 	mutex_enter(&uvm_pageqlock);
    698 	for (i = 0; i < npages; i++) {
    699 		struct vm_page *pg = pgs[i];
    700 		if (pg == NULL) {
    701 			continue;
    702 		}
    703 		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    704 		    pg, pg->flags, 0,0);
    705 		if (pg->flags & PG_FAKE && !overwrite) {
    706 			pg->flags &= ~(PG_FAKE);
    707 			pmap_clear_modify(pgs[i]);
    708 		}
    709 		KASSERT(!memwrite || !blockalloc || (pg->flags & PG_RDONLY) == 0);
    710 		if (i < ridx || i >= ridx + orignmempages || async) {
    711 			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
    712 			    pg, pg->offset,0,0);
    713 			if (pg->flags & PG_WANTED) {
    714 				wakeup(pg);
    715 			}
    716 			if (pg->flags & PG_FAKE) {
    717 				KASSERT(overwrite);
    718 				uvm_pagezero(pg);
    719 			}
    720 			if (pg->flags & PG_RELEASED) {
    721 				uvm_pagefree(pg);
    722 				continue;
    723 			}
    724 			uvm_pageenqueue(pg);
    725 			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
    726 			UVM_PAGE_OWN(pg, NULL);
    727 		}
    728 	}
    729 	mutex_exit(&uvm_pageqlock);
    730 	mutex_exit(&uobj->vmobjlock);
    731 	if (ap->a_m != NULL) {
    732 		memcpy(ap->a_m, &pgs[ridx],
    733 		    orignmempages * sizeof(struct vm_page *));
    734 	}
    735 
    736 out_err_free:
    737 	if (pgs != NULL && pgs != pgs_onstack)
    738 		kmem_free(pgs, pgs_size);
    739 out_err:
    740 	if (has_trans)
    741 		fstrans_done(vp->v_mount);
    742 	return (error);
    743 }
    744 
    745 #ifdef XIP
    746 /* XXX should be merged into genfs_do_getpages() */
    747 static int
    748 genfs_do_getpages_xip(void *v)
    749 {
    750 	struct vop_getpages_args /* {
    751 		struct vnode *a_vp;
    752 		voff_t a_offset;
    753 		struct vm_page **a_m;
    754 		int *a_count;
    755 		int a_centeridx;
    756 		vm_prot_t a_access_type;
    757 		int a_advice;
    758 		int a_flags;
    759 	} */ * const ap = v;
    760 
    761 	struct vnode * const vp = ap->a_vp;
    762 	int *npagesp = ap->a_count;
    763 	const off_t offset = ap->a_offset;
    764 	struct vm_page **pps = ap->a_m;
    765 	struct uvm_object * const uobj = &vp->v_uobj;
    766 	const int flags = ap->a_flags;
    767 
    768 	int error;
    769 	off_t eof, sbkoff, ebkoff, off;
    770 	int npages;
    771 	int fs_bshift, fs_bsize, dev_bshift, dev_bsize;
    772 	int i;
    773 	paddr_t phys_addr;
    774 
    775 	UVMHIST_FUNC("genfs_do_getpages_xip"); UVMHIST_CALLED(ubchist);
    776 
    777 	KASSERT((vp->v_vflag & VV_XIP) != 0);
    778 
    779 	/* XXXUEBS should we care about PGO_LOCKED? */
    780 
    781 	GOP_SIZE(vp, vp->v_size, &eof, GOP_SIZE_MEM);
    782 	npages = MIN(*npagesp, round_page(eof - offset) >> PAGE_SHIFT);
    783 
    784 	fs_bshift = vp->v_mount->mnt_fs_bshift;
    785 	fs_bsize = 1 << fs_bshift;
    786 	dev_bshift = vp->v_mount->mnt_dev_bshift;
    787 	dev_bsize = 1 << dev_bshift;
    788 
    789 	sbkoff = offset & ~(fs_bsize - 1);
    790 	ebkoff = ((offset + PAGE_SIZE * npages) + (fs_bsize - 1)) & ~(fs_bsize - 1);
    791 
    792 	UVMHIST_LOG(ubchist, "xip npages=%d sbkoff=%lx ebkoff=%lx", npages, (long)sbkoff, (long)ebkoff, 0);
    793 
    794 	if ((flags & PGO_LOCKED) == 0)
    795 		mutex_exit(&uobj->vmobjlock);
    796 
    797 	/* XXX optimize */
    798 	off = offset;
    799 	i = 0;
    800 	while (i < npages) {
    801 		daddr_t lbn, blkno;
    802 		int run;
    803 		struct vnode *devvp;
    804 
    805 		lbn = (off & ~(fs_bsize - 1)) >> fs_bshift;
    806 
    807 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    808 		KASSERT(error == 0);
    809 		UVMHIST_LOG(ubchist, "xip VOP_BMAP: lbn=%ld blkno=%ld run=%d", (long)lbn, (long)blkno, run, 0);
    810 
    811 		if (blkno < 0) {
    812 			/* unallocated pages are redirected to the read-only zero-filled page */
    813 			phys_addr = uvm_pageofzero_xip_phys_addr();
    814 		} else {
    815 			/* bus_space_mmap cookie -> paddr_t */
    816 			phys_addr = pmap_phys_address(devvp->v_phys_addr) +
    817 			    (blkno << dev_bshift) +
    818 			    (off - (lbn << fs_bshift));
    819 		}
    820 
    821 		pps[i] = uvm_phys_to_vm_page_device(phys_addr);
    822 
    823 		UVMHIST_LOG(ubchist, "xip pgs %d => phys_addr=0x%lx (%p)",
    824 			i,
    825 			(long)phys_addr,
    826 			pps[i],
    827 			0);
    828 
    829 		off += PAGE_SIZE;
    830 		i++;
    831 	}
    832 
    833 	*npagesp = i;
    834 
    835 	return 0;
    836 }
    837 #endif
    838 
    839 /*
    840  * generic VM putpages routine.
    841  * Write the given range of pages to backing store.
    842  *
    843  * => "offhi == 0" means flush all pages at or after "offlo".
    844  * => object should be locked by caller.  we return with the
    845  *      object unlocked.
    846  * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
    847  *	thus, a caller might want to unlock higher level resources
    848  *	(e.g. vm_map) before calling flush.
    849  * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
    850  * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
    851  * => NOTE: we rely on the fact that the object's memq is a TAILQ and
    852  *	that new pages are inserted on the tail end of the list.   thus,
    853  *	we can make a complete pass through the object in one go by starting
    854  *	at the head and working towards the tail (new pages are put in
    855  *	front of us).
    856  * => NOTE: we are allowed to lock the page queues, so the caller
    857  *	must not be holding the page queue lock.
    858  *
    859  * note on "cleaning" object and PG_BUSY pages:
    860  *	this routine is holding the lock on the object.   the only time
    861  *	that it can run into a PG_BUSY page that it does not own is if
    862  *	some other process has started I/O on the page (e.g. either
    863  *	a pagein, or a pageout).    if the PG_BUSY page is being paged
    864  *	in, then it can not be dirty (!PG_CLEAN) because no one has
    865  *	had a chance to modify it yet.    if the PG_BUSY page is being
    866  *	paged out then it means that someone else has already started
    867  *	cleaning the page for us (how nice!).    in this case, if we
    868  *	have syncio specified, then after we make our pass through the
    869  *	object we need to wait for the other PG_BUSY pages to clear
    870  *	off (i.e. we need to do an iosync).   also note that once a
    871  *	page is PG_BUSY it must stay in its object until it is un-busyed.
    872  *
    873  * note on page traversal:
    874  *	we can traverse the pages in an object either by going down the
    875  *	linked list in "uobj->memq", or we can go over the address range
    876  *	by page doing hash table lookups for each address.    depending
    877  *	on how many pages are in the object it may be cheaper to do one
    878  *	or the other.   we set "by_list" to true if we are using memq.
    879  *	if the cost of a hash lookup was equal to the cost of the list
    880  *	traversal we could compare the number of pages in the start->stop
    881  *	range to the total number of pages in the object.   however, it
    882  *	seems that a hash table lookup is more expensive than the linked
    883  *	list traversal, so we multiply the number of pages in the
    884  *	range by an estimate of the relatively higher cost of the hash lookup.
    885  */
    886 
    887 int
    888 genfs_putpages(void *v)
    889 {
    890 	struct vop_putpages_args /* {
    891 		struct vnode *a_vp;
    892 		voff_t a_offlo;
    893 		voff_t a_offhi;
    894 		int a_flags;
    895 	} */ * const ap = v;
    896 
    897 	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
    898 	    ap->a_flags, NULL);
    899 }
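        /*
         * Illustrative sketch: a caller cleans all of a vnode's pages
         * through this routine via VOP_PUTPAGES, entering with the
         * object locked (we return with it unlocked), e.g.:
         *
         *	mutex_enter(&vp->v_interlock);
         *	error = VOP_PUTPAGES(vp, 0, 0,
         *	    PGO_ALLPAGES | PGO_CLEANIT | PGO_SYNCIO);
         */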
    900 
    901 int
    902 genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
    903     int origflags, struct vm_page **busypg)
    904 {
    905 	struct uvm_object * const uobj = &vp->v_uobj;
    906 	kmutex_t * const slock = &uobj->vmobjlock;
    907 	off_t off;
    908 	/* Even for strange MAXPHYS, the shift rounds down to a page */
    909 #define maxpages (MAXPHYS >> PAGE_SHIFT)
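        	/*
        	 * e.g. with MAXPHYS == 64KB and PAGE_SHIFT == 12 (typical,
        	 * but machine-dependent, values), maxpages is 16.
        	 */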
    910 	int i, error, npages, nback;
    911 	int freeflag;
    912 	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
    913 	bool wasclean, by_list, needs_clean, yld;
    914 	bool async = (origflags & PGO_SYNCIO) == 0;
    915 	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
    916 	struct lwp * const l = curlwp ? curlwp : &lwp0;
    917 	struct genfs_node * const gp = VTOG(vp);
    918 	int flags;
    919 	int dirtygen;
    920 	bool modified;
    921 	bool need_wapbl;
    922 	bool has_trans;
    923 	bool cleanall;
    924 	bool onworklst;
    925 
    926 	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
    927 
    928 	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
    929 	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
    930 	KASSERT(startoff < endoff || endoff == 0);
    931 
    932 	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
    933 	    vp, uobj->uo_npages, startoff, endoff - startoff);
    934 
    935 	has_trans = false;
    936 	need_wapbl = (!pagedaemon && vp->v_mount && vp->v_mount->mnt_wapbl &&
    937 	    (origflags & PGO_JOURNALLOCKED) == 0);
    938 
    939 retry:
    940 	modified = false;
    941 	flags = origflags;
    942 	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
    943 	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
    944 	if (uobj->uo_npages == 0) {
    945 		if (vp->v_iflag & VI_ONWORKLST) {
    946 			vp->v_iflag &= ~VI_WRMAPDIRTY;
    947 			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
    948 				vn_syncer_remove_from_worklist(vp);
    949 		}
    950 		if (has_trans) {
    951 			if (need_wapbl)
    952 				WAPBL_END(vp->v_mount);
    953 			fstrans_done(vp->v_mount);
    954 		}
    955 		mutex_exit(slock);
    956 		return (0);
    957 	}
    958 
    959 	/*
    960 	 * the vnode has pages, set up to process the request.
    961 	 */
    962 
    963 	if (!has_trans && (flags & PGO_CLEANIT) != 0) {
    964 		mutex_exit(slock);
    965 		if (pagedaemon) {
    966 			error = fstrans_start_nowait(vp->v_mount, FSTRANS_LAZY);
    967 			if (error)
    968 				return error;
    969 		} else
    970 			fstrans_start(vp->v_mount, FSTRANS_LAZY);
    971 		if (need_wapbl) {
    972 			error = WAPBL_BEGIN(vp->v_mount);
    973 			if (error) {
    974 				fstrans_done(vp->v_mount);
    975 				return error;
    976 			}
    977 		}
    978 		has_trans = true;
    979 		mutex_enter(slock);
    980 		goto retry;
    981 	}
    982 
    983 	error = 0;
    984 	wasclean = (vp->v_numoutput == 0);
    985 	off = startoff;
    986 	if (endoff == 0 || flags & PGO_ALLPAGES) {
    987 		endoff = trunc_page(LLONG_MAX);
    988 	}
    989 	by_list = (uobj->uo_npages <=
    990 	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_TREE_PENALTY);
    991 
    992 #if !defined(DEBUG)
    993 	/*
    994 	 * if this vnode is known not to have dirty pages,
    995 	 * don't bother to clean it out.
    996 	 */
    997 
    998 	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
    999 		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
   1000 			goto skip_scan;
   1001 		}
   1002 		flags &= ~PGO_CLEANIT;
   1003 	}
   1004 #endif /* !defined(DEBUG) */
   1005 
   1006 	/*
   1007 	 * start the loop.  when scanning by list, hold the last page
   1008 	 * in the list before we start.  pages allocated after we start
   1009 	 * will be added to the end of the list, so we can stop at the
   1010 	 * current last page.
   1011 	 */
   1012 
   1013 	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
   1014 	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
   1015 	    (vp->v_iflag & VI_ONWORKLST) != 0;
   1016 	dirtygen = gp->g_dirtygen;
   1017 	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
   1018 	if (by_list) {
   1019 		curmp.uobject = uobj;
   1020 		curmp.offset = (voff_t)-1;
   1021 		curmp.flags = PG_BUSY;
   1022 		endmp.uobject = uobj;
   1023 		endmp.offset = (voff_t)-1;
   1024 		endmp.flags = PG_BUSY;
   1025 		pg = TAILQ_FIRST(&uobj->memq);
   1026 		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq.queue);
   1027 	} else {
   1028 		pg = uvm_pagelookup(uobj, off);
   1029 	}
   1030 	nextpg = NULL;
   1031 	while (by_list || off < endoff) {
   1032 
   1033 		/*
   1034 		 * if the current page is not interesting, move on to the next.
   1035 		 */
   1036 
   1037 		KASSERT(pg == NULL || pg->uobject == uobj);
   1038 		KASSERT(pg == NULL ||
   1039 		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
   1040 		    (pg->flags & PG_BUSY) != 0);
   1041 		if (by_list) {
   1042 			if (pg == &endmp) {
   1043 				break;
   1044 			}
   1045 			if (pg->offset < startoff || pg->offset >= endoff ||
   1046 			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1047 				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1048 					wasclean = false;
   1049 				}
   1050 				pg = TAILQ_NEXT(pg, listq.queue);
   1051 				continue;
   1052 			}
   1053 			off = pg->offset;
   1054 		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1055 			if (pg != NULL) {
   1056 				wasclean = false;
   1057 			}
   1058 			off += PAGE_SIZE;
   1059 			if (off < endoff) {
   1060 				pg = uvm_pagelookup(uobj, off);
   1061 			}
   1062 			continue;
   1063 		}
   1064 
   1065 		/*
   1066 		 * if the current page needs to be cleaned and it's busy,
   1067 		 * wait for it to become unbusy.
   1068 		 */
   1069 
   1070 		yld = (l->l_cpu->ci_schedstate.spc_flags &
   1071 		    SPCF_SHOULDYIELD) && !pagedaemon;
   1072 		if (pg->flags & PG_BUSY || yld) {
   1073 			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
   1074 			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
   1075 				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
   1076 				error = EDEADLK;
   1077 				if (busypg != NULL)
   1078 					*busypg = pg;
   1079 				break;
   1080 			}
   1081 			if (pagedaemon) {
   1082 				/*
   1083 				 * someone has taken the page while we
   1084 				 * dropped the lock for fstrans_start.
   1085 				 */
   1086 				break;
   1087 			}
   1088 			if (by_list) {
   1089 				TAILQ_INSERT_BEFORE(pg, &curmp, listq.queue);
   1090 				UVMHIST_LOG(ubchist, "curmp next %p",
   1091 				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
   1092 			}
   1093 			if (yld) {
   1094 				mutex_exit(slock);
   1095 				preempt();
   1096 				mutex_enter(slock);
   1097 			} else {
   1098 				pg->flags |= PG_WANTED;
   1099 				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
   1100 				mutex_enter(slock);
   1101 			}
   1102 			if (by_list) {
   1103 				UVMHIST_LOG(ubchist, "after next %p",
   1104 				    TAILQ_NEXT(&curmp, listq.queue), 0,0,0);
   1105 				pg = TAILQ_NEXT(&curmp, listq.queue);
   1106 				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
   1107 			} else {
   1108 				pg = uvm_pagelookup(uobj, off);
   1109 			}
   1110 			continue;
   1111 		}
   1112 
   1113 		/*
   1114 		 * if we're freeing, remove all mappings of the page now.
   1115 		 * if we're cleaning, check if the page needs to be cleaned.
   1116 		 */
   1117 
   1118 		if (flags & PGO_FREE) {
   1119 			pmap_page_protect(pg, VM_PROT_NONE);
   1120 		} else if (flags & PGO_CLEANIT) {
   1121 
   1122 			/*
   1123 			 * if we still have some hope to pull this vnode off
   1124 			 * from the syncer queue, write-protect the page.
   1125 			 */
   1126 
   1127 			if (cleanall && wasclean &&
   1128 			    gp->g_dirtygen == dirtygen) {
   1129 
   1130 				/*
   1131 				 * uobj pages get wired only by uvm_fault
   1132 				 * where uobj is locked.
   1133 				 */
   1134 
   1135 				if (pg->wire_count == 0) {
   1136 					pmap_page_protect(pg,
   1137 					    VM_PROT_READ|VM_PROT_EXECUTE);
   1138 				} else {
   1139 					cleanall = false;
   1140 				}
   1141 			}
   1142 		}
   1143 
   1144 		if (flags & PGO_CLEANIT) {
   1145 			needs_clean = pmap_clear_modify(pg) ||
   1146 			    (pg->flags & PG_CLEAN) == 0;
   1147 			pg->flags |= PG_CLEAN;
   1148 		} else {
   1149 			needs_clean = false;
   1150 		}
   1151 
   1152 		/*
   1153 		 * if we're cleaning, build a cluster.
   1154 		 * the cluster will consist of pages which are currently dirty,
   1155 		 * but they will be returned to us marked clean.
   1156 		 * if not cleaning, just operate on the one page.
   1157 		 */
   1158 
   1159 		if (needs_clean) {
   1160 			KDASSERT((vp->v_iflag & VI_ONWORKLST));
   1161 			wasclean = false;
   1162 			memset(pgs, 0, sizeof(pgs));
   1163 			pg->flags |= PG_BUSY;
   1164 			UVM_PAGE_OWN(pg, "genfs_putpages");
   1165 
   1166 			/*
   1167 			 * first look backward.
   1168 			 */
   1169 
   1170 			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
   1171 			nback = npages;
   1172 			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
   1173 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
   1174 			if (nback) {
   1175 				memmove(&pgs[0], &pgs[npages - nback],
   1176 				    nback * sizeof(pgs[0]));
   1177 				if (npages - nback < nback)
   1178 					memset(&pgs[nback], 0,
   1179 					    (npages - nback) * sizeof(pgs[0]));
   1180 				else
   1181 					memset(&pgs[npages - nback], 0,
   1182 					    nback * sizeof(pgs[0]));
   1183 			}
   1184 
   1185 			/*
   1186 			 * then plug in our page of interest.
   1187 			 */
   1188 
   1189 			pgs[nback] = pg;
   1190 
   1191 			/*
   1192 			 * then look forward to fill in the remaining space in
   1193 			 * the array of pages.
   1194 			 */
   1195 
   1196 			npages = maxpages - nback - 1;
   1197 			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
   1198 			    &pgs[nback + 1],
   1199 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
   1200 			npages += nback + 1;
   1201 		} else {
   1202 			pgs[0] = pg;
   1203 			npages = 1;
   1204 			nback = 0;
   1205 		}
   1206 
   1207 		/*
   1208 		 * apply FREE or DEACTIVATE options if requested.
   1209 		 */
   1210 
   1211 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1212 			mutex_enter(&uvm_pageqlock);
   1213 		}
   1214 		for (i = 0; i < npages; i++) {
   1215 			tpg = pgs[i];
   1216 			KASSERT(tpg->uobject == uobj);
   1217 			if (by_list && tpg == TAILQ_NEXT(pg, listq.queue))
   1218 				pg = tpg;
   1219 			if (tpg->offset < startoff || tpg->offset >= endoff)
   1220 				continue;
   1221 			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
   1222 				uvm_pagedeactivate(tpg);
   1223 			} else if (flags & PGO_FREE) {
   1224 				pmap_page_protect(tpg, VM_PROT_NONE);
   1225 				if (tpg->flags & PG_BUSY) {
   1226 					tpg->flags |= freeflag;
   1227 					if (pagedaemon) {
   1228 						uvm_pageout_start(1);
   1229 						uvm_pagedequeue(tpg);
   1230 					}
   1231 				} else {
   1232 
   1233 					/*
   1234 					 * ``page is not busy''
   1235 					 * implies that npages is 1
   1236 					 * and needs_clean is false.
   1237 					 */
   1238 
   1239 					nextpg = TAILQ_NEXT(tpg, listq.queue);
   1240 					uvm_pagefree(tpg);
   1241 					if (pagedaemon)
   1242 						uvmexp.pdfreed++;
   1243 				}
   1244 			}
   1245 		}
   1246 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1247 			mutex_exit(&uvm_pageqlock);
   1248 		}
   1249 		if (needs_clean) {
   1250 			modified = true;
   1251 
   1252 			/*
   1253 			 * start the i/o.  if we're traversing by list,
   1254 			 * keep our place in the list with a marker page.
   1255 			 */
   1256 
   1257 			if (by_list) {
   1258 				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
   1259 				    listq.queue);
   1260 			}
   1261 			mutex_exit(slock);
   1262 			error = GOP_WRITE(vp, pgs, npages, flags);
   1263 			mutex_enter(slock);
   1264 			if (by_list) {
   1265 				pg = TAILQ_NEXT(&curmp, listq.queue);
   1266 				TAILQ_REMOVE(&uobj->memq, &curmp, listq.queue);
   1267 			}
   1268 			if (error) {
   1269 				break;
   1270 			}
   1271 			if (by_list) {
   1272 				continue;
   1273 			}
   1274 		}
   1275 
   1276 		/*
   1277 		 * find the next page and continue if there was no error.
   1278 		 */
   1279 
   1280 		if (by_list) {
   1281 			if (nextpg) {
   1282 				pg = nextpg;
   1283 				nextpg = NULL;
   1284 			} else {
   1285 				pg = TAILQ_NEXT(pg, listq.queue);
   1286 			}
   1287 		} else {
   1288 			off += (npages - nback) << PAGE_SHIFT;
   1289 			if (off < endoff) {
   1290 				pg = uvm_pagelookup(uobj, off);
   1291 			}
   1292 		}
   1293 	}
   1294 	if (by_list) {
   1295 		TAILQ_REMOVE(&uobj->memq, &endmp, listq.queue);
   1296 	}
   1297 
   1298 	if (modified && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
   1299 	    (vp->v_type != VBLK ||
   1300 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
   1301 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
   1302 	}
   1303 
   1304 	/*
   1305 	 * if we're cleaning and there was nothing to clean,
   1306 	 * take us off the syncer list.  if we started any i/o
   1307 	 * and we're doing sync i/o, wait for all writes to finish.
   1308 	 */
   1309 
   1310 	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
   1311 	    (vp->v_iflag & VI_ONWORKLST) != 0) {
   1312 #if defined(DEBUG)
   1313 		TAILQ_FOREACH(pg, &uobj->memq, listq.queue) {
   1314 			if ((pg->flags & PG_CLEAN) == 0) {
   1315 				printf("%s: %p: !CLEAN\n", __func__, pg);
   1316 			}
   1317 			if (pmap_is_modified(pg)) {
   1318 				printf("%s: %p: modified\n", __func__, pg);
   1319 			}
   1320 		}
   1321 #endif /* defined(DEBUG) */
   1322 		vp->v_iflag &= ~VI_WRMAPDIRTY;
   1323 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1324 			vn_syncer_remove_from_worklist(vp);
   1325 	}
   1326 
   1327 #if !defined(DEBUG)
   1328 skip_scan:
   1329 #endif /* !defined(DEBUG) */
   1330 
   1331 	/* Wait for output to complete. */
   1332 	if (!wasclean && !async && vp->v_numoutput != 0) {
   1333 		while (vp->v_numoutput != 0)
   1334 			cv_wait(&vp->v_cv, slock);
   1335 	}
   1336 	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
   1337 	mutex_exit(slock);
   1338 
   1339 	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
   1340 		/*
   1341 		 * in the case of PGO_RECLAIM, ensure to make the vnode clean.
   1342 		 * retrying is not a big deal because, in many cases,
   1343 		 * uobj->uo_npages is already 0 here.
   1344 		 */
   1345 		mutex_enter(slock);
   1346 		goto retry;
   1347 	}
   1348 
   1349 	if (has_trans) {
   1350 		if (need_wapbl)
   1351 			WAPBL_END(vp->v_mount);
   1352 		fstrans_done(vp->v_mount);
   1353 	}
   1354 
   1355 	return (error);
   1356 }
   1357 
   1358 int
   1359 genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1360 {
   1361 	off_t off;
   1362 	vaddr_t kva;
   1363 	size_t len;
   1364 	int error;
   1365 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1366 
   1367 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1368 	    vp, pgs, npages, flags);
   1369 
   1370 	off = pgs[0]->offset;
   1371 	kva = uvm_pagermapin(pgs, npages,
   1372 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1373 	len = npages << PAGE_SHIFT;
   1374 
   1375 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1376 			    uvm_aio_biodone);
   1377 
   1378 	return error;
   1379 }
   1380 
   1381 int
   1382 genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1383 {
   1384 	off_t off;
   1385 	vaddr_t kva;
   1386 	size_t len;
   1387 	int error;
   1388 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1389 
   1390 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1391 	    vp, pgs, npages, flags);
   1392 
   1393 	off = pgs[0]->offset;
   1394 	kva = uvm_pagermapin(pgs, npages,
   1395 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1396 	len = npages << PAGE_SHIFT;
   1397 
   1398 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1399 			    uvm_aio_biodone);
   1400 
   1401 	return error;
   1402 }
   1403 
   1404 /*
   1405  * Backend routine for doing I/O to vnode pages.  Pages are already locked
   1406  * and mapped into kernel memory.  Here we just look up the underlying
   1407  * device block addresses and call the strategy routine.
   1408  */
   1409 
   1410 static int
   1411 genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
   1412     enum uio_rw rw, void (*iodone)(struct buf *))
   1413 {
   1414 	int s, error;
   1415 	int fs_bshift, dev_bshift;
   1416 	off_t eof, offset, startoffset;
   1417 	size_t bytes, iobytes, skipbytes;
   1418 	struct buf *mbp, *bp;
   1419 	const bool async = (flags & PGO_SYNCIO) == 0;
   1420 	const bool iowrite = rw == UIO_WRITE;
   1421 	const int brw = iowrite ? B_WRITE : B_READ;
   1422 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1423 
   1424 	UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
   1425 	    vp, kva, len, flags);
   1426 
   1427 	KASSERT(vp->v_size <= vp->v_writesize);
   1428 	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
   1429 	if (vp->v_type != VBLK) {
   1430 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1431 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1432 	} else {
   1433 		fs_bshift = DEV_BSHIFT;
   1434 		dev_bshift = DEV_BSHIFT;
   1435 	}
   1436 	error = 0;
   1437 	startoffset = off;
   1438 	bytes = MIN(len, eof - startoffset);
   1439 	skipbytes = 0;
   1440 	KASSERT(bytes != 0);
   1441 
   1442 	if (iowrite) {
   1443 		mutex_enter(&vp->v_interlock);
   1444 		vp->v_numoutput += 2;
   1445 		mutex_exit(&vp->v_interlock);
   1446 	}
   1447 	mbp = getiobuf(vp, true);
   1448 	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
   1449 	    vp, mbp, vp->v_numoutput, bytes);
   1450 	mbp->b_bufsize = len;
   1451 	mbp->b_data = (void *)kva;
   1452 	mbp->b_resid = mbp->b_bcount = bytes;
   1453 	mbp->b_cflags = BC_BUSY | BC_AGE;
   1454 	if (async) {
   1455 		mbp->b_flags = brw | B_ASYNC;
   1456 		mbp->b_iodone = iodone;
   1457 	} else {
   1458 		mbp->b_flags = brw;
   1459 		mbp->b_iodone = NULL;
   1460 	}
   1461 	if (curlwp == uvm.pagedaemon_lwp)
   1462 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
   1463 	else if (async)
   1464 		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
   1465 	else
   1466 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
   1467 
   1468 	bp = NULL;
   1469 	for (offset = startoffset;
   1470 	    bytes > 0;
   1471 	    offset += iobytes, bytes -= iobytes) {
   1472 		int run;
   1473 		daddr_t lbn, blkno;
   1474 		struct vnode *devvp;
   1475 
   1476 		/*
   1477 		 * bmap the file to find out the blkno to read from and
   1478 		 * how much we can read in one i/o.  if bmap returns an error,
   1479 		 * skip the rest of the top-level i/o.
   1480 		 */
   1481 
   1482 		lbn = offset >> fs_bshift;
   1483 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1484 		if (error) {
   1485 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
   1486 			    lbn,error,0,0);
   1487 			skipbytes += bytes;
   1488 			bytes = 0;
   1489 			goto loopdone;
   1490 		}
   1491 
   1492 		/*
   1493 		 * see how much we can transfer with this i/o;
   1494 		 * it is limited by the remaining contiguous run
   1495 		 * of blocks returned by bmap above.
   1496 		 */
   1497 
   1498 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1499 		    bytes);
   1500 
   1501 		/*
   1502 		 * if this block isn't allocated, skip it (for writes)
   1503 		 * or zero the corresponding memory (for reads)
   1504 		 * instead of doing any i/o.
   1505 		 */
   1506 
   1507 		if (blkno == (daddr_t)-1) {
   1508 			if (!iowrite) {
   1509 				memset((char *)kva + (offset - startoffset), 0,
   1510 				    iobytes);
   1511 			}
   1512 			skipbytes += iobytes;
   1513 			continue;
   1514 		}
   1515 
   1516 		/*
   1517 		 * allocate a sub-buf for this piece of the i/o
   1518 		 * (or just use mbp if there's only 1 piece),
   1519 		 * and start it going.
   1520 		 */
   1521 
   1522 		if (offset == startoffset && iobytes == bytes) {
   1523 			bp = mbp;
   1524 		} else {
   1525 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
   1526 			    vp, bp, vp->v_numoutput, 0);
   1527 			bp = getiobuf(vp, true);
   1528 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
   1529 		}
   1530 		bp->b_lblkno = 0;
   1531 
   1532 		/* adjust physical blkno for partial blocks */
   1533 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1534 		    dev_bshift);
   1535 
   1536 		UVMHIST_LOG(ubchist,
   1537 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
   1538 		    bp, offset, bp->b_bcount, bp->b_blkno);
   1539 
   1540 		VOP_STRATEGY(devvp, bp);
   1541 	}
   1542 
   1543 loopdone:
   1544 	if (skipbytes) {
   1545 		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
   1546 	}
   1547 	nestiobuf_done(mbp, skipbytes, error);
   1548 	if (async) {
   1549 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1550 		return (0);
   1551 	}
   1552 	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
   1553 	error = biowait(mbp);
   1554 	s = splbio();
   1555 	(*iodone)(mbp);
   1556 	splx(s);
   1557 	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
   1558 	return (error);
   1559 }
   1560 
   1561 int
   1562 genfs_compat_getpages(void *v)
   1563 {
   1564 	struct vop_getpages_args /* {
   1565 		struct vnode *a_vp;
   1566 		voff_t a_offset;
   1567 		struct vm_page **a_m;
   1568 		int *a_count;
   1569 		int a_centeridx;
   1570 		vm_prot_t a_access_type;
   1571 		int a_advice;
   1572 		int a_flags;
   1573 	} */ *ap = v;
   1574 
   1575 	off_t origoffset;
   1576 	struct vnode *vp = ap->a_vp;
   1577 	struct uvm_object *uobj = &vp->v_uobj;
   1578 	struct vm_page *pg, **pgs;
   1579 	vaddr_t kva;
   1580 	int i, error, orignpages, npages;
   1581 	struct iovec iov;
   1582 	struct uio uio;
   1583 	kauth_cred_t cred = curlwp->l_cred;
   1584 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1585 
   1586 	error = 0;
   1587 	origoffset = ap->a_offset;
   1588 	orignpages = *ap->a_count;
   1589 	pgs = ap->a_m;
   1590 
   1591 	if (memwrite && (vp->v_iflag & VI_ONWORKLST) == 0) {
   1592 		vn_syncer_add_to_worklist(vp, filedelay);
   1593 	}
   1594 	if (ap->a_flags & PGO_LOCKED) {
   1595 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
   1596 		    UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
   1597 
   1598 		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
   1599 	}
   1600 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1601 		mutex_exit(&uobj->vmobjlock);
   1602 		return (EINVAL);
   1603 	}
   1604 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
   1605 		mutex_exit(&uobj->vmobjlock);
   1606 		return 0;
   1607 	}
   1608 	npages = orignpages;
   1609 	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
   1610 	mutex_exit(&uobj->vmobjlock);
   1611 	kva = uvm_pagermapin(pgs, npages,
   1612 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1613 	for (i = 0; i < npages; i++) {
   1614 		pg = pgs[i];
   1615 		if ((pg->flags & PG_FAKE) == 0) {
   1616 			continue;
   1617 		}
   1618 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   1619 		iov.iov_len = PAGE_SIZE;
   1620 		uio.uio_iov = &iov;
   1621 		uio.uio_iovcnt = 1;
   1622 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   1623 		uio.uio_rw = UIO_READ;
   1624 		uio.uio_resid = PAGE_SIZE;
   1625 		UIO_SETUP_SYSSPACE(&uio);
   1626 		/* XXX vn_lock */
   1627 		error = VOP_READ(vp, &uio, 0, cred);
   1628 		if (error) {
   1629 			break;
   1630 		}
   1631 		if (uio.uio_resid) {
   1632 			memset(iov.iov_base, 0, uio.uio_resid);
   1633 		}
   1634 	}
   1635 	uvm_pagermapout(kva, npages);
   1636 	mutex_enter(&uobj->vmobjlock);
   1637 	mutex_enter(&uvm_pageqlock);
   1638 	for (i = 0; i < npages; i++) {
   1639 		pg = pgs[i];
   1640 		if (error && (pg->flags & PG_FAKE) != 0) {
   1641 			pg->flags |= PG_RELEASED;
   1642 		} else {
   1643 			pmap_clear_modify(pg);
   1644 			uvm_pageactivate(pg);
   1645 		}
   1646 	}
   1647 	if (error) {
   1648 		uvm_page_unbusy(pgs, npages);
   1649 	}
   1650 	mutex_exit(&uvm_pageqlock);
   1651 	mutex_exit(&uobj->vmobjlock);
   1652 	return (error);
   1653 }
   1654 
   1655 int
   1656 genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1657     int flags)
   1658 {
   1659 	off_t offset;
   1660 	struct iovec iov;
   1661 	struct uio uio;
   1662 	kauth_cred_t cred = curlwp->l_cred;
   1663 	struct buf *bp;
   1664 	vaddr_t kva;
   1665 	int error;
   1666 
   1667 	offset = pgs[0]->offset;
   1668 	kva = uvm_pagermapin(pgs, npages,
   1669 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1670 
   1671 	iov.iov_base = (void *)kva;
   1672 	iov.iov_len = npages << PAGE_SHIFT;
   1673 	uio.uio_iov = &iov;
   1674 	uio.uio_iovcnt = 1;
   1675 	uio.uio_offset = offset;
   1676 	uio.uio_rw = UIO_WRITE;
   1677 	uio.uio_resid = npages << PAGE_SHIFT;
   1678 	UIO_SETUP_SYSSPACE(&uio);
   1679 	/* XXX vn_lock */
   1680 	error = VOP_WRITE(vp, &uio, 0, cred);
   1681 
   1682 	mutex_enter(&vp->v_interlock);
   1683 	vp->v_numoutput++;
   1684 	mutex_exit(&vp->v_interlock);
   1685 
   1686 	bp = getiobuf(vp, true);
   1687 	bp->b_cflags = BC_BUSY | BC_AGE;
   1688 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   1689 	bp->b_data = (char *)kva;
   1690 	bp->b_bcount = npages << PAGE_SHIFT;
   1691 	bp->b_bufsize = npages << PAGE_SHIFT;
   1692 	bp->b_resid = 0;
   1693 	bp->b_error = error;
   1694 	uvm_aio_aiodone(bp);
   1695 	return (error);
   1696 }
   1697 
   1698 /*
   1699  * Process a uio using direct I/O.  If we reach a part of the request
   1700  * which cannot be processed in this fashion for some reason, just return.
   1701  * The caller must handle some additional part of the request using
   1702  * buffered I/O before trying direct I/O again.
   1703  */
   1704 
   1705 void
   1706 genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
   1707 {
   1708 	struct vmspace *vs;
   1709 	struct iovec *iov;
   1710 	vaddr_t va;
   1711 	size_t len;
   1712 	const int mask = DEV_BSIZE - 1;
   1713 	int error;
   1714 	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
   1715 	    (ioflag & IO_JOURNALLOCKED) == 0);
   1716 
   1717 	/*
   1718 	 * We only support direct I/O to user space for now.
   1719 	 */
   1720 
   1721 	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
   1722 		return;
   1723 	}
   1724 
   1725 	/*
   1726 	 * If the vnode is mapped, we would need to get the getpages lock
   1727 	 * to stabilize the bmap, but then we would get into trouble while
   1728 	 * locking the pages if the pages belong to this same vnode (or a
   1729 	 * multi-vnode cascade to the same effect).  Just fall back to
   1730 	 * buffered I/O if the vnode is mapped to avoid this mess.
   1731 	 */
   1732 
   1733 	if (vp->v_vflag & VV_MAPPED) {
   1734 		return;
   1735 	}
   1736 
   1737 	if (need_wapbl) {
   1738 		error = WAPBL_BEGIN(vp->v_mount);
   1739 		if (error)
   1740 			return;
   1741 	}
   1742 
   1743 	/*
   1744 	 * Do as much of the uio as possible with direct I/O.
   1745 	 */
   1746 
   1747 	vs = uio->uio_vmspace;
   1748 	while (uio->uio_resid) {
   1749 		iov = uio->uio_iov;
   1750 		if (iov->iov_len == 0) {
   1751 			uio->uio_iov++;
   1752 			uio->uio_iovcnt--;
   1753 			continue;
   1754 		}
   1755 		va = (vaddr_t)iov->iov_base;
   1756 		len = MIN(iov->iov_len, genfs_maxdio);
   1757 		len &= ~mask;
   1758 
   1759 		/*
   1760 		 * If the next chunk is smaller than DEV_BSIZE or extends past
   1761 		 * the current EOF, then fall back to buffered I/O.
   1762 		 */
   1763 
   1764 		if (len == 0 || uio->uio_offset + len > vp->v_size) {
   1765 			break;
   1766 		}
   1767 
   1768 		/*
   1769 		 * Check alignment.  The file offset must be at least
   1770 		 * sector-aligned.  The exact constraint on memory alignment
   1771 		 * is very hardware-dependent, but requiring sector-aligned
   1772 		 * addresses there too is safe.
   1773 		 */
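        		/*
        		 * For example, with DEV_BSIZE == 512 (mask == 0x1ff),
        		 * offset 0x1200 and va 0x1000 pass the check below,
        		 * while offset 0x1100 would fall back to buffered I/O.
        		 */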
   1774 
   1775 		if (uio->uio_offset & mask || va & mask) {
   1776 			break;
   1777 		}
   1778 		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
   1779 					  uio->uio_rw);
   1780 		if (error) {
   1781 			break;
   1782 		}
   1783 		iov->iov_base = (char *)iov->iov_base + len;
   1784 		iov->iov_len -= len;
   1785 		uio->uio_offset += len;
   1786 		uio->uio_resid -= len;
   1787 	}
   1788 
   1789 	if (need_wapbl)
   1790 		WAPBL_END(vp->v_mount);
   1791 }
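        /*
         * Illustrative sketch (hypothetical caller): a file system's
         * read or write path can try direct I/O first and let the
         * buffered path handle whatever remains, e.g.:
         *
         *	if ((ioflag & IO_DIRECT) != 0)
         *		genfs_directio(vp, uio, ioflag);
         *	if (uio->uio_resid == 0)
         *		return 0;
         *	... fall through to the buffered path ...
         */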
   1792 
   1793 /*
   1794  * Iodone routine for direct I/O.  We don't do much here since the request is
   1795  * always synchronous, so the caller will do most of the work after biowait().
   1796  */
   1797 
   1798 static void
   1799 genfs_dio_iodone(struct buf *bp)
   1800 {
   1801 
   1802 	KASSERT((bp->b_flags & B_ASYNC) == 0);
   1803 	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
   1804 		mutex_enter(bp->b_objlock);
   1805 		vwakeup(bp);
   1806 		mutex_exit(bp->b_objlock);
   1807 	}
   1808 	putiobuf(bp);
   1809 }
   1810 
   1811 /*
   1812  * Process one chunk of a direct I/O request.
   1813  */
   1814 
   1815 static int
   1816 genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
   1817     off_t off, enum uio_rw rw)
   1818 {
   1819 	struct vm_map *map;
   1820 	struct pmap *upm, *kpm;
   1821 	size_t klen = round_page(uva + len) - trunc_page(uva);
   1822 	off_t spoff, epoff;
   1823 	vaddr_t kva, puva;
   1824 	paddr_t pa;
   1825 	vm_prot_t prot;
   1826 	int error, rv, poff, koff;
   1827 	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
   1828 		(rw == UIO_WRITE ? PGO_FREE : 0);
   1829 
   1830 	/*
   1831 	 * For writes, verify that this range of the file already has fully
   1832 	 * allocated backing store.  If there are any holes, just punt and
   1833 	 * make the caller take the buffered write path.
   1834 	 */
   1835 
   1836 	if (rw == UIO_WRITE) {
   1837 		daddr_t lbn, elbn, blkno;
   1838 		int bsize, bshift, run;
   1839 
   1840 		bshift = vp->v_mount->mnt_fs_bshift;
   1841 		bsize = 1 << bshift;
   1842 		lbn = off >> bshift;
   1843 		elbn = (off + len + bsize - 1) >> bshift;
   1844 		while (lbn < elbn) {
   1845 			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
   1846 			if (error) {
   1847 				return error;
   1848 			}
   1849 			if (blkno == (daddr_t)-1) {
   1850 				return ENOSPC;
   1851 			}
   1852 			lbn += 1 + run;
   1853 		}
   1854 	}
   1855 
   1856 	/*
   1857 	 * Flush any cached pages for parts of the file that we're about to
   1858 	 * access.  If we're writing, invalidate pages as well.
   1859 	 */
   1860 
   1861 	spoff = trunc_page(off);
   1862 	epoff = round_page(off + len);
   1863 	mutex_enter(&vp->v_interlock);
   1864 	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
   1865 	if (error) {
   1866 		return error;
   1867 	}
   1868 
   1869 	/*
   1870 	 * Wire the user pages and remap them into kernel memory.
   1871 	 */
   1872 
   1873 	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
   1874 	error = uvm_vslock(vs, (void *)uva, len, prot);
   1875 	if (error) {
   1876 		return error;
   1877 	}
   1878 
   1879 	map = &vs->vm_map;
   1880 	upm = vm_map_pmap(map);
   1881 	kpm = vm_map_pmap(kernel_map);
   1882 	kva = uvm_km_alloc(kernel_map, klen, 0,
   1883 			   UVM_KMF_VAONLY | UVM_KMF_WAITVA);
   1884 	puva = trunc_page(uva);
   1885 	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
   1886 		rv = pmap_extract(upm, puva + poff, &pa);
   1887 		KASSERT(rv);
   1888 		pmap_enter(kpm, kva + poff, pa, prot, prot | PMAP_WIRED);
   1889 	}
   1890 	pmap_update(kpm);
   1891 
   1892 	/*
   1893 	 * Do the I/O.
   1894 	 */
   1895 
   1896 	koff = uva - trunc_page(uva);
   1897 	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
   1898 			    genfs_dio_iodone);
   1899 
   1900 	/*
   1901 	 * Tear down the kernel mapping.
   1902 	 */
   1903 
   1904 	pmap_remove(kpm, kva, kva + klen);
   1905 	pmap_update(kpm);
   1906 	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
   1907 
   1908 	/*
   1909 	 * Unwire the user pages.
   1910 	 */
   1911 
   1912 	uvm_vsunlock(vs, (void *)uva, len);
   1913 	return error;
   1914 }
   1915 
   1916