      1 /*	$NetBSD: genfs_io.c,v 1.53.2.19 2014/05/22 11:41:05 yamt Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1982, 1986, 1989, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. Neither the name of the University nor the names of its contributors
     16  *    may be used to endorse or promote products derived from this software
     17  *    without specific prior written permission.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  *
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: genfs_io.c,v 1.53.2.19 2014/05/22 11:41:05 yamt Exp $");
     35 
     36 #include <sys/param.h>
     37 #include <sys/systm.h>
     38 #include <sys/proc.h>
     39 #include <sys/kernel.h>
     40 #include <sys/mount.h>
     41 #include <sys/vnode.h>
     42 #include <sys/kmem.h>
     43 #include <sys/kauth.h>
     44 #include <sys/fstrans.h>
     45 #include <sys/buf.h>
     46 #include <sys/radixtree.h>
     47 
     48 #include <miscfs/genfs/genfs.h>
     49 #include <miscfs/genfs/genfs_node.h>
     50 #include <miscfs/specfs/specdev.h>
     51 #include <miscfs/syncfs/syncfs.h>
     52 
     53 #include <uvm/uvm.h>
     54 #include <uvm/uvm_pager.h>
     55 #include <uvm/uvm_page_array.h>
     56 
     57 static int genfs_do_directio(struct vmspace *, vaddr_t, size_t, struct vnode *,
     58     off_t, enum uio_rw);
     59 static void genfs_dio_iodone(struct buf *);
     60 
     61 static int genfs_do_io(struct vnode *, off_t, vaddr_t, size_t, int, enum uio_rw,
     62     void (*)(struct buf *));
     63 static void genfs_rel_pages(struct vm_page **, unsigned int);
     64 static void genfs_markdirty(struct vnode *);
     65 
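         /*
          * genfs_maxdio caps the size of a single direct i/o transfer;
          * genfs_directio() below clips each chunk to it.
          */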
     66 int genfs_maxdio = MAXPHYS;
     67 
     68 static void
     69 genfs_rel_pages(struct vm_page **pgs, unsigned int npages)
     70 {
     71 	unsigned int i;
     72 
     73 	for (i = 0; i < npages; i++) {
     74 		struct vm_page *pg = pgs[i];
     75 
     76 		if (pg == NULL || pg == PGO_DONTCARE)
     77 			continue;
     78 		KASSERT(uvm_page_locked_p(pg));
     79 		if (pg->flags & PG_FAKE) {
     80 			pg->flags |= PG_RELEASED;
     81 		}
     82 	}
     83 	mutex_enter(&uvm_pageqlock);
     84 	uvm_page_unbusy(pgs, npages);
     85 	mutex_exit(&uvm_pageqlock);
     86 }
     87 
     88 static void
     89 genfs_markdirty(struct vnode *vp)
     90 {
     91 
     92 	KASSERT(mutex_owned(vp->v_interlock));
     93 	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
     94 		vn_syncer_add_to_worklist(vp, filedelay);
     95 	}
     96 	if ((vp->v_iflag & (VI_WRMAP|VI_WRMAPDIRTY)) == VI_WRMAP) {
     97 		vp->v_iflag |= VI_WRMAPDIRTY;
     98 	}
     99 }
    100 
    101 /*
    102  * generic VM getpages routine.
    103  * Return PG_BUSY pages for the given range,
    104  * reading from backing store if necessary.
    105  */
    106 
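         /*
          * A rough usage sketch (an assumed caller, not part of this file):
          * the fault path reaches this routine through VOP_GETPAGES() with
          * the object locked, roughly:
          *
          *	mutex_enter(uobj->vmobjlock);
          *	npages = 1;
          *	error = VOP_GETPAGES(vp, trunc_page(offset), &pg, &npages,
          *	    0, VM_PROT_READ, advice, PGO_SYNCIO);
          *
          * on success the pages come back PG_BUSY and, for requests without
          * PGO_LOCKED, the object lock has been released.
          */
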
    107 int
    108 genfs_getpages(void *v)
    109 {
    110 	struct vop_getpages_args /* {
    111 		struct vnode *a_vp;
    112 		voff_t a_offset;
    113 		struct vm_page **a_m;
    114 		int *a_count;
    115 		int a_centeridx;
    116 		vm_prot_t a_access_type;
    117 		int a_advice;
    118 		int a_flags;
    119 	} */ * const ap = v;
    120 
    121 	off_t diskeof, memeof;
    122 	int i, error, npages;
    123 	const int flags = ap->a_flags;
    124 	struct vnode * const vp = ap->a_vp;
    125 	struct uvm_object * const uobj = &vp->v_uobj;
    126 	kauth_cred_t const cred = curlwp->l_cred;		/* XXXUBC curlwp */
    127 	const bool async = (flags & PGO_SYNCIO) == 0;
    128 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
    129 	const bool overwrite = (flags & PGO_OVERWRITE) != 0;
    130 	const bool blockalloc = memwrite && (flags & PGO_NOBLOCKALLOC) == 0;
    131 	const bool glocked = (flags & PGO_GLOCKHELD) != 0;
    132 	const bool need_wapbl = blockalloc && vp->v_mount->mnt_wapbl;
    133 	bool has_trans_wapbl = false;
    134 	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
    135 
    136 	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
    137 	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
    138 
    139 	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
    140 	    vp->v_type == VLNK || vp->v_type == VBLK);
    141 
    142 startover:
    143 	error = 0;
    144 	const voff_t origvsize = vp->v_size;
    145 	const off_t origoffset = ap->a_offset;
    146 	const int orignpages = *ap->a_count;
    147 
    148 	GOP_SIZE(vp, origvsize, &diskeof, 0);
    149 	if (flags & PGO_PASTEOF) {
    150 		off_t newsize;
    151 #if defined(DIAGNOSTIC)
    152 		off_t writeeof;
    153 #endif /* defined(DIAGNOSTIC) */
    154 
    155 		newsize = MAX(origvsize,
    156 		    origoffset + (orignpages << PAGE_SHIFT));
    157 		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
    158 #if defined(DIAGNOSTIC)
    159 		GOP_SIZE(vp, vp->v_writesize, &writeeof, GOP_SIZE_MEM);
    160 		if (newsize > round_page(writeeof)) {
    161 			panic("%s: past eof: %" PRId64 " vs. %" PRId64,
    162 			    __func__, newsize, round_page(writeeof));
    163 		}
    164 #endif /* defined(DIAGNOSTIC) */
    165 	} else {
    166 		GOP_SIZE(vp, origvsize, &memeof, GOP_SIZE_MEM);
    167 	}
     168 	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
    169 	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
    170 	KASSERT(orignpages > 0);
    171 
    172 	/*
    173 	 * Bounds-check the request.
    174 	 */
    175 
    176 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
    177 		if ((flags & PGO_LOCKED) == 0) {
    178 			mutex_exit(uobj->vmobjlock);
    179 		}
    180 		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
    181 		    origoffset, *ap->a_count, memeof,0);
    182 		error = EINVAL;
    183 		goto out_err;
    184 	}
    185 
    186 	/* uobj is locked */
    187 
    188 	if ((flags & PGO_NOTIMESTAMP) == 0 &&
    189 	    (vp->v_type != VBLK ||
    190 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
    191 		int updflags = 0;
    192 
    193 		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
    194 			updflags = GOP_UPDATE_ACCESSED;
    195 		}
    196 		if (memwrite) {
    197 			updflags |= GOP_UPDATE_MODIFIED;
    198 		}
    199 		if (updflags != 0) {
    200 			GOP_MARKUPDATE(vp, updflags);
    201 		}
    202 	}
    203 
    204 	/*
    205 	 * For PGO_LOCKED requests, just return whatever's in memory.
    206 	 */
    207 
    208 	if (flags & PGO_LOCKED) {
    209 		int nfound;
    210 		struct vm_page *pg;
    211 
    212 		KASSERT(!glocked);
    213 		npages = *ap->a_count;
    214 #if defined(DEBUG)
    215 		for (i = 0; i < npages; i++) {
    216 			pg = ap->a_m[i];
    217 			KASSERT(pg == NULL || pg == PGO_DONTCARE);
    218 		}
    219 #endif /* defined(DEBUG) */
    220 		nfound = uvn_findpages(uobj, origoffset, &npages,
    221 		    ap->a_m, NULL,
    222 		    UFP_NOWAIT|UFP_NOALLOC|(memwrite ? UFP_NORDONLY : 0));
    223 		KASSERT(npages == *ap->a_count);
    224 		if (nfound == 0) {
    225 			error = EBUSY;
    226 			goto out_err;
    227 		}
    228 		/*
    229 		 * lock and unlock g_glock to ensure that no one is truncating
    230 		 * the file behind us.
    231 		 */
    232 		if (!genfs_node_rdtrylock(vp)) {
    233 			genfs_rel_pages(ap->a_m, npages);
    234 
    235 			/*
    236 			 * restore the array.
    237 			 */
    238 
    239 			for (i = 0; i < npages; i++) {
    240 				pg = ap->a_m[i];
    241 
    242 				if (pg != NULL && pg != PGO_DONTCARE) {
    243 					ap->a_m[i] = NULL;
    244 				}
    245 				KASSERT(ap->a_m[i] == NULL ||
    246 				    ap->a_m[i] == PGO_DONTCARE);
    247 			}
    248 		} else {
    249 			genfs_node_unlock(vp);
    250 		}
    251 		error = (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
    252 		if (error == 0 && memwrite) {
    253 			for (i = 0; i < npages; i++) {
    254 				pg = ap->a_m[i];
    255 				if (pg == NULL || pg == PGO_DONTCARE) {
    256 					continue;
    257 				}
    258 				if (uvm_pagegetdirty(pg) ==
    259 				    UVM_PAGE_STATUS_CLEAN) {
    260 					uvm_pagemarkdirty(pg,
    261 					    UVM_PAGE_STATUS_UNKNOWN);
    262 				}
    263 			}
    264 			genfs_markdirty(vp);
    265 		}
    266 		goto out_err;
    267 	}
    268 	mutex_exit(uobj->vmobjlock);
    269 
    270 	/*
    271 	 * find the requested pages and make some simple checks.
    272 	 * leave space in the page array for a whole block.
    273 	 */
    274 
    275 	const int fs_bshift = (vp->v_type != VBLK) ?
    276 	    vp->v_mount->mnt_fs_bshift : DEV_BSHIFT;
    277 	const int dev_bshift = (vp->v_type != VBLK) ?
    278 	    vp->v_mount->mnt_dev_bshift : DEV_BSHIFT;
    279 	const int fs_bsize = 1 << fs_bshift;
    280 #define	blk_mask	(fs_bsize - 1)
    281 #define	trunc_blk(x)	((x) & ~blk_mask)
    282 #define	round_blk(x)	(((x) + blk_mask) & ~blk_mask)
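         /*
          * e.g. with fs_bsize 8192 (blk_mask 0x1fff): trunc_blk(0x2345)
          * is 0x2000 and round_blk(0x2345) is 0x4000.
          */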
    283 
    284 	const int orignmempages = MIN(orignpages,
    285 	    round_page(memeof - origoffset) >> PAGE_SHIFT);
    286 	npages = orignmempages;
    287 	const off_t startoffset = trunc_blk(origoffset);
    288 	const off_t endoffset = MIN(
    289 	    round_page(round_blk(origoffset + (npages << PAGE_SHIFT))),
    290 	    round_page(memeof));
    291 	const int ridx = (origoffset - startoffset) >> PAGE_SHIFT;
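         	/*
          	 * ridx is the index of the first originally-requested page
          	 * within the block-aligned window [startoffset, endoffset).
          	 */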
    292 
    293 	const int pgs_size = sizeof(struct vm_page *) *
    294 	    ((endoffset - startoffset) >> PAGE_SHIFT);
    295 	struct vm_page **pgs, *pgs_onstack[UBC_MAX_PAGES];
    296 
    297 	if (pgs_size > sizeof(pgs_onstack)) {
    298 		pgs = kmem_zalloc(pgs_size, async ? KM_NOSLEEP : KM_SLEEP);
    299 		if (pgs == NULL) {
    300 			pgs = pgs_onstack;
    301 			error = ENOMEM;
    302 			goto out_err;
    303 		}
    304 	} else {
    305 		pgs = pgs_onstack;
    306 		(void)memset(pgs, 0, pgs_size);
    307 	}
    308 
    309 	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
    310 	    ridx, npages, startoffset, endoffset);
    311 
    312 	if (!has_trans_wapbl) {
    313 		fstrans_start(vp->v_mount, FSTRANS_SHARED);
    314 		/*
    315 		 * XXX: This assumes that we come here only via
    316 		 * the mmio path
    317 		 */
    318 		if (need_wapbl) {
    319 			error = WAPBL_BEGIN(vp->v_mount);
    320 			if (error) {
    321 				fstrans_done(vp->v_mount);
    322 				goto out_err_free;
    323 			}
    324 		}
    325 		has_trans_wapbl = true;
    326 	}
    327 
    328 	/*
    329 	 * hold g_glock to prevent a race with truncate.
    330 	 *
    331 	 * check if our idea of v_size is still valid.
    332 	 */
    333 
    334 	KASSERT(!glocked || genfs_node_wrlocked(vp));
    335 	if (!glocked) {
    336 		if (blockalloc) {
    337 			genfs_node_wrlock(vp);
    338 		} else {
    339 			genfs_node_rdlock(vp);
    340 		}
    341 	}
    342 	mutex_enter(uobj->vmobjlock);
    343 	if (vp->v_size < origvsize) {
    344 		if (!glocked) {
    345 			genfs_node_unlock(vp);
    346 		}
    347 		if (pgs != pgs_onstack)
    348 			kmem_free(pgs, pgs_size);
    349 		goto startover;
    350 	}
    351 
    352 	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx], NULL,
    353 	    async ? UFP_NOWAIT : UFP_ALL) != orignmempages) {
    354 		if (!glocked) {
    355 			genfs_node_unlock(vp);
    356 		}
    357 		KASSERT(async != 0);
    358 		genfs_rel_pages(&pgs[ridx], orignmempages);
    359 		mutex_exit(uobj->vmobjlock);
    360 		error = EBUSY;
    361 		goto out_err_free;
    362 	}
    363 
    364 	/*
    365 	 * if PGO_OVERWRITE is set, don't bother reading the pages.
    366 	 */
    367 
    368 	if (overwrite) {
    369 		if (!glocked) {
    370 			genfs_node_unlock(vp);
    371 		}
    372 		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
    373 
    374 		for (i = 0; i < npages; i++) {
    375 			struct vm_page *pg = pgs[ridx + i];
    376 
    377 			/*
     378 			 * it's the caller's responsibility to allocate blocks
    379 			 * beforehand for the overwrite case.
    380 			 */
    381 			pg->flags &= ~PG_RDONLY;
    382 			/*
    383 			 * mark the page DIRTY.
    384 			 * otherwise another thread can do putpages and pull
    385 			 * our vnode from syncer's queue before our caller does
    386 			 * ubc_release.  note that putpages won't see CLEAN
    387 			 * pages even if they are BUSY.
    388 			 */
    389 			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
    390 		}
    391 		npages += ridx;
    392 		goto out;
    393 	}
    394 
    395 	/*
    396 	 * if the pages are already resident, just return them.
    397 	 */
    398 
    399 	for (i = 0; i < npages; i++) {
    400 		struct vm_page *pg = pgs[ridx + i];
    401 
    402 		if ((pg->flags & PG_FAKE) ||
    403 		    (memwrite && (pg->flags & PG_RDONLY) != 0)) {
    404 			break;
    405 		}
    406 	}
    407 	if (i == npages) {
    408 		if (!glocked) {
    409 			genfs_node_unlock(vp);
    410 		}
    411 		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
    412 		npages += ridx;
    413 		goto out;
    414 	}
    415 
    416 	/*
    417 	 * the page wasn't resident and we're not overwriting,
    418 	 * so we're going to have to do some i/o.
    419 	 * find any additional pages needed to cover the expanded range.
    420 	 */
    421 
    422 	npages = (endoffset - startoffset) >> PAGE_SHIFT;
    423 	if (startoffset != origoffset || npages != orignmempages) {
    424 		int npgs;
    425 
    426 		/*
    427 		 * we need to avoid deadlocks caused by locking
    428 		 * additional pages at lower offsets than pages we
    429 		 * already have locked.  unlock them all and start over.
    430 		 */
    431 
    432 		genfs_rel_pages(&pgs[ridx], orignmempages);
    433 		memset(pgs, 0, pgs_size);
    434 
    435 		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
    436 		    startoffset, endoffset, 0,0);
    437 		npgs = npages;
    438 		if (uvn_findpages(uobj, startoffset, &npgs, pgs, NULL,
    439 		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
    440 			if (!glocked) {
    441 				genfs_node_unlock(vp);
    442 			}
    443 			KASSERT(async != 0);
    444 			genfs_rel_pages(pgs, npages);
    445 			mutex_exit(uobj->vmobjlock);
    446 			error = EBUSY;
    447 			goto out_err_free;
    448 		}
    449 	}
    450 
    451 	mutex_exit(uobj->vmobjlock);
    452 
    453     {
    454 	size_t bytes, iobytes, tailstart, tailbytes, totalbytes, skipbytes;
    455 	vaddr_t kva;
    456 	struct buf *bp, *mbp;
    457 	bool sawhole = false;
    458 
    459 	/*
    460 	 * read the desired page(s).
    461 	 */
    462 
    463 	totalbytes = npages << PAGE_SHIFT;
    464 	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
    465 	tailbytes = totalbytes - bytes;
    466 	skipbytes = 0;
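         	/*
          	 * totalbytes covers the whole window; bytes is the portion
          	 * backed by the file on disk, tailbytes the portion past EOF
          	 * (zeroed below), and skipbytes counts i/o we end up skipping.
          	 */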
    467 
    468 	kva = uvm_pagermapin(pgs, npages,
    469 	    UVMPAGER_MAPIN_READ | (async ? 0 : UVMPAGER_MAPIN_WAITOK));
    470 	if (kva == 0) {
    471 		error = EBUSY;
    472 		goto mapin_fail;
    473 	}
    474 
    475 	mbp = getiobuf(vp, true);
    476 	mbp->b_bufsize = totalbytes;
    477 	mbp->b_data = (void *)kva;
    478 	mbp->b_resid = mbp->b_bcount = bytes;
    479 	mbp->b_cflags = BC_BUSY;
    480 	if (async) {
    481 		mbp->b_flags = B_READ | B_ASYNC;
    482 		mbp->b_iodone = uvm_aio_biodone;
    483 	} else {
    484 		mbp->b_flags = B_READ;
    485 		mbp->b_iodone = NULL;
    486 	}
    487 	if (async)
    488 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
    489 	else
    490 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
    491 
    492 	/*
    493 	 * if EOF is in the middle of the range, zero the part past EOF.
    494 	 * skip over pages which are not PG_FAKE since in that case they have
    495 	 * valid data that we need to preserve.
    496 	 */
    497 
    498 	tailstart = bytes;
    499 	while (tailbytes > 0) {
    500 		const int len = PAGE_SIZE - (tailstart & PAGE_MASK);
    501 
    502 		KASSERT(len <= tailbytes);
    503 		if ((pgs[tailstart >> PAGE_SHIFT]->flags & PG_FAKE) != 0) {
    504 			memset((void *)(kva + tailstart), 0, len);
    505 			UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
    506 			    kva, tailstart, len, 0);
    507 		}
    508 		tailstart += len;
    509 		tailbytes -= len;
    510 	}
    511 
    512 	/*
    513 	 * now loop over the pages, reading as needed.
    514 	 */
    515 
    516 	bp = NULL;
    517 	off_t offset;
    518 	for (offset = startoffset;
    519 	    bytes > 0;
    520 	    offset += iobytes, bytes -= iobytes) {
    521 		int run;
    522 		daddr_t lbn, blkno;
    523 		int pidx;
    524 		struct vnode *devvp;
    525 
    526 		/*
    527 		 * skip pages which don't need to be read.
    528 		 */
    529 
    530 		pidx = (offset - startoffset) >> PAGE_SHIFT;
    531 		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
    532 			size_t b;
    533 
    534 			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
    535 			if ((pgs[pidx]->flags & PG_RDONLY)) {
    536 				sawhole = true;
    537 			}
    538 			b = MIN(PAGE_SIZE, bytes);
    539 			offset += b;
    540 			bytes -= b;
    541 			skipbytes += b;
    542 			pidx++;
    543 			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
    544 			    offset, 0,0,0);
    545 			if (bytes == 0) {
    546 				goto loopdone;
    547 			}
    548 		}
    549 
    550 		/*
    551 		 * bmap the file to find out the blkno to read from and
    552 		 * how much we can read in one i/o.  if bmap returns an error,
    553 		 * skip the rest of the top-level i/o.
    554 		 */
    555 
    556 		lbn = offset >> fs_bshift;
    557 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    558 		if (error) {
    559 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
    560 			    lbn,error,0,0);
    561 			skipbytes += bytes;
    562 			bytes = 0;
    563 			goto loopdone;
    564 		}
    565 
    566 		/*
    567 		 * see how many pages can be read with this i/o.
    568 		 * reduce the i/o size if necessary to avoid
    569 		 * overwriting pages with valid data.
    570 		 */
    571 
    572 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
    573 		    bytes);
    574 		if (offset + iobytes > round_page(offset)) {
    575 			int pcount;
    576 
    577 			pcount = 1;
    578 			while (pidx + pcount < npages &&
    579 			    pgs[pidx + pcount]->flags & PG_FAKE) {
    580 				pcount++;
    581 			}
    582 			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
    583 			    (offset - trunc_page(offset)));
    584 		}
    585 
    586 		/*
    587 		 * if this block isn't allocated, zero it instead of
    588 		 * reading it.  unless we are going to allocate blocks,
    589 		 * mark the pages we zeroed PG_RDONLY.
    590 		 */
    591 
    592 		if (blkno == (daddr_t)-1) {
    593 			int holepages = (round_page(offset + iobytes) -
    594 			    trunc_page(offset)) >> PAGE_SHIFT;
    595 			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
    596 
    597 			sawhole = true;
    598 			memset((char *)kva + (offset - startoffset), 0,
    599 			    iobytes);
    600 			skipbytes += iobytes;
    601 
    602 			if (!blockalloc) {
    603 				mutex_enter(uobj->vmobjlock);
    604 				for (i = 0; i < holepages; i++) {
    605 					pgs[pidx + i]->flags |= PG_RDONLY;
    606 				}
    607 				mutex_exit(uobj->vmobjlock);
    608 			}
    609 			continue;
    610 		}
    611 
    612 		/*
    613 		 * allocate a sub-buf for this piece of the i/o
    614 		 * (or just use mbp if there's only 1 piece),
    615 		 * and start it going.
    616 		 */
    617 
    618 		if (offset == startoffset && iobytes == bytes) {
    619 			bp = mbp;
    620 		} else {
    621 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
    622 			    vp, bp, vp->v_numoutput, 0);
    623 			bp = getiobuf(vp, true);
    624 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
    625 		}
    626 		bp->b_lblkno = 0;
    627 
    628 		/* adjust physical blkno for partial blocks */
    629 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
    630 		    dev_bshift);
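         		/*
          		 * e.g. with fs_bshift 13 and dev_bshift 9, offset 0x3200
          		 * lies in lbn 1 (block start 0x2000), so we read from
          		 * blkno + ((0x3200 - 0x2000) >> 9) = blkno + 9 sectors.
          		 */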
    631 
    632 		UVMHIST_LOG(ubchist,
    633 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
    634 		    bp, offset, bp->b_bcount, bp->b_blkno);
    635 
    636 		VOP_STRATEGY(devvp, bp);
    637 	}
    638 
    639 loopdone:
    640 	nestiobuf_done(mbp, skipbytes, error);
    641 	if (async) {
    642 		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
    643 		if (!glocked) {
    644 			genfs_node_unlock(vp);
    645 		}
    646 		error = 0;
    647 		goto out_err_free;
    648 	}
    649 	if (bp != NULL) {
    650 		error = biowait(mbp);
    651 	}
    652 
    653 	/* Remove the mapping (make KVA available as soon as possible) */
    654 	uvm_pagermapout(kva, npages);
    655 
    656 	/*
     657 	 * if we encountered a hole then we have to do a little more work.
    658 	 * if blockalloc is false, we marked the page PG_RDONLY so that future
    659 	 * write accesses to the page will fault again.
    660 	 * if blockalloc is true, we must make sure that the backing store for
    661 	 * the page is completely allocated while the pages are locked.
    662 	 */
    663 
    664 	if (!error && sawhole && blockalloc) {
    665 		error = GOP_ALLOC(vp, startoffset,
    666 		    npages << PAGE_SHIFT, 0, cred);
    667 		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
    668 		    startoffset, npages << PAGE_SHIFT, error,0);
    669 		if (!error) {
    670 			mutex_enter(uobj->vmobjlock);
    671 			for (i = 0; i < npages; i++) {
    672 				struct vm_page *pg = pgs[i];
    673 
    674 				if (pg == NULL) {
    675 					continue;
    676 				}
    677 				pg->flags &= ~PG_RDONLY;
    678 				uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
    679 				UVMHIST_LOG(ubchist, "mark dirty pg %p",
    680 				    pg,0,0,0);
    681 			}
    682 			mutex_exit(uobj->vmobjlock);
    683 		}
    684 	}
    685 
    686 	putiobuf(mbp);
    687     }
    688 
    689 mapin_fail:
    690 	if (!glocked) {
    691 		genfs_node_unlock(vp);
    692 	}
    693 	mutex_enter(uobj->vmobjlock);
    694 
    695 	/*
    696 	 * we're almost done!  release the pages...
    697 	 * for errors, we free the pages.
    698 	 * otherwise we activate them and mark them as valid and clean.
    699 	 * also, unbusy pages that were not actually requested.
    700 	 */
    701 
    702 	if (error) {
    703 		genfs_rel_pages(pgs, npages);
    704 		mutex_exit(uobj->vmobjlock);
    705 		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
    706 		goto out_err_free;
    707 	}
    708 
    709 out:
    710 	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
    711 	error = 0;
    712 	mutex_enter(&uvm_pageqlock);
    713 	for (i = 0; i < npages; i++) {
    714 		struct vm_page *pg = pgs[i];
    715 		if (pg == NULL) {
    716 			continue;
    717 		}
    718 		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    719 		    pg, pg->flags, 0,0);
    720 		if (pg->flags & PG_FAKE && !overwrite) {
    721 			/*
     722 			 * we've read the page's contents from the backing storage.
    723 			 *
    724 			 * for a read fault, we keep them CLEAN.
    725 			 */
    726 			KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
    727 			pg->flags &= ~PG_FAKE;
    728 		}
    729 		KASSERT(!blockalloc || (pg->flags & PG_RDONLY) == 0);
    730 		if (i < ridx || i >= ridx + orignmempages || async) {
    731 			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
    732 			    pg, pg->offset,0,0);
    733 			KASSERT(!overwrite);
    734 			if (pg->flags & PG_WANTED) {
    735 				wakeup(pg);
    736 			}
    737 			if (pg->flags & PG_FAKE && overwrite) {
    738 				uvm_pagezero(pg);
    739 			}
    740 			if (pg->flags & PG_RELEASED) {
    741 				uvm_pagefree(pg);
    742 				continue;
    743 			}
    744 			uvm_pageenqueue(pg);
    745 			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
    746 			UVM_PAGE_OWN(pg, NULL);
    747 		} else if (memwrite && !overwrite &&
    748 		    uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
    749 			/*
    750 			 * for a write fault, start dirtiness tracking of
    751 			 * requested pages.
    752 			 */
    753 			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
    754 		}
    755 	}
    756 	mutex_exit(&uvm_pageqlock);
    757 	if (memwrite) {
    758 		genfs_markdirty(vp);
    759 	}
    760 	mutex_exit(uobj->vmobjlock);
    761 	if (ap->a_m != NULL) {
    762 		memcpy(ap->a_m, &pgs[ridx],
    763 		    orignmempages * sizeof(struct vm_page *));
    764 	}
    765 
    766 out_err_free:
    767 	if (pgs != NULL && pgs != pgs_onstack)
    768 		kmem_free(pgs, pgs_size);
    769 out_err:
    770 	if (has_trans_wapbl) {
    771 		if (need_wapbl)
    772 			WAPBL_END(vp->v_mount);
    773 		fstrans_done(vp->v_mount);
    774 	}
    775 	return error;
    776 }
    777 
    778 /*
    779  * generic VM putpages routine.
    780  * Write the given range of pages to backing store.
    781  *
    782  * => "offhi == 0" means flush all pages at or after "offlo".
    783  * => object should be locked by caller.  we return with the
    784  *      object unlocked.
    785  * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
    786  *	thus, a caller might want to unlock higher level resources
    787  *	(e.g. vm_map) before calling flush.
    788  * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, we will not block
    789  * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
    790  * => NOTE: we are allowed to lock the page queues, so the caller
    791  *	must not be holding the page queue lock.
    792  *
    793  * note on "cleaning" object and PG_BUSY pages:
    794  *	this routine is holding the lock on the object.   the only time
    795  *	that it can run into a PG_BUSY page that it does not own is if
    796  *	some other process has started I/O on the page (e.g. either
    797  *	a pagein, or a pageout).  if the PG_BUSY page is being paged
    798  *	in, then it can not be dirty (!UVM_PAGE_STATUS_CLEAN) because no
     799  * in, then it cannot be dirty (!UVM_PAGE_STATUS_CLEAN) because no
     800  * one has had a chance to modify it yet.  if the PG_BUSY page is
    801  *	cleaning the page for us (how nice!).  in this case, if we
    802  *	have syncio specified, then after we make our pass through the
    803  *	object we need to wait for the other PG_BUSY pages to clear
    804  *	off (i.e. we need to do an iosync).   also note that once a
    805  *	page is PG_BUSY it must stay in its object until it is un-busyed.
    806  */
    807 
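         /*
          * A rough usage sketch (an assumed caller, not part of this file):
          * fsync(2) typically ends up here via VOP_PUTPAGES() with the
          * object locked, roughly:
          *
          *	mutex_enter(vp->v_uobj.vmobjlock);
          *	error = VOP_PUTPAGES(vp, trunc_page(lo), round_page(hi),
          *	    PGO_CLEANIT | PGO_SYNCIO);
          */
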
    808 int
    809 genfs_putpages(void *v)
    810 {
    811 	struct vop_putpages_args /* {
    812 		struct vnode *a_vp;
    813 		voff_t a_offlo;
    814 		voff_t a_offhi;
    815 		int a_flags;
    816 	} */ * const ap = v;
    817 
    818 	return genfs_do_putpages(ap->a_vp, ap->a_offlo, ap->a_offhi,
    819 	    ap->a_flags, NULL);
    820 }
    821 
    822 int
    823 genfs_do_putpages(struct vnode *vp, off_t startoff, off_t endoff,
    824     int origflags, struct vm_page **busypg)
    825 {
    826 	struct uvm_object * const uobj = &vp->v_uobj;
    827 	kmutex_t * const slock = uobj->vmobjlock;
    828 	off_t nextoff;
    829 	/* Even for strange MAXPHYS, the shift rounds down to a page */
    830 #define maxpages (MAXPHYS >> PAGE_SHIFT)
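         /* e.g. MAXPHYS of 64kB with 4kB pages gives maxpages == 16. */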
    831 	unsigned int i;
    832 	unsigned int npages, nback;
    833 	unsigned int freeflag;
    834 	int error;
    835 	struct vm_page *pgs[maxpages], *pg;
    836 	struct uvm_page_array a;
    837 	bool wasclean, needs_clean;
    838 	bool pagedaemon = curlwp == uvm.pagedaemon_lwp;
    839 	struct lwp * const l = curlwp ? curlwp : &lwp0;
    840 	int flags;
    841 	bool written;		/* if we write out any pages */
    842 	bool need_wapbl;
    843 	bool has_trans;
     844 	bool tryclean;		/* try to pull the vnode off the syncer's list */
    845 	bool onworklst;
    846 	const bool integrity_sync =
    847 	    (origflags & (PGO_LAZY|PGO_SYNCIO|PGO_CLEANIT)) ==
    848 	    (PGO_SYNCIO|PGO_CLEANIT);
    849 	const bool dirtyonly = (origflags & (PGO_DEACTIVATE|PGO_FREE)) == 0;
    850 
    851 	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
    852 
    853 	KASSERT(origflags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
    854 	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
    855 	KASSERT(startoff < endoff || endoff == 0);
    856 
    857 	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
    858 	    vp, uobj->uo_npages, startoff, endoff - startoff);
    859 
    860 	has_trans = false;
    861 	need_wapbl = (!pagedaemon && vp->v_mount && vp->v_mount->mnt_wapbl &&
    862 	    (origflags & PGO_JOURNALLOCKED) == 0);
    863 
    864 retry:
    865 	flags = origflags;
    866 	KASSERT((vp->v_iflag & VI_ONWORKLST) != 0 ||
    867 	    (vp->v_iflag & VI_WRMAPDIRTY) == 0);
    868 
    869 	/*
    870 	 * shortcut if we have no pages to process.
    871 	 */
    872 
    873 	if (uobj->uo_npages == 0 || (dirtyonly &&
    874 	    radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
    875 	    UVM_PAGE_DIRTY_TAG))) {
    876 		if (vp->v_iflag & VI_ONWORKLST) {
    877 			vp->v_iflag &= ~VI_WRMAPDIRTY;
    878 			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
    879 				vn_syncer_remove_from_worklist(vp);
    880 		}
    881 		if (has_trans) {
    882 			if (need_wapbl)
    883 				WAPBL_END(vp->v_mount);
    884 			fstrans_done(vp->v_mount);
    885 		}
    886 		mutex_exit(slock);
    887 		return (0);
    888 	}
    889 
    890 	/*
    891 	 * the vnode has pages, set up to process the request.
    892 	 */
    893 
    894 	if (!has_trans && (flags & PGO_CLEANIT) != 0) {
    895 		mutex_exit(slock);
    896 		if (pagedaemon) {
    897 			error = fstrans_start_nowait(vp->v_mount, FSTRANS_LAZY);
    898 			if (error)
    899 				return error;
    900 		} else
    901 			fstrans_start(vp->v_mount, FSTRANS_LAZY);
    902 		if (need_wapbl) {
    903 			error = WAPBL_BEGIN(vp->v_mount);
    904 			if (error) {
    905 				fstrans_done(vp->v_mount);
    906 				return error;
    907 			}
    908 		}
    909 		has_trans = true;
    910 		mutex_enter(slock);
    911 		goto retry;
    912 	}
    913 
    914 	error = 0;
    915 	wasclean = (vp->v_numoutput == 0);
    916 
    917 	/*
    918 	 * if this vnode is known not to have dirty pages,
    919 	 * don't bother to clean it out.
    920 	 */
    921 
    922 	if ((vp->v_iflag & VI_ONWORKLST) == 0) {
    923 #if !defined(DEBUG)
    924 		if (dirtyonly) {
    925 			goto skip_scan;
    926 		}
    927 #endif /* !defined(DEBUG) */
    928 		flags &= ~PGO_CLEANIT;
    929 	}
    930 
    931 	/*
    932 	 * start the loop to scan pages.
    933 	 */
    934 
    935 	written = false;
    936 	nextoff = startoff;
    937 	if (endoff == 0 || flags & PGO_ALLPAGES) {
    938 		endoff = trunc_page(LLONG_MAX);
    939 	}
    940 	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
    941 	tryclean = true;
    942 	uvm_page_array_init(&a);
    943 	for (;;) {
    944 		bool protected;
    945 
    946 		/*
    947 		 * if !dirtyonly, iterate over all resident pages in the range.
    948 		 *
     949 		 * if dirtyonly, we are only interested in possibly dirty pages.
     950 		 * however, if we are asked to sync for integrity, we should also
     951 		 * wait for pages being written back by other threads.
    952 		 */
    953 
    954 		pg = uvm_page_array_fill_and_peek(&a, uobj, nextoff, 0,
    955 		    dirtyonly ? (UVM_PAGE_ARRAY_FILL_DIRTY |
    956 		    (integrity_sync ? UVM_PAGE_ARRAY_FILL_WRITEBACK : 0)) : 0);
    957 		if (pg == NULL) {
    958 			break;
    959 		}
    960 
    961 		KASSERT(pg->uobject == uobj);
    962 		KASSERT((pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
    963 		    (pg->flags & (PG_BUSY)) != 0);
    964 		KASSERT(pg->offset >= startoff);
    965 		KASSERT(pg->offset >= nextoff);
    966 		KASSERT(!dirtyonly ||
    967 		    uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN ||
    968 		    radix_tree_get_tag(&uobj->uo_pages,
    969 			pg->offset >> PAGE_SHIFT, UVM_PAGE_WRITEBACK_TAG));
    970 		if (pg->offset >= endoff) {
    971 			break;
    972 		}
    973 
    974 		/*
    975 		 * a preempt point.
    976 		 */
    977 
    978 		if ((l->l_cpu->ci_schedstate.spc_flags & SPCF_SHOULDYIELD)
    979 		    != 0) {
    980 			nextoff = pg->offset; /* visit this page again */
    981 			mutex_exit(slock);
    982 			preempt();
    983 			/*
    984 			 * as we dropped the object lock, our cached pages can
    985 			 * be stale.
    986 			 */
    987 			uvm_page_array_clear(&a);
    988 			mutex_enter(slock);
    989 			continue;
    990 		}
    991 
    992 		/*
    993 		 * if the current page is busy, wait for it to become unbusy.
    994 		 */
    995 
    996 		if ((pg->flags & PG_BUSY) != 0) {
    997 			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
    998 			if ((pg->flags & (PG_RELEASED|PG_PAGEOUT)) != 0
    999 			    && (flags & PGO_BUSYFAIL) != 0) {
   1000 				UVMHIST_LOG(ubchist, "busyfail %p", pg,
   1001 				    0,0,0);
   1002 				error = EDEADLK;
   1003 				if (busypg != NULL)
   1004 					*busypg = pg;
   1005 				break;
   1006 			}
   1007 			if (pagedaemon) {
   1008 				/*
   1009 				 * someone has taken the page while we
   1010 				 * dropped the lock for fstrans_start.
   1011 				 */
   1012 				break;
   1013 			}
   1014 			/*
    1015 			 * don't bother to wait on others' activities
   1016 			 * unless we are asked to sync for integrity.
   1017 			 */
   1018 			if (!integrity_sync && (flags & PGO_RECLAIM) == 0) {
   1019 				wasclean = false;
   1020 				nextoff = pg->offset + PAGE_SIZE;
   1021 				uvm_page_array_advance(&a);
   1022 				continue;
   1023 			}
   1024 			nextoff = pg->offset; /* visit this page again */
   1025 			pg->flags |= PG_WANTED;
   1026 			UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
   1027 			/*
   1028 			 * as we dropped the object lock, our cached pages can
   1029 			 * be stale.
   1030 			 */
   1031 			uvm_page_array_clear(&a);
   1032 			mutex_enter(slock);
   1033 			continue;
   1034 		}
   1035 
   1036 		nextoff = pg->offset + PAGE_SIZE;
   1037 		uvm_page_array_advance(&a);
   1038 
   1039 		/*
   1040 		 * if we're freeing, remove all mappings of the page now.
   1041 		 * if we're cleaning, check if the page needs to be cleaned.
   1042 		 */
   1043 
   1044 		protected = false;
   1045 		if (flags & PGO_FREE) {
   1046 			pmap_page_protect(pg, VM_PROT_NONE);
   1047 			protected = true;
   1048 		} else if (flags & PGO_CLEANIT) {
   1049 
   1050 			/*
    1051 			 * if we still have some hope of pulling this vnode off
    1052 			 * the syncer queue, write-protect the page.
   1053 			 */
   1054 
   1055 			if (tryclean && wasclean) {
   1056 
   1057 				/*
   1058 				 * uobj pages get wired only by uvm_fault
   1059 				 * where uobj is locked.
   1060 				 */
   1061 
   1062 				if (pg->wire_count == 0) {
   1063 					pmap_page_protect(pg,
   1064 					    VM_PROT_READ|VM_PROT_EXECUTE);
   1065 					protected = true;
   1066 				} else {
   1067 					/*
   1068 					 * give up.
   1069 					 */
   1070 					tryclean = false;
   1071 				}
   1072 			}
   1073 		}
   1074 
   1075 		if (flags & PGO_CLEANIT) {
   1076 			needs_clean = uvm_pagecheckdirty(pg, protected);
   1077 		} else {
   1078 			needs_clean = false;
   1079 		}
   1080 
   1081 		/*
   1082 		 * if we're cleaning, build a cluster.
   1083 		 * the cluster will consist of pages which are currently dirty.
   1084 		 * if not cleaning, just operate on the one page.
   1085 		 */
   1086 
   1087 		if (needs_clean) {
   1088 			unsigned int nforw;
   1089 			unsigned int fpflags;
   1090 
   1091 			KDASSERT((vp->v_iflag & VI_ONWORKLST));
   1092 			wasclean = false;
   1093 			memset(pgs, 0, sizeof(pgs));
   1094 			pg->flags |= PG_BUSY;
   1095 			UVM_PAGE_OWN(pg, "genfs_putpages");
   1096 
   1097 			fpflags = UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY;
   1098 
   1099 			/*
   1100 			 * XXX PG_PAGER1 incompatibility check.
   1101 			 *
    1102 			 * this is a kludge for nfs.  nfs has two kinds of dirty
   1103 			 * pages:
   1104 			 *	- not written to the server yet
   1105 			 *	- written to the server but not committed yet
   1106 			 * the latter is marked as PG_NEEDCOMMIT. (== PG_PAGER1)
   1107 			 * nfs doesn't want them being clustered together.
   1108 			 *
    1109 			 * probably it's better to make PG_NEEDCOMMIT a first-class
    1110 			 * citizen for uvm/genfs.
   1111 			 */
   1112 			if ((pg->flags & PG_PAGER1) != 0) {
   1113 				fpflags |= UFP_ONLYPAGER1;
   1114 			} else {
   1115 				fpflags |= UFP_NOPAGER1;
   1116 			}
   1117 
   1118 			/*
   1119 			 * first look backward.
   1120 			 *
    1121 			 * because we always scan pages in ascending order,
   1122 			 * backward scan can be useful only for the first page
   1123 			 * in the range.
   1124 			 */
   1125 			if (startoff == pg->offset) {
   1126 				npages = MIN(maxpages >> 1,
   1127 				    pg->offset >> PAGE_SHIFT);
   1128 				nback = npages;
   1129 				uvn_findpages(uobj, pg->offset - PAGE_SIZE,
   1130 				    &nback, &pgs[0], NULL,
   1131 				    fpflags | UFP_BACKWARD);
   1132 				if (nback) {
   1133 					memmove(&pgs[0], &pgs[npages - nback],
   1134 					    nback * sizeof(pgs[0]));
   1135 					if (npages - nback < nback)
   1136 						memset(&pgs[nback], 0,
   1137 						    (npages - nback) *
   1138 						    sizeof(pgs[0]));
   1139 					else
   1140 						memset(&pgs[npages - nback], 0,
   1141 						    nback * sizeof(pgs[0]));
   1142 				}
   1143 			} else {
   1144 				nback = 0;
   1145 			}
   1146 
   1147 			/*
   1148 			 * then plug in our page of interest.
   1149 			 */
   1150 
   1151 			pgs[nback] = pg;
   1152 
   1153 			/*
   1154 			 * then look forward to fill in the remaining space in
   1155 			 * the array of pages.
   1156 			 *
   1157 			 * pass our cached array of pages so that hopefully
   1158 			 * uvn_findpages can find some good pages in it.
    1159 			 * the array a was filled above with one of the
   1160 			 * following sets of flags:
   1161 			 *	0
   1162 			 *	UVM_PAGE_ARRAY_FILL_DIRTY
   1163 			 *	UVM_PAGE_ARRAY_FILL_DIRTY|WRITEBACK
   1164 			 */
   1165 
   1166 			nforw = maxpages - nback - 1;
   1167 			uvn_findpages(uobj, pg->offset + PAGE_SIZE,
   1168 			    &nforw, &pgs[nback + 1], &a, fpflags);
   1169 			npages = nback + 1 + nforw;
   1170 		} else {
   1171 			pgs[0] = pg;
   1172 			npages = 1;
   1173 			nback = 0;
   1174 		}
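         		/*
          		 * at this point pgs[0..npages-1] holds the cluster:
          		 * nback pages just below pg, pg itself at pgs[nback],
          		 * and any forward pages found after it.
          		 */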
   1175 
   1176 		/*
   1177 		 * apply FREE or DEACTIVATE options if requested.
   1178 		 */
   1179 
   1180 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1181 			mutex_enter(&uvm_pageqlock);
   1182 		}
   1183 		for (i = 0; i < npages; i++) {
   1184 			struct vm_page *tpg = pgs[i];
   1185 
   1186 			KASSERT(tpg->uobject == uobj);
   1187 			KASSERT(i == 0 ||
   1188 			    pgs[i-1]->offset + PAGE_SIZE == tpg->offset);
   1189 			KASSERT(!needs_clean || uvm_pagegetdirty(pgs[i]) !=
   1190 			    UVM_PAGE_STATUS_DIRTY);
   1191 			if (needs_clean) {
   1192 				/*
   1193 				 * mark pages as WRITEBACK so that concurrent
   1194 				 * fsync can find and wait for our activities.
   1195 				 */
   1196 				radix_tree_set_tag(&uobj->uo_pages,
   1197 				    pgs[i]->offset >> PAGE_SHIFT,
   1198 				    UVM_PAGE_WRITEBACK_TAG);
   1199 			}
   1200 			if (tpg->offset < startoff || tpg->offset >= endoff)
   1201 				continue;
   1202 			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
   1203 				uvm_pagedeactivate(tpg);
   1204 			} else if (flags & PGO_FREE) {
   1205 				pmap_page_protect(tpg, VM_PROT_NONE);
   1206 				if (tpg->flags & PG_BUSY) {
   1207 					tpg->flags |= freeflag;
   1208 					if (pagedaemon) {
   1209 						uvm_pageout_start(1);
   1210 						uvm_pagedequeue(tpg);
   1211 					}
   1212 				} else {
   1213 
   1214 					/*
   1215 					 * ``page is not busy''
   1216 					 * implies that npages is 1
   1217 					 * and needs_clean is false.
   1218 					 */
   1219 
   1220 					KASSERT(npages == 1);
   1221 					KASSERT(!needs_clean);
   1222 					KASSERT(pg == tpg);
   1223 					KASSERT(nextoff ==
   1224 					    tpg->offset + PAGE_SIZE);
   1225 					uvm_pagefree(tpg);
   1226 					if (pagedaemon)
   1227 						uvmexp.pdfreed++;
   1228 				}
   1229 			}
   1230 		}
   1231 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1232 			mutex_exit(&uvm_pageqlock);
   1233 		}
   1234 		if (needs_clean) {
   1235 			mutex_exit(slock);
   1236 			KASSERT(nextoff == pg->offset + PAGE_SIZE);
   1237 			KASSERT(nback < npages);
   1238 			nextoff = pg->offset + ((npages - nback) << PAGE_SHIFT);
   1239 			KASSERT(pgs[nback] == pg);
   1240 			KASSERT(nextoff == pgs[npages - 1]->offset + PAGE_SIZE);
   1241 
   1242 			/*
   1243 			 * start the i/o.
   1244 			 */
   1245 			error = GOP_WRITE(vp, pgs, npages, flags);
   1246 			written = true;
   1247 			/*
   1248 			 * as we dropped the object lock, our cached pages can
   1249 			 * be stale.
   1250 			 */
   1251 			uvm_page_array_clear(&a);
   1252 			mutex_enter(slock);
   1253 			if (error) {
   1254 				break;
   1255 			}
   1256 		}
   1257 	}
   1258 	uvm_page_array_fini(&a);
   1259 
   1260 	/*
   1261 	 * update ctime/mtime if the modification we started writing out might
    1262 	 * be from an mmap'ed write.
    1263 	 *
    1264 	 * this is necessary when an application keeps a file mmapped and
   1265 	 * repeatedly modifies it via the window.  note that, because we
   1266 	 * don't always write-protect pages when cleaning, such modifications
   1267 	 * might not involve any page faults.
   1268 	 */
   1269 
   1270 	if (written && (vp->v_iflag & VI_WRMAPDIRTY) != 0 &&
   1271 	    (vp->v_type != VBLK ||
   1272 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
   1273 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
   1274 	}
   1275 
   1276 	/*
   1277 	 * if we no longer have any possibly dirty pages, take us off the
   1278 	 * syncer list.
   1279 	 */
   1280 
   1281 	if ((vp->v_iflag & VI_ONWORKLST) != 0 &&
   1282 	    radix_tree_empty_tagged_tree_p(&uobj->uo_pages,
   1283 	    UVM_PAGE_DIRTY_TAG)) {
   1284 		vp->v_iflag &= ~VI_WRMAPDIRTY;
   1285 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL)
   1286 			vn_syncer_remove_from_worklist(vp);
   1287 	}
   1288 
   1289 #if !defined(DEBUG)
   1290 skip_scan:
   1291 #endif /* !defined(DEBUG) */
   1292 
   1293 	/*
   1294 	 * if we found or started any i/o and we're asked to sync for integrity,
   1295 	 * wait for all writes to finish.
   1296 	 */
   1297 
   1298 	if (!wasclean && integrity_sync) {
   1299 		while (vp->v_numoutput != 0)
   1300 			cv_wait(&vp->v_cv, slock);
   1301 	}
   1302 	onworklst = (vp->v_iflag & VI_ONWORKLST) != 0;
   1303 	mutex_exit(slock);
   1304 
   1305 	if ((flags & PGO_RECLAIM) != 0 && onworklst) {
   1306 		/*
    1307 		 * in the case of PGO_RECLAIM, make sure the vnode ends up clean.
   1308 		 * retrying is not a big deal because, in many cases,
   1309 		 * uobj->uo_npages is already 0 here.
   1310 		 */
   1311 		mutex_enter(slock);
   1312 		goto retry;
   1313 	}
   1314 
   1315 	if (has_trans) {
   1316 		if (need_wapbl)
   1317 			WAPBL_END(vp->v_mount);
   1318 		fstrans_done(vp->v_mount);
   1319 	}
   1320 
   1321 	return (error);
   1322 }
   1323 
   1324 int
   1325 genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1326 {
   1327 	off_t off;
   1328 	vaddr_t kva;
   1329 	size_t len;
   1330 	int error;
   1331 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1332 
   1333 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1334 	    vp, pgs, npages, flags);
   1335 
   1336 	off = pgs[0]->offset;
   1337 	kva = uvm_pagermapin(pgs, npages,
   1338 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1339 	len = npages << PAGE_SHIFT;
   1340 
   1341 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1342 			    uvm_aio_biodone);
   1343 
   1344 	return error;
   1345 }
   1346 
   1347 /*
   1348  * genfs_gop_write_rwmap:
   1349  *
   1350  * a variant of genfs_gop_write.  it's used by UDF for its directory buffers.
    1351  * this maps pages with PROT_WRITE so that VOP_STRATEGY can modify
    1352  * the contents before writing them out to the underlying storage.
   1353  */
   1354 
   1355 int
   1356 genfs_gop_write_rwmap(struct vnode *vp, struct vm_page **pgs, int npages,
   1357     int flags)
   1358 {
   1359 	off_t off;
   1360 	vaddr_t kva;
   1361 	size_t len;
   1362 	int error;
   1363 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1364 
   1365 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1366 	    vp, pgs, npages, flags);
   1367 
   1368 	off = pgs[0]->offset;
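         	/*
          	 * UVMPAGER_MAPIN_READ yields a writable kernel mapping (the
          	 * pagein direction); genfs_gop_write() instead maps read-only
          	 * with UVMPAGER_MAPIN_WRITE.
          	 */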
   1369 	kva = uvm_pagermapin(pgs, npages,
   1370 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1371 	len = npages << PAGE_SHIFT;
   1372 
   1373 	error = genfs_do_io(vp, off, kva, len, flags, UIO_WRITE,
   1374 			    uvm_aio_biodone);
   1375 
   1376 	return error;
   1377 }
   1378 
   1379 /*
   1380  * Backend routine for doing I/O to vnode pages.  Pages are already locked
   1381  * and mapped into kernel memory.  Here we just look up the underlying
   1382  * device block addresses and call the strategy routine.
   1383  */
   1384 
   1385 static int
   1386 genfs_do_io(struct vnode *vp, off_t off, vaddr_t kva, size_t len, int flags,
   1387     enum uio_rw rw, void (*iodone)(struct buf *))
   1388 {
   1389 	int s, error;
   1390 	int fs_bshift, dev_bshift;
   1391 	off_t eof, offset, startoffset;
   1392 	size_t bytes, iobytes, skipbytes;
   1393 	struct buf *mbp, *bp;
   1394 	const bool async = (flags & PGO_SYNCIO) == 0;
    1395 	const bool lazy = (flags & PGO_LAZY) != 0;
   1396 	const bool iowrite = rw == UIO_WRITE;
   1397 	const int brw = iowrite ? B_WRITE : B_READ;
   1398 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(ubchist);
   1399 
   1400 	UVMHIST_LOG(ubchist, "vp %p kva %p len 0x%x flags 0x%x",
   1401 	    vp, kva, len, flags);
   1402 
   1403 	KASSERT(vp->v_size <= vp->v_writesize);
   1404 	GOP_SIZE(vp, vp->v_writesize, &eof, 0);
   1405 	if (vp->v_type != VBLK) {
   1406 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1407 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1408 	} else {
   1409 		fs_bshift = DEV_BSHIFT;
   1410 		dev_bshift = DEV_BSHIFT;
   1411 	}
   1412 	error = 0;
   1413 	startoffset = off;
   1414 	bytes = MIN(len, eof - startoffset);
   1415 	skipbytes = 0;
   1416 	KASSERT(bytes != 0);
   1417 
   1418 	if (iowrite) {
   1419 		/*
   1420 		 * why += 2?
   1421 		 * 1 for biodone, 1 for uvm_aio_aiodone.
   1422 		 */
   1423 		mutex_enter(vp->v_interlock);
   1424 		vp->v_numoutput += 2;
   1425 		mutex_exit(vp->v_interlock);
   1426 	}
   1427 	mbp = getiobuf(vp, true);
   1428 	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
   1429 	    vp, mbp, vp->v_numoutput, bytes);
   1430 	mbp->b_bufsize = len;
   1431 	mbp->b_data = (void *)kva;
   1432 	mbp->b_resid = mbp->b_bcount = bytes;
   1433 	mbp->b_cflags = BC_BUSY | BC_AGE;
   1434 	if (async) {
   1435 		mbp->b_flags = brw | B_ASYNC;
   1436 		mbp->b_iodone = iodone;
   1437 	} else {
   1438 		mbp->b_flags = brw;
   1439 		mbp->b_iodone = NULL;
   1440 	}
   1441 	if (curlwp == uvm.pagedaemon_lwp)
   1442 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
   1443 	else if (async || lazy)
   1444 		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
   1445 	else
   1446 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
   1447 
   1448 	bp = NULL;
   1449 	for (offset = startoffset;
   1450 	    bytes > 0;
   1451 	    offset += iobytes, bytes -= iobytes) {
   1452 		int run;
   1453 		daddr_t lbn, blkno;
   1454 		struct vnode *devvp;
   1455 
   1456 		/*
   1457 		 * bmap the file to find out the blkno to read from and
   1458 		 * how much we can read in one i/o.  if bmap returns an error,
   1459 		 * skip the rest of the top-level i/o.
   1460 		 */
   1461 
   1462 		lbn = offset >> fs_bshift;
   1463 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1464 		if (error) {
   1465 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
   1466 			    lbn,error,0,0);
   1467 			skipbytes += bytes;
   1468 			bytes = 0;
   1469 			goto loopdone;
   1470 		}
   1471 
   1472 		/*
   1473 		 * see how many pages can be read with this i/o.
   1474 		 * reduce the i/o size if necessary to avoid
   1475 		 * overwriting pages with valid data.
   1476 		 */
   1477 
   1478 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1479 		    bytes);
   1480 
   1481 		/*
    1482 		 * if this block isn't allocated, there is nothing to do:
    1483 		 * for reads, zero the corresponding part of the buffer
    1484 		 * instead of doing any i/o; for writes, just skip the hole.
   1485 		 */
   1486 
   1487 		if (blkno == (daddr_t)-1) {
   1488 			if (!iowrite) {
   1489 				memset((char *)kva + (offset - startoffset), 0,
   1490 				    iobytes);
   1491 			}
   1492 			skipbytes += iobytes;
   1493 			continue;
   1494 		}
   1495 
   1496 		/*
   1497 		 * allocate a sub-buf for this piece of the i/o
   1498 		 * (or just use mbp if there's only 1 piece),
   1499 		 * and start it going.
   1500 		 */
   1501 
   1502 		if (offset == startoffset && iobytes == bytes) {
   1503 			bp = mbp;
   1504 		} else {
   1505 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
   1506 			    vp, bp, vp->v_numoutput, 0);
   1507 			bp = getiobuf(vp, true);
   1508 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
   1509 		}
   1510 		bp->b_lblkno = 0;
   1511 
   1512 		/* adjust physical blkno for partial blocks */
   1513 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1514 		    dev_bshift);
   1515 
   1516 		UVMHIST_LOG(ubchist,
   1517 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
   1518 		    bp, offset, bp->b_bcount, bp->b_blkno);
   1519 
   1520 		VOP_STRATEGY(devvp, bp);
   1521 	}
   1522 
   1523 loopdone:
   1524 	if (skipbytes) {
   1525 		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
   1526 	}
   1527 	nestiobuf_done(mbp, skipbytes, error);
   1528 	if (async) {
   1529 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1530 		return (0);
   1531 	}
   1532 	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
   1533 	error = biowait(mbp);
   1534 	s = splbio();
   1535 	(*iodone)(mbp);
   1536 	splx(s);
   1537 	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
   1538 	return (error);
   1539 }
   1540 
   1541 int
   1542 genfs_compat_getpages(void *v)
   1543 {
   1544 	struct vop_getpages_args /* {
   1545 		struct vnode *a_vp;
   1546 		voff_t a_offset;
   1547 		struct vm_page **a_m;
   1548 		int *a_count;
   1549 		int a_centeridx;
   1550 		vm_prot_t a_access_type;
   1551 		int a_advice;
   1552 		int a_flags;
   1553 	} */ *ap = v;
   1554 
   1555 	off_t origoffset;
   1556 	struct vnode *vp = ap->a_vp;
   1557 	struct uvm_object *uobj = &vp->v_uobj;
   1558 	struct vm_page *pg, **pgs;
   1559 	vaddr_t kva;
   1560 	int i, error, orignpages, npages;
   1561 	struct iovec iov;
   1562 	struct uio uio;
   1563 	kauth_cred_t cred = curlwp->l_cred;
   1564 	const bool memwrite = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1565 
   1566 	error = 0;
   1567 	origoffset = ap->a_offset;
   1568 	orignpages = *ap->a_count;
   1569 	pgs = ap->a_m;
   1570 
   1571 	if (ap->a_flags & PGO_LOCKED) {
   1572 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m, NULL,
   1573 		    UFP_NOWAIT|UFP_NOALLOC| (memwrite ? UFP_NORDONLY : 0));
   1574 
   1575 		error = ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0;
   1576 		if (error == 0 && memwrite) {
   1577 			genfs_markdirty(vp);
   1578 		}
   1579 		return error;
   1580 	}
   1581 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1582 		mutex_exit(uobj->vmobjlock);
   1583 		return EINVAL;
   1584 	}
   1585 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
   1586 		mutex_exit(uobj->vmobjlock);
   1587 		return 0;
   1588 	}
   1589 	npages = orignpages;
   1590 	uvn_findpages(uobj, origoffset, &npages, pgs, NULL, UFP_ALL);
   1591 	mutex_exit(uobj->vmobjlock);
   1592 	kva = uvm_pagermapin(pgs, npages,
   1593 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1594 	for (i = 0; i < npages; i++) {
   1595 		pg = pgs[i];
   1596 		if ((pg->flags & PG_FAKE) == 0) {
   1597 			continue;
   1598 		}
   1599 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   1600 		iov.iov_len = PAGE_SIZE;
   1601 		uio.uio_iov = &iov;
   1602 		uio.uio_iovcnt = 1;
   1603 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   1604 		uio.uio_rw = UIO_READ;
   1605 		uio.uio_resid = PAGE_SIZE;
   1606 		UIO_SETUP_SYSSPACE(&uio);
   1607 		/* XXX vn_lock */
   1608 		error = VOP_READ(vp, &uio, 0, cred);
   1609 		if (error) {
   1610 			break;
   1611 		}
   1612 		if (uio.uio_resid) {
   1613 			memset(iov.iov_base, 0, uio.uio_resid);
   1614 		}
   1615 	}
   1616 	uvm_pagermapout(kva, npages);
   1617 	mutex_enter(uobj->vmobjlock);
   1618 	mutex_enter(&uvm_pageqlock);
   1619 	for (i = 0; i < npages; i++) {
   1620 		pg = pgs[i];
   1621 		if (error && (pg->flags & PG_FAKE) != 0) {
   1622 			pg->flags |= PG_RELEASED;
   1623 		} else {
   1624 			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
   1625 			uvm_pageactivate(pg);
   1626 		}
   1627 	}
   1628 	if (error) {
   1629 		uvm_page_unbusy(pgs, npages);
   1630 	}
   1631 	mutex_exit(&uvm_pageqlock);
   1632 	if (error == 0 && memwrite) {
   1633 		genfs_markdirty(vp);
   1634 	}
   1635 	mutex_exit(uobj->vmobjlock);
   1636 	return error;
   1637 }
   1638 
   1639 int
   1640 genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1641     int flags)
   1642 {
   1643 	off_t offset;
   1644 	struct iovec iov;
   1645 	struct uio uio;
   1646 	kauth_cred_t cred = curlwp->l_cred;
   1647 	struct buf *bp;
   1648 	vaddr_t kva;
   1649 	int error;
   1650 
   1651 	offset = pgs[0]->offset;
   1652 	kva = uvm_pagermapin(pgs, npages,
   1653 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1654 
   1655 	iov.iov_base = (void *)kva;
   1656 	iov.iov_len = npages << PAGE_SHIFT;
   1657 	uio.uio_iov = &iov;
   1658 	uio.uio_iovcnt = 1;
   1659 	uio.uio_offset = offset;
   1660 	uio.uio_rw = UIO_WRITE;
   1661 	uio.uio_resid = npages << PAGE_SHIFT;
   1662 	UIO_SETUP_SYSSPACE(&uio);
   1663 	/* XXX vn_lock */
   1664 	error = VOP_WRITE(vp, &uio, 0, cred);
   1665 
   1666 	mutex_enter(vp->v_interlock);
   1667 	vp->v_numoutput++;
   1668 	mutex_exit(vp->v_interlock);
   1669 
   1670 	bp = getiobuf(vp, true);
   1671 	bp->b_cflags = BC_BUSY | BC_AGE;
   1672 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   1673 	bp->b_data = (char *)kva;
   1674 	bp->b_bcount = npages << PAGE_SHIFT;
   1675 	bp->b_bufsize = npages << PAGE_SHIFT;
   1676 	bp->b_resid = 0;
   1677 	bp->b_error = error;
   1678 	uvm_aio_aiodone(bp);
   1679 	return (error);
   1680 }
   1681 
   1682 /*
   1683  * Process a uio using direct I/O.  If we reach a part of the request
   1684  * which cannot be processed in this fashion for some reason, just return.
   1685  * The caller must handle some additional part of the request using
   1686  * buffered I/O before trying direct I/O again.
   1687  */
   1688 
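         /*
          * A rough usage sketch (an assumed caller, not part of this file,
          * and hypothetical flag handling): a filesystem's read/write vnode
          * op may try direct I/O first and fall back to buffered I/O for
          * whatever remains, roughly:
          *
          *	if ((ioflag & IO_DIRECT) != 0)
          *		genfs_directio(vp, uio, ioflag);
          *	if (uio->uio_resid == 0)
          *		return 0;
          */
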
   1689 void
   1690 genfs_directio(struct vnode *vp, struct uio *uio, int ioflag)
   1691 {
   1692 	struct vmspace *vs;
   1693 	struct iovec *iov;
   1694 	vaddr_t va;
   1695 	size_t len;
   1696 	const int mask = DEV_BSIZE - 1;
   1697 	int error;
   1698 	bool need_wapbl = (vp->v_mount && vp->v_mount->mnt_wapbl &&
   1699 	    (ioflag & IO_JOURNALLOCKED) == 0);
   1700 
   1701 	/*
   1702 	 * We only support direct I/O to user space for now.
   1703 	 */
   1704 
   1705 	if (VMSPACE_IS_KERNEL_P(uio->uio_vmspace)) {
   1706 		return;
   1707 	}
   1708 
   1709 	/*
   1710 	 * If the vnode is mapped, we would need to get the getpages lock
   1711 	 * to stabilize the bmap, but then we would get into trouble while
   1712 	 * locking the pages if the pages belong to this same vnode (or a
   1713 	 * multi-vnode cascade to the same effect).  Just fall back to
   1714 	 * buffered I/O if the vnode is mapped to avoid this mess.
   1715 	 */
   1716 
   1717 	if (vp->v_vflag & VV_MAPPED) {
   1718 		return;
   1719 	}
   1720 
   1721 	if (need_wapbl) {
   1722 		error = WAPBL_BEGIN(vp->v_mount);
   1723 		if (error)
   1724 			return;
   1725 	}
   1726 
   1727 	/*
   1728 	 * Do as much of the uio as possible with direct I/O.
   1729 	 */
   1730 
   1731 	vs = uio->uio_vmspace;
   1732 	while (uio->uio_resid) {
   1733 		iov = uio->uio_iov;
   1734 		if (iov->iov_len == 0) {
   1735 			uio->uio_iov++;
   1736 			uio->uio_iovcnt--;
   1737 			continue;
   1738 		}
   1739 		va = (vaddr_t)iov->iov_base;
   1740 		len = MIN(iov->iov_len, genfs_maxdio);
   1741 		len &= ~mask;
   1742 
   1743 		/*
   1744 		 * If the next chunk is smaller than DEV_BSIZE or extends past
   1745 		 * the current EOF, then fall back to buffered I/O.
   1746 		 */
   1747 
   1748 		if (len == 0 || uio->uio_offset + len > vp->v_size) {
   1749 			break;
   1750 		}
   1751 
   1752 		/*
   1753 		 * Check alignment.  The file offset must be at least
   1754 		 * sector-aligned.  The exact constraint on memory alignment
   1755 		 * is very hardware-dependent, but requiring sector-aligned
   1756 		 * addresses there too is safe.
   1757 		 */
   1758 
   1759 		if (uio->uio_offset & mask || va & mask) {
   1760 			break;
   1761 		}
   1762 		error = genfs_do_directio(vs, va, len, vp, uio->uio_offset,
   1763 					  uio->uio_rw);
   1764 		if (error) {
   1765 			break;
   1766 		}
   1767 		iov->iov_base = (char *)iov->iov_base + len;
   1768 		iov->iov_len -= len;
   1769 		uio->uio_offset += len;
   1770 		uio->uio_resid -= len;
   1771 	}
   1772 
   1773 	if (need_wapbl)
   1774 		WAPBL_END(vp->v_mount);
   1775 }
   1776 
   1777 /*
   1778  * Iodone routine for direct I/O.  We don't do much here since the request is
   1779  * always synchronous, so the caller will do most of the work after biowait().
   1780  */
   1781 
   1782 static void
   1783 genfs_dio_iodone(struct buf *bp)
   1784 {
   1785 
   1786 	KASSERT((bp->b_flags & B_ASYNC) == 0);
   1787 	if ((bp->b_flags & B_READ) == 0 && (bp->b_cflags & BC_AGE) != 0) {
   1788 		mutex_enter(bp->b_objlock);
   1789 		vwakeup(bp);
   1790 		mutex_exit(bp->b_objlock);
   1791 	}
   1792 	putiobuf(bp);
   1793 }
   1794 
   1795 /*
   1796  * Process one chunk of a direct I/O request.
   1797  */
   1798 
   1799 static int
   1800 genfs_do_directio(struct vmspace *vs, vaddr_t uva, size_t len, struct vnode *vp,
   1801     off_t off, enum uio_rw rw)
   1802 {
   1803 	struct vm_map *map;
   1804 	struct pmap *upm, *kpm __unused;
   1805 	size_t klen = round_page(uva + len) - trunc_page(uva);
   1806 	off_t spoff, epoff;
   1807 	vaddr_t kva, puva;
   1808 	paddr_t pa;
   1809 	vm_prot_t prot;
   1810 	int error, rv __diagused, poff, koff;
   1811 	const int pgoflags = PGO_CLEANIT | PGO_SYNCIO | PGO_JOURNALLOCKED |
   1812 		(rw == UIO_WRITE ? PGO_FREE : 0);
   1813 
   1814 	/*
   1815 	 * For writes, verify that this range of the file already has fully
   1816 	 * allocated backing store.  If there are any holes, just punt and
   1817 	 * make the caller take the buffered write path.
   1818 	 */
   1819 
   1820 	if (rw == UIO_WRITE) {
   1821 		daddr_t lbn, elbn, blkno;
   1822 		int bsize, bshift, run;
   1823 
   1824 		bshift = vp->v_mount->mnt_fs_bshift;
   1825 		bsize = 1 << bshift;
   1826 		lbn = off >> bshift;
   1827 		elbn = (off + len + bsize - 1) >> bshift;
   1828 		while (lbn < elbn) {
   1829 			error = VOP_BMAP(vp, lbn, NULL, &blkno, &run);
   1830 			if (error) {
   1831 				return error;
   1832 			}
   1833 			if (blkno == (daddr_t)-1) {
   1834 				return ENOSPC;
   1835 			}
   1836 			lbn += 1 + run;
   1837 		}
   1838 	}
   1839 
   1840 	/*
   1841 	 * Flush any cached pages for parts of the file that we're about to
   1842 	 * access.  If we're writing, invalidate pages as well.
   1843 	 */
   1844 
   1845 	spoff = trunc_page(off);
   1846 	epoff = round_page(off + len);
   1847 	mutex_enter(vp->v_interlock);
   1848 	error = VOP_PUTPAGES(vp, spoff, epoff, pgoflags);
   1849 	if (error) {
   1850 		return error;
   1851 	}
   1852 
   1853 	/*
   1854 	 * Wire the user pages and remap them into kernel memory.
   1855 	 */
   1856 
   1857 	prot = rw == UIO_READ ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ;
   1858 	error = uvm_vslock(vs, (void *)uva, len, prot);
   1859 	if (error) {
   1860 		return error;
   1861 	}
   1862 
   1863 	map = &vs->vm_map;
   1864 	upm = vm_map_pmap(map);
   1865 	kpm = vm_map_pmap(kernel_map);
   1866 	puva = trunc_page(uva);
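         	/*
          	 * ask for kva whose virtual color matches the user mapping so
          	 * the alias behaves on virtually-indexed caches.
          	 */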
   1867 	kva = uvm_km_alloc(kernel_map, klen, atop(puva) & uvmexp.colormask,
   1868 	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
   1869 	for (poff = 0; poff < klen; poff += PAGE_SIZE) {
   1870 		rv = pmap_extract(upm, puva + poff, &pa);
   1871 		KASSERT(rv);
   1872 		pmap_kenter_pa(kva + poff, pa, prot, PMAP_WIRED);
   1873 	}
   1874 	pmap_update(kpm);
   1875 
   1876 	/*
   1877 	 * Do the I/O.
   1878 	 */
   1879 
   1880 	koff = uva - trunc_page(uva);
   1881 	error = genfs_do_io(vp, off, kva + koff, len, PGO_SYNCIO, rw,
   1882 			    genfs_dio_iodone);
   1883 
   1884 	/*
   1885 	 * Tear down the kernel mapping.
   1886 	 */
   1887 
   1888 	pmap_kremove(kva, klen);
   1889 	pmap_update(kpm);
   1890 	uvm_km_free(kernel_map, kva, klen, UVM_KMF_VAONLY);
   1891 
   1892 	/*
   1893 	 * Unwire the user pages.
   1894 	 */
   1895 
   1896 	uvm_vsunlock(vs, (void *)uva, len);
   1897 	return error;
   1898 }
   1899