/*	$NetBSD: genfs_vnops.c,v 1.63 2002/05/18 02:54:57 enami Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.63 2002/05/18 02:54:57 enami Exp $");

#include "opt_nfsserver.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/mman.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>

#ifdef NFSSERVER
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nqnfs.h>
#include <nfs/nfs_var.h>
#endif

static __inline void genfs_rel_pages(struct vm_page **, int);

#define MAX_READ_AHEAD	16	/* XXXUBC 16 */
int genfs_rapages = MAX_READ_AHEAD; /* # of pages in each chunk of readahead */
int genfs_racount = 2;		/* # of page chunks to readahead */
int genfs_raskip = 2;		/* # of busy page chunks allowed to skip */

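/*
 * Generic poll: genfs vnodes never block for i/o readiness,
 * so report the requested read/write events as immediately ready.
 */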
int
genfs_poll(void *v)
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct proc *a_p;
	} */ *ap = v;

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

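/*
 * Generic fsync: flush the vnode's dirty buffers, then bring the
 * on-disk inode up to date, unless only the data was asked for.
 */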
int
genfs_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int wait;

	wait = (ap->a_flags & FSYNC_WAIT) != 0;
	vflushbuf(vp, wait);
	if ((ap->a_flags & FSYNC_DATAONLY) != 0)
		return (0);
	else
		return (VOP_UPDATE(vp, NULL, NULL, wait ? UPDATE_WAIT : 0));
}

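/*
 * Generic seek: any non-negative offset is acceptable.
 */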
int
genfs_seek(void *v)
{
	struct vop_seek_args /* {
		struct vnode *a_vp;
		off_t a_oldoff;
		off_t a_newoff;
		struct ucred *a_ucred;
	} */ *ap = v;

	if (ap->a_newoff < 0)
		return (EINVAL);

	return (0);
}

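/*
 * Generic abortop: free the pathname buffer if the aborted
 * operation would otherwise leak it.
 */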
int
genfs_abortop(void *v)
{
	struct vop_abortop_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap = v;

	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
		PNBUF_PUT(ap->a_cnp->cn_pnbuf);
	return (0);
}

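/*
 * Generic fcntl: F_SETFL needs no filesystem-specific processing;
 * all other commands are unsupported.
 */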
int
genfs_fcntl(void *v)
{
	struct vop_fcntl_args /* {
		struct vnode *a_vp;
		u_int a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;

	if (ap->a_command == F_SETFL)
		return (0);
	else
		return (EOPNOTSUPP);
}

/*ARGSUSED*/
int
genfs_badop(void *v)
{

	panic("genfs: bad op");
}

/*ARGSUSED*/
int
genfs_nullop(void *v)
{

	return (0);
}

/*ARGSUSED*/
int
genfs_einval(void *v)
{

	return (EINVAL);
}

/*ARGSUSED*/
int
genfs_eopnotsupp(void *v)
{

	return (EOPNOTSUPP);
}

/*
 * Called when an fs doesn't support a particular vop but the vop needs to
 * vrele, vput, or vunlock passed in vnodes.
 */
int
genfs_eopnotsupp_rele(void *v)
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		/ * other random data follows, presumably * /
	} */ *ap = v;
	struct vnodeop_desc *desc = ap->a_desc;
	struct vnode *vp;
	int flags, i, j, offset;

	flags = desc->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; flags >>= 1, i++) {
		if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
			break;	/* stop at end of list */
		if ((j = flags & VDESC_VP0_WILLPUT)) {
			vp = *VOPARG_OFFSETTO(struct vnode **, offset, ap);
			switch (j) {
			case VDESC_VP0_WILLPUT:
				vput(vp);
				break;
			case VDESC_VP0_WILLUNLOCK:
				VOP_UNLOCK(vp, 0);
				break;
			case VDESC_VP0_WILLRELE:
				vrele(vp);
				break;
			}
		}
	}

	return (EOPNOTSUPP);
}

/*ARGSUSED*/
int
genfs_ebadf(void *v)
{

	return (EBADF);
}

/* ARGSUSED */
int
genfs_enoioctl(void *v)
{

	return (EPASSTHROUGH);
}


/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
genfs_revoke(void *v)
{
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp, *vq;
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if ((ap->a_flags & REVOKEALL) == 0)
		panic("genfs_revoke: not revokeall");
#endif

	vp = ap->a_vp;
	simple_lock(&vp->v_interlock);

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			simple_unlock(&vp->v_interlock);
			tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
			return (0);
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		simple_unlock(&vp->v_interlock);
		while (vp->v_flag & VALIASED) {
			simple_lock(&spechash_slock);
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				simple_unlock(&spechash_slock);
				vgone(vq);
				break;
			}
			if (vq == NULLVP)
				simple_unlock(&spechash_slock);
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		simple_lock(&vp->v_interlock);
		vp->v_flag &= ~VXLOCK;
	}
	vgonel(vp, p);
	return (0);
}

/*
 * Lock the node.
 */
int
genfs_lock(void *v)
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock));
}

/*
 * Unlock the node.
 */
int
genfs_unlock(void *v)
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE,
	    &vp->v_interlock));
}

/*
 * Return whether or not the node is locked.
 */
int
genfs_islocked(void *v)
{
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockstatus(&vp->v_lock));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 */
int
genfs_nolock(void *v)
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
}

int
genfs_nounlock(void *v)
{

	return (0);
}

int
genfs_noislocked(void *v)
{

	return (0);
}

/*
 * Local lease check for NFS servers.  Just set up args and let
 * nqsrv_getlease() do the rest.  If NFSSERVER is not in the kernel,
 * this is a null operation.
 */
int
genfs_lease_check(void *v)
{
#ifdef NFSSERVER
	struct vop_lease_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
		struct ucred *a_cred;
		int a_flag;
	} */ *ap = v;
	u_int32_t duration = 0;
	int cache;
	u_quad_t frev;

	(void) nqsrv_getlease(ap->a_vp, &duration, ND_CHECK | ap->a_flag,
	    NQLOCALSLP, ap->a_p, (struct mbuf *)0, &cache, &frev, ap->a_cred);
	return (0);
#else
	return (0);
#endif /* NFSSERVER */
}

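/*
 * Generic mmap: no filesystem-specific setup is needed.
 */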
int
genfs_mmap(void *v)
{

	return (0);
}

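/*
 * Unbusy the given pages, first marking any freshly allocated page
 * (PG_FAKE, i.e. not yet filled with valid data) PG_RELEASED so that
 * uvm_page_unbusy() will free it instead of leaving stale data
 * in the object.
 */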
static __inline void
genfs_rel_pages(struct vm_page **pgs, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		if (pg == NULL)
			continue;
		if (pg->flags & PG_FAKE) {
			pg->flags |= PG_RELEASED;
		}
	}
	uvm_page_unbusy(pgs, npages);
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */

int
genfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t newsize, diskeof, memeof;
	off_t offset, origoffset, startoffset, endoffset, raoffset;
	daddr_t lbn, blkno;
	int s, i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
	int fs_bshift, fs_bsize, dev_bshift;
	int flags = ap->a_flags;
	size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	struct vnode *vp = ap->a_vp;
	struct vnode *devvp;
	struct genfs_node *gp = VTOG(vp);
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, *pgs[MAX_READ_AHEAD];
	struct ucred *cred = curproc->p_ucred;		/* XXXUBC curproc */
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	boolean_t sawhole = FALSE;
	boolean_t overwrite = (flags & PGO_OVERWRITE) != 0;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	/* XXXUBC temp limit */
	if (*ap->a_count > MAX_READ_AHEAD) {
		panic("genfs_getpages: too many pages");
	}

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	GOP_SIZE(vp, vp->v_size, &diskeof);
	if (flags & PGO_PASTEOF) {
		newsize = MAX(vp->v_size,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof);
	} else {
		memeof = diskeof;
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			simple_unlock(&uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
		    origoffset, *ap->a_count, memeof,0);
		return (EINVAL);
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT|UFP_NOALLOC| (write ? UFP_NORDONLY : 0));

		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
	}

	/* vnode is VOP_LOCKed, uobj is locked */

	if (write && (vp->v_flag & VONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	if (vp->v_type == VREG) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	fs_bsize = 1 << fs_bshift;

	orignpages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignpages;
	startoffset = origoffset & ~(fs_bsize - 1);
	endoffset = round_page((origoffset + (npages << PAGE_SHIFT) +
	    fs_bsize - 1) & ~(fs_bsize - 1));
	endoffset = MIN(endoffset, round_page(memeof));
	ridx = (origoffset - startoffset) >> PAGE_SHIFT;

	memset(pgs, 0, sizeof(pgs));
	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
	    ridx, npages, startoffset, endoffset);
	KASSERT(&pgs[ridx + npages] <= &pgs[MAX_READ_AHEAD]);
	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
	    async ? UFP_NOWAIT : UFP_ALL) != orignpages) {
		KASSERT(async != 0);
		genfs_rel_pages(&pgs[ridx], orignpages);
		simple_unlock(&uobj->vmobjlock);
		return (EBUSY);
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[ridx + i];

		if ((pg->flags & PG_FAKE) ||
		    (write && (pg->flags & PG_RDONLY))) {
			break;
		}
	}
	if (i == npages) {
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		raoffset = origoffset + (orignpages << PAGE_SHIFT);
		npages += ridx;
		goto raout;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (flags & PGO_OVERWRITE) {
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			pg->flags &= ~(PG_RDONLY|PG_CLEAN);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignpages) {

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		genfs_rel_pages(&pgs[ridx], orignpages);
		memset(pgs, 0, sizeof(pgs));

		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
			KASSERT(async != 0);
			genfs_rel_pages(pgs, npages);
			simple_unlock(&uobj->vmobjlock);
			return (EBUSY);
		}
	}
	simple_unlock(&uobj->vmobjlock);

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);

	s = splbio();
	mbp = pool_get(&bufpool, PR_WAITOK);
	splx(s);
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY|B_READ| (async ? B_CALL : 0);
	mbp->b_iodone = (async ? uvm_aio_biodone : 0);
	mbp->b_vp = vp;
	LIST_INIT(&mbp->b_dep);

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * if the page including EOF is not PG_FAKE, skip over it since
	 * in that case it has valid data that we need to preserve.
	 */

	if (tailbytes > 0) {
		size_t tailstart = bytes;

		if ((pgs[bytes >> PAGE_SHIFT]->flags & PG_FAKE) == 0) {
			tailstart = round_page(tailstart);
			tailbytes -= tailstart - bytes;
		}
		UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
		    kva, tailstart, tailbytes,0);
		memset((void *)(kva + tailstart), 0, tailbytes);
	}

	/*
	 * now loop over the pages, reading as needed.
	 */

	if (write) {
		lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
	} else {
		lockmgr(&gp->g_glock, LK_SHARED, NULL);
	}

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & (PG_FAKE|PG_RDONLY)) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
			    lbn, error,0,0);
			skipbytes += bytes;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  if this is a read access, mark the
		 * pages we zeroed PG_RDONLY.
		 */

		if (blkno < 0) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);

			sawhole = TRUE;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			for (i = 0; i < holepages; i++) {
				if (write) {
					pgs[pidx + i]->flags &= ~PG_CLEAN;
				} else {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
			}
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			s = splbio();
			bp = pool_get(&bufpool, PR_WAITOK);
			splx(s);
			bp->b_data = (char *)kva + offset - startoffset;
			bp->b_resid = bp->b_bcount = iobytes;
			bp->b_flags = B_BUSY|B_READ|B_CALL;
			bp->b_iodone = uvm_aio_biodone1;
			bp->b_vp = vp;
			bp->b_proc = NULL;
			LIST_INIT(&bp->b_dep);
		}
		bp->b_lblkno = 0;
		bp->b_private = mbp;
		if (devvp->v_type == VBLK) {
			bp->b_dev = devvp->v_rdev;
		}

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);

		UVMHIST_LOG(ubchist,
		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    bp, offset, iobytes, bp->b_blkno);

		VOP_STRATEGY(bp);
	}

loopdone:
	if (skipbytes) {
		s = splbio();
		if (error) {
			mbp->b_flags |= B_ERROR;
			mbp->b_error = error;
		}
		mbp->b_resid -= skipbytes;
		if (mbp->b_resid == 0) {
			biodone(mbp);
		}
		splx(s);
	}

	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
		return (0);
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}
	s = splbio();
	pool_put(&bufpool, mbp);
	splx(s);
	uvm_pagermapout(kva, npages);
	raoffset = startoffset + totalbytes;

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && write) {
		for (i = 0; i < npages; i++) {
			if (pgs[i] == NULL) {
				continue;
			}
			pgs[i]->flags &= ~PG_CLEAN;
			UVMHIST_LOG(ubchist, "mark dirty pg %p", pgs[i],0,0,0);
		}
		error = GOP_ALLOC(vp, startoffset, npages << PAGE_SHIFT, 0,
		    cred);
		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
		    startoffset, npages << PAGE_SHIFT, error,0);
	}
	lockmgr(&gp->g_glock, LK_RELEASE, NULL);
	simple_lock(&uobj->vmobjlock);

	/*
	 * see if we want to start any readahead.
	 * XXXUBC for now, just read the next 128k on 64k boundaries.
	 * this is pretty nonsensical, but it is 50% faster than reading
	 * just the next 64k.
	 */

raout:
	if (!error && !async && !write && ((int)raoffset & 0xffff) == 0 &&
	    PAGE_SHIFT <= 16) {
		off_t rasize;
		int rapages, err, i, skipped;

		/* XXXUBC temp limit, from above */
		rapages = MIN(MIN(1 << (16 - PAGE_SHIFT), MAX_READ_AHEAD),
		    genfs_rapages);
		rasize = rapages << PAGE_SHIFT;
		for (i = skipped = 0; i < genfs_racount; i++) {
			err = VOP_GETPAGES(vp, raoffset, NULL, &rapages, 0,
			    VM_PROT_READ, 0, 0);
			simple_lock(&uobj->vmobjlock);
			if (err) {
				if (err != EBUSY ||
				    skipped++ == genfs_raskip)
					break;
			}
			raoffset += rasize;
			rapages = rasize >> PAGE_SHIFT;
		}
	}

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		for (i = 0; i < npages; i++) {
			if (pgs[i] == NULL) {
				continue;
			}
			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
			    pgs[i], pgs[i]->flags, 0,0);
			if (pgs[i]->flags & PG_FAKE) {
				pgs[i]->flags |= PG_RELEASED;
			}
		}
		uvm_lock_pageq();
		uvm_page_unbusy(pgs, npages);
		uvm_unlock_pageq();
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
		return (error);
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
		    pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			pg->flags &= ~(PG_FAKE);
			pmap_clear_modify(pgs[i]);
		}
		if (write) {
			pg->flags &= ~(PG_RDONLY);
		}
		if (i < ridx || i >= ridx + orignpages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
			    pg, pg->offset,0,0);
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_FAKE) {
				KASSERT(overwrite);
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pageactivate(pg);
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignpages * sizeof(struct vm_page *));
	}
	return (0);
}

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.   we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT), or
 *	if PGO_SYNCIO is set and there are pages busy.
 *	we return with the object locked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *	thus, a caller might want to unlock higher level resources
 *	(e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, then we will neither
 *	unlock the object nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.   thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the page queue lock.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.   the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).    if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.    if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).    in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).   also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.    depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.   we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.   however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	range by an estimate of the relatively higher cost of the hash lookup.
 */

int
genfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct simplelock *slock = &uobj->vmobjlock;
	off_t startoff = ap->a_offlo;
	off_t endoff = ap->a_offhi;
	off_t off;
	int flags = ap->a_flags;
	const int maxpages = MAXBSIZE >> PAGE_SHIFT;
	int i, s, error, npages, nback;
	int freeflag;
	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
	boolean_t wasclean, by_list, needs_clean, yield;
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	boolean_t pagedaemon = curproc == uvm.pagedaemon_proc;
	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

	KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
	KASSERT(startoff < endoff || endoff == 0);

	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
	    vp, uobj->uo_npages, startoff, endoff - startoff);
	if (uobj->uo_npages == 0) {
		s = splbio();
		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
		    (vp->v_flag & VONWORKLST)) {
			vp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(vp, v_synclist);
		}
		splx(s);
		simple_unlock(slock);
		return (0);
	}

	/*
	 * the vnode has pages, set up to process the request.
	 */

	error = 0;
	s = splbio();
	wasclean = (vp->v_numoutput == 0);
	splx(s);
	off = startoff;
	if (endoff == 0 || flags & PGO_ALLPAGES) {
		endoff = trunc_page(LLONG_MAX);
	}
	by_list = (uobj->uo_npages <=
	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);

	/*
	 * start the loop.  when scanning by list, hold the last page
	 * in the list before we start.  pages allocated after we start
	 * will be added to the end of the list, so we can stop at the
	 * current last page.
	 */

	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
	curmp.uobject = uobj;
	curmp.offset = (voff_t)-1;
	curmp.flags = PG_BUSY;
	endmp.uobject = uobj;
	endmp.offset = (voff_t)-1;
	endmp.flags = PG_BUSY;
	if (by_list) {
		pg = TAILQ_FIRST(&uobj->memq);
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
		PHOLD(curproc);
	} else {
		pg = uvm_pagelookup(uobj, off);
	}
	nextpg = NULL;
	while (by_list || off < endoff) {

		/*
		 * if the current page is not interesting, move on to the next.
		 */

		KASSERT(pg == NULL || pg->uobject == uobj);
		KASSERT(pg == NULL ||
		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		    (pg->flags & PG_BUSY) != 0);
		if (by_list) {
			if (pg == &endmp) {
				break;
			}
			if (pg->offset < startoff || pg->offset >= endoff ||
			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
				pg = TAILQ_NEXT(pg, listq);
				continue;
			}
			off = pg->offset;
		} else if (pg == NULL ||
		    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
			off += PAGE_SIZE;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if the current page needs to be cleaned and it's busy,
		 * wait for it to become unbusy.
		 */

		yield = (curproc->p_cpu->ci_schedstate.spc_flags &
		    SPCF_SHOULDYIELD) && !pagedaemon;
		if (pg->flags & PG_BUSY || yield) {
			KASSERT(!pagedaemon);
			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq);
				UVMHIST_LOG(ubchist, "curmp next %p",
				    TAILQ_NEXT(&curmp, listq), 0,0,0);
			}
			if (yield) {
				simple_unlock(slock);
				preempt(NULL);
				simple_lock(slock);
			} else {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
				simple_lock(slock);
			}
			if (by_list) {
				UVMHIST_LOG(ubchist, "after next %p",
				    TAILQ_NEXT(&curmp, listq), 0,0,0);
				pg = TAILQ_NEXT(&curmp, listq);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
			} else {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if we're freeing, remove all mappings of the page now.
		 * if we're cleaning, check if the page needs to be cleaned.
		 */

		if (flags & PGO_FREE) {
			pmap_page_protect(pg, VM_PROT_NONE);
		}
		if (flags & PGO_CLEANIT) {
			needs_clean = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
		} else {
			needs_clean = FALSE;
		}

		/*
		 * if we're cleaning, build a cluster.
		 * the cluster will consist of pages which are currently dirty,
		 * but they will be returned to us marked clean.
		 * if not cleaning, just operate on the one page.
		 */

		if (needs_clean) {
			wasclean = FALSE;
			memset(pgs, 0, sizeof(pgs));
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "genfs_putpages");

			/*
			 * first look backward.
			 */

			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
			nback = npages;
			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
			if (nback) {
				memmove(&pgs[0], &pgs[npages - nback],
				    nback * sizeof(pgs[0]));
				if (npages - nback < nback)
					memset(&pgs[nback], 0,
					    (npages - nback) * sizeof(pgs[0]));
				else
					memset(&pgs[npages - nback], 0,
					    nback * sizeof(pgs[0]));
			}

			/*
			 * then plug in our page of interest.
			 */

			pgs[nback] = pg;

			/*
			 * then look forward to fill in the remaining space in
			 * the array of pages.
			 */

			npages = maxpages - nback - 1;
			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
			    &pgs[nback + 1],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
			npages += nback + 1;
		} else {
			pgs[0] = pg;
			npages = 1;
			nback = 0;
		}

		/*
		 * apply FREE or DEACTIVATE options if requested.
		 */

		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			uvm_lock_pageq();
		}
		for (i = 0; i < npages; i++) {
			tpg = pgs[i];
			KASSERT(tpg->uobject == uobj);
			if (by_list && tpg == TAILQ_NEXT(pg, listq))
				pg = tpg;
			if (tpg->offset < startoff || tpg->offset >= endoff)
				continue;
			if (flags & PGO_DEACTIVATE &&
			    (tpg->pqflags & PQ_INACTIVE) == 0 &&
			    tpg->wire_count == 0) {
				(void) pmap_clear_reference(tpg);
				uvm_pagedeactivate(tpg);
			} else if (flags & PGO_FREE) {
				pmap_page_protect(tpg, VM_PROT_NONE);
				if (tpg->flags & PG_BUSY) {
					tpg->flags |= freeflag;
					if (pagedaemon) {
						uvmexp.paging++;
						uvm_pagedequeue(tpg);
					}
				} else {

					/*
					 * ``page is not busy''
					 * implies that npages is 1
					 * and needs_clean is false.
					 */

					nextpg = TAILQ_NEXT(tpg, listq);
					uvm_pagefree(tpg);
				}
			}
		}
		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			uvm_unlock_pageq();
		}
		if (needs_clean) {

			/*
			 * start the i/o.  if we're traversing by list,
			 * keep our place in the list with a marker page.
			 */

			if (by_list) {
				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
				    listq);
			}
			simple_unlock(slock);
			error = GOP_WRITE(vp, pgs, npages, flags);
			simple_lock(slock);
			if (by_list) {
				pg = TAILQ_NEXT(&curmp, listq);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
			}
			if (error) {
				break;
			}
			if (by_list) {
				continue;
			}
		}

		/*
		 * find the next page and continue if there was no error.
		 */

		if (by_list) {
			if (nextpg) {
				pg = nextpg;
				nextpg = NULL;
			} else {
				pg = TAILQ_NEXT(pg, listq);
			}
		} else {
			off += (npages - nback) << PAGE_SHIFT;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
		}
	}
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
		PRELE(curproc);
	}

	/*
	 * if we're cleaning and there was nothing to clean,
	 * take us off the syncer list.  if we started any i/o
	 * and we're doing sync i/o, wait for all writes to finish.
	 */

	s = splbio();
	if ((flags & PGO_CLEANIT) && wasclean &&
	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
	    (vp->v_flag & VONWORKLST)) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}
	splx(s);
	if (!wasclean && !async) {
		s = splbio();
		while (vp->v_numoutput != 0) {
			vp->v_flag |= VBWAIT;
			UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, slock, FALSE,
			    "genput2", 0);
			simple_lock(slock);
		}
		splx(s);
	}
	simple_unlock(&uobj->vmobjlock);
	return (error);
}

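/*
 * genfs_putpages' default GOP_WRITE method: start i/o on the given
 * pages, using VOP_BMAP to split the range into runs of contiguous
 * disk blocks and skipping over unallocated (hole) blocks.
 * the "master" buf mbp tracks completion of all the sub-bufs.
 */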
int
genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	int s, error, run;
	int fs_bshift, dev_bshift;
	vaddr_t kva;
	off_t eof, offset, startoffset;
	size_t bytes, iobytes, skipbytes;
	daddr_t lbn, blkno;
	struct vm_page *pg;
	struct buf *mbp, *bp;
	struct vnode *devvp;
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	UVMHIST_FUNC("genfs_gop_write"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
	    vp, pgs, npages, flags);

	GOP_SIZE(vp, vp->v_size, &eof);
	if (vp->v_type == VREG) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	error = 0;
	pg = pgs[0];
	startoffset = pg->offset;
	bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
	skipbytes = 0;
	KASSERT(bytes != 0);

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

	s = splbio();
	vp->v_numoutput += 2;
	mbp = pool_get(&bufpool, PR_WAITOK);
	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
	    vp, mbp, vp->v_numoutput, bytes);
	splx(s);
	mbp->b_bufsize = npages << PAGE_SHIFT;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY|B_WRITE|B_AGE| (async ? (B_CALL|B_ASYNC) : 0);
	mbp->b_iodone = uvm_aio_biodone;
	mbp->b_vp = vp;
	LIST_INIT(&mbp->b_dep);

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
			skipbytes += bytes;
			bytes = 0;
			break;
		}

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (blkno == (daddr_t)-1) {
			skipbytes += iobytes;
			continue;
		}

		/* if it's really one i/o, don't make a second buf */
		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			s = splbio();
			vp->v_numoutput++;
			bp = pool_get(&bufpool, PR_WAITOK);
			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
			    vp, bp, vp->v_numoutput, 0);
			splx(s);
			bp->b_data = (char *)kva +
			    (vaddr_t)(offset - pg->offset);
			bp->b_resid = bp->b_bcount = iobytes;
			bp->b_flags = B_BUSY|B_WRITE|B_CALL|B_ASYNC;
			bp->b_iodone = uvm_aio_biodone1;
			bp->b_vp = vp;
			LIST_INIT(&bp->b_dep);
		}
		bp->b_lblkno = 0;
		bp->b_private = mbp;
		if (devvp->v_type == VBLK) {
			bp->b_dev = devvp->v_rdev;
		}

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);
		UVMHIST_LOG(ubchist,
		    "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    vp, offset, bp->b_bcount, bp->b_blkno);
		VOP_STRATEGY(bp);
	}
	if (skipbytes) {
		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
		s = splbio();
		if (error) {
			mbp->b_flags |= B_ERROR;
			mbp->b_error = error;
		}
		mbp->b_resid -= skipbytes;
		if (mbp->b_resid == 0) {
			biodone(mbp);
		}
		splx(s);
	}
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
		return (0);
	}
	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
	error = biowait(mbp);
	uvm_aio_aiodone(mbp);
	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
	return (error);
}

/*
 * VOP_PUTPAGES() for vnodes which never have pages.
 */

int
genfs_null_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	KASSERT(vp->v_uobj.uo_npages == 0);
	simple_unlock(&vp->v_interlock);
	return (0);
}

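/*
 * Initialize the genfs part of a filesystem's vnode: set up the
 * glock which serializes allocation-related getpages/putpages
 * activity and record the filesystem's genfs_ops.  A filesystem
 * would typically call this from its vnode-creation path with its
 * own ops vector, along these lines (myfs_* names are hypothetical):
 *
 *	static struct genfs_ops myfs_genfsops = {
 *		genfs_size,
 *		myfs_gop_alloc,
 *		genfs_gop_write,
 *	};
 *	...
 *	genfs_node_init(vp, &myfs_genfsops);
 */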
void
genfs_node_init(struct vnode *vp, struct genfs_ops *ops)
{
	struct genfs_node *gp = VTOG(vp);

	lockinit(&gp->g_glock, PINOD, "glock", 0, 0);
	gp->g_op = ops;
}

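/*
 * Default GOP_SIZE method: round the given file size up to
 * a filesystem block boundary.
 */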
void
genfs_size(struct vnode *vp, off_t size, off_t *eobp)
{
	int bsize;

	bsize = 1 << vp->v_mount->mnt_fs_bshift;
	*eobp = (size + bsize - 1) & ~(bsize - 1);
}

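/*
 * Compatibility getpages for filesystems which don't implement
 * a real getpages method: fill any not-yet-valid (PG_FAKE) pages
 * by calling VOP_READ on a pager mapping of them.
 */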
int
genfs_compat_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t origoffset;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs;
	vaddr_t kva;
	int i, error, orignpages, npages;
	struct iovec iov;
	struct uio uio;
	struct ucred *cred = curproc->p_ucred;
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	pgs = ap->a_m;

	if (write && (vp->v_flag & VONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	if (ap->a_flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT|UFP_NOALLOC| (write ? UFP_NORDONLY : 0));

		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
	}
	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
		simple_unlock(&uobj->vmobjlock);
		return (EINVAL);
	}
	npages = orignpages;
	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
	simple_unlock(&uobj->vmobjlock);
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if ((pg->flags & PG_FAKE) == 0) {
			continue;
		}
		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
		iov.iov_len = PAGE_SIZE;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_resid = PAGE_SIZE;
		uio.uio_procp = curproc;
		error = VOP_READ(vp, &uio, 0, cred);
		if (error) {
			break;
		}
		if (uio.uio_resid) {
			memset(iov.iov_base, 0, uio.uio_resid);
		}
	}
	uvm_pagermapout(kva, npages);
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (error && (pg->flags & PG_FAKE) != 0) {
			pg->flags |= PG_RELEASED;
		} else {
			pmap_clear_modify(pg);
			uvm_pageactivate(pg);
		}
	}
	if (error) {
		uvm_page_unbusy(pgs, npages);
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	return (error);
}

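/*
 * Compatibility GOP_WRITE: push the pages out with VOP_WRITE, then
 * fake up a buf describing the i/o so that uvm_aio_aiodone() can
 * perform the usual post-i/o page cleanup.
 */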
int
genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	off_t offset;
	struct iovec iov;
	struct uio uio;
	struct ucred *cred = curproc->p_ucred;
	struct buf *bp;
	vaddr_t kva;
	int s, error;

	offset = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

	iov.iov_base = (void *)kva;
	iov.iov_len = npages << PAGE_SHIFT;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_resid = npages << PAGE_SHIFT;
	uio.uio_procp = curproc;
	error = VOP_WRITE(vp, &uio, 0, cred);

	s = splbio();
	vp->v_numoutput++;
	bp = pool_get(&bufpool, PR_WAITOK);
	splx(s);

	bp->b_flags = B_BUSY | B_WRITE | B_AGE;
	bp->b_vp = vp;
	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
	bp->b_data = (char *)kva;
	bp->b_bcount = npages << PAGE_SHIFT;
	bp->b_bufsize = npages << PAGE_SHIFT;
	bp->b_resid = 0;
	LIST_INIT(&bp->b_dep);
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	}
	uvm_aio_aiodone(bp);
	return (error);
}