/*	$NetBSD: genfs_vnops.c,v 1.123 2006/03/30 12:40:06 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.123 2006/03/30 12:40:06 yamt Exp $");

#if defined(_KERNEL_OPT)
#include "opt_nfsserver.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/mman.h>
#include <sys/file.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>

#ifdef NFSSERVER
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nqnfs.h>
#include <nfs/nfs_var.h>
#endif

static inline void genfs_rel_pages(struct vm_page **, int);
static void filt_genfsdetach(struct knote *);
static int filt_genfsread(struct knote *, long);
static int filt_genfsvnode(struct knote *, long);

#define MAX_READ_PAGES	16	/* XXXUBC 16 */

int
genfs_poll(void *v)
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct lwp *a_l;
	} */ *ap = v;

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

int
genfs_seek(void *v)
{
	struct vop_seek_args /* {
		struct vnode *a_vp;
		off_t a_oldoff;
		off_t a_newoff;
		struct ucred *a_ucred;
	} */ *ap = v;

	if (ap->a_newoff < 0)
		return (EINVAL);

	return (0);
}

int
genfs_abortop(void *v)
{
	struct vop_abortop_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap = v;

	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
		PNBUF_PUT(ap->a_cnp->cn_pnbuf);
	return (0);
}

int
genfs_fcntl(void *v)
{
	struct vop_fcntl_args /* {
		struct vnode *a_vp;
		u_int a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct lwp *a_l;
	} */ *ap = v;

	if (ap->a_command == F_SETFL)
		return (0);
	else
		return (EOPNOTSUPP);
}

/*ARGSUSED*/
int
genfs_badop(void *v)
{

	panic("genfs: bad op");
}

/*ARGSUSED*/
int
genfs_nullop(void *v)
{

	return (0);
}

/*ARGSUSED*/
int
genfs_einval(void *v)
{

	return (EINVAL);
}

/*
 * Called when an fs doesn't support a particular vop.
 * This takes care to vrele, vput, or vunlock passed in vnodes.
 */
int
genfs_eopnotsupp(void *v)
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		/ * other random data follows, presumably * /
	} */ *ap = v;
	struct vnodeop_desc *desc = ap->a_desc;
	struct vnode *vp, *vp_last = NULL;
	int flags, i, j, offset;

	flags = desc->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; flags >>= 1, i++) {
		if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
			break;	/* stop at end of list */
		if ((j = flags & VDESC_VP0_WILLPUT)) {
			vp = *VOPARG_OFFSETTO(struct vnode **, offset, ap);

			/* Skip if NULL */
			if (!vp)
				continue;

			switch (j) {
			case VDESC_VP0_WILLPUT:
				/* Check for dvp == vp cases */
				if (vp == vp_last)
					vrele(vp);
				else {
					vput(vp);
					vp_last = vp;
				}
				break;
			case VDESC_VP0_WILLUNLOCK:
				VOP_UNLOCK(vp, 0);
				break;
			case VDESC_VP0_WILLRELE:
				vrele(vp);
				break;
			}
		}
	}

	return (EOPNOTSUPP);
}
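
/*
 * Example (illustrative only; the "myfs" names are hypothetical): a
 * filesystem that does not implement an operation can point the
 * corresponding slot of its vnodeop table at genfs_eopnotsupp and rely
 * on the reference/lock cleanup above:
 *
 *	const struct vnodeopv_entry_desc myfs_vnodeop_entries[] = {
 *		{ &vop_default_desc, vn_default_error },
 *		{ &vop_rename_desc, genfs_eopnotsupp },
 *		...
 *		{ NULL, NULL }
 *	};
 */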

/*ARGSUSED*/
int
genfs_ebadf(void *v)
{

	return (EBADF);
}

/* ARGSUSED */
int
genfs_enoioctl(void *v)
{

	return (EPASSTHROUGH);
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
genfs_revoke(void *v)
{
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp, *vq;
	struct lwp *l = curlwp;		/* XXX */

#ifdef DIAGNOSTIC
	if ((ap->a_flags & REVOKEALL) == 0)
		panic("genfs_revoke: not revokeall");
#endif

	vp = ap->a_vp;
	simple_lock(&vp->v_interlock);

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			ltsleep(vp, PINOD|PNORELOCK, "vop_revokeall", 0,
				&vp->v_interlock);
			return (0);
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		simple_unlock(&vp->v_interlock);
		while (vp->v_flag & VALIASED) {
			simple_lock(&spechash_slock);
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				simple_unlock(&spechash_slock);
				vgone(vq);
				break;
			}
			if (vq == NULLVP)
				simple_unlock(&spechash_slock);
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		simple_lock(&vp->v_interlock);
		vp->v_flag &= ~VXLOCK;
	}
	vgonel(vp, l);
	return (0);
}

/*
 * Lock the node.
 */
int
genfs_lock(void *v)
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags, &vp->v_interlock));
}

/*
 * Unlock the node.
 */
int
genfs_unlock(void *v)
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE,
	    &vp->v_interlock));
}

/*
 * Return whether or not the node is locked.
 */
int
genfs_islocked(void *v)
{
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockstatus(vp->v_vnlock));
}
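
/*
 * Example (hypothetical "myfs"): filesystems that use the standard
 * vnode lock via v_vnlock typically wire all three stubs into their
 * vnodeop table together:
 *
 *	{ &vop_lock_desc, genfs_lock },
 *	{ &vop_unlock_desc, genfs_unlock },
 *	{ &vop_islocked_desc, genfs_islocked },
 */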

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 */
int
genfs_nolock(void *v)
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct lwp *a_l;
	} */ *ap = v;

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
}

int
genfs_nounlock(void *v)
{

	return (0);
}

int
genfs_noislocked(void *v)
{

	return (0);
}

/*
 * Local lease check for NFS servers.  Just set up args and let
 * nqsrv_getlease() do the rest.  If NFSSERVER is not in the kernel,
 * this is a null operation.
 */
int
genfs_lease_check(void *v)
{
#ifdef NFSSERVER
	struct vop_lease_args /* {
		struct vnode *a_vp;
		struct lwp *a_l;
		struct ucred *a_cred;
		int a_flag;
	} */ *ap = v;
	u_int32_t duration = 0;
	int cache;
	u_quad_t frev;

	(void) nqsrv_getlease(ap->a_vp, &duration, ND_CHECK | ap->a_flag,
	    NQLOCALSLP, ap->a_l, (struct mbuf *)0, &cache, &frev, ap->a_cred);
	return (0);
#else
	return (0);
#endif /* NFSSERVER */
}

int
genfs_mmap(void *v)
{

	return (0);
}

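/*
 * genfs_rel_pages: back out pages gathered by uvn_findpages() on an
 * error or retry path.  Pages still marked PG_FAKE hold no valid data,
 * so they are flagged PG_RELEASED and freed by uvm_page_unbusy(); the
 * rest are simply unbusied.  Called with the object locked.
 */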
static inline void
genfs_rel_pages(struct vm_page **pgs, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		if (pg == NULL)
			continue;
		if (pg->flags & PG_FAKE) {
			pg->flags |= PG_RELEASED;
		}
	}
	uvm_lock_pageq();
	uvm_page_unbusy(pgs, npages);
	uvm_unlock_pageq();
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */
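
/*
 * A sketch of the locking contract (as implemented below): the caller
 * enters holding uobj->vmobjlock.  For PGO_LOCKED requests the lock is
 * never dropped and we may not sleep; otherwise the lock is released
 * before returning, and before any I/O is issued.
 */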

int
genfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t newsize, diskeof, memeof;
	off_t offset, origoffset, startoffset, endoffset, raoffset;
	daddr_t lbn, blkno;
	int i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
	int fs_bshift, fs_bsize, dev_bshift;
	int flags = ap->a_flags;
	size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	struct vnode *vp = ap->a_vp;
	struct vnode *devvp;
	struct genfs_node *gp = VTOG(vp);
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs, *pgs_onstack[MAX_READ_PAGES];
	int pgs_size;
	struct ucred *cred = curproc->p_ucred;		/* XXXUBC curlwp */
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	boolean_t sawhole = FALSE;
	boolean_t overwrite = (flags & PGO_OVERWRITE) != 0;
	boolean_t blockalloc = write && (flags & PGO_NOBLOCKALLOC) == 0;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
	    vp->v_type == VLNK || vp->v_type == VBLK);

	/* XXXUBC temp limit */
	if (*ap->a_count > MAX_READ_PAGES) {
		panic("genfs_getpages: too many pages");
	}

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	GOP_SIZE(vp, vp->v_size, &diskeof, 0);
	if (flags & PGO_PASTEOF) {
		newsize = MAX(vp->v_size,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
	} else {
		GOP_SIZE(vp, vp->v_size, &memeof, GOP_SIZE_MEM);
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			simple_unlock(&uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
		    origoffset, *ap->a_count, memeof,0);
		return (EINVAL);
	}

	/* uobj is locked */

	if ((flags & PGO_NOTIMESTAMP) == 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		int updflags = 0;

		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
			updflags = GOP_UPDATE_ACCESSED;
		}
		if (write) {
			updflags |= GOP_UPDATE_MODIFIED;
		}
		if (updflags != 0) {
			GOP_MARKUPDATE(vp, updflags);
		}
	}

	if (write) {
		gp->g_dirtygen++;
		if ((vp->v_flag & VONWORKLST) == 0) {
			vn_syncer_add_to_worklist(vp, filedelay);
		}
		if ((vp->v_flag & (VWRITEMAP|VWRITEMAPDIRTY)) == VWRITEMAP) {
			vp->v_flag |= VWRITEMAPDIRTY;
		}
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT|UFP_NOALLOC|(write ? UFP_NORDONLY : 0));

		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
	}

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	if (vp->v_type != VBLK) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	fs_bsize = 1 << fs_bshift;

	orignpages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignpages;
	startoffset = origoffset & ~(fs_bsize - 1);
	endoffset = round_page((origoffset + (npages << PAGE_SHIFT) +
	    fs_bsize - 1) & ~(fs_bsize - 1));
	endoffset = MIN(endoffset, round_page(memeof));
	ridx = (origoffset - startoffset) >> PAGE_SHIFT;

	pgs_size = sizeof(struct vm_page *) *
	    ((endoffset - startoffset) >> PAGE_SHIFT);
	if (pgs_size > sizeof(pgs_onstack)) {
		pgs = malloc(pgs_size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (pgs == NULL) {
			simple_unlock(&uobj->vmobjlock);
			return (ENOMEM);
		}
	} else {
		pgs = pgs_onstack;
		memset(pgs, 0, pgs_size);
	}
	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
	    ridx, npages, startoffset, endoffset);
	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
	    async ? UFP_NOWAIT : UFP_ALL) != orignpages) {
		KASSERT(async != 0);
		genfs_rel_pages(&pgs[ridx], orignpages);
		simple_unlock(&uobj->vmobjlock);
		if (pgs != pgs_onstack)
			free(pgs, M_DEVBUF);
		return (EBUSY);
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg1 = pgs[ridx + i];

		if ((pg1->flags & PG_FAKE) ||
		    (blockalloc && (pg1->flags & PG_RDONLY))) {
			break;
		}
	}
	if (i == npages) {
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		raoffset = origoffset + (orignpages << PAGE_SHIFT);
		npages += ridx;
		goto out;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (flags & PGO_OVERWRITE) {
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg1 = pgs[ridx + i];

			pg1->flags &= ~(PG_RDONLY|PG_CLEAN);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignpages) {

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		genfs_rel_pages(&pgs[ridx], orignpages);
		memset(pgs, 0, pgs_size);

		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
			KASSERT(async != 0);
			genfs_rel_pages(pgs, npages);
			simple_unlock(&uobj->vmobjlock);
			if (pgs != pgs_onstack)
				free(pgs, M_DEVBUF);
			return (EBUSY);
		}
	}
	simple_unlock(&uobj->vmobjlock);

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);

	mbp = getiobuf();
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY|B_READ|(async ? B_CALL|B_ASYNC : 0);
	mbp->b_iodone = (async ? uvm_aio_biodone : NULL);
	mbp->b_vp = vp;
	if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * if the page including EOF is not PG_FAKE, skip over it since
	 * in that case it has valid data that we need to preserve.
	 */
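
	/*
	 * Worked example (assuming 4KB pages): with startoffset 0,
	 * npages 2 and diskeof at 0x1800, totalbytes is 0x2000, bytes
	 * is 0x1800 and tailbytes is 0x800.  If the page covering EOF
	 * is PG_FAKE we zero kva + 0x1800 .. 0x1fff; if it already
	 * holds valid data, tailstart rounds up to 0x2000, tailbytes
	 * drops to 0 and nothing is touched.
	 */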

	if (tailbytes > 0) {
		size_t tailstart = bytes;

		if ((pgs[bytes >> PAGE_SHIFT]->flags & PG_FAKE) == 0) {
			tailstart = round_page(tailstart);
			tailbytes -= tailstart - bytes;
		}
		UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
		    kva, tailstart, tailbytes,0);
		memset((void *)(kva + tailstart), 0, tailbytes);
	}

	/*
	 * now loop over the pages, reading as needed.
	 */

	if (blockalloc) {
		lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
	} else {
		lockmgr(&gp->g_glock, LK_SHARED, NULL);
	}

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			if ((pgs[pidx]->flags & PG_RDONLY)) {
				sawhole = TRUE;
			}
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
			    lbn, error,0,0);
			skipbytes += bytes;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno < 0) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);

			sawhole = TRUE;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			for (i = 0; i < holepages; i++) {
				if (write) {
					pgs[pidx + i]->flags &= ~PG_CLEAN;
				}
				if (!blockalloc) {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
			}
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			bp = getiobuf();
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);

		UVMHIST_LOG(ubchist,
		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    bp, offset, iobytes, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
		if (pgs != pgs_onstack)
			free(pgs, M_DEVBUF);
		return (0);
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}
	putiobuf(mbp);
	uvm_pagermapout(kva, npages);
	raoffset = startoffset + totalbytes;

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && blockalloc) {
		error = GOP_ALLOC(vp, startoffset, npages << PAGE_SHIFT, 0,
		    cred);
		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
		    startoffset, npages << PAGE_SHIFT, error,0);
		if (!error) {
			for (i = 0; i < npages; i++) {
				if (pgs[i] == NULL) {
					continue;
				}
				pgs[i]->flags &= ~(PG_CLEAN|PG_RDONLY);
				UVMHIST_LOG(ubchist, "mark dirty pg %p",
				    pgs[i],0,0,0);
			}
		}
	}
	lockmgr(&gp->g_glock, LK_RELEASE, NULL);
	simple_lock(&uobj->vmobjlock);

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		for (i = 0; i < npages; i++) {
			if (pgs[i] == NULL) {
				continue;
			}
			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
			    pgs[i], pgs[i]->flags, 0,0);
			if (pgs[i]->flags & PG_FAKE) {
				pgs[i]->flags |= PG_RELEASED;
			}
		}
		uvm_lock_pageq();
		uvm_page_unbusy(pgs, npages);
		uvm_unlock_pageq();
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
		if (pgs != pgs_onstack)
			free(pgs, M_DEVBUF);
		return (error);
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
		    pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			pg->flags &= ~(PG_FAKE);
			pmap_clear_modify(pgs[i]);
		}
		KASSERT(!write || !blockalloc || (pg->flags & PG_RDONLY) == 0);
		if (i < ridx || i >= ridx + orignpages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
			    pg, pg->offset,0,0);
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_FAKE) {
				KASSERT(overwrite);
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pageactivate(pg);
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignpages * sizeof(struct vm_page *));
	}
	if (pgs != pgs_onstack)
		free(pgs, M_DEVBUF);
	return (0);
}

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.   we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT), or
 *	if PGO_SYNCIO is set and there are pages busy.
 *	we return with the object locked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *	thus, a caller might want to unlock higher level resources
 *	(e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, then we will neither
 *	unlock the object nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.   thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the page queue lock.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.   the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).    if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.    if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).    in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).   also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.    depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.   we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.   however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	range by an estimate of the relatively higher cost of the hash lookup.
 */
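
/*
 * Example call (hypothetical caller): flush and free every page of a
 * vnode synchronously.  As genfs_null_putpages() below shows, the caller
 * enters holding vp->v_interlock (the uobj lock), which we release before
 * returning:
 *
 *	simple_lock(&vp->v_interlock);
 *	error = VOP_PUTPAGES(vp, 0, 0,
 *	    PGO_ALLPAGES | PGO_CLEANIT | PGO_FREE | PGO_SYNCIO);
 */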

int
genfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct simplelock *slock = &uobj->vmobjlock;
	off_t startoff = ap->a_offlo;
	off_t endoff = ap->a_offhi;
	off_t off;
	int flags = ap->a_flags;
	/* Even for strange MAXPHYS, the shift rounds down to a page */
	const int maxpages = MAXPHYS >> PAGE_SHIFT;
	int i, s, error, npages, nback;
	int freeflag;
	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
	boolean_t wasclean, by_list, needs_clean, yld;
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	boolean_t pagedaemon = curproc == uvm.pagedaemon_proc;
	struct lwp *l = curlwp ? curlwp : &lwp0;
	struct genfs_node *gp = VTOG(vp);
	int dirtygen;
	boolean_t modified = FALSE;
	boolean_t cleanall;

	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

	KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
	KASSERT(startoff < endoff || endoff == 0);

	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
	    vp, uobj->uo_npages, startoff, endoff - startoff);

	KASSERT((vp->v_flag & VONWORKLST) != 0 ||
	    (vp->v_flag & VWRITEMAPDIRTY) == 0);
	if (uobj->uo_npages == 0) {
		s = splbio();
		if (vp->v_flag & VONWORKLST) {
			vp->v_flag &= ~VWRITEMAPDIRTY;
			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
				vp->v_flag &= ~VONWORKLST;
				LIST_REMOVE(vp, v_synclist);
			}
		}
		splx(s);
		simple_unlock(slock);
		return (0);
	}

	/*
	 * the vnode has pages, set up to process the request.
	 */

	error = 0;
	s = splbio();
	simple_lock(&global_v_numoutput_slock);
	wasclean = (vp->v_numoutput == 0);
	simple_unlock(&global_v_numoutput_slock);
	splx(s);
	off = startoff;
	if (endoff == 0 || flags & PGO_ALLPAGES) {
		endoff = trunc_page(LLONG_MAX);
	}
	by_list = (uobj->uo_npages <=
	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);

#if !defined(DEBUG)
	/*
	 * if this vnode is known not to have dirty pages,
	 * don't bother to clean it out.
	 */

	if ((vp->v_flag & VONWORKLST) == 0) {
		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
			goto skip_scan;
		}
		flags &= ~PGO_CLEANIT;
	}
#endif /* !defined(DEBUG) */

	/*
	 * start the loop.  when scanning by list, hold the last page
	 * in the list before we start.  pages allocated after we start
	 * will be added to the end of the list, so we can stop at the
	 * current last page.
	 */

	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
	    (vp->v_flag & VONWORKLST) != 0;
	dirtygen = gp->g_dirtygen;
	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
	if (by_list) {
		curmp.uobject = uobj;
		curmp.offset = (voff_t)-1;
		curmp.flags = PG_BUSY;
		endmp.uobject = uobj;
		endmp.offset = (voff_t)-1;
		endmp.flags = PG_BUSY;
		pg = TAILQ_FIRST(&uobj->memq);
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
		PHOLD(l);
	} else {
		pg = uvm_pagelookup(uobj, off);
	}
	nextpg = NULL;
	while (by_list || off < endoff) {

		/*
		 * if the current page is not interesting, move on to the next.
		 */

		KASSERT(pg == NULL || pg->uobject == uobj);
		KASSERT(pg == NULL ||
		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		    (pg->flags & PG_BUSY) != 0);
		if (by_list) {
			if (pg == &endmp) {
				break;
			}
			if (pg->offset < startoff || pg->offset >= endoff ||
			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
					wasclean = FALSE;
				}
				pg = TAILQ_NEXT(pg, listq);
				continue;
			}
			off = pg->offset;
		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
			if (pg != NULL) {
				wasclean = FALSE;
			}
			off += PAGE_SIZE;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if the current page needs to be cleaned and it's busy,
		 * wait for it to become unbusy.
		 */

		yld = (l->l_cpu->ci_schedstate.spc_flags &
		    SPCF_SHOULDYIELD) && !pagedaemon;
		if (pg->flags & PG_BUSY || yld) {
			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
				error = EDEADLK;
				break;
			}
			KASSERT(!pagedaemon);
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq);
				UVMHIST_LOG(ubchist, "curmp next %p",
				    TAILQ_NEXT(&curmp, listq), 0,0,0);
			}
			if (yld) {
				simple_unlock(slock);
				preempt(1);
				simple_lock(slock);
			} else {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
				simple_lock(slock);
			}
			if (by_list) {
				UVMHIST_LOG(ubchist, "after next %p",
				    TAILQ_NEXT(&curmp, listq), 0,0,0);
				pg = TAILQ_NEXT(&curmp, listq);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
			} else {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if we're freeing, remove all mappings of the page now.
		 * if we're cleaning, check if the page needs to be cleaned.
		 */

		if (flags & PGO_FREE) {
			pmap_page_protect(pg, VM_PROT_NONE);
		} else if (flags & PGO_CLEANIT) {

			/*
			 * if we still have some hope to pull this vnode off
			 * from the syncer queue, write-protect the page.
			 */

			if (cleanall && wasclean &&
			    gp->g_dirtygen == dirtygen) {

				/*
				 * uobj pages get wired only by uvm_fault
				 * where uobj is locked.
				 */

				if (pg->wire_count == 0) {
					pmap_page_protect(pg,
					    VM_PROT_READ|VM_PROT_EXECUTE);
				} else {
					cleanall = FALSE;
				}
			}
		}

		if (flags & PGO_CLEANIT) {
			needs_clean = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
		} else {
			needs_clean = FALSE;
		}

		/*
		 * if we're cleaning, build a cluster.
		 * the cluster will consist of pages which are currently dirty,
		 * but they will be returned to us marked clean.
		 * if not cleaning, just operate on the one page.
		 */

		if (needs_clean) {
			KDASSERT((vp->v_flag & VONWORKLST));
			wasclean = FALSE;
			memset(pgs, 0, sizeof(pgs));
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "genfs_putpages");

			/*
			 * first look backward.
			 */

			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
			nback = npages;
			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
			if (nback) {
				memmove(&pgs[0], &pgs[npages - nback],
				    nback * sizeof(pgs[0]));
				if (npages - nback < nback)
					memset(&pgs[nback], 0,
					    (npages - nback) * sizeof(pgs[0]));
				else
					memset(&pgs[npages - nback], 0,
					    nback * sizeof(pgs[0]));
			}

			/*
			 * then plug in our page of interest.
			 */

			pgs[nback] = pg;

			/*
			 * then look forward to fill in the remaining space in
			 * the array of pages.
			 */

			npages = maxpages - nback - 1;
			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
			    &pgs[nback + 1],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
			npages += nback + 1;
		} else {
			pgs[0] = pg;
			npages = 1;
			nback = 0;
		}

		/*
		 * apply FREE or DEACTIVATE options if requested.
		 */

		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			uvm_lock_pageq();
		}
		for (i = 0; i < npages; i++) {
			tpg = pgs[i];
			KASSERT(tpg->uobject == uobj);
			if (by_list && tpg == TAILQ_NEXT(pg, listq))
				pg = tpg;
			if (tpg->offset < startoff || tpg->offset >= endoff)
				continue;
			if (flags & PGO_DEACTIVATE &&
			    (tpg->pqflags & PQ_INACTIVE) == 0 &&
			    tpg->wire_count == 0) {
				(void) pmap_clear_reference(tpg);
				uvm_pagedeactivate(tpg);
			} else if (flags & PGO_FREE) {
				pmap_page_protect(tpg, VM_PROT_NONE);
				if (tpg->flags & PG_BUSY) {
					tpg->flags |= freeflag;
					if (pagedaemon) {
						uvmexp.paging++;
						uvm_pagedequeue(tpg);
					}
				} else {

					/*
					 * ``page is not busy''
					 * implies that npages is 1
					 * and needs_clean is false.
					 */

					nextpg = TAILQ_NEXT(tpg, listq);
					uvm_pagefree(tpg);
					if (pagedaemon)
						uvmexp.pdfreed++;
				}
			}
		}
		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			uvm_unlock_pageq();
		}
		if (needs_clean) {
			modified = TRUE;

			/*
			 * start the i/o.  if we're traversing by list,
			 * keep our place in the list with a marker page.
			 */

			if (by_list) {
				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
				    listq);
			}
			simple_unlock(slock);
			error = GOP_WRITE(vp, pgs, npages, flags);
			simple_lock(slock);
			if (by_list) {
				pg = TAILQ_NEXT(&curmp, listq);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
			}
			if (error) {
				break;
			}
			if (by_list) {
				continue;
			}
		}

		/*
		 * find the next page and continue if there was no error.
		 */

		if (by_list) {
			if (nextpg) {
				pg = nextpg;
				nextpg = NULL;
			} else {
				pg = TAILQ_NEXT(pg, listq);
			}
		} else {
			off += (npages - nback) << PAGE_SHIFT;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
		}
	}
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
		PRELE(l);
	}

	if (modified && (vp->v_flag & VWRITEMAPDIRTY) != 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
	}

	/*
	 * if we're cleaning and there was nothing to clean,
	 * take us off the syncer list.  if we started any i/o
	 * and we're doing sync i/o, wait for all writes to finish.
	 */

	s = splbio();
	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
	    (vp->v_flag & VONWORKLST) != 0) {
		vp->v_flag &= ~VWRITEMAPDIRTY;
		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(vp, v_synclist);
		}
	}
	splx(s);

#if !defined(DEBUG)
skip_scan:
#endif /* !defined(DEBUG) */
	if (!wasclean && !async) {
		s = splbio();
		/*
		 * XXX - we want simple_unlock(&global_v_numoutput_slock);
		 *	 but the slot in ltsleep() is taken!
		 * XXX - try to recover from missed wakeups with a timeout..
		 *	 must think of something better.
		 */
		while (vp->v_numoutput != 0) {
			vp->v_flag |= VBWAIT;
			UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, slock, FALSE,
			    "genput2", hz);
			simple_lock(slock);
		}
		splx(s);
	}
	simple_unlock(&uobj->vmobjlock);
	return (error);
}

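/*
 * genfs_gop_write: the default GOP_WRITE() method, reached from
 * genfs_putpages() above through gp->g_op->gop_write.  It maps the busy
 * pages into pager space and writes them through one master buf (mbp),
 * splitting off nested bufs when the range covers more than one
 * contiguous run of disk blocks.
 */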
int
genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	int s, error, run;
	int fs_bshift, dev_bshift;
	vaddr_t kva;
	off_t eof, offset, startoffset;
	size_t bytes, iobytes, skipbytes;
	daddr_t lbn, blkno;
	struct vm_page *pg;
	struct buf *mbp, *bp;
	struct vnode *devvp;
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	UVMHIST_FUNC("genfs_gop_write"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
	    vp, pgs, npages, flags);

	GOP_SIZE(vp, vp->v_size, &eof, 0);
	if (vp->v_type != VBLK) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	error = 0;
	pg = pgs[0];
	startoffset = pg->offset;
	bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
	skipbytes = 0;
	KASSERT(bytes != 0);

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

	s = splbio();
	simple_lock(&global_v_numoutput_slock);
	vp->v_numoutput += 2;
	simple_unlock(&global_v_numoutput_slock);
	splx(s);
	mbp = getiobuf();
	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
	    vp, mbp, vp->v_numoutput, bytes);
	mbp->b_bufsize = npages << PAGE_SHIFT;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY|B_WRITE|B_AGE|(async ? (B_CALL|B_ASYNC) : 0);
	mbp->b_iodone = uvm_aio_biodone;
	mbp->b_vp = vp;
	if (curproc == uvm.pagedaemon_proc)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
			skipbytes += bytes;
			bytes = 0;
			break;
		}

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (blkno == (daddr_t)-1) {
			skipbytes += iobytes;
			continue;
		}

		/* if it's really one i/o, don't make a second buf */
		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
			    vp, bp, vp->v_numoutput, 0);
			bp = getiobuf();
			nestiobuf_setup(mbp, bp, offset - pg->offset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);
		UVMHIST_LOG(ubchist,
		    "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    vp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}
	if (skipbytes) {
		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
	}
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
		return (0);
	}
	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
	error = biowait(mbp);
	uvm_aio_aiodone(mbp);
	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
	return (error);
}

/*
 * VOP_PUTPAGES() for vnodes which never have pages.
 */

int
genfs_null_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	KASSERT(vp->v_uobj.uo_npages == 0);
	simple_unlock(&vp->v_interlock);
	return (0);
}

void
genfs_node_init(struct vnode *vp, const struct genfs_ops *ops)
{
	struct genfs_node *gp = VTOG(vp);

	lockinit(&gp->g_glock, PINOD, "glock", 0, 0);
	gp->g_op = ops;
}
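
/*
 * Example (hypothetical "myfs"): a filesystem initializes the genfs
 * layer when it sets up a new vnode, supplying its own genfs_ops; the
 * generic methods above can be mixed with fs-specific ones:
 *
 *	static const struct genfs_ops myfs_genfsops = {
 *		.gop_size = genfs_size,
 *		.gop_alloc = myfs_gop_alloc,
 *		.gop_write = genfs_gop_write,
 *		.gop_markupdate = myfs_gop_markupdate,
 *	};
 *	...
 *	genfs_node_init(vp, &myfs_genfsops);
 */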

void
genfs_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
{
	int bsize;

	bsize = 1 << vp->v_mount->mnt_fs_bshift;
	*eobp = (size + bsize - 1) & ~(bsize - 1);
}
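
/*
 * Worked example: with a 16KB file system block (mnt_fs_bshift == 14),
 * a size of 0x4200 yields an end-of-block of 0x8000, while a size that
 * is already block-aligned is returned unchanged.
 */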

int
genfs_compat_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t origoffset;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs;
	vaddr_t kva;
	int i, error, orignpages, npages;
	struct iovec iov;
	struct uio uio;
	struct ucred *cred = curproc->p_ucred;
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	pgs = ap->a_m;

	if (write && (vp->v_flag & VONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	if (ap->a_flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT|UFP_NOALLOC|(write ? UFP_NORDONLY : 0));

		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
	}
	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
		simple_unlock(&uobj->vmobjlock);
		return (EINVAL);
	}
	if ((ap->a_flags & PGO_SYNCIO) == 0) {
		simple_unlock(&uobj->vmobjlock);
		return 0;
	}
	npages = orignpages;
	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
	simple_unlock(&uobj->vmobjlock);
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if ((pg->flags & PG_FAKE) == 0) {
			continue;
		}
		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
		iov.iov_len = PAGE_SIZE;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
		uio.uio_rw = UIO_READ;
		uio.uio_resid = PAGE_SIZE;
		UIO_SETUP_SYSSPACE(&uio);
		/* XXX vn_lock */
		error = VOP_READ(vp, &uio, 0, cred);
		if (error) {
			break;
		}
		if (uio.uio_resid) {
			memset(iov.iov_base, 0, uio.uio_resid);
		}
	}
	uvm_pagermapout(kva, npages);
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (error && (pg->flags & PG_FAKE) != 0) {
			pg->flags |= PG_RELEASED;
		} else {
			pmap_clear_modify(pg);
			uvm_pageactivate(pg);
		}
	}
	if (error) {
		uvm_page_unbusy(pgs, npages);
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	return (error);
}

int
genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	off_t offset;
	struct iovec iov;
	struct uio uio;
	struct ucred *cred = curproc->p_ucred;
	struct buf *bp;
	vaddr_t kva;
	int s, error;

	offset = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

	iov.iov_base = (void *)kva;
	iov.iov_len = npages << PAGE_SHIFT;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_rw = UIO_WRITE;
	uio.uio_resid = npages << PAGE_SHIFT;
	UIO_SETUP_SYSSPACE(&uio);
	/* XXX vn_lock */
	error = VOP_WRITE(vp, &uio, 0, cred);

	s = splbio();
	V_INCR_NUMOUTPUT(vp);
	splx(s);

	bp = getiobuf();
	bp->b_flags = B_BUSY | B_WRITE | B_AGE;
	bp->b_vp = vp;
	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
	bp->b_data = (char *)kva;
	bp->b_bcount = npages << PAGE_SHIFT;
	bp->b_bufsize = npages << PAGE_SHIFT;
	bp->b_resid = 0;
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	}
	uvm_aio_aiodone(bp);
	return (error);
}

static void
filt_genfsdetach(struct knote *kn)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	/* XXXLUKEM lock the struct? */
	SLIST_REMOVE(&vp->v_klist, kn, knote, kn_selnext);
}

static int
filt_genfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	/*
	 * filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/* XXXLUKEM lock the struct? */
	kn->kn_data = vp->v_size - kn->kn_fp->f_offset;
	return (kn->kn_data != 0);
}

static int
filt_genfsvnode(struct knote *kn, long hint)
{

	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_fflags != 0);
}

static const struct filterops genfsread_filtops =
	{ 1, NULL, filt_genfsdetach, filt_genfsread };
static const struct filterops genfsvnode_filtops =
	{ 1, NULL, filt_genfsdetach, filt_genfsvnode };

int
genfs_kqfilter(void *v)
{
	struct vop_kqfilter_args /* {
		struct vnode	*a_vp;
		struct knote	*a_kn;
	} */ *ap = v;
	struct vnode *vp;
	struct knote *kn;

	vp = ap->a_vp;
	kn = ap->a_kn;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &genfsread_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &genfsvnode_filtops;
		break;
	default:
		return (1);
	}

	kn->kn_hook = vp;

	/* XXXLUKEM lock the struct? */
	SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);

	return (0);
}
   1778