/*	$NetBSD: genfs_vnops.c,v 1.82 2003/09/24 10:22:53 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.82 2003/09/24 10:22:53 yamt Exp $");

#include "opt_nfsserver.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/mman.h>
#include <sys/file.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>

#ifdef NFSSERVER
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nqnfs.h>
#include <nfs/nfs_var.h>
#endif

static __inline void genfs_rel_pages(struct vm_page **, int);
static void filt_genfsdetach(struct knote *);
static int filt_genfsread(struct knote *, long);
static int filt_genfsvnode(struct knote *, long);


#define MAX_READ_AHEAD	16	/* XXXUBC 16 */
int genfs_rapages = MAX_READ_AHEAD; /* # of pages in each chunk of readahead */
int genfs_racount = 2;		/* # of page chunks to readahead */
int genfs_raskip = 2;		/* # of busy page chunks allowed to skip */

int
genfs_poll(void *v)
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct proc *a_p;
	} */ *ap = v;

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

int
genfs_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int wait;

	wait = (ap->a_flags & FSYNC_WAIT) != 0;
	vflushbuf(vp, wait);
	if ((ap->a_flags & FSYNC_DATAONLY) != 0)
		return (0);
	else
		return (VOP_UPDATE(vp, NULL, NULL, wait ? UPDATE_WAIT : 0));
}

int
genfs_seek(void *v)
{
	struct vop_seek_args /* {
		struct vnode *a_vp;
		off_t a_oldoff;
		off_t a_newoff;
		struct ucred *a_ucred;
	} */ *ap = v;

	if (ap->a_newoff < 0)
		return (EINVAL);

	return (0);
}

int
genfs_abortop(void *v)
{
	struct vop_abortop_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap = v;

	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
		PNBUF_PUT(ap->a_cnp->cn_pnbuf);
	return (0);
}

int
genfs_fcntl(void *v)
{
	struct vop_fcntl_args /* {
		struct vnode *a_vp;
		u_int a_command;
		caddr_t a_data;
		int a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;

	if (ap->a_command == F_SETFL)
		return (0);
	else
		return (EOPNOTSUPP);
}

/*ARGSUSED*/
int
genfs_badop(void *v)
{

	panic("genfs: bad op");
}

/*ARGSUSED*/
int
genfs_nullop(void *v)
{

	return (0);
}

/*ARGSUSED*/
int
genfs_einval(void *v)
{

	return (EINVAL);
}

/*
 * Called when an fs doesn't support a particular vop.
 * This takes care to vrele, vput, or vunlock passed in vnodes.
 */
int
genfs_eopnotsupp(void *v)
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		/ * other random data follows, presumably * /
	} */ *ap = v;
	struct vnodeop_desc *desc = ap->a_desc;
	struct vnode *vp, *vp_last = NULL;
	int flags, i, j, offset;

	flags = desc->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; flags >>= 1, i++) {
		if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
			break;	/* stop at end of list */
		if ((j = flags & VDESC_VP0_WILLPUT)) {
			vp = *VOPARG_OFFSETTO(struct vnode **, offset, ap);

			/* Skip if NULL */
			if (!vp)
				continue;

			switch (j) {
			case VDESC_VP0_WILLPUT:
				/* Check for dvp == vp cases */
				if (vp == vp_last)
					vrele(vp);
				else {
					vput(vp);
					vp_last = vp;
				}
				break;
			case VDESC_VP0_WILLUNLOCK:
				VOP_UNLOCK(vp, 0);
				break;
			case VDESC_VP0_WILLRELE:
				vrele(vp);
				break;
			}
		}
	}

	return (EOPNOTSUPP);
}

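/*
 * Example (an illustrative sketch for a hypothetical "foofs", not part
 * of the original file): a filesystem typically points the operations
 * it does not implement at genfs_eopnotsupp() in its vnodeop entry
 * table, next to the other genfs stubs defined in this file:
 *
 *	const struct vnodeopv_entry_desc foofs_vnodeop_entries[] = {
 *		{ &vop_default_desc, vn_default_error },
 *		{ &vop_poll_desc, genfs_poll },
 *		{ &vop_fcntl_desc, genfs_fcntl },
 *		{ &vop_lock_desc, genfs_lock },
 *		{ &vop_unlock_desc, genfs_unlock },
 *		{ &vop_islocked_desc, genfs_islocked },
 *		{ &vop_rename_desc, genfs_eopnotsupp },
 *		{ NULL, NULL }
 *	};
 */
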
/*ARGSUSED*/
int
genfs_ebadf(void *v)
{

	return (EBADF);
}

/* ARGSUSED */
int
genfs_enoioctl(void *v)
{

	return (EPASSTHROUGH);
}


/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
genfs_revoke(void *v)
{
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp, *vq;
	struct proc *p = curproc;	/* XXX */

#ifdef DIAGNOSTIC
	if ((ap->a_flags & REVOKEALL) == 0)
		panic("genfs_revoke: not revokeall");
#endif

	vp = ap->a_vp;
	simple_lock(&vp->v_interlock);

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			simple_unlock(&vp->v_interlock);
			tsleep((caddr_t)vp, PINOD, "vop_revokeall", 0);
			return (0);
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		simple_unlock(&vp->v_interlock);
		while (vp->v_flag & VALIASED) {
			simple_lock(&spechash_slock);
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				simple_unlock(&spechash_slock);
				vgone(vq);
				break;
			}
			if (vq == NULLVP)
				simple_unlock(&spechash_slock);
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		simple_lock(&vp->v_interlock);
		vp->v_flag &= ~VXLOCK;
	}
	vgonel(vp, p);
	return (0);
}

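/*
 * Example (sketch): this routine is normally reached via VOP_REVOKE()
 * with REVOKEALL set, e.g. when a device is being detached:
 *
 *	VOP_REVOKE(vp, REVOKEALL);
 */
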
/*
 * Lock the node.
 */
int
genfs_lock(void *v)
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockmgr(&vp->v_lock, ap->a_flags, &vp->v_interlock));
}

/*
 * Unlock the node.
 */
int
genfs_unlock(void *v)
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockmgr(&vp->v_lock, ap->a_flags | LK_RELEASE,
	    &vp->v_interlock));
}

/*
 * Return whether or not the node is locked.
 */
int
genfs_islocked(void *v)
{
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockstatus(&vp->v_lock));
}

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 */
int
genfs_nolock(void *v)
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct proc *a_p;
	} */ *ap = v;

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
}

int
genfs_nounlock(void *v)
{

	return (0);
}

int
genfs_noislocked(void *v)
{

	return (0);
}

/*
 * Local lease check for NFS servers.  Just set up args and let
 * nqsrv_getlease() do the rest.  If NFSSERVER is not in the kernel,
 * this is a null operation.
 */
int
genfs_lease_check(void *v)
{
#ifdef NFSSERVER
	struct vop_lease_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
		struct ucred *a_cred;
		int a_flag;
	} */ *ap = v;
	u_int32_t duration = 0;
	int cache;
	u_quad_t frev;

	(void) nqsrv_getlease(ap->a_vp, &duration, ND_CHECK | ap->a_flag,
	    NQLOCALSLP, ap->a_p, (struct mbuf *)0, &cache, &frev, ap->a_cred);
	return (0);
#else
	return (0);
#endif /* NFSSERVER */
}

int
genfs_mmap(void *v)
{

	return (0);
}

static __inline void
genfs_rel_pages(struct vm_page **pgs, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		if (pg == NULL)
			continue;
		if (pg->flags & PG_FAKE) {
			pg->flags |= PG_RELEASED;
		}
	}
	uvm_lock_pageq();
	uvm_page_unbusy(pgs, npages);
	uvm_unlock_pageq();
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */

int
genfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t newsize, diskeof, memeof;
	off_t offset, origoffset, startoffset, endoffset, raoffset;
	daddr_t lbn, blkno;
	int s, i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
	int fs_bshift, fs_bsize, dev_bshift;
	int flags = ap->a_flags;
	size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	struct vnode *vp = ap->a_vp;
	struct vnode *devvp;
	struct genfs_node *gp = VTOG(vp);
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs, *pgs_onstack[MAX_READ_AHEAD];
	int pgs_size;
	struct ucred *cred = curproc->p_ucred;		/* XXXUBC curlwp */
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	boolean_t sawhole = FALSE;
	boolean_t overwrite = (flags & PGO_OVERWRITE) != 0;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	/* XXXUBC temp limit */
	if (*ap->a_count > MAX_READ_AHEAD) {
		panic("genfs_getpages: too many pages");
	}

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	GOP_SIZE(vp, vp->v_size, &diskeof, GOP_SIZE_READ);
	if (flags & PGO_PASTEOF) {
		newsize = MAX(vp->v_size,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_READ|GOP_SIZE_MEM);
	} else {
		GOP_SIZE(vp, vp->v_size, &memeof, GOP_SIZE_READ|GOP_SIZE_MEM);
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			simple_unlock(&uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
		    origoffset, *ap->a_count, memeof,0);
		return (EINVAL);
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT|UFP_NOALLOC| (write ? UFP_NORDONLY : 0));

		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
	}

	/* vnode is VOP_LOCKed, uobj is locked */

	if (write && (vp->v_flag & VONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	if (vp->v_type == VREG) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	fs_bsize = 1 << fs_bshift;

	orignpages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignpages;
	startoffset = origoffset & ~(fs_bsize - 1);
	endoffset = round_page((origoffset + (npages << PAGE_SHIFT) +
	    fs_bsize - 1) & ~(fs_bsize - 1));
	endoffset = MIN(endoffset, round_page(memeof));
	ridx = (origoffset - startoffset) >> PAGE_SHIFT;

	pgs_size = sizeof(struct vm_page *) *
	    ((endoffset - startoffset) >> PAGE_SHIFT);
	if (pgs_size > sizeof(pgs_onstack)) {
		pgs = malloc(pgs_size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (pgs == NULL) {
			simple_unlock(&uobj->vmobjlock);
			return (ENOMEM);
		}
	} else {
		pgs = pgs_onstack;
		memset(pgs, 0, pgs_size);
	}
	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
	    ridx, npages, startoffset, endoffset);
	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
	    async ? UFP_NOWAIT : UFP_ALL) != orignpages) {
		KASSERT(async != 0);
		genfs_rel_pages(&pgs[ridx], orignpages);
		simple_unlock(&uobj->vmobjlock);
		if (pgs != pgs_onstack)
			free(pgs, M_DEVBUF);
		return (EBUSY);
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[ridx + i];

		if ((pg->flags & PG_FAKE) ||
		    (write && (pg->flags & PG_RDONLY))) {
			break;
		}
	}
	if (i == npages) {
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		raoffset = origoffset + (orignpages << PAGE_SHIFT);
		npages += ridx;
		goto raout;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (flags & PGO_OVERWRITE) {
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg = pgs[ridx + i];

			pg->flags &= ~(PG_RDONLY|PG_CLEAN);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignpages) {

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		genfs_rel_pages(&pgs[ridx], orignpages);
		memset(pgs, 0, pgs_size);

		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
			KASSERT(async != 0);
			genfs_rel_pages(pgs, npages);
			simple_unlock(&uobj->vmobjlock);
			if (pgs != pgs_onstack)
				free(pgs, M_DEVBUF);
			return (EBUSY);
		}
	}
	simple_unlock(&uobj->vmobjlock);

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);

	s = splbio();
	mbp = pool_get(&bufpool, PR_WAITOK);
	splx(s);
	BUF_INIT(mbp);
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY|B_READ| (async ? B_CALL|B_ASYNC : 0);
	mbp->b_iodone = (async ? uvm_aio_biodone : NULL);
	mbp->b_vp = vp;

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * if the page including EOF is not PG_FAKE, skip over it since
	 * in that case it has valid data that we need to preserve.
	 */

	if (tailbytes > 0) {
		size_t tailstart = bytes;

		if ((pgs[bytes >> PAGE_SHIFT]->flags & PG_FAKE) == 0) {
			tailstart = round_page(tailstart);
			tailbytes -= tailstart - bytes;
		}
		UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
		    kva, tailstart, tailbytes,0);
		memset((void *)(kva + tailstart), 0, tailbytes);
	}

	/*
	 * now loop over the pages, reading as needed.
	 */

	if (write) {
		lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
	} else {
		lockmgr(&gp->g_glock, LK_SHARED, NULL);
	}

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & (PG_FAKE|PG_RDONLY)) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
			    lbn, error,0,0);
			skipbytes += bytes;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  if this is a read access, mark the
		 * pages we zeroed PG_RDONLY.
		 */

		if (blkno < 0) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);

			sawhole = TRUE;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			for (i = 0; i < holepages; i++) {
				if (write) {
					pgs[pidx + i]->flags &= ~PG_CLEAN;
				} else {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
			}
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			s = splbio();
			bp = pool_get(&bufpool, PR_WAITOK);
			splx(s);
			BUF_INIT(bp);
			bp->b_data = (char *)kva + offset - startoffset;
			bp->b_resid = bp->b_bcount = iobytes;
			bp->b_flags = B_BUSY|B_READ|B_CALL|B_ASYNC;
			bp->b_iodone = uvm_aio_biodone1;
			bp->b_vp = vp;
			bp->b_proc = NULL;
		}
		bp->b_lblkno = 0;
		bp->b_private = mbp;
		if (devvp->v_type == VBLK) {
			bp->b_dev = devvp->v_rdev;
		}

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);

		UVMHIST_LOG(ubchist,
		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    bp, offset, iobytes, bp->b_blkno);

		VOP_STRATEGY(bp);
	}

loopdone:
	if (skipbytes) {
		s = splbio();
		if (error) {
			mbp->b_flags |= B_ERROR;
			mbp->b_error = error;
		}
		mbp->b_resid -= skipbytes;
		if (mbp->b_resid == 0) {
			biodone(mbp);
		}
		splx(s);
	}

	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
		if (pgs != pgs_onstack)
			free(pgs, M_DEVBUF);
		return (0);
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}
	s = splbio();
	pool_put(&bufpool, mbp);
	splx(s);
	uvm_pagermapout(kva, npages);
	raoffset = startoffset + totalbytes;

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && write) {
		for (i = 0; i < npages; i++) {
			if (pgs[i] == NULL) {
				continue;
			}
			pgs[i]->flags &= ~PG_CLEAN;
			UVMHIST_LOG(ubchist, "mark dirty pg %p", pgs[i],0,0,0);
		}
		error = GOP_ALLOC(vp, startoffset, npages << PAGE_SHIFT, 0,
		    cred);
		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
		    startoffset, npages << PAGE_SHIFT, error,0);
	}
	lockmgr(&gp->g_glock, LK_RELEASE, NULL);
	simple_lock(&uobj->vmobjlock);

	/*
	 * see if we want to start any readahead.
	 * XXXUBC for now, just read the next 128k on 64k boundaries.
	 * this is pretty nonsensical, but it is 50% faster than reading
	 * just the next 64k.
	 */

raout:
	if (!error && !async && !write && ((int)raoffset & 0xffff) == 0 &&
	    PAGE_SHIFT <= 16) {
		off_t rasize;
		int rapages, err, i, skipped;

		/* XXXUBC temp limit, from above */
		rapages = MIN(MIN(1 << (16 - PAGE_SHIFT), MAX_READ_AHEAD),
		    genfs_rapages);
		rasize = rapages << PAGE_SHIFT;
		for (i = skipped = 0; i < genfs_racount; i++) {
			err = VOP_GETPAGES(vp, raoffset, NULL, &rapages, 0,
			    VM_PROT_READ, 0, 0);
			simple_lock(&uobj->vmobjlock);
			if (err) {
				if (err != EBUSY ||
				    skipped++ == genfs_raskip)
					break;
			}
			raoffset += rasize;
			rapages = rasize >> PAGE_SHIFT;
		}
	}

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		for (i = 0; i < npages; i++) {
			if (pgs[i] == NULL) {
				continue;
			}
			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
			    pgs[i], pgs[i]->flags, 0,0);
			if (pgs[i]->flags & PG_FAKE) {
				pgs[i]->flags |= PG_RELEASED;
			}
		}
		uvm_lock_pageq();
		uvm_page_unbusy(pgs, npages);
		uvm_unlock_pageq();
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
		if (pgs != pgs_onstack)
			free(pgs, M_DEVBUF);
		return (error);
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
		    pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			pg->flags &= ~(PG_FAKE);
			pmap_clear_modify(pgs[i]);
		}
		if (write) {
			pg->flags &= ~(PG_RDONLY);
		}
		if (i < ridx || i >= ridx + orignpages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
			    pg, pg->offset,0,0);
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_FAKE) {
				KASSERT(overwrite);
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pageactivate(pg);
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignpages * sizeof(struct vm_page *));
	}
	if (pgs != pgs_onstack)
		free(pgs, M_DEVBUF);
	return (0);
}

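/*
 * Example (sketch): a synchronous caller asking for a single busy page.
 * The uobj lock (the vnode interlock) is held on entry; on return
 * without PGO_LOCKED the lock has been released and, on success, pg is
 * PG_BUSY and owned by the caller:
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1, error;
 *
 *	simple_lock(&vp->v_interlock);
 *	error = VOP_GETPAGES(vp, trunc_page(offset), &pg, &npages, 0,
 *	    VM_PROT_READ, 0, PGO_SYNCIO);
 */
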
/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.   we may _unlock_ the object
 *	during the operation if (and only if) we need to clean a page
 *	(PGO_CLEANIT), or if PGO_SYNCIO is set and there are pages busy.
 *	we always return with the object unlocked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *	thus, a caller might want to unlock higher level resources
 *	(e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, then we will neither
 *	unlock the object mid-operation nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.   thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the page queue lock.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.   the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).    if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.    if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).    in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).   also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busyed.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	by page doing hash table lookups for each address.    depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.   we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.   however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	range by an estimate of the relatively higher cost of the hash lookup.
 */

int
genfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct simplelock *slock = &uobj->vmobjlock;
	off_t startoff = ap->a_offlo;
	off_t endoff = ap->a_offhi;
	off_t off;
	int flags = ap->a_flags;
	/* Even for strange MAXPHYS, the shift rounds down to a page */
	const int maxpages = MAXPHYS >> PAGE_SHIFT;
	int i, s, error, npages, nback;
	int freeflag;
	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
	boolean_t wasclean, by_list, needs_clean, yield;
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	boolean_t pagedaemon = curproc == uvm.pagedaemon_proc;
	struct lwp *l = curlwp ? curlwp : &lwp0;

	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

	KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
	KASSERT(startoff < endoff || endoff == 0);

	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
	    vp, uobj->uo_npages, startoff, endoff - startoff);
	if (uobj->uo_npages == 0) {
		s = splbio();
		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
		    (vp->v_flag & VONWORKLST)) {
			vp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(vp, v_synclist);
		}
		splx(s);
		simple_unlock(slock);
		return (0);
	}

	/*
	 * the vnode has pages, set up to process the request.
	 */

	error = 0;
	s = splbio();
	simple_lock(&global_v_numoutput_slock);
	wasclean = (vp->v_numoutput == 0);
	simple_unlock(&global_v_numoutput_slock);
	splx(s);
	off = startoff;
	if (endoff == 0 || flags & PGO_ALLPAGES) {
		endoff = trunc_page(LLONG_MAX);
	}
	by_list = (uobj->uo_npages <=
	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);

	/*
	 * start the loop.  when scanning by list, hold the last page
	 * in the list before we start.  pages allocated after we start
	 * will be added to the end of the list, so we can stop at the
	 * current last page.
	 */

	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
	curmp.uobject = uobj;
	curmp.offset = (voff_t)-1;
	curmp.flags = PG_BUSY;
	endmp.uobject = uobj;
	endmp.offset = (voff_t)-1;
	endmp.flags = PG_BUSY;
	if (by_list) {
		pg = TAILQ_FIRST(&uobj->memq);
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
		PHOLD(l);
	} else {
		pg = uvm_pagelookup(uobj, off);
	}
	nextpg = NULL;
	while (by_list || off < endoff) {

		/*
		 * if the current page is not interesting, move on to the next.
		 */

		KASSERT(pg == NULL || pg->uobject == uobj);
		KASSERT(pg == NULL ||
		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		    (pg->flags & PG_BUSY) != 0);
		if (by_list) {
			if (pg == &endmp) {
				break;
			}
			if (pg->offset < startoff || pg->offset >= endoff ||
			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
				pg = TAILQ_NEXT(pg, listq);
				continue;
			}
			off = pg->offset;
		} else if (pg == NULL ||
		    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
			off += PAGE_SIZE;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if the current page needs to be cleaned and it's busy,
		 * wait for it to become unbusy.
		 */

		yield = (l->l_cpu->ci_schedstate.spc_flags &
		    SPCF_SHOULDYIELD) && !pagedaemon;
		if (pg->flags & PG_BUSY || yield) {
			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
				error = EDEADLK;
				break;
			}
			KASSERT(!pagedaemon);
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq);
				UVMHIST_LOG(ubchist, "curmp next %p",
				    TAILQ_NEXT(&curmp, listq), 0,0,0);
			}
			if (yield) {
				simple_unlock(slock);
				preempt(1);
				simple_lock(slock);
			} else {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
				simple_lock(slock);
			}
			if (by_list) {
				UVMHIST_LOG(ubchist, "after next %p",
				    TAILQ_NEXT(&curmp, listq), 0,0,0);
				pg = TAILQ_NEXT(&curmp, listq);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
			} else {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if we're freeing, remove all mappings of the page now.
		 * if we're cleaning, check if the page needs to be cleaned.
		 */

		if (flags & PGO_FREE) {
			pmap_page_protect(pg, VM_PROT_NONE);
		}
		if (flags & PGO_CLEANIT) {
			needs_clean = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
		} else {
			needs_clean = FALSE;
		}

		/*
		 * if we're cleaning, build a cluster.
		 * the cluster will consist of pages which are currently dirty,
		 * but they will be returned to us marked clean.
		 * if not cleaning, just operate on the one page.
		 */

		if (needs_clean) {
			wasclean = FALSE;
			memset(pgs, 0, sizeof(pgs));
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "genfs_putpages");

			/*
			 * first look backward.
			 */

			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
			nback = npages;
			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
			if (nback) {
				memmove(&pgs[0], &pgs[npages - nback],
				    nback * sizeof(pgs[0]));
				if (npages - nback < nback)
					memset(&pgs[nback], 0,
					    (npages - nback) * sizeof(pgs[0]));
				else
					memset(&pgs[npages - nback], 0,
					    nback * sizeof(pgs[0]));
			}

			/*
			 * then plug in our page of interest.
			 */

			pgs[nback] = pg;

			/*
			 * then look forward to fill in the remaining space in
			 * the array of pages.
			 */

			npages = maxpages - nback - 1;
			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
			    &pgs[nback + 1],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
			npages += nback + 1;
		} else {
			pgs[0] = pg;
			npages = 1;
			nback = 0;
		}

		/*
		 * apply FREE or DEACTIVATE options if requested.
		 */

		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			uvm_lock_pageq();
		}
		for (i = 0; i < npages; i++) {
			tpg = pgs[i];
			KASSERT(tpg->uobject == uobj);
			if (by_list && tpg == TAILQ_NEXT(pg, listq))
				pg = tpg;
			if (tpg->offset < startoff || tpg->offset >= endoff)
				continue;
			if (flags & PGO_DEACTIVATE &&
			    (tpg->pqflags & PQ_INACTIVE) == 0 &&
			    tpg->wire_count == 0) {
				(void) pmap_clear_reference(tpg);
				uvm_pagedeactivate(tpg);
			} else if (flags & PGO_FREE) {
				pmap_page_protect(tpg, VM_PROT_NONE);
				if (tpg->flags & PG_BUSY) {
					tpg->flags |= freeflag;
					if (pagedaemon) {
						uvmexp.paging++;
						uvm_pagedequeue(tpg);
					}
				} else {

					/*
					 * ``page is not busy''
					 * implies that npages is 1
					 * and needs_clean is false.
					 */

					nextpg = TAILQ_NEXT(tpg, listq);
					uvm_pagefree(tpg);
				}
			}
		}
		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			uvm_unlock_pageq();
		}
		if (needs_clean) {

			/*
			 * start the i/o.  if we're traversing by list,
			 * keep our place in the list with a marker page.
			 */

			if (by_list) {
				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
				    listq);
			}
			simple_unlock(slock);
			error = GOP_WRITE(vp, pgs, npages, flags);
			simple_lock(slock);
			if (by_list) {
				pg = TAILQ_NEXT(&curmp, listq);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
			}
			if (error) {
				break;
			}
			if (by_list) {
				continue;
			}
		}

		/*
		 * find the next page and continue if there was no error.
		 */

		if (by_list) {
			if (nextpg) {
				pg = nextpg;
				nextpg = NULL;
			} else {
				pg = TAILQ_NEXT(pg, listq);
			}
		} else {
			off += (npages - nback) << PAGE_SHIFT;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
		}
	}
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
		PRELE(l);
	}

	/*
	 * if we're cleaning and there was nothing to clean,
	 * take us off the syncer list.  if we started any i/o
	 * and we're doing sync i/o, wait for all writes to finish.
	 */

	s = splbio();
	if ((flags & PGO_CLEANIT) && wasclean &&
	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
	    (vp->v_flag & VONWORKLST)) {
		vp->v_flag &= ~VONWORKLST;
		LIST_REMOVE(vp, v_synclist);
	}
	splx(s);
	if (!wasclean && !async) {
		s = splbio();
		/*
		 * XXX - we want simple_unlock(&global_v_numoutput_slock);
		 *	 but the slot in ltsleep() is taken!
		 * XXX - try to recover from missed wakeups with a timeout..
		 *	 must think of something better.
		 */
		while (vp->v_numoutput != 0) {
			vp->v_flag |= VBWAIT;
			UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, slock, FALSE,
			    "genput2", hz);
			simple_lock(slock);
		}
		splx(s);
	}
	simple_unlock(&uobj->vmobjlock);
	return (error);
}

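/*
 * Example (sketch): flushing every page of a vnode synchronously.
 * The caller enters with the uobj lock (the vnode interlock) held,
 * and VOP_PUTPAGES() returns with it released:
 *
 *	simple_lock(&vp->v_interlock);
 *	error = VOP_PUTPAGES(vp, 0, 0,
 *	    PGO_ALLPAGES | PGO_CLEANIT | PGO_SYNCIO);
 */
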
int
genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	int s, error, run;
	int fs_bshift, dev_bshift;
	vaddr_t kva;
	off_t eof, offset, startoffset;
	size_t bytes, iobytes, skipbytes;
	daddr_t lbn, blkno;
	struct vm_page *pg;
	struct buf *mbp, *bp;
	struct vnode *devvp;
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	UVMHIST_FUNC("genfs_gop_write"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
	    vp, pgs, npages, flags);

	GOP_SIZE(vp, vp->v_size, &eof, GOP_SIZE_WRITE);
	if (vp->v_type == VREG) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	error = 0;
	pg = pgs[0];
	startoffset = pg->offset;
	bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
	skipbytes = 0;
	KASSERT(bytes != 0);

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

	s = splbio();
	simple_lock(&global_v_numoutput_slock);
	vp->v_numoutput += 2;
	simple_unlock(&global_v_numoutput_slock);
	mbp = pool_get(&bufpool, PR_WAITOK);
	BUF_INIT(mbp);
	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
	    vp, mbp, vp->v_numoutput, bytes);
	splx(s);
	mbp->b_bufsize = npages << PAGE_SHIFT;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY|B_WRITE|B_AGE| (async ? (B_CALL|B_ASYNC) : 0);
	mbp->b_iodone = uvm_aio_biodone;
	mbp->b_vp = vp;

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
			skipbytes += bytes;
			bytes = 0;
			break;
		}

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (blkno == (daddr_t)-1) {
			skipbytes += iobytes;
			continue;
		}

		/* if it's really one i/o, don't make a second buf */
		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			s = splbio();
			V_INCR_NUMOUTPUT(vp);
			bp = pool_get(&bufpool, PR_WAITOK);
			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
			    vp, bp, vp->v_numoutput, 0);
			splx(s);
			BUF_INIT(bp);
			bp->b_data = (char *)kva +
			    (vaddr_t)(offset - pg->offset);
			bp->b_resid = bp->b_bcount = iobytes;
			bp->b_flags = B_BUSY|B_WRITE|B_CALL|B_ASYNC;
			bp->b_iodone = uvm_aio_biodone1;
			bp->b_vp = vp;
		}
		bp->b_lblkno = 0;
		bp->b_private = mbp;
		if (devvp->v_type == VBLK) {
			bp->b_dev = devvp->v_rdev;
		}

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);
		UVMHIST_LOG(ubchist,
		    "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    vp, offset, bp->b_bcount, bp->b_blkno);
		VOP_STRATEGY(bp);
	}
	if (skipbytes) {
		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
		s = splbio();
		if (error) {
			mbp->b_flags |= B_ERROR;
			mbp->b_error = error;
		}
		mbp->b_resid -= skipbytes;
		if (mbp->b_resid == 0) {
			biodone(mbp);
		}
		splx(s);
	}
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
		return (0);
	}
	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
	error = biowait(mbp);
	uvm_aio_aiodone(mbp);
	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
	return (error);
}

/*
 * VOP_PUTPAGES() for vnodes which never have pages.
 */

int
genfs_null_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	KASSERT(vp->v_uobj.uo_npages == 0);
	simple_unlock(&vp->v_interlock);
	return (0);
}

void
genfs_node_init(struct vnode *vp, struct genfs_ops *ops)
{
	struct genfs_node *gp = VTOG(vp);

	lockinit(&gp->g_glock, PINOD, "glock", 0, 0);
	gp->g_op = ops;
}

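/*
 * Example (sketch for a hypothetical "foofs"; foofs_gop_alloc is an
 * invented name): a filesystem supplies its genfs_ops at vnode set-up
 * time, mixing its own routines with the generic ones from this file
 * via the gop_size/gop_alloc/gop_write hooks behind the GOP_* macros
 * used above:
 *
 *	static struct genfs_ops foofs_genfsops = {
 *		genfs_size,		/ * gop_size * /
 *		foofs_gop_alloc,	/ * gop_alloc * /
 *		genfs_gop_write,	/ * gop_write * /
 *	};
 *
 *	genfs_node_init(vp, &foofs_genfsops);
 */
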
void
genfs_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
{
	int bsize;

	bsize = 1 << vp->v_mount->mnt_fs_bshift;
	*eobp = (size + bsize - 1) & ~(bsize - 1);
}

int
genfs_compat_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t origoffset;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs;
	vaddr_t kva;
	int i, error, orignpages, npages;
	struct iovec iov;
	struct uio uio;
	struct ucred *cred = curproc->p_ucred;
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	pgs = ap->a_m;

	if (write && (vp->v_flag & VONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	if (ap->a_flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT|UFP_NOALLOC| (write ? UFP_NORDONLY : 0));

		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
	}
	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
		simple_unlock(&uobj->vmobjlock);
		return (EINVAL);
	}
	npages = orignpages;
	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
	simple_unlock(&uobj->vmobjlock);
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if ((pg->flags & PG_FAKE) == 0) {
			continue;
		}
		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
		iov.iov_len = PAGE_SIZE;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_resid = PAGE_SIZE;
		uio.uio_procp = curproc;
		error = VOP_READ(vp, &uio, 0, cred);
		if (error) {
			break;
		}
		if (uio.uio_resid) {
			memset(iov.iov_base, 0, uio.uio_resid);
		}
	}
	uvm_pagermapout(kva, npages);
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (error && (pg->flags & PG_FAKE) != 0) {
			pg->flags |= PG_RELEASED;
		} else {
			pmap_clear_modify(pg);
			uvm_pageactivate(pg);
		}
	}
	if (error) {
		uvm_page_unbusy(pgs, npages);
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	return (error);
}

int
genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	off_t offset;
	struct iovec iov;
	struct uio uio;
	struct ucred *cred = curproc->p_ucred;
	struct buf *bp;
	vaddr_t kva;
	int s, error;

	offset = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

	iov.iov_base = (void *)kva;
	iov.iov_len = npages << PAGE_SHIFT;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_WRITE;
	uio.uio_resid = npages << PAGE_SHIFT;
	uio.uio_procp = curproc;
	error = VOP_WRITE(vp, &uio, 0, cred);

	s = splbio();
	V_INCR_NUMOUTPUT(vp);
	bp = pool_get(&bufpool, PR_WAITOK);
	splx(s);

	BUF_INIT(bp);
	bp->b_flags = B_BUSY | B_WRITE | B_AGE;
	bp->b_vp = vp;
	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
	bp->b_data = (char *)kva;
	bp->b_bcount = npages << PAGE_SHIFT;
	bp->b_bufsize = npages << PAGE_SHIFT;
	bp->b_resid = 0;
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	}
	uvm_aio_aiodone(bp);
	return (error);
}

static void
filt_genfsdetach(struct knote *kn)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	/* XXXLUKEM lock the struct? */
	SLIST_REMOVE(&vp->v_klist, kn, knote, kn_selnext);
}

static int
filt_genfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	/*
	 * filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/* XXXLUKEM lock the struct? */
	kn->kn_data = vp->v_size - kn->kn_fp->f_offset;
	return (kn->kn_data != 0);
}

static int
filt_genfsvnode(struct knote *kn, long hint)
{

	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_fflags != 0);
}

static const struct filterops genfsread_filtops =
	{ 1, NULL, filt_genfsdetach, filt_genfsread };
static const struct filterops genfsvnode_filtops =
	{ 1, NULL, filt_genfsdetach, filt_genfsvnode };

int
genfs_kqfilter(void *v)
{
	struct vop_kqfilter_args /* {
		struct vnode	*a_vp;
		struct knote	*a_kn;
	} */ *ap = v;
	struct vnode *vp;
	struct knote *kn;

	vp = ap->a_vp;
	kn = ap->a_kn;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &genfsread_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &genfsvnode_filtops;
		break;
	default:
		return (1);
	}

	kn->kn_hook = vp;

	/* XXXLUKEM lock the struct? */
	SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);

	return (0);
}

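/*
 * Example (userland sketch, not kernel code): the filters above back
 * kevent(2) for filesystems that use genfs_kqfilter(), e.g. watching
 * an open file for revocation:
 *
 *	struct kevent ev;
 *
 *	EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	    NOTE_REVOKE, 0, 0);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 */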