      1 /*	$NetBSD: genfs_vnops.c,v 1.102 2005/07/17 16:07:19 yamt Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1982, 1986, 1989, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. Neither the name of the University nor the names of its contributors
     16  *    may be used to endorse or promote products derived from this software
     17  *    without specific prior written permission.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  *
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.102 2005/07/17 16:07:19 yamt Exp $");
     35 
     36 #if defined(_KERNEL_OPT)
     37 #include "opt_nfsserver.h"
     38 #endif
     39 
     40 #include <sys/param.h>
     41 #include <sys/systm.h>
     42 #include <sys/proc.h>
     43 #include <sys/kernel.h>
     44 #include <sys/mount.h>
     45 #include <sys/namei.h>
     46 #include <sys/vnode.h>
     47 #include <sys/fcntl.h>
     48 #include <sys/malloc.h>
     49 #include <sys/poll.h>
     50 #include <sys/mman.h>
     51 #include <sys/file.h>
     52 
     53 #include <miscfs/genfs/genfs.h>
     54 #include <miscfs/genfs/genfs_node.h>
     55 #include <miscfs/specfs/specdev.h>
     56 
     57 #include <uvm/uvm.h>
     58 #include <uvm/uvm_pager.h>
     59 
     60 #ifdef NFSSERVER
     61 #include <nfs/rpcv2.h>
     62 #include <nfs/nfsproto.h>
     63 #include <nfs/nfs.h>
     64 #include <nfs/nqnfs.h>
     65 #include <nfs/nfs_var.h>
     66 #endif
     67 
     68 static __inline void genfs_rel_pages(struct vm_page **, int);
     69 static void filt_genfsdetach(struct knote *);
     70 static int filt_genfsread(struct knote *, long);
     71 static int filt_genfsvnode(struct knote *, long);
     72 
     73 
     74 #define MAX_READ_AHEAD	16 	/* XXXUBC 16 */
     75 int genfs_rapages = MAX_READ_AHEAD; /* # of pages in each chunk of readahead */
     76 int genfs_racount = 2;		/* # of page chunks to readahead */
     77 int genfs_raskip = 2;		/* # of busy page chunks allowed to skip */
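
/*
 * Worked example (assuming 4k pages): with the defaults above, each
 * readahead chunk is genfs_rapages = 16 pages = 64k, and up to
 * genfs_racount = 2 chunks are started, i.e. 128k of readahead,
 * which matches the "read the next 128k" note in genfs_getpages().
 */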
     78 
     79 int
     80 genfs_poll(void *v)
     81 {
     82 	struct vop_poll_args /* {
     83 		struct vnode *a_vp;
     84 		int a_events;
     85 		struct proc *a_p;
     86 	} */ *ap = v;
     87 
     88 	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
     89 }
     90 
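/*
 * Illustrative sketch (not part of this file): a file system typically
 * wires these genfs defaults into its vnodeop table, falling back to
 * genfs_eopnotsupp() or genfs_nullop() for operations it does not
 * implement itself.  The descriptor names are the usual ones from
 * vnode_if.h:
 *
 *	const struct vnodeopv_entry_desc xxx_vnodeop_entries[] = {
 *		{ &vop_poll_desc, genfs_poll },
 *		{ &vop_fcntl_desc, genfs_fcntl },
 *		{ &vop_mmap_desc, genfs_mmap },
 *		{ &vop_seek_desc, genfs_seek },
 *		{ &vop_revoke_desc, genfs_revoke },
 *		...
 *		{ NULL, NULL }
 *	};
 */
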
     91 int
     92 genfs_fsync(void *v)
     93 {
     94 	struct vop_fsync_args /* {
     95 		struct vnode *a_vp;
     96 		struct ucred *a_cred;
     97 		int a_flags;
     98 		off_t a_offlo;
     99 		off_t a_offhi;
    100 		struct proc *a_p;
    101 	} */ *ap = v;
    102 	struct vnode *vp = ap->a_vp, *dvp;
    103 	int wait;
    104 	int error;
    105 
    106 	wait = (ap->a_flags & FSYNC_WAIT) != 0;
    107 	vflushbuf(vp, wait);
    108 	if ((ap->a_flags & FSYNC_DATAONLY) != 0)
    109 		error = 0;
    110 	else
    111 		error = VOP_UPDATE(vp, NULL, NULL, wait ? UPDATE_WAIT : 0);
    112 
    113 	if (error == 0 && ap->a_flags & FSYNC_CACHE) {
    114 		int l = 0;
    115 		if (VOP_BMAP(vp, 0, &dvp, NULL, NULL))
    116 			error = ENXIO;
    117 		else
    118 			error = VOP_IOCTL(dvp, DIOCCACHESYNC, &l, FWRITE,
    119 					  ap->a_p->p_ucred, ap->a_p);
    120 	}
    121 
    122 	return (error);
    123 }
    124 
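/*
 * Illustrative call (a sketch of how fsync(2)-style callers reach the
 * routine above; the vnode must be locked, and FSYNC_WAIT requests
 * synchronous behaviour):
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	error = VOP_FSYNC(vp, cred, FSYNC_WAIT, 0, 0, p);
 *	VOP_UNLOCK(vp, 0);
 */
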
    125 int
    126 genfs_seek(void *v)
    127 {
    128 	struct vop_seek_args /* {
    129 		struct vnode *a_vp;
    130 		off_t a_oldoff;
    131 		off_t a_newoff;
    132 		struct ucred *a_cred;
    133 	} */ *ap = v;
    134 
    135 	if (ap->a_newoff < 0)
    136 		return (EINVAL);
    137 
    138 	return (0);
    139 }
    140 
    141 int
    142 genfs_abortop(void *v)
    143 {
    144 	struct vop_abortop_args /* {
    145 		struct vnode *a_dvp;
    146 		struct componentname *a_cnp;
    147 	} */ *ap = v;
    148 
    149 	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
    150 		PNBUF_PUT(ap->a_cnp->cn_pnbuf);
    151 	return (0);
    152 }
    153 
    154 int
    155 genfs_fcntl(void *v)
    156 {
    157 	struct vop_fcntl_args /* {
    158 		struct vnode *a_vp;
    159 		u_int a_command;
    160 		caddr_t a_data;
    161 		int a_fflag;
    162 		struct ucred *a_cred;
    163 		struct proc *a_p;
    164 	} */ *ap = v;
    165 
    166 	if (ap->a_command == F_SETFL)
    167 		return (0);
    168 	else
    169 		return (EOPNOTSUPP);
    170 }
    171 
    172 /*ARGSUSED*/
    173 int
    174 genfs_badop(void *v)
    175 {
    176 
    177 	panic("genfs: bad op");
    178 }
    179 
    180 /*ARGSUSED*/
    181 int
    182 genfs_nullop(void *v)
    183 {
    184 
    185 	return (0);
    186 }
    187 
    188 /*ARGSUSED*/
    189 int
    190 genfs_einval(void *v)
    191 {
    192 
    193 	return (EINVAL);
    194 }
    195 
    196 /*
    197  * Called when an fs doesn't support a particular vop.
    198  * It takes care of vrele'ing, vput'ing, or unlocking the passed-in vnodes.
    199  */
    200 int
    201 genfs_eopnotsupp(void *v)
    202 {
    203 	struct vop_generic_args /* {
    204 		struct vnodeop_desc *a_desc;
    205 		/ * other random data follows, presumably * /
    206 	} */ *ap = v;
    207 	struct vnodeop_desc *desc = ap->a_desc;
    208 	struct vnode *vp, *vp_last = NULL;
    209 	int flags, i, j, offset;
    210 
    211 	flags = desc->vdesc_flags;
    212 	for (i = 0; i < VDESC_MAX_VPS; flags >>= 1, i++) {
    213 		if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
    214 			break;	/* stop at end of list */
    215 		if ((j = flags & VDESC_VP0_WILLPUT)) {
    216 			vp = *VOPARG_OFFSETTO(struct vnode **, offset, ap);
    217 
    218 			/* Skip if NULL */
    219 			if (!vp)
    220 				continue;
    221 
    222 			switch (j) {
    223 			case VDESC_VP0_WILLPUT:
    224 				/* Check for dvp == vp cases */
    225 				if (vp == vp_last)
    226 					vrele(vp);
    227 				else {
    228 					vput(vp);
    229 					vp_last = vp;
    230 				}
    231 				break;
    232 			case VDESC_VP0_WILLUNLOCK:
    233 				VOP_UNLOCK(vp, 0);
    234 				break;
    235 			case VDESC_VP0_WILLRELE:
    236 				vrele(vp);
    237 				break;
    238 			}
    239 		}
    240 	}
    241 
    242 	return (EOPNOTSUPP);
    243 }
    244 
    245 /*ARGSUSED*/
    246 int
    247 genfs_ebadf(void *v)
    248 {
    249 
    250 	return (EBADF);
    251 }
    252 
    253 /* ARGSUSED */
    254 int
    255 genfs_enoioctl(void *v)
    256 {
    257 
    258 	return (EPASSTHROUGH);
    259 }
    260 
    261 
    262 /*
    263  * Eliminate all activity associated with the requested vnode
    264  * and with all vnodes aliased to the requested vnode.
    265  */
    266 int
    267 genfs_revoke(void *v)
    268 {
    269 	struct vop_revoke_args /* {
    270 		struct vnode *a_vp;
    271 		int a_flags;
    272 	} */ *ap = v;
    273 	struct vnode *vp, *vq;
    274 	struct proc *p = curproc;	/* XXX */
    275 
    276 #ifdef DIAGNOSTIC
    277 	if ((ap->a_flags & REVOKEALL) == 0)
    278 		panic("genfs_revoke: not revokeall");
    279 #endif
    280 
    281 	vp = ap->a_vp;
    282 	simple_lock(&vp->v_interlock);
    283 
    284 	if (vp->v_flag & VALIASED) {
    285 		/*
    286 		 * If a vgone (or vclean) is already in progress,
    287 		 * wait until it is done and return.
    288 		 */
    289 		if (vp->v_flag & VXLOCK) {
    290 			vp->v_flag |= VXWANT;
    291 			ltsleep(vp, PINOD|PNORELOCK, "vop_revokeall", 0,
    292 				&vp->v_interlock);
    293 			return (0);
    294 		}
    295 		/*
    296 		 * Ensure that vp will not be vgone'd while we
    297 		 * are eliminating its aliases.
    298 		 */
    299 		vp->v_flag |= VXLOCK;
    300 		simple_unlock(&vp->v_interlock);
    301 		while (vp->v_flag & VALIASED) {
    302 			simple_lock(&spechash_slock);
    303 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
    304 				if (vq->v_rdev != vp->v_rdev ||
    305 				    vq->v_type != vp->v_type || vp == vq)
    306 					continue;
    307 				simple_unlock(&spechash_slock);
    308 				vgone(vq);
    309 				break;
    310 			}
    311 			if (vq == NULLVP)
    312 				simple_unlock(&spechash_slock);
    313 		}
    314 		/*
    315 		 * Remove the lock so that vgone below will
    316 		 * really eliminate the vnode after which time
    317 		 * vgone will awaken any sleepers.
    318 		 */
    319 		simple_lock(&vp->v_interlock);
    320 		vp->v_flag &= ~VXLOCK;
    321 	}
    322 	vgonel(vp, p);
    323 	return (0);
    324 }
    325 
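/*
 * Calling-convention note: revoke(2) reaches this routine via
 * VOP_REVOKE(vp, REVOKEALL).  The caller must not hold vp's
 * v_interlock; genfs_revoke() takes it itself.
 */
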
    326 /*
    327  * Lock the node.
    328  */
    329 int
    330 genfs_lock(void *v)
    331 {
    332 	struct vop_lock_args /* {
    333 		struct vnode *a_vp;
    334 		int a_flags;
    335 	} */ *ap = v;
    336 	struct vnode *vp = ap->a_vp;
    337 
    338 	return (lockmgr(vp->v_vnlock, ap->a_flags, &vp->v_interlock));
    339 }
    340 
    341 /*
    342  * Unlock the node.
    343  */
    344 int
    345 genfs_unlock(void *v)
    346 {
    347 	struct vop_unlock_args /* {
    348 		struct vnode *a_vp;
    349 		int a_flags;
    350 	} */ *ap = v;
    351 	struct vnode *vp = ap->a_vp;
    352 
    353 	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE,
    354 	    &vp->v_interlock));
    355 }
    356 
    357 /*
    358  * Return whether or not the node is locked.
    359  */
    360 int
    361 genfs_islocked(void *v)
    362 {
    363 	struct vop_islocked_args /* {
    364 		struct vnode *a_vp;
    365 	} */ *ap = v;
    366 	struct vnode *vp = ap->a_vp;
    367 
    368 	return (lockstatus(vp->v_vnlock));
    369 }
    370 
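/*
 * Illustrative sketch: the usual way the lock routines below are
 * reached is through the vnode lock interface, e.g.
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	-> VOP_LOCK -> genfs_lock
 *	...
 *	VOP_UNLOCK(vp, 0);			-> genfs_unlock
 *
 * with VOP_ISLOCKED(vp) available for diagnostic assertions.
 */
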
    371 /*
    372  * Stubs to use when there is no locking to be done on the underlying object.
    373  */
    374 int
    375 genfs_nolock(void *v)
    376 {
    377 	struct vop_lock_args /* {
    378 		struct vnode *a_vp;
    379 		int a_flags;
    380 		struct proc *a_p;
    381 	} */ *ap = v;
    382 
    383 	/*
    384 	 * Since we are not using the lock manager, we must clear
    385 	 * the interlock here.
    386 	 */
    387 	if (ap->a_flags & LK_INTERLOCK)
    388 		simple_unlock(&ap->a_vp->v_interlock);
    389 	return (0);
    390 }
    391 
    392 int
    393 genfs_nounlock(void *v)
    394 {
    395 
    396 	return (0);
    397 }
    398 
    399 int
    400 genfs_noislocked(void *v)
    401 {
    402 
    403 	return (0);
    404 }
    405 
    406 /*
    407  * Local lease check for NFS servers.  Just set up args and let
    408  * nqsrv_getlease() do the rest.  If NFSSERVER is not in the kernel,
    409  * this is a null operation.
    410  */
    411 int
    412 genfs_lease_check(void *v)
    413 {
    414 #ifdef NFSSERVER
    415 	struct vop_lease_args /* {
    416 		struct vnode *a_vp;
    417 		struct proc *a_p;
    418 		struct ucred *a_cred;
    419 		int a_flag;
    420 	} */ *ap = v;
    421 	u_int32_t duration = 0;
    422 	int cache;
    423 	u_quad_t frev;
    424 
    425 	(void) nqsrv_getlease(ap->a_vp, &duration, ND_CHECK | ap->a_flag,
    426 	    NQLOCALSLP, ap->a_p, (struct mbuf *)0, &cache, &frev, ap->a_cred);
    427 	return (0);
    428 #else
    429 	return (0);
    430 #endif /* NFSSERVER */
    431 }
    432 
    433 int
    434 genfs_mmap(void *v)
    435 {
    436 
    437 	return (0);
    438 }
    439 
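/*
 * genfs_rel_pages: unbusy the given pages, freeing any that are still
 * PG_FAKE (i.e. that were never filled with valid data).  Called on
 * the backout paths below with the owning object locked.
 */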
    440 static __inline void
    441 genfs_rel_pages(struct vm_page **pgs, int npages)
    442 {
    443 	int i;
    444 
    445 	for (i = 0; i < npages; i++) {
    446 		struct vm_page *pg = pgs[i];
    447 
    448 		if (pg == NULL)
    449 			continue;
    450 		if (pg->flags & PG_FAKE) {
    451 			pg->flags |= PG_RELEASED;
    452 		}
    453 	}
    454 	uvm_lock_pageq();
    455 	uvm_page_unbusy(pgs, npages);
    456 	uvm_unlock_pageq();
    457 }
    458 
    459 /*
    460  * generic VM getpages routine.
    461  * Return PG_BUSY pages for the given range,
    462  * reading from backing store if necessary.
    463  */
    464 
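/*
 * Calling-convention sketch: the routine is entered with the object
 * (vp->v_uobj) locked.  On a normal return the lock has been dropped;
 * for PGO_LOCKED requests it is retained.  A typical caller looks
 * roughly like this (names illustrative):
 *
 *	npages = 1;
 *	simple_lock(&vp->v_interlock);
 *	error = VOP_GETPAGES(vp, trunc_page(off), pgs, &npages, 0,
 *	    VM_PROT_READ, 0, PGO_SYNCIO);
 */
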
    465 int
    466 genfs_getpages(void *v)
    467 {
    468 	struct vop_getpages_args /* {
    469 		struct vnode *a_vp;
    470 		voff_t a_offset;
    471 		struct vm_page **a_m;
    472 		int *a_count;
    473 		int a_centeridx;
    474 		vm_prot_t a_access_type;
    475 		int a_advice;
    476 		int a_flags;
    477 	} */ *ap = v;
    478 
    479 	off_t newsize, diskeof, memeof;
    480 	off_t offset, origoffset, startoffset, endoffset, raoffset;
    481 	daddr_t lbn, blkno;
    482 	int s, i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
    483 	int fs_bshift, fs_bsize, dev_bshift;
    484 	int flags = ap->a_flags;
    485 	size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
    486 	vaddr_t kva;
    487 	struct buf *bp, *mbp;
    488 	struct vnode *vp = ap->a_vp;
    489 	struct vnode *devvp;
    490 	struct genfs_node *gp = VTOG(vp);
    491 	struct uvm_object *uobj = &vp->v_uobj;
    492 	struct vm_page *pg, **pgs, *pgs_onstack[MAX_READ_AHEAD];
    493 	int pgs_size;
    494 	struct ucred *cred = curproc->p_ucred;		/* XXXUBC curlwp */
    495 	boolean_t async = (flags & PGO_SYNCIO) == 0;
    496 	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
    497 	boolean_t sawhole = FALSE;
    498 	boolean_t overwrite = (flags & PGO_OVERWRITE) != 0;
    499 	boolean_t blockalloc = write && (flags & PGO_NOBLOCKALLOC) == 0;
    500 	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
    501 
    502 	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
    503 	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
    504 
    505 	/* XXXUBC temp limit */
    506 	if (*ap->a_count > MAX_READ_AHEAD) {
    507 		panic("genfs_getpages: too many pages");
    508 	}
    509 
    510 	error = 0;
    511 	origoffset = ap->a_offset;
    512 	orignpages = *ap->a_count;
    513 	GOP_SIZE(vp, vp->v_size, &diskeof, GOP_SIZE_READ);
    514 	if (flags & PGO_PASTEOF) {
    515 		newsize = MAX(vp->v_size,
    516 		    origoffset + (orignpages << PAGE_SHIFT));
    517 		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_READ|GOP_SIZE_MEM);
    518 	} else {
    519 		GOP_SIZE(vp, vp->v_size, &memeof, GOP_SIZE_READ|GOP_SIZE_MEM);
    520 	}
    521 	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
    522 	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
    523 	KASSERT(orignpages > 0);
    524 
    525 	/*
    526 	 * Bounds-check the request.
    527 	 */
    528 
    529 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
    530 		if ((flags & PGO_LOCKED) == 0) {
    531 			simple_unlock(&uobj->vmobjlock);
    532 		}
    533 		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
    534 		    origoffset, *ap->a_count, memeof,0);
    535 		return (EINVAL);
    536 	}
    537 
    538 	/* uobj is locked */
    539 
    540 	if (write) {
    541 		gp->g_dirtygen++;
    542 		if ((vp->v_flag & VONWORKLST) == 0) {
    543 			vn_syncer_add_to_worklist(vp, filedelay);
    544 		}
    545 	}
    546 
    547 	/*
    548 	 * For PGO_LOCKED requests, just return whatever's in memory.
    549 	 */
    550 
    551 	if (flags & PGO_LOCKED) {
    552 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
    553 		    UFP_NOWAIT|UFP_NOALLOC| (write ? UFP_NORDONLY : 0));
    554 
    555 		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
    556 	}
    557 
    558 	/*
    559 	 * find the requested pages and make some simple checks.
    560 	 * leave space in the page array for a whole block.
    561 	 */
    562 
    563 	if (vp->v_type == VREG) {
    564 		fs_bshift = vp->v_mount->mnt_fs_bshift;
    565 		dev_bshift = vp->v_mount->mnt_dev_bshift;
    566 	} else {
    567 		fs_bshift = DEV_BSHIFT;
    568 		dev_bshift = DEV_BSHIFT;
    569 	}
    570 	fs_bsize = 1 << fs_bshift;
    571 
    572 	orignpages = MIN(orignpages,
    573 	    round_page(memeof - origoffset) >> PAGE_SHIFT);
    574 	npages = orignpages;
    575 	startoffset = origoffset & ~(fs_bsize - 1);
    576 	endoffset = round_page((origoffset + (npages << PAGE_SHIFT) +
    577 	    fs_bsize - 1) & ~(fs_bsize - 1));
    578 	endoffset = MIN(endoffset, round_page(memeof));
    579 	ridx = (origoffset - startoffset) >> PAGE_SHIFT;
    580 
    581 	pgs_size = sizeof(struct vm_page *) *
    582 	    ((endoffset - startoffset) >> PAGE_SHIFT);
    583 	if (pgs_size > sizeof(pgs_onstack)) {
    584 		pgs = malloc(pgs_size, M_DEVBUF, M_NOWAIT | M_ZERO);
    585 		if (pgs == NULL) {
    586 			simple_unlock(&uobj->vmobjlock);
    587 			return (ENOMEM);
    588 		}
    589 	} else {
    590 		pgs = pgs_onstack;
    591 		memset(pgs, 0, pgs_size);
    592 	}
    593 	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
    594 	    ridx, npages, startoffset, endoffset);
    595 	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
    596 	    async ? UFP_NOWAIT : UFP_ALL) != orignpages) {
    597 		KASSERT(async != 0);
    598 		genfs_rel_pages(&pgs[ridx], orignpages);
    599 		simple_unlock(&uobj->vmobjlock);
    600 		if (pgs != pgs_onstack)
    601 			free(pgs, M_DEVBUF);
    602 		return (EBUSY);
    603 	}
    604 
    605 	/*
    606 	 * if the pages are already resident, just return them.
    607 	 */
    608 
    609 	for (i = 0; i < npages; i++) {
    610 		struct vm_page *pg1 = pgs[ridx + i];
    611 
    612 		if ((pg1->flags & PG_FAKE) ||
    613 		    (blockalloc && (pg1->flags & PG_RDONLY))) {
    614 			break;
    615 		}
    616 	}
    617 	if (i == npages) {
    618 		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
    619 		raoffset = origoffset + (orignpages << PAGE_SHIFT);
    620 		npages += ridx;
    621 		goto raout;
    622 	}
    623 
    624 	/*
    625 	 * if PGO_OVERWRITE is set, don't bother reading the pages.
    626 	 */
    627 
    628 	if (flags & PGO_OVERWRITE) {
    629 		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
    630 
    631 		for (i = 0; i < npages; i++) {
    632 			struct vm_page *pg1 = pgs[ridx + i];
    633 
    634 			pg1->flags &= ~(PG_RDONLY|PG_CLEAN);
    635 		}
    636 		npages += ridx;
    637 		goto out;
    638 	}
    639 
    640 	/*
    641 	 * the page wasn't resident and we're not overwriting,
    642 	 * so we're going to have to do some i/o.
    643 	 * find any additional pages needed to cover the expanded range.
    644 	 */
    645 
    646 	npages = (endoffset - startoffset) >> PAGE_SHIFT;
    647 	if (startoffset != origoffset || npages != orignpages) {
    648 
    649 		/*
    650 		 * we need to avoid deadlocks caused by locking
    651 		 * additional pages at lower offsets than pages we
    652 		 * already have locked.  unlock them all and start over.
    653 		 */
    654 
    655 		genfs_rel_pages(&pgs[ridx], orignpages);
    656 		memset(pgs, 0, pgs_size);
    657 
    658 		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
    659 		    startoffset, endoffset, 0,0);
    660 		npgs = npages;
    661 		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
    662 		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
    663 			KASSERT(async != 0);
    664 			genfs_rel_pages(pgs, npages);
    665 			simple_unlock(&uobj->vmobjlock);
    666 			if (pgs != pgs_onstack)
    667 				free(pgs, M_DEVBUF);
    668 			return (EBUSY);
    669 		}
    670 	}
    671 	simple_unlock(&uobj->vmobjlock);
    672 
    673 	/*
    674 	 * read the desired page(s).
    675 	 */
    676 
    677 	totalbytes = npages << PAGE_SHIFT;
    678 	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
    679 	tailbytes = totalbytes - bytes;
    680 	skipbytes = 0;
    681 
    682 	kva = uvm_pagermapin(pgs, npages,
    683 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
    684 
    685 	s = splbio();
    686 	mbp = pool_get(&bufpool, PR_WAITOK);
    687 	splx(s);
    688 	BUF_INIT(mbp);
    689 	mbp->b_bufsize = totalbytes;
    690 	mbp->b_data = (void *)kva;
    691 	mbp->b_resid = mbp->b_bcount = bytes;
    692 	mbp->b_flags = B_BUSY|B_READ| (async ? B_CALL|B_ASYNC : 0);
    693 	mbp->b_iodone = (async ? uvm_aio_biodone : NULL);
    694 	mbp->b_vp = vp;
    695 
    696 	/*
    697 	 * if EOF is in the middle of the range, zero the part past EOF.
    698 	 * if the page including EOF is not PG_FAKE, skip over it since
    699 	 * in that case it has valid data that we need to preserve.
    700 	 */
    701 
    702 	if (tailbytes > 0) {
    703 		size_t tailstart = bytes;
    704 
    705 		if ((pgs[bytes >> PAGE_SHIFT]->flags & PG_FAKE) == 0) {
    706 			tailstart = round_page(tailstart);
    707 			tailbytes -= tailstart - bytes;
    708 		}
    709 		UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
    710 		    kva, tailstart, tailbytes,0);
    711 		memset((void *)(kva + tailstart), 0, tailbytes);
    712 	}
    713 
    714 	/*
    715 	 * now loop over the pages, reading as needed.
    716 	 */
    717 
    718 	if (blockalloc) {
    719 		lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
    720 	} else {
    721 		lockmgr(&gp->g_glock, LK_SHARED, NULL);
    722 	}
    723 
    724 	bp = NULL;
    725 	for (offset = startoffset;
    726 	    bytes > 0;
    727 	    offset += iobytes, bytes -= iobytes) {
    728 
    729 		/*
    730 		 * skip pages which don't need to be read.
    731 		 */
    732 
    733 		pidx = (offset - startoffset) >> PAGE_SHIFT;
    734 		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
    735 			size_t b;
    736 
    737 			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
    738 			if ((pgs[pidx]->flags & PG_RDONLY)) {
    739 				sawhole = TRUE;
    740 			}
    741 			b = MIN(PAGE_SIZE, bytes);
    742 			offset += b;
    743 			bytes -= b;
    744 			skipbytes += b;
    745 			pidx++;
    746 			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
    747 			    offset, 0,0,0);
    748 			if (bytes == 0) {
    749 				goto loopdone;
    750 			}
    751 		}
    752 
    753 		/*
    754 		 * bmap the file to find out the blkno to read from and
    755 		 * how much we can read in one i/o.  if bmap returns an error,
    756 		 * skip the rest of the top-level i/o.
    757 		 */
    758 
    759 		lbn = offset >> fs_bshift;
    760 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    761 		if (error) {
    762 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
    763 			    lbn, error,0,0);
    764 			skipbytes += bytes;
    765 			goto loopdone;
    766 		}
    767 
    768 		/*
    769 		 * see how many pages can be read with this i/o.
    770 		 * reduce the i/o size if necessary to avoid
    771 		 * overwriting pages with valid data.
    772 		 */
    773 
    774 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
    775 		    bytes);
    776 		if (offset + iobytes > round_page(offset)) {
    777 			pcount = 1;
    778 			while (pidx + pcount < npages &&
    779 			    pgs[pidx + pcount]->flags & PG_FAKE) {
    780 				pcount++;
    781 			}
    782 			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
    783 			    (offset - trunc_page(offset)));
    784 		}
    785 
    786 		/*
    787 		 * if this block isn't allocated, zero it instead of
    788 		 * reading it.  unless we are going to allocate blocks,
    789 		 * mark the pages we zeroed PG_RDONLY.
    790 		 */
    791 
    792 		if (blkno < 0) {
    793 			int holepages = (round_page(offset + iobytes) -
    794 			    trunc_page(offset)) >> PAGE_SHIFT;
    795 			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
    796 
    797 			sawhole = TRUE;
    798 			memset((char *)kva + (offset - startoffset), 0,
    799 			    iobytes);
    800 			skipbytes += iobytes;
    801 
    802 			for (i = 0; i < holepages; i++) {
    803 				if (write) {
    804 					pgs[pidx + i]->flags &= ~PG_CLEAN;
    805 				}
    806 				if (!blockalloc) {
    807 					pgs[pidx + i]->flags |= PG_RDONLY;
    808 				}
    809 			}
    810 			continue;
    811 		}
    812 
    813 		/*
    814 		 * allocate a sub-buf for this piece of the i/o
    815 		 * (or just use mbp if there's only 1 piece),
    816 		 * and start it going.
    817 		 */
    818 
    819 		if (offset == startoffset && iobytes == bytes) {
    820 			bp = mbp;
    821 		} else {
    822 			s = splbio();
    823 			bp = pool_get(&bufpool, PR_WAITOK);
    824 			splx(s);
    825 			BUF_INIT(bp);
    826 			bp->b_data = (char *)kva + offset - startoffset;
    827 			bp->b_resid = bp->b_bcount = iobytes;
    828 			bp->b_flags = B_BUSY|B_READ|B_CALL|B_ASYNC;
    829 			bp->b_iodone = uvm_aio_biodone1;
    830 			bp->b_vp = vp;
    831 			bp->b_proc = NULL;
    832 		}
    833 		bp->b_lblkno = 0;
    834 		bp->b_private = mbp;
    835 		if (devvp->v_type == VBLK) {
    836 			bp->b_dev = devvp->v_rdev;
    837 		}
    838 
    839 		/* adjust physical blkno for partial blocks */
    840 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
    841 		    dev_bshift);
    842 
    843 		UVMHIST_LOG(ubchist,
    844 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
    845 		    bp, offset, iobytes, bp->b_blkno);
    846 
    847 		if (async)
    848 			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
    849 		else
    850 			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
    851 		VOP_STRATEGY(bp->b_vp, bp);
    852 	}
    853 
    854 loopdone:
    855 	if (skipbytes) {
    856 		s = splbio();
    857 		if (error) {
    858 			mbp->b_flags |= B_ERROR;
    859 			mbp->b_error = error;
    860 		}
    861 		mbp->b_resid -= skipbytes;
    862 		if (mbp->b_resid == 0) {
    863 			biodone(mbp);
    864 		}
    865 		splx(s);
    866 	}
    867 
    868 	if (async) {
    869 		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
    870 		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    871 		if (pgs != pgs_onstack)
    872 			free(pgs, M_DEVBUF);
    873 		return (0);
    874 	}
    875 	if (bp != NULL) {
    876 		error = biowait(mbp);
    877 	}
    878 	s = splbio();
    879 	pool_put(&bufpool, mbp);
    880 	splx(s);
    881 	uvm_pagermapout(kva, npages);
    882 	raoffset = startoffset + totalbytes;
    883 
    884 	/*
    885 	 * if we encountered a hole then we have to do a little more work.
    886 	 * for read faults, we marked the page PG_RDONLY so that future
    887 	 * write accesses to the page will fault again.
    888 	 * for write faults, we must make sure that the backing store for
    889 	 * the page is completely allocated while the pages are locked.
    890 	 */
    891 
    892 	if (!error && sawhole && blockalloc) {
    893 		error = GOP_ALLOC(vp, startoffset, npages << PAGE_SHIFT, 0,
    894 		    cred);
    895 		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
    896 		    startoffset, npages << PAGE_SHIFT, error,0);
    897 		if (!error) {
    898 			for (i = 0; i < npages; i++) {
    899 				if (pgs[i] == NULL) {
    900 					continue;
    901 				}
    902 				pgs[i]->flags &= ~(PG_CLEAN|PG_RDONLY);
    903 				UVMHIST_LOG(ubchist, "mark dirty pg %p",
    904 				    pgs[i],0,0,0);
    905 			}
    906 		}
    907 	}
    908 	lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    909 	simple_lock(&uobj->vmobjlock);
    910 
    911 	/*
    912 	 * see if we want to start any readahead.
    913 	 * XXXUBC for now, just read the next 128k on 64k boundaries.
    914 	 * this is pretty nonsensical, but it is 50% faster than reading
    915 	 * just the next 64k.
    916 	 */
    917 
    918 raout:
    919 	if (!error && !async && !write && ((int)raoffset & 0xffff) == 0 &&
    920 	    PAGE_SHIFT <= 16) {
    921 		off_t rasize;
    922 		int rapages, err, j, skipped;
    923 
    924 		/* XXXUBC temp limit, from above */
    925 		rapages = MIN(MIN(1 << (16 - PAGE_SHIFT), MAX_READ_AHEAD),
    926 		    genfs_rapages);
    927 		rasize = rapages << PAGE_SHIFT;
    928 		for (j = skipped = 0; j < genfs_racount; j++) {
    929 
    930 			if (raoffset >= memeof)
    931 				break;
    932 
    933 			err = VOP_GETPAGES(vp, raoffset, NULL, &rapages, 0,
    934 			    VM_PROT_READ, 0, 0);
    935 			simple_lock(&uobj->vmobjlock);
    936 			if (err) {
    937 				if (err != EBUSY ||
    938 				    skipped++ == genfs_raskip)
    939 					break;
    940 			}
    941 			raoffset += rasize;
    942 			rapages = rasize >> PAGE_SHIFT;
    943 		}
    944 	}
    945 
    946 	/*
    947 	 * we're almost done!  release the pages...
    948 	 * for errors, we free the pages.
    949 	 * otherwise we activate them and mark them as valid and clean.
    950 	 * also, unbusy pages that were not actually requested.
    951 	 */
    952 
    953 	if (error) {
    954 		for (i = 0; i < npages; i++) {
    955 			if (pgs[i] == NULL) {
    956 				continue;
    957 			}
    958 			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    959 			    pgs[i], pgs[i]->flags, 0,0);
    960 			if (pgs[i]->flags & PG_FAKE) {
    961 				pgs[i]->flags |= PG_RELEASED;
    962 			}
    963 		}
    964 		uvm_lock_pageq();
    965 		uvm_page_unbusy(pgs, npages);
    966 		uvm_unlock_pageq();
    967 		simple_unlock(&uobj->vmobjlock);
    968 		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
    969 		if (pgs != pgs_onstack)
    970 			free(pgs, M_DEVBUF);
    971 		return (error);
    972 	}
    973 
    974 out:
    975 	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
    976 	uvm_lock_pageq();
    977 	for (i = 0; i < npages; i++) {
    978 		pg = pgs[i];
    979 		if (pg == NULL) {
    980 			continue;
    981 		}
    982 		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    983 		    pg, pg->flags, 0,0);
    984 		if (pg->flags & PG_FAKE && !overwrite) {
    985 			pg->flags &= ~(PG_FAKE);
    986 			pmap_clear_modify(pgs[i]);
    987 		}
    988 		KASSERT(!write || !blockalloc || (pg->flags & PG_RDONLY) == 0);
    989 		if (i < ridx || i >= ridx + orignpages || async) {
    990 			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
    991 			    pg, pg->offset,0,0);
    992 			if (pg->flags & PG_WANTED) {
    993 				wakeup(pg);
    994 			}
    995 			if (pg->flags & PG_FAKE) {
    996 				KASSERT(overwrite);
    997 				uvm_pagezero(pg);
    998 			}
    999 			if (pg->flags & PG_RELEASED) {
   1000 				uvm_pagefree(pg);
   1001 				continue;
   1002 			}
   1003 			uvm_pageactivate(pg);
   1004 			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
   1005 			UVM_PAGE_OWN(pg, NULL);
   1006 		}
   1007 	}
   1008 	uvm_unlock_pageq();
   1009 	simple_unlock(&uobj->vmobjlock);
   1010 	if (ap->a_m != NULL) {
   1011 		memcpy(ap->a_m, &pgs[ridx],
   1012 		    orignpages * sizeof(struct vm_page *));
   1013 	}
   1014 	if (pgs != pgs_onstack)
   1015 		free(pgs, M_DEVBUF);
   1016 	return (0);
   1017 }
   1018 
   1019 /*
   1020  * generic VM putpages routine.
   1021  * Write the given range of pages to backing store.
   1022  *
   1023  * => "offhi == 0" means flush all pages at or after "offlo".
   1024  * => object should be locked by caller.   we may temporarily _unlock_
   1025  *	the object while cleaning a page (PGO_CLEANIT) or while waiting
   1026  *	for a busy page if PGO_SYNCIO is set.
   1027  *	we return with the object unlocked.
   1028  * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
   1029  *	thus, a caller might want to unlock higher level resources
   1030  *	(e.g. vm_map) before calling flush.
   1031  * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, then we will neither
   1032  *	unlock the object nor block.
   1033  * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
   1034  * => NOTE: we rely on the fact that the object's memq is a TAILQ and
   1035  *	that new pages are inserted on the tail end of the list.   thus,
   1036  *	we can make a complete pass through the object in one go by starting
   1037  *	at the head and working towards the tail (new pages are put in
   1038  *	front of us).
   1039  * => NOTE: we are allowed to lock the page queues, so the caller
   1040  *	must not be holding the page queue lock.
   1041  *
   1042  * note on "cleaning" object and PG_BUSY pages:
   1043  *	this routine is holding the lock on the object.   the only time
   1044  *	that it can run into a PG_BUSY page that it does not own is if
   1045  *	some other process has started I/O on the page (e.g. either
   1046  *	a pagein, or a pageout).    if the PG_BUSY page is being paged
   1047  *	in, then it can not be dirty (!PG_CLEAN) because no one has
   1048  *	had a chance to modify it yet.    if the PG_BUSY page is being
   1049  *	paged out then it means that someone else has already started
   1050  *	cleaning the page for us (how nice!).    in this case, if we
   1051  *	have syncio specified, then after we make our pass through the
   1052  *	object we need to wait for the other PG_BUSY pages to clear
   1053  *	off (i.e. we need to do an iosync).   also note that once a
   1054  *	page is PG_BUSY it must stay in its object until it is un-busyed.
   1055  *
   1056  * note on page traversal:
   1057  *	we can traverse the pages in an object either by going down the
   1058  *	linked list in "uobj->memq", or we can go over the address range
   1059  *	page by page, doing hash table lookups for each address.    depending
   1060  *	on how many pages are in the object it may be cheaper to do one
   1061  *	or the other.   we set "by_list" to true if we are using memq.
   1062  *	if the cost of a hash lookup was equal to the cost of the list
   1063  *	traversal we could compare the number of pages in the start->stop
   1064  *	range to the total number of pages in the object.   however, it
   1065  *	seems that a hash table lookup is more expensive than the linked
   1066  *	list traversal, so we multiply the number of pages in the
   1067  *	range by an estimate of the relatively higher cost of the hash lookup.
   1068  */
   1069 
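/*
 * Illustrative call (a sketch; the object must be locked on entry, and
 * the lock is released before return):
 *
 *	simple_lock(&vp->v_interlock);
 *	error = VOP_PUTPAGES(vp, trunc_page(lo), round_page(hi),
 *	    PGO_CLEANIT | PGO_SYNCIO);
 */
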
   1070 int
   1071 genfs_putpages(void *v)
   1072 {
   1073 	struct vop_putpages_args /* {
   1074 		struct vnode *a_vp;
   1075 		voff_t a_offlo;
   1076 		voff_t a_offhi;
   1077 		int a_flags;
   1078 	} */ *ap = v;
   1079 	struct vnode *vp = ap->a_vp;
   1080 	struct uvm_object *uobj = &vp->v_uobj;
   1081 	struct simplelock *slock = &uobj->vmobjlock;
   1082 	off_t startoff = ap->a_offlo;
   1083 	off_t endoff = ap->a_offhi;
   1084 	off_t off;
   1085 	int flags = ap->a_flags;
   1086 	/* Even for strange MAXPHYS, the shift rounds down to a page */
   1087 	const int maxpages = MAXPHYS >> PAGE_SHIFT;
   1088 	int i, s, error, npages, nback;
   1089 	int freeflag;
   1090 	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
   1091 	boolean_t wasclean, by_list, needs_clean, yld;
   1092 	boolean_t async = (flags & PGO_SYNCIO) == 0;
   1093 	boolean_t pagedaemon = curproc == uvm.pagedaemon_proc;
   1094 	struct lwp *l = curlwp ? curlwp : &lwp0;
   1095 	struct genfs_node *gp = VTOG(vp);
   1096 	int dirtygen;
   1097 
   1098 	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
   1099 
   1100 	KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
   1101 	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
   1102 	KASSERT(startoff < endoff || endoff == 0);
   1103 
   1104 	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
   1105 	    vp, uobj->uo_npages, startoff, endoff - startoff);
   1106 	if (uobj->uo_npages == 0) {
   1107 		s = splbio();
   1108 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
   1109 		    (vp->v_flag & VONWORKLST)) {
   1110 			vp->v_flag &= ~VONWORKLST;
   1111 			LIST_REMOVE(vp, v_synclist);
   1112 		}
   1113 		splx(s);
   1114 		simple_unlock(slock);
   1115 		return (0);
   1116 	}
   1117 
   1118 	/*
   1119 	 * the vnode has pages, set up to process the request.
   1120 	 */
   1121 
   1122 	error = 0;
   1123 	s = splbio();
   1124 	simple_lock(&global_v_numoutput_slock);
   1125 	wasclean = (vp->v_numoutput == 0);
   1126 	simple_unlock(&global_v_numoutput_slock);
   1127 	splx(s);
   1128 	off = startoff;
   1129 	if (endoff == 0 || flags & PGO_ALLPAGES) {
   1130 		endoff = trunc_page(LLONG_MAX);
   1131 	}
   1132 	by_list = (uobj->uo_npages <=
   1133 	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
   1134 
   1135 #if !defined(DEBUG)
   1136 	/*
   1137 	 * if this vnode is known not to have dirty pages,
   1138 	 * don't bother to clean it out.
   1139 	 */
   1140 
   1141 	if ((vp->v_flag & VONWORKLST) == 0) {
   1142 		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
   1143 			goto skip_scan;
   1144 		}
   1145 		flags &= ~PGO_CLEANIT;
   1146 	}
   1147 #endif /* !defined(DEBUG) */
   1148 
   1149 	/*
   1150 	 * start the loop.  when scanning by list, hold the last page
   1151 	 * in the list before we start.  pages allocated after we start
   1152 	 * will be added to the end of the list, so we can stop at the
   1153 	 * current last page.
   1154 	 */
   1155 
   1156 	dirtygen = gp->g_dirtygen;
   1157 	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
   1158 	curmp.uobject = uobj;
   1159 	curmp.offset = (voff_t)-1;
   1160 	curmp.flags = PG_BUSY;
   1161 	endmp.uobject = uobj;
   1162 	endmp.offset = (voff_t)-1;
   1163 	endmp.flags = PG_BUSY;
   1164 	if (by_list) {
   1165 		pg = TAILQ_FIRST(&uobj->memq);
   1166 		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
   1167 		PHOLD(l);
   1168 	} else {
   1169 		pg = uvm_pagelookup(uobj, off);
   1170 	}
   1171 	nextpg = NULL;
   1172 	while (by_list || off < endoff) {
   1173 
   1174 		/*
   1175 		 * if the current page is not interesting, move on to the next.
   1176 		 */
   1177 
   1178 		KASSERT(pg == NULL || pg->uobject == uobj);
   1179 		KASSERT(pg == NULL ||
   1180 		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
   1181 		    (pg->flags & PG_BUSY) != 0);
   1182 		if (by_list) {
   1183 			if (pg == &endmp) {
   1184 				break;
   1185 			}
   1186 			if (pg->offset < startoff || pg->offset >= endoff ||
   1187 			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1188 				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1189 					wasclean = FALSE;
   1190 				}
   1191 				pg = TAILQ_NEXT(pg, listq);
   1192 				continue;
   1193 			}
   1194 			off = pg->offset;
   1195 		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1196 			if (pg != NULL) {
   1197 				wasclean = FALSE;
   1198 			}
   1199 			off += PAGE_SIZE;
   1200 			if (off < endoff) {
   1201 				pg = uvm_pagelookup(uobj, off);
   1202 			}
   1203 			continue;
   1204 		}
   1205 
   1206 		/*
   1207 		 * if the current page needs to be cleaned and it's busy,
   1208 		 * wait for it to become unbusy.
   1209 		 */
   1210 
   1211 		yld = (l->l_cpu->ci_schedstate.spc_flags &
   1212 		    SPCF_SHOULDYIELD) && !pagedaemon;
   1213 		if (pg->flags & PG_BUSY || yld) {
   1214 			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
   1215 			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
   1216 				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
   1217 				error = EDEADLK;
   1218 				break;
   1219 			}
   1220 			KASSERT(!pagedaemon);
   1221 			if (by_list) {
   1222 				TAILQ_INSERT_BEFORE(pg, &curmp, listq);
   1223 				UVMHIST_LOG(ubchist, "curmp next %p",
   1224 				    TAILQ_NEXT(&curmp, listq), 0,0,0);
   1225 			}
   1226 			if (yld) {
   1227 				simple_unlock(slock);
   1228 				preempt(1);
   1229 				simple_lock(slock);
   1230 			} else {
   1231 				pg->flags |= PG_WANTED;
   1232 				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
   1233 				simple_lock(slock);
   1234 			}
   1235 			if (by_list) {
   1236 				UVMHIST_LOG(ubchist, "after next %p",
   1237 				    TAILQ_NEXT(&curmp, listq), 0,0,0);
   1238 				pg = TAILQ_NEXT(&curmp, listq);
   1239 				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
   1240 			} else {
   1241 				pg = uvm_pagelookup(uobj, off);
   1242 			}
   1243 			continue;
   1244 		}
   1245 
   1246 		/*
   1247 		 * if we're freeing, remove all mappings of the page now.
   1248 		 * if we're cleaning, check if the page needs to be cleaned.
   1249 		 */
   1250 
   1251 		if (flags & PGO_FREE) {
   1252 			pmap_page_protect(pg, VM_PROT_NONE);
   1253 		} else if (flags & PGO_CLEANIT) {
   1254 
   1255 			/*
   1256 			 * if we still have some hope to pull this vnode off
   1257 			 * from the syncer queue, write-protect the page.
   1258 			 */
   1259 
   1260 			if (wasclean && gp->g_dirtygen == dirtygen &&
   1261 			    startoff == 0 && endoff == trunc_page(LLONG_MAX)) {
   1262 				pmap_page_protect(pg,
   1263 				    VM_PROT_READ|VM_PROT_EXECUTE);
   1264 			}
   1265 		}
   1266 
   1267 		if (flags & PGO_CLEANIT) {
   1268 			needs_clean = pmap_clear_modify(pg) ||
   1269 			    (pg->flags & PG_CLEAN) == 0;
   1270 			pg->flags |= PG_CLEAN;
   1271 		} else {
   1272 			needs_clean = FALSE;
   1273 		}
   1274 
   1275 		/*
   1276 		 * if we're cleaning, build a cluster.
   1277 		 * the cluster will consist of pages which are currently dirty,
   1278 		 * but they will be returned to us marked clean.
   1279 		 * if not cleaning, just operate on the one page.
   1280 		 */
   1281 
   1282 		if (needs_clean) {
   1283 			KDASSERT((vp->v_flag & VONWORKLST));
   1284 			wasclean = FALSE;
   1285 			memset(pgs, 0, sizeof(pgs));
   1286 			pg->flags |= PG_BUSY;
   1287 			UVM_PAGE_OWN(pg, "genfs_putpages");
   1288 
   1289 			/*
   1290 			 * first look backward.
   1291 			 */
   1292 
   1293 			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
   1294 			nback = npages;
   1295 			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
   1296 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
   1297 			if (nback) {
   1298 				memmove(&pgs[0], &pgs[npages - nback],
   1299 				    nback * sizeof(pgs[0]));
   1300 				if (npages - nback < nback)
   1301 					memset(&pgs[nback], 0,
   1302 					    (npages - nback) * sizeof(pgs[0]));
   1303 				else
   1304 					memset(&pgs[npages - nback], 0,
   1305 					    nback * sizeof(pgs[0]));
   1306 			}
   1307 
   1308 			/*
   1309 			 * then plug in our page of interest.
   1310 			 */
   1311 
   1312 			pgs[nback] = pg;
   1313 
   1314 			/*
   1315 			 * then look forward to fill in the remaining space in
   1316 			 * the array of pages.
   1317 			 */
   1318 
   1319 			npages = maxpages - nback - 1;
   1320 			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
   1321 			    &pgs[nback + 1],
   1322 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
   1323 			npages += nback + 1;
   1324 		} else {
   1325 			pgs[0] = pg;
   1326 			npages = 1;
   1327 			nback = 0;
   1328 		}
   1329 
   1330 		/*
   1331 		 * apply FREE or DEACTIVATE options if requested.
   1332 		 */
   1333 
   1334 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1335 			uvm_lock_pageq();
   1336 		}
   1337 		for (i = 0; i < npages; i++) {
   1338 			tpg = pgs[i];
   1339 			KASSERT(tpg->uobject == uobj);
   1340 			if (by_list && tpg == TAILQ_NEXT(pg, listq))
   1341 				pg = tpg;
   1342 			if (tpg->offset < startoff || tpg->offset >= endoff)
   1343 				continue;
   1344 			if (flags & PGO_DEACTIVATE &&
   1345 			    (tpg->pqflags & PQ_INACTIVE) == 0 &&
   1346 			    tpg->wire_count == 0) {
   1347 				(void) pmap_clear_reference(tpg);
   1348 				uvm_pagedeactivate(tpg);
   1349 			} else if (flags & PGO_FREE) {
   1350 				pmap_page_protect(tpg, VM_PROT_NONE);
   1351 				if (tpg->flags & PG_BUSY) {
   1352 					tpg->flags |= freeflag;
   1353 					if (pagedaemon) {
   1354 						uvmexp.paging++;
   1355 						uvm_pagedequeue(tpg);
   1356 					}
   1357 				} else {
   1358 
   1359 					/*
   1360 					 * ``page is not busy''
   1361 					 * implies that npages is 1
   1362 					 * and needs_clean is false.
   1363 					 */
   1364 
   1365 					nextpg = TAILQ_NEXT(tpg, listq);
   1366 					uvm_pagefree(tpg);
   1367 					if (pagedaemon)
   1368 						uvmexp.pdfreed++;
   1369 				}
   1370 			}
   1371 		}
   1372 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1373 			uvm_unlock_pageq();
   1374 		}
   1375 		if (needs_clean) {
   1376 
   1377 			/*
   1378 			 * start the i/o.  if we're traversing by list,
   1379 			 * keep our place in the list with a marker page.
   1380 			 */
   1381 
   1382 			if (by_list) {
   1383 				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
   1384 				    listq);
   1385 			}
   1386 			simple_unlock(slock);
   1387 			error = GOP_WRITE(vp, pgs, npages, flags);
   1388 			simple_lock(slock);
   1389 			if (by_list) {
   1390 				pg = TAILQ_NEXT(&curmp, listq);
   1391 				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
   1392 			}
   1393 			if (error) {
   1394 				break;
   1395 			}
   1396 			if (by_list) {
   1397 				continue;
   1398 			}
   1399 		}
   1400 
   1401 		/*
   1402 		 * find the next page and continue if there was no error.
   1403 		 */
   1404 
   1405 		if (by_list) {
   1406 			if (nextpg) {
   1407 				pg = nextpg;
   1408 				nextpg = NULL;
   1409 			} else {
   1410 				pg = TAILQ_NEXT(pg, listq);
   1411 			}
   1412 		} else {
   1413 			off += (npages - nback) << PAGE_SHIFT;
   1414 			if (off < endoff) {
   1415 				pg = uvm_pagelookup(uobj, off);
   1416 			}
   1417 		}
   1418 	}
   1419 	if (by_list) {
   1420 		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
   1421 		PRELE(l);
   1422 	}
   1423 
   1424 	/*
   1425 	 * if we're cleaning and there was nothing to clean,
   1426 	 * take us off the syncer list.  if we started any i/o
   1427 	 * and we're doing sync i/o, wait for all writes to finish.
   1428 	 */
   1429 
   1430 	s = splbio();
   1431 	if ((flags & PGO_CLEANIT) && wasclean && gp->g_dirtygen == dirtygen &&
   1432 	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
   1433 	    LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
   1434 	    (vp->v_flag & VONWORKLST)) {
   1435 		vp->v_flag &= ~VONWORKLST;
   1436 		LIST_REMOVE(vp, v_synclist);
   1437 	}
   1438 	splx(s);
   1439 
   1440 #if !defined(DEBUG)
   1441 skip_scan:
   1442 #endif /* !defined(DEBUG) */
   1443 	if (!wasclean && !async) {
   1444 		s = splbio();
   1445 		/*
   1446 		 * XXX - we want simple_unlock(&global_v_numoutput_slock);
   1447 		 *	 but the slot in ltsleep() is taken!
   1448 		 * XXX - try to recover from missed wakeups with a timeout..
   1449 		 *	 must think of something better.
   1450 		 */
   1451 		while (vp->v_numoutput != 0) {
   1452 			vp->v_flag |= VBWAIT;
   1453 			UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, slock, FALSE,
   1454 			    "genput2", hz);
   1455 			simple_lock(slock);
   1456 		}
   1457 		splx(s);
   1458 	}
   1459 	simple_unlock(&uobj->vmobjlock);
   1460 	return (error);
   1461 }
   1462 
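/*
 * genfs_gop_write: the default GOP_WRITE() implementation.  Maps the
 * caller's busy pages with uvm_pagermapin(), carves the range into
 * contiguous block runs with VOP_BMAP(), and issues the i/o via
 * VOP_STRATEGY(); the i/o is asynchronous unless PGO_SYNCIO is set.
 */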
   1463 int
   1464 genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1465 {
   1466 	int s, error, run;
   1467 	int fs_bshift, dev_bshift;
   1468 	vaddr_t kva;
   1469 	off_t eof, offset, startoffset;
   1470 	size_t bytes, iobytes, skipbytes;
   1471 	daddr_t lbn, blkno;
   1472 	struct vm_page *pg;
   1473 	struct buf *mbp, *bp;
   1474 	struct vnode *devvp;
   1475 	boolean_t async = (flags & PGO_SYNCIO) == 0;
   1476 	UVMHIST_FUNC("genfs_gop_write"); UVMHIST_CALLED(ubchist);
   1477 
   1478 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1479 	    vp, pgs, npages, flags);
   1480 
   1481 	GOP_SIZE(vp, vp->v_size, &eof, GOP_SIZE_WRITE);
   1482 	if (vp->v_type == VREG) {
   1483 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1484 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1485 	} else {
   1486 		fs_bshift = DEV_BSHIFT;
   1487 		dev_bshift = DEV_BSHIFT;
   1488 	}
   1489 	error = 0;
   1490 	pg = pgs[0];
   1491 	startoffset = pg->offset;
   1492 	bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
   1493 	skipbytes = 0;
   1494 	KASSERT(bytes != 0);
   1495 
   1496 	kva = uvm_pagermapin(pgs, npages,
   1497 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1498 
   1499 	s = splbio();
   1500 	simple_lock(&global_v_numoutput_slock);
   1501 	vp->v_numoutput += 2;
   1502 	simple_unlock(&global_v_numoutput_slock);
   1503 	mbp = pool_get(&bufpool, PR_WAITOK);
   1504 	BUF_INIT(mbp);
   1505 	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
   1506 	    vp, mbp, vp->v_numoutput, bytes);
   1507 	splx(s);
   1508 	mbp->b_bufsize = npages << PAGE_SHIFT;
   1509 	mbp->b_data = (void *)kva;
   1510 	mbp->b_resid = mbp->b_bcount = bytes;
   1511 	mbp->b_flags = B_BUSY|B_WRITE|B_AGE| (async ? (B_CALL|B_ASYNC) : 0);
   1512 	mbp->b_iodone = uvm_aio_biodone;
   1513 	mbp->b_vp = vp;
   1514 
   1515 	bp = NULL;
   1516 	for (offset = startoffset;
   1517 	    bytes > 0;
   1518 	    offset += iobytes, bytes -= iobytes) {
   1519 		lbn = offset >> fs_bshift;
   1520 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1521 		if (error) {
   1522 			UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
   1523 			skipbytes += bytes;
   1524 			bytes = 0;
   1525 			break;
   1526 		}
   1527 
   1528 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1529 		    bytes);
   1530 		if (blkno == (daddr_t)-1) {
   1531 			skipbytes += iobytes;
   1532 			continue;
   1533 		}
   1534 
   1535 		/* if it's really one i/o, don't make a second buf */
   1536 		if (offset == startoffset && iobytes == bytes) {
   1537 			bp = mbp;
   1538 		} else {
   1539 			s = splbio();
   1540 			V_INCR_NUMOUTPUT(vp);
   1541 			bp = pool_get(&bufpool, PR_WAITOK);
   1542 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
   1543 			    vp, bp, vp->v_numoutput, 0);
   1544 			splx(s);
   1545 			BUF_INIT(bp);
   1546 			bp->b_data = (char *)kva +
   1547 			    (vaddr_t)(offset - pg->offset);
   1548 			bp->b_resid = bp->b_bcount = iobytes;
   1549 			bp->b_flags = B_BUSY|B_WRITE|B_CALL|B_ASYNC;
   1550 			bp->b_iodone = uvm_aio_biodone1;
   1551 			bp->b_vp = vp;
   1552 		}
   1553 		bp->b_lblkno = 0;
   1554 		bp->b_private = mbp;
   1555 		if (devvp->v_type == VBLK) {
   1556 			bp->b_dev = devvp->v_rdev;
   1557 		}
   1558 
   1559 		/* adjust physical blkno for partial blocks */
   1560 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1561 		    dev_bshift);
   1562 		UVMHIST_LOG(ubchist,
   1563 		    "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
   1564 		    vp, offset, bp->b_bcount, bp->b_blkno);
   1565 		if (curproc == uvm.pagedaemon_proc)
   1566 			BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
   1567 		else if (async)
   1568 			BIO_SETPRIO(bp, BPRIO_TIMENONCRITICAL);
   1569 		else
   1570 			BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
   1571 		VOP_STRATEGY(bp->b_vp, bp);
   1572 	}
   1573 	if (skipbytes) {
   1574 		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
   1575 		s = splbio();
   1576 		if (error) {
   1577 			mbp->b_flags |= B_ERROR;
   1578 			mbp->b_error = error;
   1579 		}
   1580 		mbp->b_resid -= skipbytes;
   1581 		if (mbp->b_resid == 0) {
   1582 			biodone(mbp);
   1583 		}
   1584 		splx(s);
   1585 	}
   1586 	if (async) {
   1587 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1588 		return (0);
   1589 	}
   1590 	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
   1591 	error = biowait(mbp);
   1592 	uvm_aio_aiodone(mbp);
   1593 	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
   1594 	return (error);
   1595 }
   1596 
   1597 /*
   1598  * VOP_PUTPAGES() for vnodes which never have pages.
   1599  */
   1600 
   1601 int
   1602 genfs_null_putpages(void *v)
   1603 {
   1604 	struct vop_putpages_args /* {
   1605 		struct vnode *a_vp;
   1606 		voff_t a_offlo;
   1607 		voff_t a_offhi;
   1608 		int a_flags;
   1609 	} */ *ap = v;
   1610 	struct vnode *vp = ap->a_vp;
   1611 
   1612 	KASSERT(vp->v_uobj.uo_npages == 0);
   1613 	simple_unlock(&vp->v_interlock);
   1614 	return (0);
   1615 }
   1616 
   1617 void
   1618 genfs_node_init(struct vnode *vp, const struct genfs_ops *ops)
   1619 {
   1620 	struct genfs_node *gp = VTOG(vp);
   1621 
   1622 	lockinit(&gp->g_glock, PINOD, "glock", 0, 0);
   1623 	gp->g_op = ops;
   1624 }
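
/*
 * Illustrative sketch (hypothetical "xxxfs"; member names assume the
 * genfs_ops definition in genfs_node.h): a file system supplies its
 * genfs_ops table and calls genfs_node_init() when it sets up a vnode:
 *
 *	static const struct genfs_ops xxxfs_genfsops = {
 *		.gop_size = genfs_size,
 *		.gop_alloc = xxxfs_gop_alloc,
 *		.gop_write = genfs_gop_write,
 *	};
 *	...
 *	genfs_node_init(vp, &xxxfs_genfsops);
 */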
   1625 
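/*
 * genfs_size: a simple GOP_SIZE() implementation which rounds the given
 * size up to the end of the file system block containing it, e.g. a
 * size of 1000 with an 8k block size yields *eobp == 8192.
 */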
   1626 void
   1627 genfs_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
   1628 {
   1629 	int bsize;
   1630 
   1631 	bsize = 1 << vp->v_mount->mnt_fs_bshift;
   1632 	*eobp = (size + bsize - 1) & ~(bsize - 1);
   1633 }
   1634 
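/*
 * genfs_compat_getpages: a getpages implementation for file systems
 * which have not been converted to UBC-style i/o; it fills PG_FAKE
 * pages the slow way, with one page-sized VOP_READ() per page.
 */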
   1635 int
   1636 genfs_compat_getpages(void *v)
   1637 {
   1638 	struct vop_getpages_args /* {
   1639 		struct vnode *a_vp;
   1640 		voff_t a_offset;
   1641 		struct vm_page **a_m;
   1642 		int *a_count;
   1643 		int a_centeridx;
   1644 		vm_prot_t a_access_type;
   1645 		int a_advice;
   1646 		int a_flags;
   1647 	} */ *ap = v;
   1648 
   1649 	off_t origoffset;
   1650 	struct vnode *vp = ap->a_vp;
   1651 	struct uvm_object *uobj = &vp->v_uobj;
   1652 	struct vm_page *pg, **pgs;
   1653 	vaddr_t kva;
   1654 	int i, error, orignpages, npages;
   1655 	struct iovec iov;
   1656 	struct uio uio;
   1657 	struct ucred *cred = curproc->p_ucred;
   1658 	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1659 
   1660 	error = 0;
   1661 	origoffset = ap->a_offset;
   1662 	orignpages = *ap->a_count;
   1663 	pgs = ap->a_m;
   1664 
   1665 	if (write && (vp->v_flag & VONWORKLST) == 0) {
   1666 		vn_syncer_add_to_worklist(vp, filedelay);
   1667 	}
   1668 	if (ap->a_flags & PGO_LOCKED) {
   1669 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
   1670 		    UFP_NOWAIT|UFP_NOALLOC| (write ? UFP_NORDONLY : 0));
   1671 
   1672 		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
   1673 	}
   1674 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1675 		simple_unlock(&uobj->vmobjlock);
   1676 		return (EINVAL);
   1677 	}
   1678 	npages = orignpages;
   1679 	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
   1680 	simple_unlock(&uobj->vmobjlock);
   1681 	kva = uvm_pagermapin(pgs, npages,
   1682 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1683 	for (i = 0; i < npages; i++) {
   1684 		pg = pgs[i];
   1685 		if ((pg->flags & PG_FAKE) == 0) {
   1686 			continue;
   1687 		}
   1688 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   1689 		iov.iov_len = PAGE_SIZE;
   1690 		uio.uio_iov = &iov;
   1691 		uio.uio_iovcnt = 1;
   1692 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   1693 		uio.uio_segflg = UIO_SYSSPACE;
   1694 		uio.uio_rw = UIO_READ;
   1695 		uio.uio_resid = PAGE_SIZE;
   1696 		uio.uio_procp = NULL;
   1697 		/* XXX vn_lock */
   1698 		error = VOP_READ(vp, &uio, 0, cred);
   1699 		if (error) {
   1700 			break;
   1701 		}
   1702 		if (uio.uio_resid) {
   1703 			memset(iov.iov_base, 0, uio.uio_resid);
   1704 		}
   1705 	}
   1706 	uvm_pagermapout(kva, npages);
   1707 	simple_lock(&uobj->vmobjlock);
   1708 	uvm_lock_pageq();
   1709 	for (i = 0; i < npages; i++) {
   1710 		pg = pgs[i];
   1711 		if (error && (pg->flags & PG_FAKE) != 0) {
   1712 			pg->flags |= PG_RELEASED;
   1713 		} else {
   1714 			pmap_clear_modify(pg);
   1715 			uvm_pageactivate(pg);
   1716 		}
   1717 	}
   1718 	if (error) {
   1719 		uvm_page_unbusy(pgs, npages);
   1720 	}
   1721 	uvm_unlock_pageq();
   1722 	simple_unlock(&uobj->vmobjlock);
   1723 	return (error);
   1724 }
   1725 
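/*
 * genfs_compat_gop_write: the matching GOP_WRITE() for unconverted
 * file systems; it pushes the pages out with a single VOP_WRITE()
 * and then fakes up a buf for uvm_aio_aiodone() to complete.
 */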
   1726 int
   1727 genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1728     int flags)
   1729 {
   1730 	off_t offset;
   1731 	struct iovec iov;
   1732 	struct uio uio;
   1733 	struct ucred *cred = curproc->p_ucred;
   1734 	struct buf *bp;
   1735 	vaddr_t kva;
   1736 	int s, error;
   1737 
   1738 	offset = pgs[0]->offset;
   1739 	kva = uvm_pagermapin(pgs, npages,
   1740 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1741 
   1742 	iov.iov_base = (void *)kva;
   1743 	iov.iov_len = npages << PAGE_SHIFT;
   1744 	uio.uio_iov = &iov;
   1745 	uio.uio_iovcnt = 1;
   1746 	uio.uio_offset = offset;
   1747 	uio.uio_segflg = UIO_SYSSPACE;
   1748 	uio.uio_rw = UIO_WRITE;
   1749 	uio.uio_resid = npages << PAGE_SHIFT;
   1750 	uio.uio_procp = NULL;
   1751 	/* XXX vn_lock */
   1752 	error = VOP_WRITE(vp, &uio, 0, cred);
   1753 
   1754 	s = splbio();
   1755 	V_INCR_NUMOUTPUT(vp);
   1756 	bp = pool_get(&bufpool, PR_WAITOK);
   1757 	splx(s);
   1758 
   1759 	BUF_INIT(bp);
   1760 	bp->b_flags = B_BUSY | B_WRITE | B_AGE;
   1761 	bp->b_vp = vp;
   1762 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   1763 	bp->b_data = (char *)kva;
   1764 	bp->b_bcount = npages << PAGE_SHIFT;
   1765 	bp->b_bufsize = npages << PAGE_SHIFT;
   1766 	bp->b_resid = 0;
   1767 	if (error) {
   1768 		bp->b_flags |= B_ERROR;
   1769 		bp->b_error = error;
   1770 	}
   1771 	uvm_aio_aiodone(bp);
   1772 	return (error);
   1773 }
   1774 
   1775 static void
   1776 filt_genfsdetach(struct knote *kn)
   1777 {
   1778 	struct vnode *vp = (struct vnode *)kn->kn_hook;
   1779 
   1780 	/* XXXLUKEM lock the struct? */
   1781 	SLIST_REMOVE(&vp->v_klist, kn, knote, kn_selnext);
   1782 }
   1783 
   1784 static int
   1785 filt_genfsread(struct knote *kn, long hint)
   1786 {
   1787 	struct vnode *vp = (struct vnode *)kn->kn_hook;
   1788 
   1789 	/*
   1790 	 * filesystem is gone, so set the EOF flag and schedule
   1791 	 * the knote for deletion.
   1792 	 */
   1793 	if (hint == NOTE_REVOKE) {
   1794 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
   1795 		return (1);
   1796 	}
   1797 
   1798 	/* XXXLUKEM lock the struct? */
   1799 	kn->kn_data = vp->v_size - kn->kn_fp->f_offset;
   1800 	return (kn->kn_data != 0);
   1801 }
   1802 
   1803 static int
   1804 filt_genfsvnode(struct knote *kn, long hint)
   1805 {
   1806 
   1807 	if (kn->kn_sfflags & hint)
   1808 		kn->kn_fflags |= hint;
   1809 	if (hint == NOTE_REVOKE) {
   1810 		kn->kn_flags |= EV_EOF;
   1811 		return (1);
   1812 	}
   1813 	return (kn->kn_fflags != 0);
   1814 }
   1815 
   1816 static const struct filterops genfsread_filtops =
   1817 	{ 1, NULL, filt_genfsdetach, filt_genfsread };
   1818 static const struct filterops genfsvnode_filtops =
   1819 	{ 1, NULL, filt_genfsdetach, filt_genfsvnode };
   1820 
   1821 int
   1822 genfs_kqfilter(void *v)
   1823 {
   1824 	struct vop_kqfilter_args /* {
   1825 		struct vnode	*a_vp;
   1826 		struct knote	*a_kn;
   1827 	} */ *ap = v;
   1828 	struct vnode *vp;
   1829 	struct knote *kn;
   1830 
   1831 	vp = ap->a_vp;
   1832 	kn = ap->a_kn;
   1833 	switch (kn->kn_filter) {
   1834 	case EVFILT_READ:
   1835 		kn->kn_fop = &genfsread_filtops;
   1836 		break;
   1837 	case EVFILT_VNODE:
   1838 		kn->kn_fop = &genfsvnode_filtops;
   1839 		break;
   1840 	default:
   1841 		return (1);
   1842 	}
   1843 
   1844 	kn->kn_hook = vp;
   1845 
   1846 	/* XXXLUKEM lock the struct? */
   1847 	SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);
   1848 
   1849 	return (0);
   1850 }
   1851
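
/*
 * Illustrative userland sketch (not kernel code): how the EVFILT_VNODE
 * path above is typically exercised:
 *
 *	struct kevent ev;
 *	int kq = kqueue();
 *	EV_SET(&ev, fd, EVFILT_VNODE, EV_ADD | EV_CLEAR,
 *	    NOTE_DELETE | NOTE_WRITE | NOTE_RENAME, 0, 0);
 *	kevent(kq, &ev, 1, NULL, 0, NULL);
 */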