      1 /*	$NetBSD: genfs_vnops.c,v 1.129 2006/09/15 15:51:12 yamt Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1982, 1986, 1989, 1993
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. Neither the name of the University nor the names of its contributors
     16  *    may be used to endorse or promote products derived from this software
     17  *    without specific prior written permission.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  *
     31  */
     32 
     33 #include <sys/cdefs.h>
     34 __KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.129 2006/09/15 15:51:12 yamt Exp $");
     35 
     36 #if defined(_KERNEL_OPT)
     37 #include "opt_nfsserver.h"
     38 #endif
     39 
     40 #include <sys/param.h>
     41 #include <sys/systm.h>
     42 #include <sys/proc.h>
     43 #include <sys/kernel.h>
     44 #include <sys/mount.h>
     45 #include <sys/namei.h>
     46 #include <sys/vnode.h>
     47 #include <sys/fcntl.h>
     48 #include <sys/malloc.h>
     49 #include <sys/poll.h>
     50 #include <sys/mman.h>
     51 #include <sys/file.h>
     52 #include <sys/kauth.h>
     53 
     54 #include <miscfs/genfs/genfs.h>
     55 #include <miscfs/genfs/genfs_node.h>
     56 #include <miscfs/specfs/specdev.h>
     57 
     58 #include <uvm/uvm.h>
     59 #include <uvm/uvm_pager.h>
     60 
     61 #ifdef NFSSERVER
     62 #include <nfs/rpcv2.h>
     63 #include <nfs/nfsproto.h>
     64 #include <nfs/nfs.h>
     65 #include <nfs/nqnfs.h>
     66 #include <nfs/nfs_var.h>
     67 #endif
     68 
     69 static inline void genfs_rel_pages(struct vm_page **, int);
     70 static void filt_genfsdetach(struct knote *);
     71 static int filt_genfsread(struct knote *, long);
     72 static int filt_genfsvnode(struct knote *, long);
     73 
     74 #define MAX_READ_PAGES	16 	/* XXXUBC 16 */
     75 
     76 int
     77 genfs_poll(void *v)
     78 {
     79 	struct vop_poll_args /* {
     80 		struct vnode *a_vp;
     81 		int a_events;
     82 		struct lwp *a_l;
     83 	} */ *ap = v;
     84 
     85 	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
     86 }
     87 
     88 int
     89 genfs_seek(void *v)
     90 {
     91 	struct vop_seek_args /* {
     92 		struct vnode *a_vp;
     93 		off_t a_oldoff;
     94 		off_t a_newoff;
     95 		kauth_cred_t cred;
     96 	} */ *ap = v;
     97 
     98 	if (ap->a_newoff < 0)
     99 		return (EINVAL);
    100 
    101 	return (0);
    102 }
    103 
    104 int
    105 genfs_abortop(void *v)
    106 {
    107 	struct vop_abortop_args /* {
    108 		struct vnode *a_dvp;
    109 		struct componentname *a_cnp;
    110 	} */ *ap = v;
    111 
    112 	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
    113 		PNBUF_PUT(ap->a_cnp->cn_pnbuf);
    114 	return (0);
    115 }
    116 
    117 int
    118 genfs_fcntl(void *v)
    119 {
    120 	struct vop_fcntl_args /* {
    121 		struct vnode *a_vp;
    122 		u_int a_command;
    123 		caddr_t a_data;
    124 		int a_fflag;
    125 		kauth_cred_t a_cred;
    126 		struct lwp *a_l;
    127 	} */ *ap = v;
    128 
    129 	if (ap->a_command == F_SETFL)
    130 		return (0);
    131 	else
    132 		return (EOPNOTSUPP);
    133 }
    134 
    135 /*ARGSUSED*/
    136 int
    137 genfs_badop(void *v)
    138 {
    139 
    140 	panic("genfs: bad op");
    141 }
    142 
    143 /*ARGSUSED*/
    144 int
    145 genfs_nullop(void *v)
    146 {
    147 
    148 	return (0);
    149 }
    150 
    151 /*ARGSUSED*/
    152 int
    153 genfs_einval(void *v)
    154 {
    155 
    156 	return (EINVAL);
    157 }
    158 
    159 /*
    160  * Called when an fs doesn't support a particular vop.
    161  * This takes care to vrele, vput, or VOP_UNLOCK any vnodes passed in.
    162  */
    163 int
    164 genfs_eopnotsupp(void *v)
    165 {
    166 	struct vop_generic_args /* {
    167 		struct vnodeop_desc *a_desc;
    168 		/ * other random data follows, presumably * /
    169 	} */ *ap = v;
    170 	struct vnodeop_desc *desc = ap->a_desc;
    171 	struct vnode *vp, *vp_last = NULL;
    172 	int flags, i, j, offset;
    173 
    174 	flags = desc->vdesc_flags;
    175 	for (i = 0; i < VDESC_MAX_VPS; flags >>=1, i++) {
    176 		if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
    177 			break;	/* stop at end of list */
    178 		if ((j = flags & VDESC_VP0_WILLPUT)) {
    179 			vp = *VOPARG_OFFSETTO(struct vnode **, offset, ap);
    180 
    181 			/* Skip if NULL */
    182 			if (!vp)
    183 				continue;
    184 
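			/*
			 * VDESC_VP0_WILLPUT is the union of the WILLRELE
			 * and WILLUNLOCK bits, so j distinguishes all
			 * three cases below.
			 */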
    185 			switch (j) {
    186 			case VDESC_VP0_WILLPUT:
    187 				/* Check for dvp == vp cases */
    188 				if (vp == vp_last)
    189 					vrele(vp);
    190 				else {
    191 					vput(vp);
    192 					vp_last = vp;
    193 				}
    194 				break;
    195 			case VDESC_VP0_WILLUNLOCK:
    196 				VOP_UNLOCK(vp, 0);
    197 				break;
    198 			case VDESC_VP0_WILLRELE:
    199 				vrele(vp);
    200 				break;
    201 			}
    202 		}
    203 	}
    204 
    205 	return (EOPNOTSUPP);
    206 }
    207 
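/*
 * A minimal sketch of how genfs_eopnotsupp is typically used: file
 * systems point vnode operations they do not implement at it from
 * their vnodeop tables, so that vnodes the caller passed in locked or
 * referenced are still released.  The "examplefs" fragment below is
 * hypothetical, not from the original sources.
 */
#if 0
const struct vnodeopv_entry_desc examplefs_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_whiteout_desc, genfs_eopnotsupp },	/* whiteout */
	{ &vop_poll_desc, genfs_poll },			/* poll */
	{ &vop_seek_desc, genfs_seek },			/* seek */
	{ NULL, NULL }
};
#endif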
    208 /*ARGSUSED*/
    209 int
    210 genfs_ebadf(void *v)
    211 {
    212 
    213 	return (EBADF);
    214 }
    215 
    216 /* ARGSUSED */
    217 int
    218 genfs_enoioctl(void *v)
    219 {
    220 
    221 	return (EPASSTHROUGH);
    222 }
    223 
    224 
    225 /*
    226  * Eliminate all activity associated with the requested vnode
    227  * and with all vnodes aliased to the requested vnode.
    228  */
    229 int
    230 genfs_revoke(void *v)
    231 {
    232 	struct vop_revoke_args /* {
    233 		struct vnode *a_vp;
    234 		int a_flags;
    235 	} */ *ap = v;
    236 	struct vnode *vp, *vq;
    237 	struct lwp *l = curlwp;		/* XXX */
    238 
    239 #ifdef DIAGNOSTIC
    240 	if ((ap->a_flags & REVOKEALL) == 0)
    241 		panic("genfs_revoke: not revokeall");
    242 #endif
    243 
    244 	vp = ap->a_vp;
    245 	simple_lock(&vp->v_interlock);
    246 
    247 	if (vp->v_flag & VALIASED) {
    248 		/*
    249 		 * If a vgone (or vclean) is already in progress,
    250 		 * wait until it is done and return.
    251 		 */
    252 		if (vp->v_flag & VXLOCK) {
    253 			vp->v_flag |= VXWANT;
    254 			ltsleep(vp, PINOD|PNORELOCK, "vop_revokeall", 0,
    255 				&vp->v_interlock);
    256 			return (0);
    257 		}
    258 		/*
    259 		 * Ensure that vp will not be vgone'd while we
    260 		 * are eliminating its aliases.
    261 		 */
    262 		vp->v_flag |= VXLOCK;
    263 		simple_unlock(&vp->v_interlock);
    264 		while (vp->v_flag & VALIASED) {
    265 			simple_lock(&spechash_slock);
    266 			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
    267 				if (vq->v_rdev != vp->v_rdev ||
    268 				    vq->v_type != vp->v_type || vp == vq)
    269 					continue;
    270 				simple_unlock(&spechash_slock);
    271 				vgone(vq);
    272 				break;
    273 			}
    274 			if (vq == NULLVP)
    275 				simple_unlock(&spechash_slock);
    276 		}
    277 		/*
    278 		 * Remove the lock so that vgone below will
    279 		 * really eliminate the vnode after which time
    280 		 * vgone will awaken any sleepers.
    281 		 */
    282 		simple_lock(&vp->v_interlock);
    283 		vp->v_flag &= ~VXLOCK;
    284 	}
    285 	vgonel(vp, l);
    286 	return (0);
    287 }
    288 
    289 /*
    290  * Lock the node.
    291  */
    292 int
    293 genfs_lock(void *v)
    294 {
    295 	struct vop_lock_args /* {
    296 		struct vnode *a_vp;
    297 		int a_flags;
    298 	} */ *ap = v;
    299 	struct vnode *vp = ap->a_vp;
    300 
    301 	return (lockmgr(vp->v_vnlock, ap->a_flags, &vp->v_interlock));
    302 }
    303 
    304 /*
    305  * Unlock the node.
    306  */
    307 int
    308 genfs_unlock(void *v)
    309 {
    310 	struct vop_unlock_args /* {
    311 		struct vnode *a_vp;
    312 		int a_flags;
    313 	} */ *ap = v;
    314 	struct vnode *vp = ap->a_vp;
    315 
    316 	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE,
    317 	    &vp->v_interlock));
    318 }
    319 
    320 /*
    321  * Return whether or not the node is locked.
    322  */
    323 int
    324 genfs_islocked(void *v)
    325 {
    326 	struct vop_islocked_args /* {
    327 		struct vnode *a_vp;
    328 	} */ *ap = v;
    329 	struct vnode *vp = ap->a_vp;
    330 
    331 	return (lockstatus(vp->v_vnlock));
    332 }
    333 
    334 /*
    335  * Stubs to use when there is no locking to be done on the underlying object.
    336  */
    337 int
    338 genfs_nolock(void *v)
    339 {
    340 	struct vop_lock_args /* {
    341 		struct vnode *a_vp;
    342 		int a_flags;
    343 		struct lwp *a_l;
    344 	} */ *ap = v;
    345 
    346 	/*
    347 	 * Since we are not using the lock manager, we must clear
    348 	 * the interlock here.
    349 	 */
    350 	if (ap->a_flags & LK_INTERLOCK)
    351 		simple_unlock(&ap->a_vp->v_interlock);
    352 	return (0);
    353 }
    354 
    355 int
    356 genfs_nounlock(void *v)
    357 {
    358 
    359 	return (0);
    360 }
    361 
    362 int
    363 genfs_noislocked(void *v)
    364 {
    365 
    366 	return (0);
    367 }
    368 
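/*
 * A minimal sketch (hypothetical "examplefs" table fragment): file
 * systems whose vnodes use the lockmgr-based v_vnlock wire the
 * genfs_lock family into their vnodeop tables; those with no
 * underlying locking use the no-op stubs above instead.
 */
#if 0
	{ &vop_lock_desc, genfs_lock },			/* lock */
	{ &vop_unlock_desc, genfs_unlock },		/* unlock */
	{ &vop_islocked_desc, genfs_islocked },		/* islocked */
	/* or genfs_nolock, genfs_nounlock, genfs_noislocked */
#endif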
    369 /*
    370  * Local lease check for NFS servers.  Just set up args and let
    371  * nqsrv_getlease() do the rest.  If NFSSERVER is not in the kernel,
    372  * this is a null operation.
    373  */
    374 int
    375 genfs_lease_check(void *v)
    376 {
    377 #ifdef NFSSERVER
    378 	struct vop_lease_args /* {
    379 		struct vnode *a_vp;
    380 		struct lwp *a_l;
    381 		kauth_cred_t a_cred;
    382 		int a_flag;
    383 	} */ *ap = v;
    384 	u_int32_t duration = 0;
    385 	int cache;
    386 	u_quad_t frev;
    387 
    388 	(void) nqsrv_getlease(ap->a_vp, &duration, ND_CHECK | ap->a_flag,
    389 	    NQLOCALSLP, ap->a_l, (struct mbuf *)0, &cache, &frev, ap->a_cred);
    390 	return (0);
    391 #else
    392 	return (0);
    393 #endif /* NFSSERVER */
    394 }
    395 
    396 int
    397 genfs_mmap(void *v)
    398 {
    399 
    400 	return (0);
    401 }
    402 
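/*
 * Unbusy the given pages, marking the freshly allocated (PG_FAKE)
 * ones PG_RELEASED so that uvm_page_unbusy() frees them.  Called
 * with the object locked.
 */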
    403 static inline void
    404 genfs_rel_pages(struct vm_page **pgs, int npages)
    405 {
    406 	int i;
    407 
    408 	for (i = 0; i < npages; i++) {
    409 		struct vm_page *pg = pgs[i];
    410 
    411 		if (pg == NULL || pg == PGO_DONTCARE)
    412 			continue;
    413 		if (pg->flags & PG_FAKE) {
    414 			pg->flags |= PG_RELEASED;
    415 		}
    416 	}
    417 	uvm_lock_pageq();
    418 	uvm_page_unbusy(pgs, npages);
    419 	uvm_unlock_pageq();
    420 }
    421 
    422 /*
    423  * generic VM getpages routine.
    424  * Return PG_BUSY pages for the given range,
    425  * reading from backing store if necessary.
    426  */
    427 
    428 int
    429 genfs_getpages(void *v)
    430 {
    431 	struct vop_getpages_args /* {
    432 		struct vnode *a_vp;
    433 		voff_t a_offset;
    434 		struct vm_page **a_m;
    435 		int *a_count;
    436 		int a_centeridx;
    437 		vm_prot_t a_access_type;
    438 		int a_advice;
    439 		int a_flags;
    440 	} */ *ap = v;
    441 
    442 	off_t newsize, diskeof, memeof;
    443 	off_t offset, origoffset, startoffset, endoffset;
    444 	daddr_t lbn, blkno;
    445 	int i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
    446 	int fs_bshift, fs_bsize, dev_bshift;
    447 	int flags = ap->a_flags;
    448 	size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
    449 	vaddr_t kva;
    450 	struct buf *bp, *mbp;
    451 	struct vnode *vp = ap->a_vp;
    452 	struct vnode *devvp;
    453 	struct genfs_node *gp = VTOG(vp);
    454 	struct uvm_object *uobj = &vp->v_uobj;
    455 	struct vm_page *pg, **pgs, *pgs_onstack[MAX_READ_PAGES];
    456 	int pgs_size;
    457 	kauth_cred_t cred = curlwp->l_cred;		/* XXXUBC curlwp */
    458 	boolean_t async = (flags & PGO_SYNCIO) == 0;
    459 	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
    460 	boolean_t sawhole = FALSE;
    461 	boolean_t overwrite = (flags & PGO_OVERWRITE) != 0;
    462 	boolean_t blockalloc = write && (flags & PGO_NOBLOCKALLOC) == 0;
    463 	voff_t origvsize;
    464 	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);
    465 
    466 	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
    467 	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);
    468 
    469 	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
    470 	    vp->v_type == VLNK || vp->v_type == VBLK);
    471 
    472 	/* XXXUBC temp limit */
    473 	if (*ap->a_count > MAX_READ_PAGES) {
    474 		panic("genfs_getpages: too many pages");
    475 	}
    476 
    477 startover:
    478 	error = 0;
    479 	origvsize = vp->v_size;
    480 	origoffset = ap->a_offset;
    481 	orignpages = *ap->a_count;
    482 	GOP_SIZE(vp, vp->v_size, &diskeof, 0);
    483 	if (flags & PGO_PASTEOF) {
    484 		newsize = MAX(vp->v_size,
    485 		    origoffset + (orignpages << PAGE_SHIFT));
    486 		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
    487 	} else {
    488 		GOP_SIZE(vp, vp->v_size, &memeof, GOP_SIZE_MEM);
    489 	}
    490 	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
    491 	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
    492 	KASSERT(orignpages > 0);
    493 
    494 	/*
    495 	 * Bounds-check the request.
    496 	 */
    497 
    498 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
    499 		if ((flags & PGO_LOCKED) == 0) {
    500 			simple_unlock(&uobj->vmobjlock);
    501 		}
    502 		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
    503 		    origoffset, *ap->a_count, memeof,0);
    504 		return (EINVAL);
    505 	}
    506 
    507 	/* uobj is locked */
    508 
    509 	if ((flags & PGO_NOTIMESTAMP) == 0 &&
    510 	    (vp->v_type != VBLK ||
    511 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
    512 		int updflags = 0;
    513 
    514 		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
    515 			updflags = GOP_UPDATE_ACCESSED;
    516 		}
    517 		if (write) {
    518 			updflags |= GOP_UPDATE_MODIFIED;
    519 		}
    520 		if (updflags != 0) {
    521 			GOP_MARKUPDATE(vp, updflags);
    522 		}
    523 	}
    524 
    525 	if (write) {
    526 		gp->g_dirtygen++;
    527 		if ((vp->v_flag & VONWORKLST) == 0) {
    528 			vn_syncer_add_to_worklist(vp, filedelay);
    529 		}
    530 		if ((vp->v_flag & (VWRITEMAP|VWRITEMAPDIRTY)) == VWRITEMAP) {
    531 			vp->v_flag |= VWRITEMAPDIRTY;
    532 		}
    533 	}
    534 
    535 	/*
    536 	 * For PGO_LOCKED requests, just return whatever's in memory.
    537 	 */
    538 
    539 	if (flags & PGO_LOCKED) {
    540 		int nfound;
    541 
    542 		npages = *ap->a_count;
    543 #if defined(DEBUG)
    544 		for (i = 0; i < npages; i++) {
    545 			pg = ap->a_m[i];
    546 			KASSERT(pg == NULL || pg == PGO_DONTCARE);
    547 		}
    548 #endif /* defined(DEBUG) */
    549 		nfound = uvn_findpages(uobj, origoffset, &npages,
    550 		    ap->a_m, UFP_NOWAIT|UFP_NOALLOC|(write ? UFP_NORDONLY : 0));
    551 		KASSERT(npages == *ap->a_count);
    552 		if (nfound == 0) {
    553 			return EBUSY;
    554 		}
    555 		if (lockmgr(&gp->g_glock, LK_SHARED | LK_NOWAIT, NULL)) {
    556 			genfs_rel_pages(ap->a_m, npages);
    557 
    558 			/*
    559 			 * restore the array.
    560 			 */
    561 
    562 			for (i = 0; i < npages; i++) {
    563 				pg = ap->a_m[i];
    564 
    565 				if (pg != NULL && pg != PGO_DONTCARE) {
    566 					ap->a_m[i] = NULL;
    567 				}
    568 			}
    569 		} else {
    570 			lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    571 		}
    572 		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
    573 	}
    574 	simple_unlock(&uobj->vmobjlock);
    575 
    576 	/*
    577 	 * find the requested pages and make some simple checks.
    578 	 * leave space in the page array for a whole block.
    579 	 */
    580 
    581 	if (vp->v_type != VBLK) {
    582 		fs_bshift = vp->v_mount->mnt_fs_bshift;
    583 		dev_bshift = vp->v_mount->mnt_dev_bshift;
    584 	} else {
    585 		fs_bshift = DEV_BSHIFT;
    586 		dev_bshift = DEV_BSHIFT;
    587 	}
    588 	fs_bsize = 1 << fs_bshift;
    589 
    590 	orignpages = MIN(orignpages,
    591 	    round_page(memeof - origoffset) >> PAGE_SHIFT);
    592 	npages = orignpages;
    593 	startoffset = origoffset & ~(fs_bsize - 1);
    594 	endoffset = round_page((origoffset + (npages << PAGE_SHIFT) +
    595 	    fs_bsize - 1) & ~(fs_bsize - 1));
    596 	endoffset = MIN(endoffset, round_page(memeof));
    597 	ridx = (origoffset - startoffset) >> PAGE_SHIFT;
    598 
    599 	pgs_size = sizeof(struct vm_page *) *
    600 	    ((endoffset - startoffset) >> PAGE_SHIFT);
    601 	if (pgs_size > sizeof(pgs_onstack)) {
    602 		pgs = malloc(pgs_size, M_DEVBUF,
    603 		    (async ? M_NOWAIT : M_WAITOK) | M_ZERO);
    604 		if (pgs == NULL) {
    605 			return (ENOMEM);
    606 		}
    607 	} else {
    608 		pgs = pgs_onstack;
    609 		memset(pgs, 0, pgs_size);
    610 	}
    611 	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
    612 	    ridx, npages, startoffset, endoffset);
    613 
    614 	/*
    615 	 * hold g_glock to prevent a race with truncate.
    616 	 *
    617 	 * check if our idea of v_size is still valid.
    618 	 */
    619 
    620 	if (blockalloc) {
    621 		lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
    622 	} else {
    623 		lockmgr(&gp->g_glock, LK_SHARED, NULL);
    624 	}
    625 	simple_lock(&uobj->vmobjlock);
    626 	if (vp->v_size < origvsize) {
    627 		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    628 		if (pgs != pgs_onstack)
    629 			free(pgs, M_DEVBUF);
    630 		goto startover;
    631 	}
    632 
    633 	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
    634 	    async ? UFP_NOWAIT : UFP_ALL) != orignpages) {
    635 		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    636 		KASSERT(async != 0);
    637 		genfs_rel_pages(&pgs[ridx], orignpages);
    638 		simple_unlock(&uobj->vmobjlock);
    639 		if (pgs != pgs_onstack)
    640 			free(pgs, M_DEVBUF);
    641 		return (EBUSY);
    642 	}
    643 
    644 	/*
    645 	 * if the pages are already resident, just return them.
    646 	 */
    647 
    648 	for (i = 0; i < npages; i++) {
    649 		struct vm_page *pg1 = pgs[ridx + i];
    650 
    651 		if ((pg1->flags & PG_FAKE) ||
    652 		    (blockalloc && (pg1->flags & PG_RDONLY))) {
    653 			break;
    654 		}
    655 	}
    656 	if (i == npages) {
    657 		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    658 		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
    659 		npages += ridx;
    660 		goto out;
    661 	}
    662 
    663 	/*
    664 	 * if PGO_OVERWRITE is set, don't bother reading the pages.
    665 	 */
    666 
    667 	if (overwrite) {
    668 		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    669 		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);
    670 
    671 		for (i = 0; i < npages; i++) {
    672 			struct vm_page *pg1 = pgs[ridx + i];
    673 
    674 			pg1->flags &= ~(PG_RDONLY|PG_CLEAN);
    675 		}
    676 		npages += ridx;
    677 		goto out;
    678 	}
    679 
    680 	/*
    681 	 * the page wasn't resident and we're not overwriting,
    682 	 * so we're going to have to do some i/o.
    683 	 * find any additional pages needed to cover the expanded range.
    684 	 */
    685 
    686 	npages = (endoffset - startoffset) >> PAGE_SHIFT;
    687 	if (startoffset != origoffset || npages != orignpages) {
    688 
    689 		/*
    690 		 * we need to avoid deadlocks caused by locking
    691 		 * additional pages at lower offsets than pages we
    692 		 * already have locked.  unlock them all and start over.
    693 		 */
    694 
    695 		genfs_rel_pages(&pgs[ridx], orignpages);
    696 		memset(pgs, 0, pgs_size);
    697 
    698 		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
    699 		    startoffset, endoffset, 0,0);
    700 		npgs = npages;
    701 		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
    702 		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
    703 			lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    704 			KASSERT(async != 0);
    705 			genfs_rel_pages(pgs, npages);
    706 			simple_unlock(&uobj->vmobjlock);
    707 			if (pgs != pgs_onstack)
    708 				free(pgs, M_DEVBUF);
    709 			return (EBUSY);
    710 		}
    711 	}
    712 	simple_unlock(&uobj->vmobjlock);
    713 
    714 	/*
    715 	 * read the desired page(s).
    716 	 */
    717 
    718 	totalbytes = npages << PAGE_SHIFT;
    719 	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
    720 	tailbytes = totalbytes - bytes;
    721 	skipbytes = 0;
    722 
    723 	kva = uvm_pagermapin(pgs, npages,
    724 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
    725 
    726 	mbp = getiobuf();
    727 	mbp->b_bufsize = totalbytes;
    728 	mbp->b_data = (void *)kva;
    729 	mbp->b_resid = mbp->b_bcount = bytes;
    730 	mbp->b_flags = B_BUSY|B_READ| (async ? B_CALL|B_ASYNC : 0);
    731 	mbp->b_iodone = (async ? uvm_aio_biodone : 0);
    732 	mbp->b_vp = vp;
    733 	if (async)
    734 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
    735 	else
    736 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
    737 
    738 	/*
    739 	 * if EOF is in the middle of the range, zero the part past EOF.
    740 	 * if the page including EOF is not PG_FAKE, skip over it since
    741 	 * in that case it has valid data that we need to preserve.
    742 	 */
    743 
    744 	if (tailbytes > 0) {
    745 		size_t tailstart = bytes;
    746 
    747 		if ((pgs[bytes >> PAGE_SHIFT]->flags & PG_FAKE) == 0) {
    748 			tailstart = round_page(tailstart);
    749 			tailbytes -= tailstart - bytes;
    750 		}
    751 		UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
    752 		    kva, tailstart, tailbytes,0);
    753 		memset((void *)(kva + tailstart), 0, tailbytes);
    754 	}
    755 
    756 	/*
    757 	 * now loop over the pages, reading as needed.
    758 	 */
    759 
    760 	bp = NULL;
    761 	for (offset = startoffset;
    762 	    bytes > 0;
    763 	    offset += iobytes, bytes -= iobytes) {
    764 
    765 		/*
    766 		 * skip pages which don't need to be read.
    767 		 */
    768 
    769 		pidx = (offset - startoffset) >> PAGE_SHIFT;
    770 		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
    771 			size_t b;
    772 
    773 			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
    774 			if ((pgs[pidx]->flags & PG_RDONLY)) {
    775 				sawhole = TRUE;
    776 			}
    777 			b = MIN(PAGE_SIZE, bytes);
    778 			offset += b;
    779 			bytes -= b;
    780 			skipbytes += b;
    781 			pidx++;
    782 			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
    783 			    offset, 0,0,0);
    784 			if (bytes == 0) {
    785 				goto loopdone;
    786 			}
    787 		}
    788 
    789 		/*
    790 		 * bmap the file to find out the blkno to read from and
    791 		 * how much we can read in one i/o.  if bmap returns an error,
    792 		 * skip the rest of the top-level i/o.
    793 		 */
    794 
    795 		lbn = offset >> fs_bshift;
    796 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
    797 		if (error) {
    798 			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
    799 			    lbn, error,0,0);
    800 			skipbytes += bytes;
    801 			goto loopdone;
    802 		}
    803 
    804 		/*
    805 		 * see how many pages can be read with this i/o.
    806 		 * reduce the i/o size if necessary to avoid
    807 		 * overwriting pages with valid data.
    808 		 */
    809 
    810 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
    811 		    bytes);
    812 		if (offset + iobytes > round_page(offset)) {
    813 			pcount = 1;
    814 			while (pidx + pcount < npages &&
    815 			    pgs[pidx + pcount]->flags & PG_FAKE) {
    816 				pcount++;
    817 			}
    818 			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
    819 			    (offset - trunc_page(offset)));
    820 		}
    821 
    822 		/*
    823 		 * if this block isn't allocated, zero it instead of
    824 		 * reading it.  unless we are going to allocate blocks,
    825 		 * mark the pages we zeroed PG_RDONLY.
    826 		 */
    827 
    828 		if (blkno < 0) {
    829 			int holepages = (round_page(offset + iobytes) -
    830 			    trunc_page(offset)) >> PAGE_SHIFT;
    831 			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);
    832 
    833 			sawhole = TRUE;
    834 			memset((char *)kva + (offset - startoffset), 0,
    835 			    iobytes);
    836 			skipbytes += iobytes;
    837 
    838 			for (i = 0; i < holepages; i++) {
    839 				if (write) {
    840 					pgs[pidx + i]->flags &= ~PG_CLEAN;
    841 				}
    842 				if (!blockalloc) {
    843 					pgs[pidx + i]->flags |= PG_RDONLY;
    844 				}
    845 			}
    846 			continue;
    847 		}
    848 
    849 		/*
    850 		 * allocate a sub-buf for this piece of the i/o
    851 		 * (or just use mbp if there's only 1 piece),
    852 		 * and start it going.
    853 		 */
    854 
    855 		if (offset == startoffset && iobytes == bytes) {
    856 			bp = mbp;
    857 		} else {
    858 			bp = getiobuf();
    859 			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
    860 		}
    861 		bp->b_lblkno = 0;
    862 
    863 		/* adjust physical blkno for partial blocks */
    864 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
    865 		    dev_bshift);
    866 
    867 		UVMHIST_LOG(ubchist,
    868 		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
    869 		    bp, offset, iobytes, bp->b_blkno);
    870 
    871 		VOP_STRATEGY(devvp, bp);
    872 	}
    873 
    874 loopdone:
    875 	nestiobuf_done(mbp, skipbytes, error);
    876 	if (async) {
    877 		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
    878 		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    879 		if (pgs != pgs_onstack)
    880 			free(pgs, M_DEVBUF);
    881 		return (0);
    882 	}
    883 	if (bp != NULL) {
    884 		error = biowait(mbp);
    885 	}
    886 	putiobuf(mbp);
    887 	uvm_pagermapout(kva, npages);
    888 
    889 	/*
    890 	 * if we encountered a hole then we have to do a little more work.
    891 	 * for read faults, we marked the page PG_RDONLY so that future
    892 	 * write accesses to the page will fault again.
    893 	 * for write faults, we must make sure that the backing store for
    894 	 * the page is completely allocated while the pages are locked.
    895 	 */
    896 
    897 	if (!error && sawhole && blockalloc) {
    898 		error = GOP_ALLOC(vp, startoffset, npages << PAGE_SHIFT, 0,
    899 		    cred);
    900 		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
    901 		    startoffset, npages << PAGE_SHIFT, error,0);
    902 		if (!error) {
    903 			for (i = 0; i < npages; i++) {
    904 				if (pgs[i] == NULL) {
    905 					continue;
    906 				}
    907 				pgs[i]->flags &= ~(PG_CLEAN|PG_RDONLY);
    908 				UVMHIST_LOG(ubchist, "mark dirty pg %p",
    909 				    pgs[i],0,0,0);
    910 			}
    911 		}
    912 	}
    913 	lockmgr(&gp->g_glock, LK_RELEASE, NULL);
    914 	simple_lock(&uobj->vmobjlock);
    915 
    916 	/*
    917 	 * we're almost done!  release the pages...
    918 	 * for errors, we free the pages.
    919 	 * otherwise we activate them and mark them as valid and clean.
    920 	 * also, unbusy pages that were not actually requested.
    921 	 */
    922 
    923 	if (error) {
    924 		for (i = 0; i < npages; i++) {
    925 			if (pgs[i] == NULL) {
    926 				continue;
    927 			}
    928 			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    929 			    pgs[i], pgs[i]->flags, 0,0);
    930 			if (pgs[i]->flags & PG_FAKE) {
    931 				pgs[i]->flags |= PG_RELEASED;
    932 			}
    933 		}
    934 		uvm_lock_pageq();
    935 		uvm_page_unbusy(pgs, npages);
    936 		uvm_unlock_pageq();
    937 		simple_unlock(&uobj->vmobjlock);
    938 		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
    939 		if (pgs != pgs_onstack)
    940 			free(pgs, M_DEVBUF);
    941 		return (error);
    942 	}
    943 
    944 out:
    945 	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
    946 	uvm_lock_pageq();
    947 	for (i = 0; i < npages; i++) {
    948 		pg = pgs[i];
    949 		if (pg == NULL) {
    950 			continue;
    951 		}
    952 		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
    953 		    pg, pg->flags, 0,0);
    954 		if (pg->flags & PG_FAKE && !overwrite) {
    955 			pg->flags &= ~(PG_FAKE);
    956 			pmap_clear_modify(pgs[i]);
    957 		}
    958 		KASSERT(!write || !blockalloc || (pg->flags & PG_RDONLY) == 0);
    959 		if (i < ridx || i >= ridx + orignpages || async) {
    960 			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
    961 			    pg, pg->offset,0,0);
    962 			if (pg->flags & PG_WANTED) {
    963 				wakeup(pg);
    964 			}
    965 			if (pg->flags & PG_FAKE) {
    966 				KASSERT(overwrite);
    967 				uvm_pagezero(pg);
    968 			}
    969 			if (pg->flags & PG_RELEASED) {
    970 				uvm_pagefree(pg);
    971 				continue;
    972 			}
    973 			uvm_pageenqueue(pg);
    974 			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
    975 			UVM_PAGE_OWN(pg, NULL);
    976 		}
    977 	}
    978 	uvm_unlock_pageq();
    979 	simple_unlock(&uobj->vmobjlock);
    980 	if (ap->a_m != NULL) {
    981 		memcpy(ap->a_m, &pgs[ridx],
    982 		    orignpages * sizeof(struct vm_page *));
    983 	}
    984 	if (pgs != pgs_onstack)
    985 		free(pgs, M_DEVBUF);
    986 	return (0);
    987 }
    988 
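/*
 * A minimal sketch of the usual call pattern, assuming a page-aligned
 * offset "off" (this fragment is illustrative, not from the original
 * sources): genfs_getpages() is reached through VOP_GETPAGES() with
 * the object locked; on success it returns with the object unlocked
 * and the requested pages PG_BUSY.
 */
#if 0
	struct vm_page *m[1];
	int npages = 1;
	int error;

	simple_lock(&vp->v_uobj.vmobjlock);
	m[0] = NULL;
	error = VOP_GETPAGES(vp, off, m, &npages, 0,
	    VM_PROT_READ, 0, PGO_SYNCIO);
	/* on success, m[0] is busy and can be mapped in */
#endif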
    989 /*
    990  * generic VM putpages routine.
    991  * Write the given range of pages to backing store.
    992  *
    993  * => "offhi == 0" means flush all pages at or after "offlo".
    994  * => object should be locked by caller.   we may _unlock_ the object
    995  *	if (and only if) we need to clean a page (PGO_CLEANIT), or
    996  *	if PGO_SYNCIO is set and there are pages busy.
    997  *	we return with the object locked.
    998  * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
    999  *	thus, a caller might want to unlock higher level resources
   1000  *	(e.g. vm_map) before calling flush.
   1001  * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, then we will neither
   1002  *	unlock the object nor block.
   1003  * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
   1004  * => NOTE: we rely on the fact that the object's memq is a TAILQ and
   1005  *	that new pages are inserted on the tail end of the list.   thus,
   1006  *	we can make a complete pass through the object in one go by starting
   1007  *	at the head and working towards the tail (new pages are put in
   1008  *	front of us).
   1009  * => NOTE: we are allowed to lock the page queues, so the caller
   1010  *	must not be holding the page queue lock.
   1011  *
   1012  * note on "cleaning" object and PG_BUSY pages:
   1013  *	this routine is holding the lock on the object.   the only time
   1014  *	that it can run into a PG_BUSY page that it does not own is if
   1015  *	some other process has started I/O on the page (e.g. either
   1016  *	a pagein, or a pageout).    if the PG_BUSY page is being paged
   1017  *	in, then it can not be dirty (!PG_CLEAN) because no one has
   1018  *	had a chance to modify it yet.    if the PG_BUSY page is being
   1019  *	paged out then it means that someone else has already started
   1020  *	cleaning the page for us (how nice!).    in this case, if we
   1021  *	have syncio specified, then after we make our pass through the
   1022  *	object we need to wait for the other PG_BUSY pages to clear
   1023  *	off (i.e. we need to do an iosync).   also note that once a
   1024  *	page is PG_BUSY it must stay in its object until it is un-busyed.
   1025  *
   1026  * note on page traversal:
   1027  *	we can traverse the pages in an object either by going down the
   1028  *	linked list in "uobj->memq", or we can go over the address range
   1029  *	by page doing hash table lookups for each address.    depending
   1030  *	on how many pages are in the object it may be cheaper to do one
   1031  *	or the other.   we set "by_list" to true if we are using memq.
   1032  *	if the cost of a hash lookup was equal to the cost of the list
   1033  *	traversal we could compare the number of pages in the start->stop
   1034  *	range to the total number of pages in the object.   however, it
   1035  *	seems that a hash table lookup is more expensive than the linked
   1036  *	list traversal, so we multiply the number of pages in the
   1037  *	range by an estimate of the relatively higher cost of the hash lookup.
   1038  */
   1039 
   1040 int
   1041 genfs_putpages(void *v)
   1042 {
   1043 	struct vop_putpages_args /* {
   1044 		struct vnode *a_vp;
   1045 		voff_t a_offlo;
   1046 		voff_t a_offhi;
   1047 		int a_flags;
   1048 	} */ *ap = v;
   1049 	struct vnode *vp = ap->a_vp;
   1050 	struct uvm_object *uobj = &vp->v_uobj;
   1051 	struct simplelock *slock = &uobj->vmobjlock;
   1052 	off_t startoff = ap->a_offlo;
   1053 	off_t endoff = ap->a_offhi;
   1054 	off_t off;
   1055 	int flags = ap->a_flags;
   1056 	/* Even for strange MAXPHYS, the shift rounds down to a page */
   1057 	const int maxpages = MAXPHYS >> PAGE_SHIFT;
   1058 	int i, s, error, npages, nback;
   1059 	int freeflag;
   1060 	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
   1061 	boolean_t wasclean, by_list, needs_clean, yld;
   1062 	boolean_t async = (flags & PGO_SYNCIO) == 0;
   1063 	boolean_t pagedaemon = curproc == uvm.pagedaemon_proc;
   1064 	struct lwp *l = curlwp ? curlwp : &lwp0;
   1065 	struct genfs_node *gp = VTOG(vp);
   1066 	int dirtygen;
   1067 	boolean_t modified = FALSE;
   1068 	boolean_t cleanall;
   1069 
   1070 	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);
   1071 
   1072 	KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
   1073 	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
   1074 	KASSERT(startoff < endoff || endoff == 0);
   1075 
   1076 	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
   1077 	    vp, uobj->uo_npages, startoff, endoff - startoff);
   1078 
   1079 	KASSERT((vp->v_flag & VONWORKLST) != 0 ||
   1080 	    (vp->v_flag & VWRITEMAPDIRTY) == 0);
   1081 	if (uobj->uo_npages == 0) {
   1082 		s = splbio();
   1083 		if (vp->v_flag & VONWORKLST) {
   1084 			vp->v_flag &= ~VWRITEMAPDIRTY;
   1085 			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
   1086 				vp->v_flag &= ~VONWORKLST;
   1087 				LIST_REMOVE(vp, v_synclist);
   1088 			}
   1089 		}
   1090 		splx(s);
   1091 		simple_unlock(slock);
   1092 		return (0);
   1093 	}
   1094 
   1095 	/*
   1096 	 * the vnode has pages, set up to process the request.
   1097 	 */
   1098 
   1099 	error = 0;
   1100 	s = splbio();
   1101 	simple_lock(&global_v_numoutput_slock);
   1102 	wasclean = (vp->v_numoutput == 0);
   1103 	simple_unlock(&global_v_numoutput_slock);
   1104 	splx(s);
   1105 	off = startoff;
   1106 	if (endoff == 0 || flags & PGO_ALLPAGES) {
   1107 		endoff = trunc_page(LLONG_MAX);
   1108 	}
   1109 	by_list = (uobj->uo_npages <=
   1110 	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);
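	/*
	 * e.g. assuming UVM_PAGE_HASH_PENALTY is 4 (its historical
	 * value), a 16-page range is walked by list only if the
	 * object holds at most 64 pages.
	 */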
   1111 
   1112 #if !defined(DEBUG)
   1113 	/*
   1114 	 * if this vnode is known not to have dirty pages,
   1115 	 * don't bother to clean it out.
   1116 	 */
   1117 
   1118 	if ((vp->v_flag & VONWORKLST) == 0) {
   1119 		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
   1120 			goto skip_scan;
   1121 		}
   1122 		flags &= ~PGO_CLEANIT;
   1123 	}
   1124 #endif /* !defined(DEBUG) */
   1125 
   1126 	/*
   1127 	 * start the loop.  when scanning by list, hold the last page
   1128 	 * in the list before we start.  pages allocated after we start
   1129 	 * will be added to the end of the list, so we can stop at the
   1130 	 * current last page.
   1131 	 */
   1132 
   1133 	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
   1134 	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
   1135 	    (vp->v_flag & VONWORKLST) != 0;
   1136 	dirtygen = gp->g_dirtygen;
   1137 	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
   1138 	if (by_list) {
   1139 		curmp.uobject = uobj;
   1140 		curmp.offset = (voff_t)-1;
   1141 		curmp.flags = PG_BUSY;
   1142 		endmp.uobject = uobj;
   1143 		endmp.offset = (voff_t)-1;
   1144 		endmp.flags = PG_BUSY;
   1145 		pg = TAILQ_FIRST(&uobj->memq);
   1146 		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
   1147 		PHOLD(l);
   1148 	} else {
   1149 		pg = uvm_pagelookup(uobj, off);
   1150 	}
   1151 	nextpg = NULL;
   1152 	while (by_list || off < endoff) {
   1153 
   1154 		/*
   1155 		 * if the current page is not interesting, move on to the next.
   1156 		 */
   1157 
   1158 		KASSERT(pg == NULL || pg->uobject == uobj);
   1159 		KASSERT(pg == NULL ||
   1160 		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
   1161 		    (pg->flags & PG_BUSY) != 0);
   1162 		if (by_list) {
   1163 			if (pg == &endmp) {
   1164 				break;
   1165 			}
   1166 			if (pg->offset < startoff || pg->offset >= endoff ||
   1167 			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1168 				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1169 					wasclean = FALSE;
   1170 				}
   1171 				pg = TAILQ_NEXT(pg, listq);
   1172 				continue;
   1173 			}
   1174 			off = pg->offset;
   1175 		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
   1176 			if (pg != NULL) {
   1177 				wasclean = FALSE;
   1178 			}
   1179 			off += PAGE_SIZE;
   1180 			if (off < endoff) {
   1181 				pg = uvm_pagelookup(uobj, off);
   1182 			}
   1183 			continue;
   1184 		}
   1185 
   1186 		/*
   1187 		 * if the current page needs to be cleaned and it's busy,
   1188 		 * wait for it to become unbusy.
   1189 		 */
   1190 
   1191 		yld = (l->l_cpu->ci_schedstate.spc_flags &
   1192 		    SPCF_SHOULDYIELD) && !pagedaemon;
   1193 		if (pg->flags & PG_BUSY || yld) {
   1194 			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
   1195 			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
   1196 				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
   1197 				error = EDEADLK;
   1198 				break;
   1199 			}
   1200 			KASSERT(!pagedaemon);
   1201 			if (by_list) {
   1202 				TAILQ_INSERT_BEFORE(pg, &curmp, listq);
   1203 				UVMHIST_LOG(ubchist, "curmp next %p",
   1204 				    TAILQ_NEXT(&curmp, listq), 0,0,0);
   1205 			}
   1206 			if (yld) {
   1207 				simple_unlock(slock);
   1208 				preempt(1);
   1209 				simple_lock(slock);
   1210 			} else {
   1211 				pg->flags |= PG_WANTED;
   1212 				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
   1213 				simple_lock(slock);
   1214 			}
   1215 			if (by_list) {
   1216 				UVMHIST_LOG(ubchist, "after next %p",
   1217 				    TAILQ_NEXT(&curmp, listq), 0,0,0);
   1218 				pg = TAILQ_NEXT(&curmp, listq);
   1219 				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
   1220 			} else {
   1221 				pg = uvm_pagelookup(uobj, off);
   1222 			}
   1223 			continue;
   1224 		}
   1225 
   1226 		/*
   1227 		 * if we're freeing, remove all mappings of the page now.
   1228 		 * if we're cleaning, check if the page needs to be cleaned.
   1229 		 */
   1230 
   1231 		if (flags & PGO_FREE) {
   1232 			pmap_page_protect(pg, VM_PROT_NONE);
   1233 		} else if (flags & PGO_CLEANIT) {
   1234 
   1235 			/*
   1236 			 * if we still have some hope to pull this vnode off
   1237 			 * from the syncer queue, write-protect the page.
   1238 			 */
   1239 
   1240 			if (cleanall && wasclean &&
   1241 			    gp->g_dirtygen == dirtygen) {
   1242 
   1243 				/*
   1244 				 * uobj pages get wired only by uvm_fault
   1245 				 * where uobj is locked.
   1246 				 */
   1247 
   1248 				if (pg->wire_count == 0) {
   1249 					pmap_page_protect(pg,
   1250 					    VM_PROT_READ|VM_PROT_EXECUTE);
   1251 				} else {
   1252 					cleanall = FALSE;
   1253 				}
   1254 			}
   1255 		}
   1256 
   1257 		if (flags & PGO_CLEANIT) {
   1258 			needs_clean = pmap_clear_modify(pg) ||
   1259 			    (pg->flags & PG_CLEAN) == 0;
   1260 			pg->flags |= PG_CLEAN;
   1261 		} else {
   1262 			needs_clean = FALSE;
   1263 		}
   1264 
   1265 		/*
   1266 		 * if we're cleaning, build a cluster.
   1267 		 * the cluster will consist of pages which are currently dirty,
   1268 		 * but they will be returned to us marked clean.
   1269 		 * if not cleaning, just operate on the one page.
   1270 		 */
   1271 
   1272 		if (needs_clean) {
   1273 			KDASSERT((vp->v_flag & VONWORKLST));
   1274 			wasclean = FALSE;
   1275 			memset(pgs, 0, sizeof(pgs));
   1276 			pg->flags |= PG_BUSY;
   1277 			UVM_PAGE_OWN(pg, "genfs_putpages");
   1278 
   1279 			/*
   1280 			 * first look backward.
   1281 			 */
   1282 
   1283 			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
   1284 			nback = npages;
   1285 			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
   1286 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
   1287 			if (nback) {
   1288 				memmove(&pgs[0], &pgs[npages - nback],
   1289 				    nback * sizeof(pgs[0]));
   1290 				if (npages - nback < nback)
   1291 					memset(&pgs[nback], 0,
   1292 					    (npages - nback) * sizeof(pgs[0]));
   1293 				else
   1294 					memset(&pgs[npages - nback], 0,
   1295 					    nback * sizeof(pgs[0]));
   1296 			}
   1297 
   1298 			/*
   1299 			 * then plug in our page of interest.
   1300 			 */
   1301 
   1302 			pgs[nback] = pg;
   1303 
   1304 			/*
   1305 			 * then look forward to fill in the remaining space in
   1306 			 * the array of pages.
   1307 			 */
   1308 
   1309 			npages = maxpages - nback - 1;
   1310 			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
   1311 			    &pgs[nback + 1],
   1312 			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
   1313 			npages += nback + 1;
   1314 		} else {
   1315 			pgs[0] = pg;
   1316 			npages = 1;
   1317 			nback = 0;
   1318 		}
   1319 
   1320 		/*
   1321 		 * apply FREE or DEACTIVATE options if requested.
   1322 		 */
   1323 
   1324 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1325 			uvm_lock_pageq();
   1326 		}
   1327 		for (i = 0; i < npages; i++) {
   1328 			tpg = pgs[i];
   1329 			KASSERT(tpg->uobject == uobj);
   1330 			if (by_list && tpg == TAILQ_NEXT(pg, listq))
   1331 				pg = tpg;
   1332 			if (tpg->offset < startoff || tpg->offset >= endoff)
   1333 				continue;
   1334 			if (flags & PGO_DEACTIVATE && tpg->wire_count == 0) {
   1335 				(void) pmap_clear_reference(tpg);
   1336 				uvm_pagedeactivate(tpg);
   1337 			} else if (flags & PGO_FREE) {
   1338 				pmap_page_protect(tpg, VM_PROT_NONE);
   1339 				if (tpg->flags & PG_BUSY) {
   1340 					tpg->flags |= freeflag;
   1341 					if (pagedaemon) {
   1342 						uvmexp.paging++;
   1343 						uvm_pagedequeue(tpg);
   1344 					}
   1345 				} else {
   1346 
   1347 					/*
   1348 					 * ``page is not busy''
   1349 					 * implies that npages is 1
   1350 					 * and needs_clean is false.
   1351 					 */
   1352 
   1353 					nextpg = TAILQ_NEXT(tpg, listq);
   1354 					uvm_pagefree(tpg);
   1355 					if (pagedaemon)
   1356 						uvmexp.pdfreed++;
   1357 				}
   1358 			}
   1359 		}
   1360 		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
   1361 			uvm_unlock_pageq();
   1362 		}
   1363 		if (needs_clean) {
   1364 			modified = TRUE;
   1365 
   1366 			/*
   1367 			 * start the i/o.  if we're traversing by list,
   1368 			 * keep our place in the list with a marker page.
   1369 			 */
   1370 
   1371 			if (by_list) {
   1372 				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
   1373 				    listq);
   1374 			}
   1375 			simple_unlock(slock);
   1376 			error = GOP_WRITE(vp, pgs, npages, flags);
   1377 			simple_lock(slock);
   1378 			if (by_list) {
   1379 				pg = TAILQ_NEXT(&curmp, listq);
   1380 				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
   1381 			}
   1382 			if (error) {
   1383 				break;
   1384 			}
   1385 			if (by_list) {
   1386 				continue;
   1387 			}
   1388 		}
   1389 
   1390 		/*
   1391 		 * find the next page and continue if there was no error.
   1392 		 */
   1393 
   1394 		if (by_list) {
   1395 			if (nextpg) {
   1396 				pg = nextpg;
   1397 				nextpg = NULL;
   1398 			} else {
   1399 				pg = TAILQ_NEXT(pg, listq);
   1400 			}
   1401 		} else {
   1402 			off += (npages - nback) << PAGE_SHIFT;
   1403 			if (off < endoff) {
   1404 				pg = uvm_pagelookup(uobj, off);
   1405 			}
   1406 		}
   1407 	}
   1408 	if (by_list) {
   1409 		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
   1410 		PRELE(l);
   1411 	}
   1412 
   1413 	if (modified && (vp->v_flag & VWRITEMAPDIRTY) != 0 &&
   1414 	    (vp->v_type != VBLK ||
   1415 	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
   1416 		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
   1417 	}
   1418 
   1419 	/*
   1420 	 * if we're cleaning and there was nothing to clean,
   1421 	 * take us off the syncer list.  if we started any i/o
   1422 	 * and we're doing sync i/o, wait for all writes to finish.
   1423 	 */
   1424 
   1425 	s = splbio();
   1426 	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
   1427 	    (vp->v_flag & VONWORKLST) != 0) {
   1428 		vp->v_flag &= ~VWRITEMAPDIRTY;
   1429 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
   1430 			vp->v_flag &= ~VONWORKLST;
   1431 			LIST_REMOVE(vp, v_synclist);
   1432 		}
   1433 	}
   1434 	splx(s);
   1435 
   1436 #if !defined(DEBUG)
   1437 skip_scan:
   1438 #endif /* !defined(DEBUG) */
   1439 	if (!wasclean && !async) {
   1440 		s = splbio();
   1441 		/*
   1442 		 * XXX - we want simple_unlock(&global_v_numoutput_slock);
   1443 		 *	 but the slot in ltsleep() is taken!
   1444 		 * XXX - try to recover from missed wakeups with a timeout..
   1445 		 *	 must think of something better.
   1446 		 */
   1447 		while (vp->v_numoutput != 0) {
   1448 			vp->v_flag |= VBWAIT;
   1449 			UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, slock, FALSE,
   1450 			    "genput2", hz);
   1451 			simple_lock(slock);
   1452 		}
   1453 		splx(s);
   1454 	}
   1455 	simple_unlock(&uobj->vmobjlock);
   1456 	return (error);
   1457 }
   1458 
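/*
 * A minimal sketch of a whole-vnode synchronous flush through
 * VOP_PUTPAGES() (illustrative, not from the original sources).
 * v_interlock is the object lock; it must be held on entry and is
 * released on return.
 */
#if 0
	simple_lock(&vp->v_interlock);
	error = VOP_PUTPAGES(vp, 0, 0,
	    PGO_ALLPAGES | PGO_CLEANIT | PGO_SYNCIO);
#endif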
   1459 int
   1460 genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
   1461 {
   1462 	int s, error, run;
   1463 	int fs_bshift, dev_bshift;
   1464 	vaddr_t kva;
   1465 	off_t eof, offset, startoffset;
   1466 	size_t bytes, iobytes, skipbytes;
   1467 	daddr_t lbn, blkno;
   1468 	struct vm_page *pg;
   1469 	struct buf *mbp, *bp;
   1470 	struct vnode *devvp;
   1471 	boolean_t async = (flags & PGO_SYNCIO) == 0;
   1472 	UVMHIST_FUNC("genfs_gop_write"); UVMHIST_CALLED(ubchist);
   1473 
   1474 	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
   1475 	    vp, pgs, npages, flags);
   1476 
   1477 	GOP_SIZE(vp, vp->v_size, &eof, 0);
   1478 	if (vp->v_type != VBLK) {
   1479 		fs_bshift = vp->v_mount->mnt_fs_bshift;
   1480 		dev_bshift = vp->v_mount->mnt_dev_bshift;
   1481 	} else {
   1482 		fs_bshift = DEV_BSHIFT;
   1483 		dev_bshift = DEV_BSHIFT;
   1484 	}
   1485 	error = 0;
   1486 	pg = pgs[0];
   1487 	startoffset = pg->offset;
   1488 	bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
   1489 	skipbytes = 0;
   1490 	KASSERT(bytes != 0);
   1491 
   1492 	kva = uvm_pagermapin(pgs, npages,
   1493 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1494 
   1495 	s = splbio();
   1496 	simple_lock(&global_v_numoutput_slock);
   1497 	vp->v_numoutput += 2;
   1498 	simple_unlock(&global_v_numoutput_slock);
   1499 	splx(s);
   1500 	mbp = getiobuf();
   1501 	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
   1502 	    vp, mbp, vp->v_numoutput, bytes);
   1503 	mbp->b_bufsize = npages << PAGE_SHIFT;
   1504 	mbp->b_data = (void *)kva;
   1505 	mbp->b_resid = mbp->b_bcount = bytes;
   1506 	mbp->b_flags = B_BUSY|B_WRITE|B_AGE| (async ? (B_CALL|B_ASYNC) : 0);
   1507 	mbp->b_iodone = uvm_aio_biodone;
   1508 	mbp->b_vp = vp;
   1509 	if (curproc == uvm.pagedaemon_proc)
   1510 		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
   1511 	else if (async)
   1512 		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
   1513 	else
   1514 		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);
   1515 
   1516 	bp = NULL;
   1517 	for (offset = startoffset;
   1518 	    bytes > 0;
   1519 	    offset += iobytes, bytes -= iobytes) {
   1520 		lbn = offset >> fs_bshift;
   1521 		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
   1522 		if (error) {
   1523 			UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
   1524 			skipbytes += bytes;
   1525 			bytes = 0;
   1526 			break;
   1527 		}
   1528 
   1529 		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
   1530 		    bytes);
   1531 		if (blkno == (daddr_t)-1) {
   1532 			skipbytes += iobytes;
   1533 			continue;
   1534 		}
   1535 
   1536 		/* if it's really one i/o, don't make a second buf */
   1537 		if (offset == startoffset && iobytes == bytes) {
   1538 			bp = mbp;
   1539 		} else {
   1540 			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
   1541 			    vp, bp, vp->v_numoutput, 0);
   1542 			bp = getiobuf();
   1543 			nestiobuf_setup(mbp, bp, offset - pg->offset, iobytes);
   1544 		}
   1545 		bp->b_lblkno = 0;
   1546 
   1547 		/* adjust physical blkno for partial blocks */
   1548 		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
   1549 		    dev_bshift);
   1550 		UVMHIST_LOG(ubchist,
   1551 		    "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
   1552 		    vp, offset, bp->b_bcount, bp->b_blkno);
   1553 
   1554 		VOP_STRATEGY(devvp, bp);
   1555 	}
   1556 	if (skipbytes) {
   1557 		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
   1558 	}
   1559 	nestiobuf_done(mbp, skipbytes, error);
   1560 	if (async) {
   1561 		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
   1562 		return (0);
   1563 	}
   1564 	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
   1565 	error = biowait(mbp);
   1566 	uvm_aio_aiodone(mbp);
   1567 	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
   1568 	return (error);
   1569 }
   1570 
   1571 /*
   1572  * VOP_PUTPAGES() for vnodes which never have pages.
   1573  */
   1574 
   1575 int
   1576 genfs_null_putpages(void *v)
   1577 {
   1578 	struct vop_putpages_args /* {
   1579 		struct vnode *a_vp;
   1580 		voff_t a_offlo;
   1581 		voff_t a_offhi;
   1582 		int a_flags;
   1583 	} */ *ap = v;
   1584 	struct vnode *vp = ap->a_vp;
   1585 
   1586 	KASSERT(vp->v_uobj.uo_npages == 0);
   1587 	simple_unlock(&vp->v_interlock);
   1588 	return (0);
   1589 }
   1590 
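/*
 * A minimal illustrative fragment: vnodes that never cache pages
 * (e.g. the spec/fifo style of vnode) use this as their putpages
 * entry in the vnodeop table:
 *
 *	{ &vop_putpages_desc, genfs_null_putpages },
 */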
   1591 void
   1592 genfs_node_init(struct vnode *vp, const struct genfs_ops *ops)
   1593 {
   1594 	struct genfs_node *gp = VTOG(vp);
   1595 
   1596 	lockinit(&gp->g_glock, PINOD, "glock", 0, 0);
   1597 	gp->g_op = ops;
   1598 }
   1599 
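/*
 * A minimal sketch of how a file system hooks into the genfs layer
 * (the "examplefs" names are hypothetical; gop_size and gop_write
 * may point straight at the genfs defaults):
 */
#if 0
static const struct genfs_ops examplefs_genfsops = {
	.gop_size = genfs_size,
	.gop_alloc = examplefs_gop_alloc,
	.gop_write = genfs_gop_write,
	.gop_markupdate = examplefs_gop_markupdate,
};

	/* in the fs's vnode-allocation path: */
	genfs_node_init(vp, &examplefs_genfsops);
#endif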
   1600 void
   1601 genfs_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
   1602 {
   1603 	int bsize;
   1604 
   1605 	bsize = 1 << vp->v_mount->mnt_fs_bshift;
   1606 	*eobp = (size + bsize - 1) & ~(bsize - 1);
   1607 }
   1608 
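/*
 * Example: with an 8 kilobyte file-system block (mnt_fs_bshift == 13),
 * a size of 10000 rounds up to an end-of-block of 16384.
 */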
   1609 int
   1610 genfs_compat_getpages(void *v)
   1611 {
   1612 	struct vop_getpages_args /* {
   1613 		struct vnode *a_vp;
   1614 		voff_t a_offset;
   1615 		struct vm_page **a_m;
   1616 		int *a_count;
   1617 		int a_centeridx;
   1618 		vm_prot_t a_access_type;
   1619 		int a_advice;
   1620 		int a_flags;
   1621 	} */ *ap = v;
   1622 
   1623 	off_t origoffset;
   1624 	struct vnode *vp = ap->a_vp;
   1625 	struct uvm_object *uobj = &vp->v_uobj;
   1626 	struct vm_page *pg, **pgs;
   1627 	vaddr_t kva;
   1628 	int i, error, orignpages, npages;
   1629 	struct iovec iov;
   1630 	struct uio uio;
   1631 	kauth_cred_t cred = curlwp->l_cred;
   1632 	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
   1633 
   1634 	error = 0;
   1635 	origoffset = ap->a_offset;
   1636 	orignpages = *ap->a_count;
   1637 	pgs = ap->a_m;
   1638 
   1639 	if (write && (vp->v_flag & VONWORKLST) == 0) {
   1640 		vn_syncer_add_to_worklist(vp, filedelay);
   1641 	}
   1642 	if (ap->a_flags & PGO_LOCKED) {
   1643 		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
   1644 		    UFP_NOWAIT|UFP_NOALLOC| (write ? UFP_NORDONLY : 0));
   1645 
   1646 		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
   1647 	}
   1648 	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
   1649 		simple_unlock(&uobj->vmobjlock);
   1650 		return (EINVAL);
   1651 	}
   1652 	if ((ap->a_flags & PGO_SYNCIO) == 0) {
   1653 		simple_unlock(&uobj->vmobjlock);
   1654 		return 0;
   1655 	}
   1656 	npages = orignpages;
   1657 	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
   1658 	simple_unlock(&uobj->vmobjlock);
   1659 	kva = uvm_pagermapin(pgs, npages,
   1660 	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
   1661 	for (i = 0; i < npages; i++) {
   1662 		pg = pgs[i];
   1663 		if ((pg->flags & PG_FAKE) == 0) {
   1664 			continue;
   1665 		}
   1666 		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
   1667 		iov.iov_len = PAGE_SIZE;
   1668 		uio.uio_iov = &iov;
   1669 		uio.uio_iovcnt = 1;
   1670 		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
   1671 		uio.uio_rw = UIO_READ;
   1672 		uio.uio_resid = PAGE_SIZE;
   1673 		UIO_SETUP_SYSSPACE(&uio);
   1674 		/* XXX vn_lock */
   1675 		error = VOP_READ(vp, &uio, 0, cred);
   1676 		if (error) {
   1677 			break;
   1678 		}
   1679 		if (uio.uio_resid) {
   1680 			memset(iov.iov_base, 0, uio.uio_resid);
   1681 		}
   1682 	}
   1683 	uvm_pagermapout(kva, npages);
   1684 	simple_lock(&uobj->vmobjlock);
   1685 	uvm_lock_pageq();
   1686 	for (i = 0; i < npages; i++) {
   1687 		pg = pgs[i];
   1688 		if (error && (pg->flags & PG_FAKE) != 0) {
   1689 			pg->flags |= PG_RELEASED;
   1690 		} else {
   1691 			pmap_clear_modify(pg);
   1692 			uvm_pageactivate(pg);
   1693 		}
   1694 	}
   1695 	if (error) {
   1696 		uvm_page_unbusy(pgs, npages);
   1697 	}
   1698 	uvm_unlock_pageq();
   1699 	simple_unlock(&uobj->vmobjlock);
   1700 	return (error);
   1701 }
   1702 
   1703 int
   1704 genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
   1705     int flags)
   1706 {
   1707 	off_t offset;
   1708 	struct iovec iov;
   1709 	struct uio uio;
   1710 	kauth_cred_t cred = curlwp->l_cred;
   1711 	struct buf *bp;
   1712 	vaddr_t kva;
   1713 	int s, error;
   1714 
   1715 	offset = pgs[0]->offset;
   1716 	kva = uvm_pagermapin(pgs, npages,
   1717 	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
   1718 
   1719 	iov.iov_base = (void *)kva;
   1720 	iov.iov_len = npages << PAGE_SHIFT;
   1721 	uio.uio_iov = &iov;
   1722 	uio.uio_iovcnt = 1;
   1723 	uio.uio_offset = offset;
   1724 	uio.uio_rw = UIO_WRITE;
   1725 	uio.uio_resid = npages << PAGE_SHIFT;
   1726 	UIO_SETUP_SYSSPACE(&uio);
   1727 	/* XXX vn_lock */
   1728 	error = VOP_WRITE(vp, &uio, 0, cred);
   1729 
   1730 	s = splbio();
   1731 	V_INCR_NUMOUTPUT(vp);
   1732 	splx(s);
   1733 
   1734 	bp = getiobuf();
   1735 	bp->b_flags = B_BUSY | B_WRITE | B_AGE;
   1736 	bp->b_vp = vp;
   1737 	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
   1738 	bp->b_data = (char *)kva;
   1739 	bp->b_bcount = npages << PAGE_SHIFT;
   1740 	bp->b_bufsize = npages << PAGE_SHIFT;
   1741 	bp->b_resid = 0;
   1742 	if (error) {
   1743 		bp->b_flags |= B_ERROR;
   1744 		bp->b_error = error;
   1745 	}
   1746 	uvm_aio_aiodone(bp);
   1747 	return (error);
   1748 }
   1749 
   1750 static void
   1751 filt_genfsdetach(struct knote *kn)
   1752 {
   1753 	struct vnode *vp = (struct vnode *)kn->kn_hook;
   1754 
   1755 	/* XXXLUKEM lock the struct? */
   1756 	SLIST_REMOVE(&vp->v_klist, kn, knote, kn_selnext);
   1757 }
   1758 
   1759 static int
   1760 filt_genfsread(struct knote *kn, long hint)
   1761 {
   1762 	struct vnode *vp = (struct vnode *)kn->kn_hook;
   1763 
   1764 	/*
   1765 	 * filesystem is gone, so set the EOF flag and schedule
   1766 	 * the knote for deletion.
   1767 	 */
   1768 	if (hint == NOTE_REVOKE) {
   1769 		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
   1770 		return (1);
   1771 	}
   1772 
   1773 	/* XXXLUKEM lock the struct? */
   1774 	kn->kn_data = vp->v_size - kn->kn_fp->f_offset;
   1775 	return (kn->kn_data != 0);
   1776 }
   1777 
   1778 static int
   1779 filt_genfsvnode(struct knote *kn, long hint)
   1780 {
   1781 
   1782 	if (kn->kn_sfflags & hint)
   1783 		kn->kn_fflags |= hint;
   1784 	if (hint == NOTE_REVOKE) {
   1785 		kn->kn_flags |= EV_EOF;
   1786 		return (1);
   1787 	}
   1788 	return (kn->kn_fflags != 0);
   1789 }
   1790 
   1791 static const struct filterops genfsread_filtops =
   1792 	{ 1, NULL, filt_genfsdetach, filt_genfsread };
   1793 static const struct filterops genfsvnode_filtops =
   1794 	{ 1, NULL, filt_genfsdetach, filt_genfsvnode };
   1795 
   1796 int
   1797 genfs_kqfilter(void *v)
   1798 {
   1799 	struct vop_kqfilter_args /* {
   1800 		struct vnode	*a_vp;
   1801 		struct knote	*a_kn;
   1802 	} */ *ap = v;
   1803 	struct vnode *vp;
   1804 	struct knote *kn;
   1805 
   1806 	vp = ap->a_vp;
   1807 	kn = ap->a_kn;
   1808 	switch (kn->kn_filter) {
   1809 	case EVFILT_READ:
   1810 		kn->kn_fop = &genfsread_filtops;
   1811 		break;
   1812 	case EVFILT_VNODE:
   1813 		kn->kn_fop = &genfsvnode_filtops;
   1814 		break;
   1815 	default:
   1816 		return (1);
   1817 	}
   1818 
   1819 	kn->kn_hook = vp;
   1820 
   1821 	/* XXXLUKEM lock the struct? */
   1822 	SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);
   1823 
   1824 	return (0);
   1825 }
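/*
 * Note: these filterops back the EVFILT_READ and EVFILT_VNODE filters
 * that userland registers with kevent(2); hint values such as
 * NOTE_REVOKE arrive via VN_KNOTE() calls in the vnode layer.
 */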
   1826