/*	$NetBSD: genfs_vnops.c,v 1.126 2006/07/22 08:47:56 yamt Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: genfs_vnops.c,v 1.126 2006/07/22 08:47:56 yamt Exp $");

#if defined(_KERNEL_OPT)
#include "opt_nfsserver.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/malloc.h>
#include <sys/poll.h>
#include <sys/mman.h>
#include <sys/file.h>
#include <sys/kauth.h>

#include <miscfs/genfs/genfs.h>
#include <miscfs/genfs/genfs_node.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pager.h>

#ifdef NFSSERVER
#include <nfs/rpcv2.h>
#include <nfs/nfsproto.h>
#include <nfs/nfs.h>
#include <nfs/nqnfs.h>
#include <nfs/nfs_var.h>
#endif

static inline void genfs_rel_pages(struct vm_page **, int);
static void filt_genfsdetach(struct knote *);
static int filt_genfsread(struct knote *, long);
static int filt_genfsvnode(struct knote *, long);

#define MAX_READ_PAGES	16	/* XXXUBC 16 */

int
genfs_poll(void *v)
{
	struct vop_poll_args /* {
		struct vnode *a_vp;
		int a_events;
		struct lwp *a_l;
	} */ *ap = v;

	return (ap->a_events & (POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM));
}

int
genfs_seek(void *v)
{
	struct vop_seek_args /* {
		struct vnode *a_vp;
		off_t a_oldoff;
		off_t a_newoff;
		kauth_cred_t a_cred;
	} */ *ap = v;

	if (ap->a_newoff < 0)
		return (EINVAL);

	return (0);
}

int
genfs_abortop(void *v)
{
	struct vop_abortop_args /* {
		struct vnode *a_dvp;
		struct componentname *a_cnp;
	} */ *ap = v;

	if ((ap->a_cnp->cn_flags & (HASBUF | SAVESTART)) == HASBUF)
		PNBUF_PUT(ap->a_cnp->cn_pnbuf);
	return (0);
}

int
genfs_fcntl(void *v)
{
	struct vop_fcntl_args /* {
		struct vnode *a_vp;
		u_int a_command;
		caddr_t a_data;
		int a_fflag;
		kauth_cred_t a_cred;
		struct lwp *a_l;
	} */ *ap = v;

	if (ap->a_command == F_SETFL)
		return (0);
	else
		return (EOPNOTSUPP);
}

/*ARGSUSED*/
int
genfs_badop(void *v)
{

	panic("genfs: bad op");
}

/*ARGSUSED*/
int
genfs_nullop(void *v)
{

	return (0);
}

/*ARGSUSED*/
int
genfs_einval(void *v)
{

	return (EINVAL);
}

/*
 * Called when an fs doesn't support a particular vop.
 * This takes care to vrele, vput, or vunlock passed in vnodes.
 */
int
genfs_eopnotsupp(void *v)
{
	struct vop_generic_args /* {
		struct vnodeop_desc *a_desc;
		/ * other random data follows, presumably * /
	} */ *ap = v;
	struct vnodeop_desc *desc = ap->a_desc;
	struct vnode *vp, *vp_last = NULL;
	int flags, i, j, offset;

	flags = desc->vdesc_flags;
	for (i = 0; i < VDESC_MAX_VPS; flags >>= 1, i++) {
		if ((offset = desc->vdesc_vp_offsets[i]) == VDESC_NO_OFFSET)
			break;	/* stop at end of list */
		/*
		 * VDESC_VP0_WILLPUT covers both the WILLRELE and the
		 * WILLUNLOCK bits, so j selects one of the three
		 * cases below.
		 */
		if ((j = flags & VDESC_VP0_WILLPUT)) {
			vp = *VOPARG_OFFSETTO(struct vnode **, offset, ap);

			/* Skip if NULL */
			if (!vp)
				continue;

			switch (j) {
			case VDESC_VP0_WILLPUT:
				/* Check for dvp == vp cases */
				if (vp == vp_last)
					vrele(vp);
				else {
					vput(vp);
					vp_last = vp;
				}
				break;
			case VDESC_VP0_WILLUNLOCK:
				VOP_UNLOCK(vp, 0);
				break;
			case VDESC_VP0_WILLRELE:
				vrele(vp);
				break;
			}
		}
	}

	return (EOPNOTSUPP);
}
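
/*
 * Example (sketch, hypothetical "myfs"): an fs can point an unsupported
 * operation at genfs_eopnotsupp in its vnodeop table; any vnodes the
 * VOP contract says would be consumed are still released or unlocked:
 *
 *	{ &vop_rename_desc, genfs_eopnotsupp },
 */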

/*ARGSUSED*/
int
genfs_ebadf(void *v)
{

	return (EBADF);
}

/* ARGSUSED */
int
genfs_enoioctl(void *v)
{

	return (EPASSTHROUGH);
}


/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
int
genfs_revoke(void *v)
{
	struct vop_revoke_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp, *vq;
	struct lwp *l = curlwp;		/* XXX */

#ifdef DIAGNOSTIC
	if ((ap->a_flags & REVOKEALL) == 0)
		panic("genfs_revoke: not revokeall");
#endif

	vp = ap->a_vp;
	simple_lock(&vp->v_interlock);

	if (vp->v_flag & VALIASED) {
		/*
		 * If a vgone (or vclean) is already in progress,
		 * wait until it is done and return.
		 */
		if (vp->v_flag & VXLOCK) {
			vp->v_flag |= VXWANT;
			ltsleep(vp, PINOD|PNORELOCK, "vop_revokeall", 0,
				&vp->v_interlock);
			return (0);
		}
		/*
		 * Ensure that vp will not be vgone'd while we
		 * are eliminating its aliases.
		 */
		vp->v_flag |= VXLOCK;
		simple_unlock(&vp->v_interlock);
		while (vp->v_flag & VALIASED) {
			simple_lock(&spechash_slock);
			for (vq = *vp->v_hashchain; vq; vq = vq->v_specnext) {
				if (vq->v_rdev != vp->v_rdev ||
				    vq->v_type != vp->v_type || vp == vq)
					continue;
				simple_unlock(&spechash_slock);
				vgone(vq);
				break;
			}
			if (vq == NULLVP)
				simple_unlock(&spechash_slock);
		}
		/*
		 * Remove the lock so that vgone below will
		 * really eliminate the vnode after which time
		 * vgone will awaken any sleepers.
		 */
		simple_lock(&vp->v_interlock);
		vp->v_flag &= ~VXLOCK;
	}
	vgonel(vp, l);
	return (0);
}
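
/*
 * Example (sketch): normally reached through VOP_REVOKE(), e.g. from
 * revoke(2) or when the underlying device disappears:
 *
 *	VOP_REVOKE(vp, REVOKEALL);
 */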

/*
 * Lock the node.
 */
int
genfs_lock(void *v)
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags, &vp->v_interlock));
}

/*
 * Unlock the node.
 */
int
genfs_unlock(void *v)
{
	struct vop_unlock_args /* {
		struct vnode *a_vp;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockmgr(vp->v_vnlock, ap->a_flags | LK_RELEASE,
	    &vp->v_interlock));
}

/*
 * Return whether or not the node is locked.
 */
int
genfs_islocked(void *v)
{
	struct vop_islocked_args /* {
		struct vnode *a_vp;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	return (lockstatus(vp->v_vnlock));
}
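
/*
 * Example (sketch, hypothetical "myfs"): these are normally wired
 * into a vnodeop table rather than called directly:
 *
 *	{ &vop_lock_desc, genfs_lock },
 *	{ &vop_unlock_desc, genfs_unlock },
 *	{ &vop_islocked_desc, genfs_islocked },
 */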

/*
 * Stubs to use when there is no locking to be done on the underlying object.
 */
int
genfs_nolock(void *v)
{
	struct vop_lock_args /* {
		struct vnode *a_vp;
		int a_flags;
		struct lwp *a_l;
	} */ *ap = v;

	/*
	 * Since we are not using the lock manager, we must clear
	 * the interlock here.
	 */
	if (ap->a_flags & LK_INTERLOCK)
		simple_unlock(&ap->a_vp->v_interlock);
	return (0);
}

int
genfs_nounlock(void *v)
{

	return (0);
}

int
genfs_noislocked(void *v)
{

	return (0);
}

/*
 * Local lease check for NFS servers.  Just set up args and let
 * nqsrv_getlease() do the rest.  If NFSSERVER is not in the kernel,
 * this is a null operation.
 */
int
genfs_lease_check(void *v)
{
#ifdef NFSSERVER
	struct vop_lease_args /* {
		struct vnode *a_vp;
		struct lwp *a_l;
		kauth_cred_t a_cred;
		int a_flag;
	} */ *ap = v;
	u_int32_t duration = 0;
	int cache;
	u_quad_t frev;

	(void) nqsrv_getlease(ap->a_vp, &duration, ND_CHECK | ap->a_flag,
	    NQLOCALSLP, ap->a_l, (struct mbuf *)0, &cache, &frev, ap->a_cred);
	return (0);
#else
	return (0);
#endif /* NFSSERVER */
}

int
genfs_mmap(void *v)
{

	return (0);
}

/*
 * Unbusy the given pages; any page still marked PG_FAKE never
 * received valid data, so flag it PG_RELEASED and let
 * uvm_page_unbusy() dispose of it.
 */
static inline void
genfs_rel_pages(struct vm_page **pgs, int npages)
{
	int i;

	for (i = 0; i < npages; i++) {
		struct vm_page *pg = pgs[i];

		if (pg == NULL)
			continue;
		if (pg->flags & PG_FAKE) {
			pg->flags |= PG_RELEASED;
		}
	}
	uvm_lock_pageq();
	uvm_page_unbusy(pgs, npages);
	uvm_unlock_pageq();
}

/*
 * generic VM getpages routine.
 * Return PG_BUSY pages for the given range,
 * reading from backing store if necessary.
 */
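
/*
 * Example (sketch): a typical synchronous call.  The caller locks the
 * object; on success the requested pages come back PG_BUSY and the
 * object lock has been released:
 *
 *	struct vm_page *pg = NULL;
 *	int npages = 1, error;
 *
 *	simple_lock(&vp->v_uobj.vmobjlock);
 *	error = VOP_GETPAGES(vp, trunc_page(off), &pg, &npages, 0,
 *	    VM_PROT_READ, 0, PGO_SYNCIO);
 */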

int
genfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t newsize, diskeof, memeof;
	off_t offset, origoffset, startoffset, endoffset;
	daddr_t lbn, blkno;
	int i, error, npages, orignpages, npgs, run, ridx, pidx, pcount;
	int fs_bshift, fs_bsize, dev_bshift;
	int flags = ap->a_flags;
	size_t bytes, iobytes, tailbytes, totalbytes, skipbytes;
	vaddr_t kva;
	struct buf *bp, *mbp;
	struct vnode *vp = ap->a_vp;
	struct vnode *devvp;
	struct genfs_node *gp = VTOG(vp);
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs, *pgs_onstack[MAX_READ_PAGES];
	int pgs_size;
	kauth_cred_t cred = curproc->p_cred;		/* XXXUBC curlwp */
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;
	boolean_t sawhole = FALSE;
	boolean_t overwrite = (flags & PGO_OVERWRITE) != 0;
	boolean_t blockalloc = write && (flags & PGO_NOBLOCKALLOC) == 0;
	voff_t origvsize;
	UVMHIST_FUNC("genfs_getpages"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p off 0x%x/%x count %d",
	    vp, ap->a_offset >> 32, ap->a_offset, *ap->a_count);

	KASSERT(vp->v_type == VREG || vp->v_type == VDIR ||
	    vp->v_type == VLNK || vp->v_type == VBLK);

	/* XXXUBC temp limit */
	if (*ap->a_count > MAX_READ_PAGES) {
		panic("genfs_getpages: too many pages");
	}

startover:
	error = 0;
	origvsize = vp->v_size;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	GOP_SIZE(vp, vp->v_size, &diskeof, 0);
	if (flags & PGO_PASTEOF) {
		newsize = MAX(vp->v_size,
		    origoffset + (orignpages << PAGE_SHIFT));
		GOP_SIZE(vp, newsize, &memeof, GOP_SIZE_MEM);
	} else {
		GOP_SIZE(vp, vp->v_size, &memeof, GOP_SIZE_MEM);
	}
	KASSERT(ap->a_centeridx >= 0 && ap->a_centeridx <= orignpages);
	KASSERT((origoffset & (PAGE_SIZE - 1)) == 0 && origoffset >= 0);
	KASSERT(orignpages > 0);

	/*
	 * Bounds-check the request.
	 */

	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= memeof) {
		if ((flags & PGO_LOCKED) == 0) {
			simple_unlock(&uobj->vmobjlock);
		}
		UVMHIST_LOG(ubchist, "off 0x%x count %d goes past EOF 0x%x",
		    origoffset, *ap->a_count, memeof,0);
		return (EINVAL);
	}

	/* uobj is locked */

	if ((flags & PGO_NOTIMESTAMP) == 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		int updflags = 0;

		if ((vp->v_mount->mnt_flag & MNT_NOATIME) == 0) {
			updflags = GOP_UPDATE_ACCESSED;
		}
		if (write) {
			updflags |= GOP_UPDATE_MODIFIED;
		}
		if (updflags != 0) {
			GOP_MARKUPDATE(vp, updflags);
		}
	}

	if (write) {
		gp->g_dirtygen++;
		if ((vp->v_flag & VONWORKLST) == 0) {
			vn_syncer_add_to_worklist(vp, filedelay);
		}
		if ((vp->v_flag & (VWRITEMAP|VWRITEMAPDIRTY)) == VWRITEMAP) {
			vp->v_flag |= VWRITEMAPDIRTY;
		}
	}

	/*
	 * For PGO_LOCKED requests, just return whatever's in memory.
	 */

	if (flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT | UFP_NOALLOC | (write ? UFP_NORDONLY : 0));

		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
	}
	simple_unlock(&uobj->vmobjlock);

	/*
	 * find the requested pages and make some simple checks.
	 * leave space in the page array for a whole block.
	 */

	if (vp->v_type != VBLK) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	fs_bsize = 1 << fs_bshift;

	orignpages = MIN(orignpages,
	    round_page(memeof - origoffset) >> PAGE_SHIFT);
	npages = orignpages;
	startoffset = origoffset & ~(fs_bsize - 1);
	endoffset = round_page((origoffset + (npages << PAGE_SHIFT) +
	    fs_bsize - 1) & ~(fs_bsize - 1));
	endoffset = MIN(endoffset, round_page(memeof));
	ridx = (origoffset - startoffset) >> PAGE_SHIFT;

	pgs_size = sizeof(struct vm_page *) *
	    ((endoffset - startoffset) >> PAGE_SHIFT);
	if (pgs_size > sizeof(pgs_onstack)) {
		pgs = malloc(pgs_size, M_DEVBUF,
		    (async ? M_NOWAIT : M_WAITOK) | M_ZERO);
		if (pgs == NULL) {
			return (ENOMEM);
		}
	} else {
		pgs = pgs_onstack;
		memset(pgs, 0, pgs_size);
	}
	UVMHIST_LOG(ubchist, "ridx %d npages %d startoff %ld endoff %ld",
	    ridx, npages, startoffset, endoffset);

	/*
	 * hold g_glock to prevent a race with truncate.
	 *
	 * check if our idea of v_size is still valid.
	 */

	if (blockalloc) {
		lockmgr(&gp->g_glock, LK_EXCLUSIVE, NULL);
	} else {
		lockmgr(&gp->g_glock, LK_SHARED, NULL);
	}
	simple_lock(&uobj->vmobjlock);
	if (vp->v_size < origvsize) {
		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
		if (pgs != pgs_onstack)
			free(pgs, M_DEVBUF);
		goto startover;
	}

	if (uvn_findpages(uobj, origoffset, &npages, &pgs[ridx],
	    async ? UFP_NOWAIT : UFP_ALL) != orignpages) {
		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
		KASSERT(async != 0);
		genfs_rel_pages(&pgs[ridx], orignpages);
		simple_unlock(&uobj->vmobjlock);
		if (pgs != pgs_onstack)
			free(pgs, M_DEVBUF);
		return (EBUSY);
	}

	/*
	 * if the pages are already resident, just return them.
	 */

	for (i = 0; i < npages; i++) {
		struct vm_page *pg1 = pgs[ridx + i];

		if ((pg1->flags & PG_FAKE) ||
		    (blockalloc && (pg1->flags & PG_RDONLY))) {
			break;
		}
	}
	if (i == npages) {
		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
		UVMHIST_LOG(ubchist, "returning cached pages", 0,0,0,0);
		npages += ridx;
		goto out;
	}

	/*
	 * if PGO_OVERWRITE is set, don't bother reading the pages.
	 */

	if (overwrite) {
		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
		UVMHIST_LOG(ubchist, "PGO_OVERWRITE",0,0,0,0);

		for (i = 0; i < npages; i++) {
			struct vm_page *pg1 = pgs[ridx + i];

			pg1->flags &= ~(PG_RDONLY|PG_CLEAN);
		}
		npages += ridx;
		goto out;
	}

	/*
	 * the page wasn't resident and we're not overwriting,
	 * so we're going to have to do some i/o.
	 * find any additional pages needed to cover the expanded range.
	 */

	npages = (endoffset - startoffset) >> PAGE_SHIFT;
	if (startoffset != origoffset || npages != orignpages) {

		/*
		 * we need to avoid deadlocks caused by locking
		 * additional pages at lower offsets than pages we
		 * already have locked.  unlock them all and start over.
		 */

		genfs_rel_pages(&pgs[ridx], orignpages);
		memset(pgs, 0, pgs_size);

		UVMHIST_LOG(ubchist, "reset npages start 0x%x end 0x%x",
		    startoffset, endoffset, 0,0);
		npgs = npages;
		if (uvn_findpages(uobj, startoffset, &npgs, pgs,
		    async ? UFP_NOWAIT : UFP_ALL) != npages) {
			lockmgr(&gp->g_glock, LK_RELEASE, NULL);
			KASSERT(async != 0);
			genfs_rel_pages(pgs, npages);
			simple_unlock(&uobj->vmobjlock);
			if (pgs != pgs_onstack)
				free(pgs, M_DEVBUF);
			return (EBUSY);
		}
	}
	simple_unlock(&uobj->vmobjlock);

	/*
	 * read the desired page(s).
	 */

	totalbytes = npages << PAGE_SHIFT;
	bytes = MIN(totalbytes, MAX(diskeof - startoffset, 0));
	tailbytes = totalbytes - bytes;
	skipbytes = 0;

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);

	mbp = getiobuf();
	mbp->b_bufsize = totalbytes;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY | B_READ | (async ? B_CALL | B_ASYNC : 0);
	mbp->b_iodone = (async ? uvm_aio_biodone : NULL);
	mbp->b_vp = vp;
	if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	/*
	 * if EOF is in the middle of the range, zero the part past EOF.
	 * if the page including EOF is not PG_FAKE, skip over it since
	 * in that case it has valid data that we need to preserve.
	 */

	if (tailbytes > 0) {
		size_t tailstart = bytes;

		if ((pgs[bytes >> PAGE_SHIFT]->flags & PG_FAKE) == 0) {
			tailstart = round_page(tailstart);
			tailbytes -= tailstart - bytes;
		}
		UVMHIST_LOG(ubchist, "tailbytes %p 0x%x 0x%x",
		    kva, tailstart, tailbytes,0);
		memset((void *)(kva + tailstart), 0, tailbytes);
	}

	/*
	 * now loop over the pages, reading as needed.
	 */

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {

		/*
		 * skip pages which don't need to be read.
		 */

		pidx = (offset - startoffset) >> PAGE_SHIFT;
		while ((pgs[pidx]->flags & PG_FAKE) == 0) {
			size_t b;

			KASSERT((offset & (PAGE_SIZE - 1)) == 0);
			if ((pgs[pidx]->flags & PG_RDONLY)) {
				sawhole = TRUE;
			}
			b = MIN(PAGE_SIZE, bytes);
			offset += b;
			bytes -= b;
			skipbytes += b;
			pidx++;
			UVMHIST_LOG(ubchist, "skipping, new offset 0x%x",
			    offset, 0,0,0);
			if (bytes == 0) {
				goto loopdone;
			}
		}

		/*
		 * bmap the file to find out the blkno to read from and
		 * how much we can read in one i/o.  if bmap returns an error,
		 * skip the rest of the top-level i/o.
		 */

		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP lbn 0x%x -> %d\n",
			    lbn, error,0,0);
			skipbytes += bytes;
			goto loopdone;
		}

		/*
		 * see how many pages can be read with this i/o.
		 * reduce the i/o size if necessary to avoid
		 * overwriting pages with valid data.
		 */

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (offset + iobytes > round_page(offset)) {
			pcount = 1;
			while (pidx + pcount < npages &&
			    pgs[pidx + pcount]->flags & PG_FAKE) {
				pcount++;
			}
			iobytes = MIN(iobytes, (pcount << PAGE_SHIFT) -
			    (offset - trunc_page(offset)));
		}

		/*
		 * if this block isn't allocated, zero it instead of
		 * reading it.  unless we are going to allocate blocks,
		 * mark the pages we zeroed PG_RDONLY.
		 */

		if (blkno < 0) {
			int holepages = (round_page(offset + iobytes) -
			    trunc_page(offset)) >> PAGE_SHIFT;
			UVMHIST_LOG(ubchist, "lbn 0x%x -> HOLE", lbn,0,0,0);

			sawhole = TRUE;
			memset((char *)kva + (offset - startoffset), 0,
			    iobytes);
			skipbytes += iobytes;

			for (i = 0; i < holepages; i++) {
				if (write) {
					pgs[pidx + i]->flags &= ~PG_CLEAN;
				}
				if (!blockalloc) {
					pgs[pidx + i]->flags |= PG_RDONLY;
				}
			}
			continue;
		}

		/*
		 * allocate a sub-buf for this piece of the i/o
		 * (or just use mbp if there's only 1 piece),
		 * and start it going.
		 */

		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			bp = getiobuf();
			nestiobuf_setup(mbp, bp, offset - startoffset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);

		UVMHIST_LOG(ubchist,
		    "bp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    bp, offset, iobytes, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}

loopdone:
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)",0,0,0,0);
		lockmgr(&gp->g_glock, LK_RELEASE, NULL);
		if (pgs != pgs_onstack)
			free(pgs, M_DEVBUF);
		return (0);
	}
	if (bp != NULL) {
		error = biowait(mbp);
	}
	putiobuf(mbp);
	uvm_pagermapout(kva, npages);

	/*
	 * if we encountered a hole then we have to do a little more work.
	 * for read faults, we marked the page PG_RDONLY so that future
	 * write accesses to the page will fault again.
	 * for write faults, we must make sure that the backing store for
	 * the page is completely allocated while the pages are locked.
	 */

	if (!error && sawhole && blockalloc) {
		error = GOP_ALLOC(vp, startoffset, npages << PAGE_SHIFT, 0,
		    cred);
		UVMHIST_LOG(ubchist, "gop_alloc off 0x%x/0x%x -> %d",
		    startoffset, npages << PAGE_SHIFT, error,0);
		if (!error) {
			for (i = 0; i < npages; i++) {
				if (pgs[i] == NULL) {
					continue;
				}
				pgs[i]->flags &= ~(PG_CLEAN|PG_RDONLY);
				UVMHIST_LOG(ubchist, "mark dirty pg %p",
				    pgs[i],0,0,0);
			}
		}
	}
	lockmgr(&gp->g_glock, LK_RELEASE, NULL);
	simple_lock(&uobj->vmobjlock);

	/*
	 * we're almost done!  release the pages...
	 * for errors, we free the pages.
	 * otherwise we activate them and mark them as valid and clean.
	 * also, unbusy pages that were not actually requested.
	 */

	if (error) {
		for (i = 0; i < npages; i++) {
			if (pgs[i] == NULL) {
				continue;
			}
			UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
			    pgs[i], pgs[i]->flags, 0,0);
			if (pgs[i]->flags & PG_FAKE) {
				pgs[i]->flags |= PG_RELEASED;
			}
		}
		uvm_lock_pageq();
		uvm_page_unbusy(pgs, npages);
		uvm_unlock_pageq();
		simple_unlock(&uobj->vmobjlock);
		UVMHIST_LOG(ubchist, "returning error %d", error,0,0,0);
		if (pgs != pgs_onstack)
			free(pgs, M_DEVBUF);
		return (error);
	}

out:
	UVMHIST_LOG(ubchist, "succeeding, npages %d", npages,0,0,0);
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (pg == NULL) {
			continue;
		}
		UVMHIST_LOG(ubchist, "examining pg %p flags 0x%x",
		    pg, pg->flags, 0,0);
		if (pg->flags & PG_FAKE && !overwrite) {
			pg->flags &= ~(PG_FAKE);
			pmap_clear_modify(pgs[i]);
		}
		KASSERT(!write || !blockalloc || (pg->flags & PG_RDONLY) == 0);
		if (i < ridx || i >= ridx + orignpages || async) {
			UVMHIST_LOG(ubchist, "unbusy pg %p offset 0x%x",
			    pg, pg->offset,0,0);
			if (pg->flags & PG_WANTED) {
				wakeup(pg);
			}
			if (pg->flags & PG_FAKE) {
				KASSERT(overwrite);
				uvm_pagezero(pg);
			}
			if (pg->flags & PG_RELEASED) {
				uvm_pagefree(pg);
				continue;
			}
			uvm_pageactivate(pg);
			pg->flags &= ~(PG_WANTED|PG_BUSY|PG_FAKE);
			UVM_PAGE_OWN(pg, NULL);
		}
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	if (ap->a_m != NULL) {
		memcpy(ap->a_m, &pgs[ridx],
		    orignpages * sizeof(struct vm_page *));
	}
	if (pgs != pgs_onstack)
		free(pgs, M_DEVBUF);
	return (0);
}

/*
 * generic VM putpages routine.
 * Write the given range of pages to backing store.
 *
 * => "offhi == 0" means flush all pages at or after "offlo".
 * => object should be locked by caller.   we may _unlock_ the object
 *	if (and only if) we need to clean a page (PGO_CLEANIT), or
 *	if PGO_SYNCIO is set and there are pages busy.
 *	we return with the object unlocked.
 * => if PGO_CLEANIT or PGO_SYNCIO is set, we may block (due to I/O).
 *	thus, a caller might want to unlock higher level resources
 *	(e.g. vm_map) before calling flush.
 * => if neither PGO_CLEANIT nor PGO_SYNCIO is set, then we will neither
 *	unlock the object mid-scan nor block.
 * => if PGO_ALLPAGES is set, then all pages in the object will be processed.
 * => NOTE: we rely on the fact that the object's memq is a TAILQ and
 *	that new pages are inserted on the tail end of the list.   thus,
 *	we can make a complete pass through the object in one go by starting
 *	at the head and working towards the tail (new pages are put in
 *	front of us).
 * => NOTE: we are allowed to lock the page queues, so the caller
 *	must not be holding the page queue lock.
 *
 * note on "cleaning" object and PG_BUSY pages:
 *	this routine is holding the lock on the object.   the only time
 *	that it can run into a PG_BUSY page that it does not own is if
 *	some other process has started I/O on the page (e.g. either
 *	a pagein, or a pageout).    if the PG_BUSY page is being paged
 *	in, then it can not be dirty (!PG_CLEAN) because no one has
 *	had a chance to modify it yet.    if the PG_BUSY page is being
 *	paged out then it means that someone else has already started
 *	cleaning the page for us (how nice!).    in this case, if we
 *	have syncio specified, then after we make our pass through the
 *	object we need to wait for the other PG_BUSY pages to clear
 *	off (i.e. we need to do an iosync).   also note that once a
 *	page is PG_BUSY it must stay in its object until it is un-busied.
 *
 * note on page traversal:
 *	we can traverse the pages in an object either by going down the
 *	linked list in "uobj->memq", or we can go over the address range
 *	page by page, doing hash table lookups for each address.    depending
 *	on how many pages are in the object it may be cheaper to do one
 *	or the other.   we set "by_list" to true if we are using memq.
 *	if the cost of a hash lookup was equal to the cost of the list
 *	traversal we could compare the number of pages in the start->stop
 *	range to the total number of pages in the object.   however, it
 *	seems that a hash table lookup is more expensive than the linked
 *	list traversal, so we multiply the number of pages in the
 *	range by an estimate of the relatively higher cost of the hash lookup.
 */
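
/*
 * Example (sketch): flush and free all of a vnode's pages
 * synchronously, as a reclaim path might.  The object lock is passed
 * in held and has been released by the time this returns:
 *
 *	simple_lock(&vp->v_interlock);
 *	error = VOP_PUTPAGES(vp, 0, 0,
 *	    PGO_ALLPAGES | PGO_CLEANIT | PGO_FREE | PGO_SYNCIO);
 */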

int
genfs_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct simplelock *slock = &uobj->vmobjlock;
	off_t startoff = ap->a_offlo;
	off_t endoff = ap->a_offhi;
	off_t off;
	int flags = ap->a_flags;
	/* Even for strange MAXPHYS, the shift rounds down to a page */
	const int maxpages = MAXPHYS >> PAGE_SHIFT;
	int i, s, error, npages, nback;
	int freeflag;
	struct vm_page *pgs[maxpages], *pg, *nextpg, *tpg, curmp, endmp;
	boolean_t wasclean, by_list, needs_clean, yld;
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	boolean_t pagedaemon = curproc == uvm.pagedaemon_proc;
	struct lwp *l = curlwp ? curlwp : &lwp0;
	struct genfs_node *gp = VTOG(vp);
	int dirtygen;
	boolean_t modified = FALSE;
	boolean_t cleanall;

	UVMHIST_FUNC("genfs_putpages"); UVMHIST_CALLED(ubchist);

	KASSERT(flags & (PGO_CLEANIT|PGO_FREE|PGO_DEACTIVATE));
	KASSERT((startoff & PAGE_MASK) == 0 && (endoff & PAGE_MASK) == 0);
	KASSERT(startoff < endoff || endoff == 0);

	UVMHIST_LOG(ubchist, "vp %p pages %d off 0x%x len 0x%x",
	    vp, uobj->uo_npages, startoff, endoff - startoff);

	KASSERT((vp->v_flag & VONWORKLST) != 0 ||
	    (vp->v_flag & VWRITEMAPDIRTY) == 0);
	if (uobj->uo_npages == 0) {
		s = splbio();
		if (vp->v_flag & VONWORKLST) {
			vp->v_flag &= ~VWRITEMAPDIRTY;
			if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
				vp->v_flag &= ~VONWORKLST;
				LIST_REMOVE(vp, v_synclist);
			}
		}
		splx(s);
		simple_unlock(slock);
		return (0);
	}

	/*
	 * the vnode has pages, set up to process the request.
	 */

	error = 0;
	s = splbio();
	simple_lock(&global_v_numoutput_slock);
	wasclean = (vp->v_numoutput == 0);
	simple_unlock(&global_v_numoutput_slock);
	splx(s);
	off = startoff;
	if (endoff == 0 || flags & PGO_ALLPAGES) {
		endoff = trunc_page(LLONG_MAX);
	}
	by_list = (uobj->uo_npages <=
	    ((endoff - startoff) >> PAGE_SHIFT) * UVM_PAGE_HASH_PENALTY);

#if !defined(DEBUG)
	/*
	 * if this vnode is known not to have dirty pages,
	 * don't bother to clean it out.
	 */

	if ((vp->v_flag & VONWORKLST) == 0) {
		if ((flags & (PGO_FREE|PGO_DEACTIVATE)) == 0) {
			goto skip_scan;
		}
		flags &= ~PGO_CLEANIT;
	}
#endif /* !defined(DEBUG) */

	/*
	 * start the loop.  when scanning by list, hold the last page
	 * in the list before we start.  pages allocated after we start
	 * will be added to the end of the list, so we can stop at the
	 * current last page.
	 */

	cleanall = (flags & PGO_CLEANIT) != 0 && wasclean &&
	    startoff == 0 && endoff == trunc_page(LLONG_MAX) &&
	    (vp->v_flag & VONWORKLST) != 0;
	dirtygen = gp->g_dirtygen;
	freeflag = pagedaemon ? PG_PAGEOUT : PG_RELEASED;
	if (by_list) {
		curmp.uobject = uobj;
		curmp.offset = (voff_t)-1;
		curmp.flags = PG_BUSY;
		endmp.uobject = uobj;
		endmp.offset = (voff_t)-1;
		endmp.flags = PG_BUSY;
		pg = TAILQ_FIRST(&uobj->memq);
		TAILQ_INSERT_TAIL(&uobj->memq, &endmp, listq);
		PHOLD(l);
	} else {
		pg = uvm_pagelookup(uobj, off);
	}
	nextpg = NULL;
	while (by_list || off < endoff) {

		/*
		 * if the current page is not interesting, move on to the next.
		 */

		KASSERT(pg == NULL || pg->uobject == uobj);
		KASSERT(pg == NULL ||
		    (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
		    (pg->flags & PG_BUSY) != 0);
		if (by_list) {
			if (pg == &endmp) {
				break;
			}
			if (pg->offset < startoff || pg->offset >= endoff ||
			    pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
				if (pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
					wasclean = FALSE;
				}
				pg = TAILQ_NEXT(pg, listq);
				continue;
			}
			off = pg->offset;
		} else if (pg == NULL || pg->flags & (PG_RELEASED|PG_PAGEOUT)) {
			if (pg != NULL) {
				wasclean = FALSE;
			}
			off += PAGE_SIZE;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if the current page needs to be cleaned and it's busy,
		 * wait for it to become unbusy.
		 */

		yld = (l->l_cpu->ci_schedstate.spc_flags &
		    SPCF_SHOULDYIELD) && !pagedaemon;
		if (pg->flags & PG_BUSY || yld) {
			UVMHIST_LOG(ubchist, "busy %p", pg,0,0,0);
			if (flags & PGO_BUSYFAIL && pg->flags & PG_BUSY) {
				UVMHIST_LOG(ubchist, "busyfail %p", pg, 0,0,0);
				error = EDEADLK;
				break;
			}
			KASSERT(!pagedaemon);
			if (by_list) {
				TAILQ_INSERT_BEFORE(pg, &curmp, listq);
				UVMHIST_LOG(ubchist, "curmp next %p",
				    TAILQ_NEXT(&curmp, listq), 0,0,0);
			}
			if (yld) {
				simple_unlock(slock);
				preempt(1);
				simple_lock(slock);
			} else {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, slock, 0, "genput", 0);
				simple_lock(slock);
			}
			if (by_list) {
				UVMHIST_LOG(ubchist, "after next %p",
				    TAILQ_NEXT(&curmp, listq), 0,0,0);
				pg = TAILQ_NEXT(&curmp, listq);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
			} else {
				pg = uvm_pagelookup(uobj, off);
			}
			continue;
		}

		/*
		 * if we're freeing, remove all mappings of the page now.
		 * if we're cleaning, check if the page needs to be cleaned.
		 */

		if (flags & PGO_FREE) {
			pmap_page_protect(pg, VM_PROT_NONE);
		} else if (flags & PGO_CLEANIT) {

			/*
			 * if we still have some hope to pull this vnode off
			 * from the syncer queue, write-protect the page.
			 */

			if (cleanall && wasclean &&
			    gp->g_dirtygen == dirtygen) {

				/*
				 * uobj pages get wired only by uvm_fault
				 * where uobj is locked.
				 */

				if (pg->wire_count == 0) {
					pmap_page_protect(pg,
					    VM_PROT_READ|VM_PROT_EXECUTE);
				} else {
					cleanall = FALSE;
				}
			}
		}

		if (flags & PGO_CLEANIT) {
			needs_clean = pmap_clear_modify(pg) ||
			    (pg->flags & PG_CLEAN) == 0;
			pg->flags |= PG_CLEAN;
		} else {
			needs_clean = FALSE;
		}

		/*
		 * if we're cleaning, build a cluster.
		 * the cluster will consist of pages which are currently dirty,
		 * but they will be returned to us marked clean.
		 * if not cleaning, just operate on the one page.
		 */

		if (needs_clean) {
			KDASSERT((vp->v_flag & VONWORKLST));
			wasclean = FALSE;
			memset(pgs, 0, sizeof(pgs));
			pg->flags |= PG_BUSY;
			UVM_PAGE_OWN(pg, "genfs_putpages");

			/*
			 * first look backward.
			 */

			npages = MIN(maxpages >> 1, off >> PAGE_SHIFT);
			nback = npages;
			uvn_findpages(uobj, off - PAGE_SIZE, &nback, &pgs[0],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY|UFP_BACKWARD);
			if (nback) {
				memmove(&pgs[0], &pgs[npages - nback],
				    nback * sizeof(pgs[0]));
				if (npages - nback < nback)
					memset(&pgs[nback], 0,
					    (npages - nback) * sizeof(pgs[0]));
				else
					memset(&pgs[npages - nback], 0,
					    nback * sizeof(pgs[0]));
			}

			/*
			 * then plug in our page of interest.
			 */

			pgs[nback] = pg;

			/*
			 * then look forward to fill in the remaining space in
			 * the array of pages.
			 */

			npages = maxpages - nback - 1;
			uvn_findpages(uobj, off + PAGE_SIZE, &npages,
			    &pgs[nback + 1],
			    UFP_NOWAIT|UFP_NOALLOC|UFP_DIRTYONLY);
			npages += nback + 1;
		} else {
			pgs[0] = pg;
			npages = 1;
			nback = 0;
		}

		/*
		 * apply FREE or DEACTIVATE options if requested.
		 */

		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			uvm_lock_pageq();
		}
		for (i = 0; i < npages; i++) {
			tpg = pgs[i];
			KASSERT(tpg->uobject == uobj);
			if (by_list && tpg == TAILQ_NEXT(pg, listq))
				pg = tpg;
			if (tpg->offset < startoff || tpg->offset >= endoff)
				continue;
			if (flags & PGO_DEACTIVATE &&
			    (tpg->pqflags & PQ_INACTIVE) == 0 &&
			    tpg->wire_count == 0) {
				(void) pmap_clear_reference(tpg);
				uvm_pagedeactivate(tpg);
			} else if (flags & PGO_FREE) {
				pmap_page_protect(tpg, VM_PROT_NONE);
				if (tpg->flags & PG_BUSY) {
					tpg->flags |= freeflag;
					if (pagedaemon) {
						uvmexp.paging++;
						uvm_pagedequeue(tpg);
					}
				} else {

					/*
					 * ``page is not busy''
					 * implies that npages is 1
					 * and needs_clean is false.
					 */

					nextpg = TAILQ_NEXT(tpg, listq);
					uvm_pagefree(tpg);
					if (pagedaemon)
						uvmexp.pdfreed++;
				}
			}
		}
		if (flags & (PGO_DEACTIVATE|PGO_FREE)) {
			uvm_unlock_pageq();
		}
		if (needs_clean) {
			modified = TRUE;

			/*
			 * start the i/o.  if we're traversing by list,
			 * keep our place in the list with a marker page.
			 */

			if (by_list) {
				TAILQ_INSERT_AFTER(&uobj->memq, pg, &curmp,
				    listq);
			}
			simple_unlock(slock);
			error = GOP_WRITE(vp, pgs, npages, flags);
			simple_lock(slock);
			if (by_list) {
				pg = TAILQ_NEXT(&curmp, listq);
				TAILQ_REMOVE(&uobj->memq, &curmp, listq);
			}
			if (error) {
				break;
			}
			if (by_list) {
				continue;
			}
		}

		/*
		 * find the next page and continue if there was no error.
		 */

		if (by_list) {
			if (nextpg) {
				pg = nextpg;
				nextpg = NULL;
			} else {
				pg = TAILQ_NEXT(pg, listq);
			}
		} else {
			off += (npages - nback) << PAGE_SHIFT;
			if (off < endoff) {
				pg = uvm_pagelookup(uobj, off);
			}
		}
	}
	if (by_list) {
		TAILQ_REMOVE(&uobj->memq, &endmp, listq);
		PRELE(l);
	}

	if (modified && (vp->v_flag & VWRITEMAPDIRTY) != 0 &&
	    (vp->v_type != VBLK ||
	    (vp->v_mount->mnt_flag & MNT_NODEVMTIME) == 0)) {
		GOP_MARKUPDATE(vp, GOP_UPDATE_MODIFIED);
	}

	/*
	 * if we're cleaning and there was nothing to clean,
	 * take us off the syncer list.  if we started any i/o
	 * and we're doing sync i/o, wait for all writes to finish.
	 */

	s = splbio();
	if (cleanall && wasclean && gp->g_dirtygen == dirtygen &&
	    (vp->v_flag & VONWORKLST) != 0) {
		vp->v_flag &= ~VWRITEMAPDIRTY;
		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL) {
			vp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(vp, v_synclist);
		}
	}
	splx(s);

#if !defined(DEBUG)
skip_scan:
#endif /* !defined(DEBUG) */
	if (!wasclean && !async) {
		s = splbio();
		/*
		 * XXX - we want simple_unlock(&global_v_numoutput_slock);
		 *	 but the slot in ltsleep() is taken!
		 * XXX - try to recover from missed wakeups with a timeout..
		 *	 must think of something better.
		 */
		while (vp->v_numoutput != 0) {
			vp->v_flag |= VBWAIT;
			UVM_UNLOCK_AND_WAIT(&vp->v_numoutput, slock, FALSE,
			    "genput2", hz);
			simple_lock(slock);
		}
		splx(s);
	}
	simple_unlock(&uobj->vmobjlock);
	return (error);
}

int
genfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
{
	int s, error, run;
	int fs_bshift, dev_bshift;
	vaddr_t kva;
	off_t eof, offset, startoffset;
	size_t bytes, iobytes, skipbytes;
	daddr_t lbn, blkno;
	struct vm_page *pg;
	struct buf *mbp, *bp;
	struct vnode *devvp;
	boolean_t async = (flags & PGO_SYNCIO) == 0;
	UVMHIST_FUNC("genfs_gop_write"); UVMHIST_CALLED(ubchist);

	UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
	    vp, pgs, npages, flags);

	GOP_SIZE(vp, vp->v_size, &eof, 0);
	if (vp->v_type != VBLK) {
		fs_bshift = vp->v_mount->mnt_fs_bshift;
		dev_bshift = vp->v_mount->mnt_dev_bshift;
	} else {
		fs_bshift = DEV_BSHIFT;
		dev_bshift = DEV_BSHIFT;
	}
	error = 0;
	pg = pgs[0];
	startoffset = pg->offset;
	bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
	skipbytes = 0;
	KASSERT(bytes != 0);

	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

	s = splbio();
	simple_lock(&global_v_numoutput_slock);
	vp->v_numoutput += 2;
	simple_unlock(&global_v_numoutput_slock);
	splx(s);
	mbp = getiobuf();
	UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
	    vp, mbp, vp->v_numoutput, bytes);
	mbp->b_bufsize = npages << PAGE_SHIFT;
	mbp->b_data = (void *)kva;
	mbp->b_resid = mbp->b_bcount = bytes;
	mbp->b_flags = B_BUSY|B_WRITE|B_AGE | (async ? (B_CALL|B_ASYNC) : 0);
	mbp->b_iodone = uvm_aio_biodone;
	mbp->b_vp = vp;
	if (curproc == uvm.pagedaemon_proc)
		BIO_SETPRIO(mbp, BPRIO_TIMELIMITED);
	else if (async)
		BIO_SETPRIO(mbp, BPRIO_TIMENONCRITICAL);
	else
		BIO_SETPRIO(mbp, BPRIO_TIMECRITICAL);

	bp = NULL;
	for (offset = startoffset;
	    bytes > 0;
	    offset += iobytes, bytes -= iobytes) {
		lbn = offset >> fs_bshift;
		error = VOP_BMAP(vp, lbn, &devvp, &blkno, &run);
		if (error) {
			UVMHIST_LOG(ubchist, "VOP_BMAP() -> %d", error,0,0,0);
			skipbytes += bytes;
			bytes = 0;
			break;
		}

		iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
		    bytes);
		if (blkno == (daddr_t)-1) {
			skipbytes += iobytes;
			continue;
		}

		/* if it's really one i/o, don't make a second buf */
		if (offset == startoffset && iobytes == bytes) {
			bp = mbp;
		} else {
			UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
			    vp, bp, vp->v_numoutput, 0);
			bp = getiobuf();
			nestiobuf_setup(mbp, bp, offset - pg->offset, iobytes);
		}
		bp->b_lblkno = 0;

		/* adjust physical blkno for partial blocks */
		bp->b_blkno = blkno + ((offset - ((off_t)lbn << fs_bshift)) >>
		    dev_bshift);
		UVMHIST_LOG(ubchist,
		    "vp %p offset 0x%x bcount 0x%x blkno 0x%x",
		    vp, offset, bp->b_bcount, bp->b_blkno);

		VOP_STRATEGY(devvp, bp);
	}
	if (skipbytes) {
		UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
	}
	nestiobuf_done(mbp, skipbytes, error);
	if (async) {
		UVMHIST_LOG(ubchist, "returning 0 (async)", 0,0,0,0);
		return (0);
	}
	UVMHIST_LOG(ubchist, "waiting for mbp %p", mbp,0,0,0);
	error = biowait(mbp);
	uvm_aio_aiodone(mbp);
	UVMHIST_LOG(ubchist, "returning, error %d", error,0,0,0);
	return (error);
}

/*
 * VOP_PUTPAGES() for vnodes which never have pages.
 */

int
genfs_null_putpages(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	KASSERT(vp->v_uobj.uo_npages == 0);
	simple_unlock(&vp->v_interlock);
	return (0);
}

void
genfs_node_init(struct vnode *vp, const struct genfs_ops *ops)
{
	struct genfs_node *gp = VTOG(vp);

	lockinit(&gp->g_glock, PINOD, "glock", 0, 0);
	gp->g_op = ops;
}
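
/*
 * Example (sketch, hypothetical "myfs"): a filesystem installs its
 * genfs_ops vector when it sets up a vnode, often reusing the generic
 * implementations such as genfs_gop_write and genfs_size:
 *
 *	static const struct genfs_ops myfs_genfsops = {
 *		.gop_size = genfs_size,
 *		.gop_alloc = myfs_gop_alloc,
 *		.gop_write = genfs_gop_write,
 *		.gop_markupdate = myfs_gop_markupdate,
 *	};
 *
 *	genfs_node_init(vp, &myfs_genfsops);
 */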

/*
 * Generic gop_size: round "size" up to the end of its filesystem
 * block; e.g. with 8KB blocks, a size of 5KB yields 8KB.  The
 * "flags" argument (GOP_SIZE_*) is not used by this implementation.
 */
void
genfs_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
{
	int bsize;

	bsize = 1 << vp->v_mount->mnt_fs_bshift;
	*eobp = (size + bsize - 1) & ~(bsize - 1);
}

int
genfs_compat_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	off_t origoffset;
	struct vnode *vp = ap->a_vp;
	struct uvm_object *uobj = &vp->v_uobj;
	struct vm_page *pg, **pgs;
	vaddr_t kva;
	int i, error, orignpages, npages;
	struct iovec iov;
	struct uio uio;
	kauth_cred_t cred = curproc->p_cred;
	boolean_t write = (ap->a_access_type & VM_PROT_WRITE) != 0;

	error = 0;
	origoffset = ap->a_offset;
	orignpages = *ap->a_count;
	pgs = ap->a_m;

	if (write && (vp->v_flag & VONWORKLST) == 0) {
		vn_syncer_add_to_worklist(vp, filedelay);
	}
	if (ap->a_flags & PGO_LOCKED) {
		uvn_findpages(uobj, origoffset, ap->a_count, ap->a_m,
		    UFP_NOWAIT | UFP_NOALLOC | (write ? UFP_NORDONLY : 0));

		return (ap->a_m[ap->a_centeridx] == NULL ? EBUSY : 0);
	}
	if (origoffset + (ap->a_centeridx << PAGE_SHIFT) >= vp->v_size) {
		simple_unlock(&uobj->vmobjlock);
		return (EINVAL);
	}
	if ((ap->a_flags & PGO_SYNCIO) == 0) {
		simple_unlock(&uobj->vmobjlock);
		return (0);
	}
	npages = orignpages;
	uvn_findpages(uobj, origoffset, &npages, pgs, UFP_ALL);
	simple_unlock(&uobj->vmobjlock);
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_READ | UVMPAGER_MAPIN_WAITOK);
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if ((pg->flags & PG_FAKE) == 0) {
			continue;
		}
		iov.iov_base = (char *)kva + (i << PAGE_SHIFT);
		iov.iov_len = PAGE_SIZE;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = origoffset + (i << PAGE_SHIFT);
		uio.uio_rw = UIO_READ;
		uio.uio_resid = PAGE_SIZE;
		UIO_SETUP_SYSSPACE(&uio);
		/* XXX vn_lock */
		error = VOP_READ(vp, &uio, 0, cred);
		if (error) {
			break;
		}
		if (uio.uio_resid) {
			memset(iov.iov_base, 0, uio.uio_resid);
		}
	}
	uvm_pagermapout(kva, npages);
	simple_lock(&uobj->vmobjlock);
	uvm_lock_pageq();
	for (i = 0; i < npages; i++) {
		pg = pgs[i];
		if (error && (pg->flags & PG_FAKE) != 0) {
			pg->flags |= PG_RELEASED;
		} else {
			pmap_clear_modify(pg);
			uvm_pageactivate(pg);
		}
	}
	if (error) {
		uvm_page_unbusy(pgs, npages);
	}
	uvm_unlock_pageq();
	simple_unlock(&uobj->vmobjlock);
	return (error);
}

int
genfs_compat_gop_write(struct vnode *vp, struct vm_page **pgs, int npages,
    int flags)
{
	off_t offset;
	struct iovec iov;
	struct uio uio;
	kauth_cred_t cred = curproc->p_cred;
	struct buf *bp;
	vaddr_t kva;
	int s, error;

	offset = pgs[0]->offset;
	kva = uvm_pagermapin(pgs, npages,
	    UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);

	iov.iov_base = (void *)kva;
	iov.iov_len = npages << PAGE_SHIFT;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = offset;
	uio.uio_rw = UIO_WRITE;
	uio.uio_resid = npages << PAGE_SHIFT;
	UIO_SETUP_SYSSPACE(&uio);
	/* XXX vn_lock */
	error = VOP_WRITE(vp, &uio, 0, cred);

	s = splbio();
	V_INCR_NUMOUTPUT(vp);
	splx(s);

	bp = getiobuf();
	bp->b_flags = B_BUSY | B_WRITE | B_AGE;
	bp->b_vp = vp;
	bp->b_lblkno = offset >> vp->v_mount->mnt_fs_bshift;
	bp->b_data = (char *)kva;
	bp->b_bcount = npages << PAGE_SHIFT;
	bp->b_bufsize = npages << PAGE_SHIFT;
	bp->b_resid = 0;
	if (error) {
		bp->b_flags |= B_ERROR;
		bp->b_error = error;
	}
	uvm_aio_aiodone(bp);
	return (error);
}

static void
filt_genfsdetach(struct knote *kn)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	/* XXXLUKEM lock the struct? */
	SLIST_REMOVE(&vp->v_klist, kn, knote, kn_selnext);
}

static int
filt_genfsread(struct knote *kn, long hint)
{
	struct vnode *vp = (struct vnode *)kn->kn_hook;

	/*
	 * filesystem is gone, so set the EOF flag and schedule
	 * the knote for deletion.
	 */
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
		return (1);
	}

	/* XXXLUKEM lock the struct? */
	kn->kn_data = vp->v_size - kn->kn_fp->f_offset;
	return (kn->kn_data != 0);
}

static int
filt_genfsvnode(struct knote *kn, long hint)
{

	if (kn->kn_sfflags & hint)
		kn->kn_fflags |= hint;
	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= EV_EOF;
		return (1);
	}
	return (kn->kn_fflags != 0);
}

static const struct filterops genfsread_filtops =
	{ 1, NULL, filt_genfsdetach, filt_genfsread };
static const struct filterops genfsvnode_filtops =
	{ 1, NULL, filt_genfsdetach, filt_genfsvnode };

int
genfs_kqfilter(void *v)
{
	struct vop_kqfilter_args /* {
		struct vnode	*a_vp;
		struct knote	*a_kn;
	} */ *ap = v;
	struct vnode *vp;
	struct knote *kn;

	vp = ap->a_vp;
	kn = ap->a_kn;
	switch (kn->kn_filter) {
	case EVFILT_READ:
		kn->kn_fop = &genfsread_filtops;
		break;
	case EVFILT_VNODE:
		kn->kn_fop = &genfsvnode_filtops;
		break;
	default:
		return (1);
	}

	kn->kn_hook = vp;

	/* XXXLUKEM lock the struct? */
	SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);

	return (0);
}