      1 /*	$NetBSD: vfs_vnode.c,v 1.105.2.9 2020/02/29 20:21:03 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 /*
     34  * Copyright (c) 1989, 1993
     35  *	The Regents of the University of California.  All rights reserved.
     36  * (c) UNIX System Laboratories, Inc.
     37  * All or some portions of this file are derived from material licensed
     38  * to the University of California by American Telephone and Telegraph
     39  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     40  * the permission of UNIX System Laboratories, Inc.
     41  *
     42  * Redistribution and use in source and binary forms, with or without
     43  * modification, are permitted provided that the following conditions
     44  * are met:
     45  * 1. Redistributions of source code must retain the above copyright
     46  *    notice, this list of conditions and the following disclaimer.
     47  * 2. Redistributions in binary form must reproduce the above copyright
     48  *    notice, this list of conditions and the following disclaimer in the
     49  *    documentation and/or other materials provided with the distribution.
     50  * 3. Neither the name of the University nor the names of its contributors
     51  *    may be used to endorse or promote products derived from this software
     52  *    without specific prior written permission.
     53  *
     54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     64  * SUCH DAMAGE.
     65  *
     66  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
     67  */
     68 
     69 /*
     70  * The vnode cache subsystem.
     71  *
     72  * Life-cycle
     73  *
     74  *	Normally, there are two points where new vnodes are created:
     75  *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
     76  *	starts in one of the following ways:
     77  *
     78  *	- Allocation, via vcache_get(9) or vcache_new(9).
     79  *	- Reclamation of an inactive vnode, via vcache_vget(9).
     80  *
     81  *	Recycling from a free list, via getnewvnode(9) ->
     82  *	getcleanvnode(9), was another, traditional way.  Currently only
     83  *	the draining thread recycles vnodes; this behaviour might be revisited.
     84  *
     85  *	The life-cycle ends when the last reference is dropped, usually
     86  *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
     87  *	the file system that the vnode is inactive.  Via this call, the file
     88  *	system indicates whether the vnode can be recycled (usually by checking
     89  *	its own state, e.g. the link count or whether the file was removed).
     90  *
     91  *	Depending on that indication, the vnode can be put onto a free
     92  *	list (cache), or cleaned via vcache_reclaim(), which calls
     93  *	VOP_RECLAIM(9) to disassociate the underlying file system from
     94  *	the vnode, and is finally destroyed.
     95  *
     96  * Vnode state
     97  *
     98  *	A vnode is always in one of six states:
     99  *	- MARKER	This is a marker vnode to help list traversal.  It
    100  *			will never change its state.
    101  *	- LOADING	Vnode is associating with the underlying file
    102  *			system and is not yet ready to use.
    103  *	- LOADED	Vnode has associated with the underlying file
    104  *			system and is ready to use.
    105  *	- BLOCKED	Vnode is active but cannot get new references.
    106  *	- RECLAIMING	Vnode is disassociating from the underlying file
    107  *			system.
    108  *	- RECLAIMED	Vnode has disassociated from the underlying file
    109  *			system and is dead.
    110  *
    111  *	Valid state changes are:
    112  *	LOADING -> LOADED
    113  *			Vnode has been initialised in vcache_get() or
    114  *			vcache_new() and is ready to use.
    115  *	LOADED -> RECLAIMING
    116  *			Vnode starts disassociation from underlying file
    117  *			system in vcache_reclaim().
    118  *	RECLAIMING -> RECLAIMED
    119  *			Vnode finished disassociation from underlying file
    120  *			system in vcache_reclaim().
    121  *	LOADED -> BLOCKED
    122  *			vcache_rekey*() is changing the vnode key.
    123  *	BLOCKED -> LOADED
    124  *			The block condition is over.
    125  *	LOADING -> RECLAIMED
    126  *			Either vcache_get() or vcache_new() failed to
    127  *			associate the underlying file system or vcache_rekey*()
    128  *			drops a vnode used as a placeholder.
    129  *
    130  *	Of these states, LOADING, BLOCKED and RECLAIMING are intermediate,
    131  *	and it is possible to wait for a state change.
    132  *
    133  *	State is protected with v_interlock, with one exception:
    134  *	to change from LOADING both v_interlock and vcache_lock must be
    135  *	held, so it is possible to check "state == LOADING" while holding
    136  *	only vcache_lock.  See vcache_get() for details.
    137  *
    138  * Reference counting
    139  *
    140  *	A vnode is considered active if its reference count
    141  *	(vnode_t::v_usecount) is non-zero.  The count is maintained using
    142  *	the vref(9), vrele(9) and vput(9) routines.  Common reference
    143  *	holders are e.g. open files, current working directories and mount points.
    144  *
    145  * Note on v_usecount and its locking
    146  *
    147  *	At nearly all points where it is known that v_usecount could be
    148  *	zero, vnode_t::v_interlock will be held.  To change the count away
    149  *	from zero, the interlock must be held.  To change from a non-zero
    150  *	value to zero, again the interlock must be held.
    151  *
    152  *	Changing the usecount from a non-zero value to a non-zero value can
    153  *	safely be done using atomic operations, without the interlock held.
    154  */
    155 
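/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): the consumer's view of the life-cycle and reference rules
 * described above.  A file system would typically obtain a referenced
 * vnode with vcache_get(), lock it before issuing VOP calls, and drop
 * both lock and reference with vput().  The inode-number key layout is
 * hypothetical; vcache_get(), vn_lock() and vput() are the real
 * interfaces defined in this file and in vfs(9).
 */
#if 0	/* example only */
static int
example_use(struct mount *mp, uint64_t ino)
{
	struct vnode *vp;
	int error;

	/* Get a referenced, LOADED vnode for this key, loading if needed. */
	error = vcache_get(mp, &ino, sizeof(ino), &vp);
	if (error != 0)
		return error;

	/* Lock before issuing VOP_*() calls. */
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	/* ... VOP_GETATTR(), VOP_READ(), etc. would go here ... */

	/* vput() unlocks the vnode and releases our reference. */
	vput(vp);
	return 0;
}
#endif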
    156 #include <sys/cdefs.h>
    157 __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.105.2.9 2020/02/29 20:21:03 ad Exp $");
    158 
    159 #ifdef _KERNEL_OPT
    160 #include "opt_pax.h"
    161 #endif
    162 
    163 #include <sys/param.h>
    164 #include <sys/kernel.h>
    165 
    166 #include <sys/atomic.h>
    167 #include <sys/buf.h>
    168 #include <sys/conf.h>
    169 #include <sys/device.h>
    170 #include <sys/hash.h>
    171 #include <sys/kauth.h>
    172 #include <sys/kmem.h>
    173 #include <sys/kthread.h>
    174 #include <sys/module.h>
    175 #include <sys/mount.h>
    176 #include <sys/namei.h>
    177 #include <sys/pax.h>
    178 #include <sys/syscallargs.h>
    179 #include <sys/sysctl.h>
    180 #include <sys/systm.h>
    181 #include <sys/vnode_impl.h>
    182 #include <sys/wapbl.h>
    183 #include <sys/fstrans.h>
    184 
    185 #include <uvm/uvm.h>
    186 #include <uvm/uvm_readahead.h>
    187 #include <uvm/uvm_stat.h>
    188 
    189 /* Flags to vrelel. */
    190 #define	VRELEL_ASYNC	0x0001	/* Always defer to vrele thread. */
    191 
    192 #define	LRU_VRELE	0
    193 #define	LRU_FREE	1
    194 #define	LRU_HOLD	2
    195 #define	LRU_COUNT	3
    196 
    197 /*
    198  * There are three lru lists: one holds vnodes waiting for async release,
    199  * one is for vnodes which have no buffer/page references and one for those
    200  * which do (i.e.  v_holdcnt is non-zero).  We put the lists into a single,
    201  * private cache line as vnodes migrate between them while under the same
    202  * lock (vdrain_lock).
    203  */
    204 u_int			numvnodes		__cacheline_aligned;
    205 static vnodelst_t	lru_list[LRU_COUNT]	__cacheline_aligned;
    206 static kmutex_t		vdrain_lock		__cacheline_aligned;
    207 static kcondvar_t	vdrain_cv;
    208 static int		vdrain_gen;
    209 static kcondvar_t	vdrain_gen_cv;
    210 static bool		vdrain_retry;
    211 static lwp_t *		vdrain_lwp;
    212 SLIST_HEAD(hashhead, vnode_impl);
    213 static kmutex_t		vcache_lock		__cacheline_aligned;
    214 static kcondvar_t	vcache_cv;
    215 static u_int		vcache_hashsize;
    216 static u_long		vcache_hashmask;
    217 static struct hashhead	*vcache_hashtab;
    218 static pool_cache_t	vcache_pool;
    219 static void		lru_requeue(vnode_t *, vnodelst_t *);
    220 static vnodelst_t *	lru_which(vnode_t *);
    221 static vnode_impl_t *	vcache_alloc(void);
    222 static void		vcache_dealloc(vnode_impl_t *);
    223 static void		vcache_free(vnode_impl_t *);
    224 static void		vcache_init(void);
    225 static void		vcache_reinit(void);
    226 static void		vcache_reclaim(vnode_t *);
    227 static void		vrelel(vnode_t *, int, int);
    228 static void		vdrain_thread(void *);
    229 static void		vnpanic(vnode_t *, const char *, ...)
    230     __printflike(2, 3);
    231 
    232 /* Routines having to do with the management of the vnode table. */
    233 extern struct mount	*dead_rootmount;
    234 extern int		(**dead_vnodeop_p)(void *);
    235 extern int		(**spec_vnodeop_p)(void *);
    236 extern struct vfsops	dead_vfsops;
    237 
    238 /* Vnode state operations and diagnostics. */
    239 
    240 #if defined(DIAGNOSTIC)
    241 
    242 #define VSTATE_VALID(state) \
    243 	((state) != VS_ACTIVE && (state) != VS_MARKER)
    244 #define VSTATE_GET(vp) \
    245 	vstate_assert_get((vp), __func__, __LINE__)
    246 #define VSTATE_CHANGE(vp, from, to) \
    247 	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
    248 #define VSTATE_WAIT_STABLE(vp) \
    249 	vstate_assert_wait_stable((vp), __func__, __LINE__)
    250 
    251 void
    252 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    253     bool has_lock)
    254 {
    255 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    256 
    257 	if (!has_lock) {
    258 		/*
    259 		 * Prevent predictive loads from the CPU, but check the state
    260 		 * without locking first.
    261 		 */
    262 		membar_enter();
    263 		if (state == VS_ACTIVE && vp->v_usecount > 0 &&
    264 		    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
    265 			return;
    266 		if (vip->vi_state == state)
    267 			return;
    268 		mutex_enter((vp)->v_interlock);
    269 	}
    270 
    271 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    272 
    273 	if ((state == VS_ACTIVE && vp->v_usecount > 0 &&
    274 	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
    275 	    vip->vi_state == state) {
    276 		if (!has_lock)
    277 			mutex_exit((vp)->v_interlock);
    278 		return;
    279 	}
    280 	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
    281 	    vstate_name(vip->vi_state), vp->v_usecount,
    282 	    vstate_name(state), func, line);
    283 }
    284 
    285 static enum vnode_state
    286 vstate_assert_get(vnode_t *vp, const char *func, int line)
    287 {
    288 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    289 
    290 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    291 	if (! VSTATE_VALID(vip->vi_state))
    292 		vnpanic(vp, "state is %s at %s:%d",
    293 		    vstate_name(vip->vi_state), func, line);
    294 
    295 	return vip->vi_state;
    296 }
    297 
    298 static void
    299 vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
    300 {
    301 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    302 
    303 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    304 	if (! VSTATE_VALID(vip->vi_state))
    305 		vnpanic(vp, "state is %s at %s:%d",
    306 		    vstate_name(vip->vi_state), func, line);
    307 
    308 	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
    309 		cv_wait(&vp->v_cv, vp->v_interlock);
    310 
    311 	if (! VSTATE_VALID(vip->vi_state))
    312 		vnpanic(vp, "state is %s at %s:%d",
    313 		    vstate_name(vip->vi_state), func, line);
    314 }
    315 
    316 static void
    317 vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    318     const char *func, int line)
    319 {
    320 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    321 
    322 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    323 	if (from == VS_LOADING)
    324 		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);
    325 
    326 	if (! VSTATE_VALID(from))
    327 		vnpanic(vp, "from is %s at %s:%d",
    328 		    vstate_name(from), func, line);
    329 	if (! VSTATE_VALID(to))
    330 		vnpanic(vp, "to is %s at %s:%d",
    331 		    vstate_name(to), func, line);
    332 	if (vip->vi_state != from)
    333 		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
    334 		    vstate_name(vip->vi_state), vstate_name(from), func, line);
    335 	if ((from == VS_BLOCKED || to == VS_BLOCKED) && vp->v_usecount != 1)
    336 		vnpanic(vp, "%s to %s with usecount %d at %s:%d",
    337 		    vstate_name(from), vstate_name(to), vp->v_usecount,
    338 		    func, line);
    339 
    340 	vip->vi_state = to;
    341 	if (from == VS_LOADING)
    342 		cv_broadcast(&vcache_cv);
    343 	if (to == VS_LOADED || to == VS_RECLAIMED)
    344 		cv_broadcast(&vp->v_cv);
    345 }
    346 
    347 #else /* defined(DIAGNOSTIC) */
    348 
    349 #define VSTATE_GET(vp) \
    350 	(VNODE_TO_VIMPL((vp))->vi_state)
    351 #define VSTATE_CHANGE(vp, from, to) \
    352 	vstate_change((vp), (from), (to))
    353 #define VSTATE_WAIT_STABLE(vp) \
    354 	vstate_wait_stable((vp))
    355 void
    356 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    357     bool has_lock)
    358 {
    359 
    360 }
    361 
    362 static void
    363 vstate_wait_stable(vnode_t *vp)
    364 {
    365 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    366 
    367 	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
    368 		cv_wait(&vp->v_cv, vp->v_interlock);
    369 }
    370 
    371 static void
    372 vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
    373 {
    374 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    375 
    376 	vip->vi_state = to;
    377 	if (from == VS_LOADING)
    378 		cv_broadcast(&vcache_cv);
    379 	if (to == VS_LOADED || to == VS_RECLAIMED)
    380 		cv_broadcast(&vp->v_cv);
    381 }
    382 
    383 #endif /* defined(DIAGNOSTIC) */
    384 
    385 void
    386 vfs_vnode_sysinit(void)
    387 {
    388 	int error __diagused, i;
    389 
    390 	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
    391 	KASSERT(dead_rootmount != NULL);
    392 	dead_rootmount->mnt_iflag |= IMNT_MPSAFE;
    393 
    394 	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
    395 	for (i = 0; i < LRU_COUNT; i++) {
    396 		TAILQ_INIT(&lru_list[i]);
    397 	}
    398 	vcache_init();
    399 
    400 	cv_init(&vdrain_cv, "vdrain");
    401 	cv_init(&vdrain_gen_cv, "vdrainwt");
    402 	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
    403 	    NULL, &vdrain_lwp, "vdrain");
    404 	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
    405 }
    406 
    407 /*
    408  * Allocate a new marker vnode.
    409  */
    410 vnode_t *
    411 vnalloc_marker(struct mount *mp)
    412 {
    413 	vnode_impl_t *vip;
    414 	vnode_t *vp;
    415 
    416 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
    417 	memset(vip, 0, sizeof(*vip));
    418 	vp = VIMPL_TO_VNODE(vip);
    419 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
    420 	vp->v_mount = mp;
    421 	vp->v_type = VBAD;
    422 	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
    423 	vip->vi_state = VS_MARKER;
    424 
    425 	return vp;
    426 }
    427 
    428 /*
    429  * Free a marker vnode.
    430  */
    431 void
    432 vnfree_marker(vnode_t *vp)
    433 {
    434 	vnode_impl_t *vip;
    435 
    436 	vip = VNODE_TO_VIMPL(vp);
    437 	KASSERT(vip->vi_state == VS_MARKER);
    438 	mutex_obj_free(vp->v_interlock);
    439 	uvm_obj_destroy(&vp->v_uobj, true);
    440 	pool_cache_put(vcache_pool, vip);
    441 }
    442 
    443 /*
    444  * Test a vnode for being a marker vnode.
    445  */
    446 bool
    447 vnis_marker(vnode_t *vp)
    448 {
    449 
    450 	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
    451 }
    452 
    453 /*
    454  * Return the lru list this node should be on.
    455  */
    456 static vnodelst_t *
    457 lru_which(vnode_t *vp)
    458 {
    459 
    460 	KASSERT(mutex_owned(vp->v_interlock));
    461 
    462 	if (vp->v_holdcnt > 0)
    463 		return &lru_list[LRU_HOLD];
    464 	else
    465 		return &lru_list[LRU_FREE];
    466 }
    467 
    468 /*
    469  * Put vnode to end of given list.
    470  * Both the current and the new list may be NULL, used on vnode alloc/free.
    471  * Adjust numvnodes and signal vdrain thread if there is work.
    472  */
    473 static void
    474 lru_requeue(vnode_t *vp, vnodelst_t *listhd)
    475 {
    476 	vnode_impl_t *vip;
    477 	int d;
    478 
    479 	/*
    480 	 * If the vnode is on the correct list, and was put there recently,
    481 	 * then leave it be, thus avoiding huge cache and lock contention.
    482 	 */
    483 	vip = VNODE_TO_VIMPL(vp);
    484 	if (listhd == vip->vi_lrulisthd &&
    485 	    (hardclock_ticks - vip->vi_lrulisttm) < hz) {
    486 		return;
    487 	}
    488 
    489 	mutex_enter(&vdrain_lock);
    490 	d = 0;
    491 	if (vip->vi_lrulisthd != NULL)
    492 		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
    493 	else
    494 		d++;
    495 	vip->vi_lrulisthd = listhd;
    496 	vip->vi_lrulisttm = hardclock_ticks;
    497 	if (vip->vi_lrulisthd != NULL)
    498 		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
    499 	else
    500 		d--;
    501 	if (d != 0) {
    502 		/*
    503 		 * Looks strange?  This is not a bug.  Don't store
    504 		 * numvnodes unless there is a change - avoid false
    505 		 * sharing on MP.
    506 		 */
    507 		numvnodes += d;
    508 	}
    509 	if (numvnodes > desiredvnodes || listhd == &lru_list[LRU_VRELE])
    510 		cv_broadcast(&vdrain_cv);
    511 	mutex_exit(&vdrain_lock);
    512 }
    513 
    514 /*
    515  * Release deferred vrele vnodes for this mount.
    516  * Called with file system suspended.
    517  */
    518 void
    519 vrele_flush(struct mount *mp)
    520 {
    521 	vnode_impl_t *vip, *marker;
    522 	vnode_t *vp;
    523 
    524 	KASSERT(fstrans_is_owner(mp));
    525 
    526 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
    527 
    528 	mutex_enter(&vdrain_lock);
    529 	TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);
    530 
    531 	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
    532 		TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
    533 		TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
    534 		    vi_lrulist);
    535 		vp = VIMPL_TO_VNODE(vip);
    536 		if (vnis_marker(vp))
    537 			continue;
    538 
    539 		KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
    540 		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
    541 		vip->vi_lrulisthd = &lru_list[LRU_HOLD];
    542 		vip->vi_lrulisttm = hardclock_ticks;
    543 		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
    544 		mutex_exit(&vdrain_lock);
    545 
    546 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    547 		mutex_enter(vp->v_interlock);
    548 		vrelel(vp, 0, LK_EXCLUSIVE);
    549 
    550 		mutex_enter(&vdrain_lock);
    551 	}
    552 
    553 	TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
    554 	mutex_exit(&vdrain_lock);
    555 
    556 	vnfree_marker(VIMPL_TO_VNODE(marker));
    557 }
    558 
    559 /*
    560  * Reclaim a cached vnode.  Used from vdrain_thread only.
    561  */
    562 static __inline void
    563 vdrain_remove(vnode_t *vp)
    564 {
    565 	struct mount *mp;
    566 
    567 	KASSERT(mutex_owned(&vdrain_lock));
    568 
    569 	/* Probe usecount (unlocked). */
    570 	if (vp->v_usecount > 0)
    571 		return;
    572 	/* Try v_interlock -- we lock the wrong direction! */
    573 	if (!mutex_tryenter(vp->v_interlock))
    574 		return;
    575 	/* Probe usecount and state. */
    576 	if (vp->v_usecount > 0 || VSTATE_GET(vp) != VS_LOADED) {
    577 		mutex_exit(vp->v_interlock);
    578 		return;
    579 	}
    580 	mp = vp->v_mount;
    581 	if (fstrans_start_nowait(mp) != 0) {
    582 		mutex_exit(vp->v_interlock);
    583 		return;
    584 	}
    585 	vdrain_retry = true;
    586 	mutex_exit(&vdrain_lock);
    587 
    588 	if (vcache_vget(vp) == 0) {
    589 		if (!vrecycle(vp)) {
    590 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    591 			mutex_enter(vp->v_interlock);
    592 			vrelel(vp, 0, LK_EXCLUSIVE);
    593 		}
    594 	}
    595 	fstrans_done(mp);
    596 
    597 	mutex_enter(&vdrain_lock);
    598 }
    599 
    600 /*
    601  * Release a cached vnode.  Used from vdrain_thread only.
    602  */
    603 static __inline void
    604 vdrain_vrele(vnode_t *vp)
    605 {
    606 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    607 	struct mount *mp;
    608 
    609 	KASSERT(mutex_owned(&vdrain_lock));
    610 
    611 	mp = vp->v_mount;
    612 	if (fstrans_start_nowait(mp) != 0)
    613 		return;
    614 
    615 	/*
    616 	 * First remove the vnode from the vrele list.
    617 	 * Put it on the last lru list; the last vrele()
    618 	 * will put it back onto the right list before
    619 	 * its v_usecount reaches zero.
    620 	 */
    621 	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
    622 	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
    623 	vip->vi_lrulisthd = &lru_list[LRU_HOLD];
    624 	vip->vi_lrulisttm = hardclock_ticks;
    625 	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
    626 
    627 	vdrain_retry = true;
    628 	mutex_exit(&vdrain_lock);
    629 
    630 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    631 	mutex_enter(vp->v_interlock);
    632 	vrelel(vp, 0, LK_EXCLUSIVE);
    633 	fstrans_done(mp);
    634 
    635 	mutex_enter(&vdrain_lock);
    636 }
    637 
    638 /*
    639  * Helper thread to keep the number of vnodes below desiredvnodes
    640  * and release vnodes from asynchronous vrele.
    641  */
    642 static void
    643 vdrain_thread(void *cookie)
    644 {
    645 	int i;
    646 	u_int target;
    647 	vnode_impl_t *vip, *marker;
    648 
    649 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
    650 
    651 	mutex_enter(&vdrain_lock);
    652 
    653 	for (;;) {
    654 		vdrain_retry = false;
    655 		target = desiredvnodes - desiredvnodes/10;
    656 
    657 		for (i = 0; i < LRU_COUNT; i++) {
    658 			TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
    659 			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
    660 				TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
    661 				TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
    662 				    vi_lrulist);
    663 				if (vnis_marker(VIMPL_TO_VNODE(vip)))
    664 					continue;
    665 				if (i == LRU_VRELE)
    666 					vdrain_vrele(VIMPL_TO_VNODE(vip));
    667 				else if (numvnodes < target)
    668 					break;
    669 				else
    670 					vdrain_remove(VIMPL_TO_VNODE(vip));
    671 			}
    672 			TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
    673 		}
    674 
    675 		if (vdrain_retry) {
    676 			mutex_exit(&vdrain_lock);
    677 			yield();
    678 			mutex_enter(&vdrain_lock);
    679 		} else {
    680 			vdrain_gen++;
    681 			cv_broadcast(&vdrain_gen_cv);
    682 			cv_wait(&vdrain_cv, &vdrain_lock);
    683 		}
    684 	}
    685 }
    686 
    687 /*
    688  * Try to drop a reference on a vnode.  Abort if we are releasing the
    689  * last reference.  Note: this _must_ succeed if not the last reference.
    690  */
    691 static bool
    692 vtryrele(vnode_t *vp)
    693 {
    694 	u_int use, next;
    695 
    696 	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
    697 		if (__predict_false(use == 1)) {
    698 			return false;
    699 		}
    700 		KASSERT(use > 1);
    701 		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
    702 		if (__predict_true(next == use)) {
    703 			return true;
    704 		}
    705 	}
    706 }
    707 
    708 /*
    709  * vput: unlock and release the reference.
    710  */
    711 void
    712 vput(vnode_t *vp)
    713 {
    714 	int lktype;
    715 
    716 	/*
    717 	 * Do an unlocked check of v_usecount.  If it looks like we're not
    718 	 * about to drop the last reference, then unlock the vnode and try
    719 	 * to drop the reference.  If it ends up being the last reference
    720 	 * after all, vrelel() can fix it all up.  Most of the time this
    721 	 * will all go to plan.
    722 	 */
    723 	if (atomic_load_relaxed(&vp->v_usecount) > 1) {
    724 		VOP_UNLOCK(vp);
    725 		if (vtryrele(vp)) {
    726 			return;
    727 		}
    728 		lktype = LK_NONE;
    729 	} else if ((vp->v_vflag & VV_LOCKSWORK) == 0) {
    730 		lktype = LK_EXCLUSIVE;
    731 	} else {
    732 		lktype = VOP_ISLOCKED(vp);
    733 		KASSERT(lktype != LK_NONE);
    734 	}
    735 	mutex_enter(vp->v_interlock);
    736 	vrelel(vp, 0, lktype);
    737 }
    738 
    739 /*
    740  * Vnode release.  If reference count drops to zero, call inactive
    741  * routine and either return to freelist or free to the pool.
    742  */
    743 static void
    744 vrelel(vnode_t *vp, int flags, int lktype)
    745 {
    746 	const bool async = ((flags & VRELEL_ASYNC) != 0);
    747 	bool recycle, defer;
    748 	int error;
    749 
    750 	KASSERT(mutex_owned(vp->v_interlock));
    751 
    752 	if (__predict_false(vp->v_op == dead_vnodeop_p &&
    753 	    VSTATE_GET(vp) != VS_RECLAIMED)) {
    754 		vnpanic(vp, "dead but not clean");
    755 	}
    756 
    757 	/*
    758 	 * If not the last reference, just drop the reference count and
    759 	 * unlock.  VOP_UNLOCK() is called here without a vnode reference
    760 	 * held, but this is OK as holding v_interlock will stop the vnode
    761 	 * from disappearing.
    762 	 */
    763 	if (vtryrele(vp)) {
    764 		if (lktype != LK_NONE) {
    765 			VOP_UNLOCK(vp);
    766 		}
    767 		mutex_exit(vp->v_interlock);
    768 		return;
    769 	}
    770 	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
    771 		vnpanic(vp, "%s: bad ref count", __func__);
    772 	}
    773 
    774 #ifdef DIAGNOSTIC
    775 	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
    776 	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
    777 		vprint("vrelel: missing VOP_CLOSE()", vp);
    778 	}
    779 #endif
    780 
    781 	/*
    782 	 * First try to get the vnode locked for VOP_INACTIVE().
    783 	 * Defer vnode release to the vdrain thread if the caller requests
    784 	 * it explicitly, is the pagedaemon, or if taking the lock failed.
    785 	 */
    786 	defer = false;
    787 	if ((curlwp == uvm.pagedaemon_lwp) || async) {
    788 		defer = true;
    789 	} else if (lktype == LK_SHARED) {
    790 		/* Excellent chance of getting the lock, if this is the last ref. */
    791 		error = vn_lock(vp, LK_UPGRADE | LK_RETRY |
    792 		    LK_NOWAIT);
    793 		if (error != 0) {
    794 			defer = true;
    795 		} else {
    796 			lktype = LK_EXCLUSIVE;
    797 		}
    798 	} else if (lktype == LK_NONE) {
    799 		/* Excellent chance of getting the lock, if this is the last ref. */
    800 		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY |
    801 		    LK_NOWAIT);
    802 		if (error != 0) {
    803 			defer = true;
    804 		} else {
    805 			lktype = LK_EXCLUSIVE;
    806 		}
    807 	}
    808 	KASSERT(mutex_owned(vp->v_interlock));
    809 	if (defer) {
    810 		/*
    811 		 * Defer reclaim to the kthread; it's not safe to
    812 		 * clean it here.  We donate it our last reference.
    813 		 */
    814 		if (lktype != LK_NONE) {
    815 			VOP_UNLOCK(vp);
    816 		}
    817 		lru_requeue(vp, &lru_list[LRU_VRELE]);
    818 		mutex_exit(vp->v_interlock);
    819 		return;
    820 	}
    821 	KASSERT(lktype == LK_EXCLUSIVE);
    822 
    823 	/*
    824 	 * If not clean, deactivate the vnode, but preserve
    825 	 * our reference across the call to VOP_INACTIVE().
    826 	 */
    827 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
    828 		VOP_UNLOCK(vp);
    829 	} else {
    830 		/*
    831 		 * If VOP_INACTIVE() indicates that the described file has
    832 		 * been deleted, then recycle the vnode.  Note that
    833 		 * VOP_INACTIVE() will not drop the vnode lock.
    834 		 *
    835 		 * If the file has been deleted, this is a lingering
    836 		 * reference and there is no need to worry about new
    837 		 * references looking to do real work with the vnode (as it
    838 		 * will have been purged from directories, caches, etc).
    839 		 */
    840 		recycle = false;
    841 		mutex_exit(vp->v_interlock);
    842 		VOP_INACTIVE(vp, &recycle);
    843 		rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
    844 		mutex_enter(vp->v_interlock);
    845 		if (!recycle) {
    846 			VOP_UNLOCK(vp);
    847 			if (vtryrele(vp)) {
    848 				mutex_exit(vp->v_interlock);
    849 				rw_exit(vp->v_uobj.vmobjlock);
    850 				return;
    851 			}
    852 		}
    853 
    854 		/* Take care of space accounting. */
    855 		if ((vp->v_iflag & VI_EXECMAP) != 0 &&
    856 		    vp->v_uobj.uo_npages != 0) {
    857 			cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
    858 			cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
    859 		}
    860 		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
    861 		vp->v_vflag &= ~VV_MAPPED;
    862 		rw_exit(vp->v_uobj.vmobjlock);
    863 
    864 		/*
    865 		 * Recycle the vnode if the file is now unused (unlinked),
    866 		 * otherwise just free it.
    867 		 */
    868 		if (recycle) {
    869 			VSTATE_ASSERT(vp, VS_LOADED);
    870 			/* vcache_reclaim drops the lock. */
    871 			vcache_reclaim(vp);
    872 		}
    873 		KASSERT(vp->v_usecount > 0);
    874 	}
    875 
    876 	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
    877 		/* Gained another reference while being reclaimed. */
    878 		mutex_exit(vp->v_interlock);
    879 		return;
    880 	}
    881 
    882 	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
    883 		/*
    884 		 * It's clean so destroy it.  It isn't referenced
    885 		 * anywhere since it has been reclaimed.
    886 		 */
    887 		vcache_free(VNODE_TO_VIMPL(vp));
    888 	} else {
    889 		/*
    890 		 * Otherwise, put it back onto the freelist.  It
    891 		 * can't be destroyed while still associated with
    892 		 * a file system.
    893 		 */
    894 		lru_requeue(vp, lru_which(vp));
    895 		mutex_exit(vp->v_interlock);
    896 	}
    897 }
    898 
    899 void
    900 vrele(vnode_t *vp)
    901 {
    902 
    903 	if (vtryrele(vp)) {
    904 		return;
    905 	}
    906 	mutex_enter(vp->v_interlock);
    907 	vrelel(vp, 0, LK_NONE);
    908 }
    909 
    910 /*
    911  * Asynchronous vnode release, vnode is released in different context.
    912  */
    913 void
    914 vrele_async(vnode_t *vp)
    915 {
    916 
    917 	if (vtryrele(vp)) {
    918 		return;
    919 	}
    920 	mutex_enter(vp->v_interlock);
    921 	vrelel(vp, VRELEL_ASYNC, LK_NONE);
    922 }
    923 
    924 /*
    925  * Vnode reference, where a reference is already held by some other
    926  * object (for example, a file structure).
    927  *
    928  * NB: we have lockless code sequences that rely on this not blocking.
    929  */
    930 void
    931 vref(vnode_t *vp)
    932 {
    933 
    934 	KASSERT(atomic_load_relaxed(&vp->v_usecount) != 0);
    935 
    936 	atomic_inc_uint(&vp->v_usecount);
    937 }
    938 
    939 /*
    940  * Page or buffer structure gets a reference.
    941  * Called with v_interlock held.
    942  */
    943 void
    944 vholdl(vnode_t *vp)
    945 {
    946 
    947 	KASSERT(mutex_owned(vp->v_interlock));
    948 
    949 	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0)
    950 		lru_requeue(vp, lru_which(vp));
    951 }
    952 
    953 /*
    954  * Page or buffer structure gets a reference.
    955  */
    956 void
    957 vhold(vnode_t *vp)
    958 {
    959 
    960 	mutex_enter(vp->v_interlock);
    961 	vholdl(vp);
    962 	mutex_exit(vp->v_interlock);
    963 }
    964 
    965 /*
    966  * Page or buffer structure frees a reference.
    967  * Called with v_interlock held.
    968  */
    969 void
    970 holdrelel(vnode_t *vp)
    971 {
    972 
    973 	KASSERT(mutex_owned(vp->v_interlock));
    974 
    975 	if (vp->v_holdcnt <= 0) {
    976 		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
    977 	}
    978 
    979 	vp->v_holdcnt--;
    980 	if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
    981 		lru_requeue(vp, lru_which(vp));
    982 }
    983 
    984 /*
    985  * Page or buffer structure frees a reference.
    986  */
    987 void
    988 holdrele(vnode_t *vp)
    989 {
    990 
    991 	mutex_enter(vp->v_interlock);
    992 	holdrelel(vp);
    993 	mutex_exit(vp->v_interlock);
    994 }
    995 
    996 /*
    997  * Recycle an unused vnode if caller holds the last reference.
    998  */
    999 bool
   1000 vrecycle(vnode_t *vp)
   1001 {
   1002 	int error __diagused;
   1003 
   1004 	mutex_enter(vp->v_interlock);
   1005 
   1006 	/* Make sure we hold the last reference. */
   1007 	VSTATE_WAIT_STABLE(vp);
   1008 	if (vp->v_usecount != 1) {
   1009 		mutex_exit(vp->v_interlock);
   1010 		return false;
   1011 	}
   1012 
   1013 	/* If the vnode is already clean we're done. */
   1014 	if (VSTATE_GET(vp) != VS_LOADED) {
   1015 		VSTATE_ASSERT(vp, VS_RECLAIMED);
   1016 		vrelel(vp, 0, LK_NONE);
   1017 		return true;
   1018 	}
   1019 
   1020 	/* Prevent further references until the vnode is locked. */
   1021 	VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
   1022 	mutex_exit(vp->v_interlock);
   1023 
   1024 	/*
   1025 	 * On a leaf file system this lock will always succeed as we hold
   1026 	 * the last reference and prevent further references.
   1027 	 * On layered file systems waiting for the lock would open a can of
   1028 	 * deadlocks as the lower vnodes may have other active references.
   1029 	 */
   1030 	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
   1031 
   1032 	mutex_enter(vp->v_interlock);
   1033 	VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
   1034 
   1035 	if (error) {
   1036 		mutex_exit(vp->v_interlock);
   1037 		return false;
   1038 	}
   1039 
   1040 	KASSERT(vp->v_usecount == 1);
   1041 	vcache_reclaim(vp);
   1042 	vrelel(vp, 0, LK_NONE);
   1043 
   1044 	return true;
   1045 }
   1046 
   1047 /*
   1048  * Helper for vrevoke() to propagate suspension from lastmp
   1049  * to thismp.  Both args may be NULL.
   1050  * Returns the currently suspended file system or NULL.
   1051  */
   1052 static struct mount *
   1053 vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
   1054 {
   1055 	int error;
   1056 
   1057 	if (lastmp == thismp)
   1058 		return thismp;
   1059 
   1060 	if (lastmp != NULL)
   1061 		vfs_resume(lastmp);
   1062 
   1063 	if (thismp == NULL)
   1064 		return NULL;
   1065 
   1066 	do {
   1067 		error = vfs_suspend(thismp, 0);
   1068 	} while (error == EINTR || error == ERESTART);
   1069 
   1070 	if (error == 0)
   1071 		return thismp;
   1072 
   1073 	KASSERT(error == EOPNOTSUPP);
   1074 	return NULL;
   1075 }
   1076 
   1077 /*
   1078  * Eliminate all activity associated with the requested vnode
   1079  * and with all vnodes aliased to the requested vnode.
   1080  */
   1081 void
   1082 vrevoke(vnode_t *vp)
   1083 {
   1084 	struct mount *mp;
   1085 	vnode_t *vq;
   1086 	enum vtype type;
   1087 	dev_t dev;
   1088 
   1089 	KASSERT(vp->v_usecount > 0);
   1090 
   1091 	mp = vrevoke_suspend_next(NULL, vp->v_mount);
   1092 
   1093 	mutex_enter(vp->v_interlock);
   1094 	VSTATE_WAIT_STABLE(vp);
   1095 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
   1096 		mutex_exit(vp->v_interlock);
   1097 	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
   1098 		atomic_inc_uint(&vp->v_usecount);
   1099 		mutex_exit(vp->v_interlock);
   1100 		vgone(vp);
   1101 	} else {
   1102 		dev = vp->v_rdev;
   1103 		type = vp->v_type;
   1104 		mutex_exit(vp->v_interlock);
   1105 
   1106 		while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
   1107 			mp = vrevoke_suspend_next(mp, vq->v_mount);
   1108 			vgone(vq);
   1109 		}
   1110 	}
   1111 	vrevoke_suspend_next(mp, NULL);
   1112 }
   1113 
   1114 /*
   1115  * Eliminate all activity associated with a vnode in preparation for
   1116  * reuse.  Drops a reference from the vnode.
   1117  */
   1118 void
   1119 vgone(vnode_t *vp)
   1120 {
   1121 	int lktype;
   1122 
   1123 	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
   1124 
   1125 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   1126 	lktype = LK_EXCLUSIVE;
   1127 	mutex_enter(vp->v_interlock);
   1128 	VSTATE_WAIT_STABLE(vp);
   1129 	if (VSTATE_GET(vp) == VS_LOADED) {
   1130 		vcache_reclaim(vp);
   1131 		lktype = LK_NONE;
   1132 	}
   1133 	VSTATE_ASSERT(vp, VS_RECLAIMED);
   1134 	vrelel(vp, 0, lktype);
   1135 }
   1136 
   1137 static inline uint32_t
   1138 vcache_hash(const struct vcache_key *key)
   1139 {
   1140 	uint32_t hash = HASH32_BUF_INIT;
   1141 
   1142 	KASSERT(key->vk_key_len > 0);
   1143 
   1144 	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
   1145 	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
   1146 	return hash;
   1147 }
   1148 
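/*
 * Illustrative sketch (editorial addition): how a key is built and
 * hashed.  The key is an opaque byte string private to the file
 * system; a hypothetical file system keyed by inode number would fill
 * a struct vcache_key as below.  The bucket is then
 * vcache_hashtab[hash & vcache_hashmask].
 */
#if 0	/* example only */
static uint32_t
example_hash(struct mount *mp, const uint64_t *ino)
{
	struct vcache_key key;

	key.vk_mount = mp;
	key.vk_key = ino;
	key.vk_key_len = sizeof(*ino);
	return vcache_hash(&key);
}
#endif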
   1149 static void
   1150 vcache_init(void)
   1151 {
   1152 
   1153 	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
   1154 	    0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
   1155 	KASSERT(vcache_pool != NULL);
   1156 	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
   1157 	cv_init(&vcache_cv, "vcache");
   1158 	vcache_hashsize = desiredvnodes;
   1159 	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
   1160 	    &vcache_hashmask);
   1161 }
   1162 
   1163 static void
   1164 vcache_reinit(void)
   1165 {
   1166 	int i;
   1167 	uint32_t hash;
   1168 	u_long oldmask, newmask;
   1169 	struct hashhead *oldtab, *newtab;
   1170 	vnode_impl_t *vip;
   1171 
   1172 	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
   1173 	mutex_enter(&vcache_lock);
   1174 	oldtab = vcache_hashtab;
   1175 	oldmask = vcache_hashmask;
   1176 	vcache_hashsize = desiredvnodes;
   1177 	vcache_hashtab = newtab;
   1178 	vcache_hashmask = newmask;
   1179 	for (i = 0; i <= oldmask; i++) {
   1180 		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
   1181 			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
   1182 			hash = vcache_hash(&vip->vi_key);
   1183 			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
   1184 			    vip, vi_hash);
   1185 		}
   1186 	}
   1187 	mutex_exit(&vcache_lock);
   1188 	hashdone(oldtab, HASH_SLIST, oldmask);
   1189 }
   1190 
   1191 static inline vnode_impl_t *
   1192 vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
   1193 {
   1194 	struct hashhead *hashp;
   1195 	vnode_impl_t *vip;
   1196 
   1197 	KASSERT(mutex_owned(&vcache_lock));
   1198 
   1199 	hashp = &vcache_hashtab[hash & vcache_hashmask];
   1200 	SLIST_FOREACH(vip, hashp, vi_hash) {
   1201 		if (key->vk_mount != vip->vi_key.vk_mount)
   1202 			continue;
   1203 		if (key->vk_key_len != vip->vi_key.vk_key_len)
   1204 			continue;
   1205 		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
   1206 			continue;
   1207 		return vip;
   1208 	}
   1209 	return NULL;
   1210 }
   1211 
   1212 /*
   1213  * Allocate a new, uninitialized vcache node.
   1214  */
   1215 static vnode_impl_t *
   1216 vcache_alloc(void)
   1217 {
   1218 	vnode_impl_t *vip;
   1219 	vnode_t *vp;
   1220 
   1221 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
   1222 	vp = VIMPL_TO_VNODE(vip);
   1223 	memset(vip, 0, sizeof(*vip));
   1224 
   1225 	rw_init(&vip->vi_lock);
   1226 	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
   1227 
   1228 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
   1229 	cv_init(&vp->v_cv, "vnode");
   1230 	cache_vnode_init(vp);
   1231 
   1232 	vp->v_usecount = 1;
   1233 	vp->v_type = VNON;
   1234 	vp->v_size = vp->v_writesize = VSIZENOTSET;
   1235 
   1236 	vip->vi_state = VS_LOADING;
   1237 
   1238 	lru_requeue(vp, &lru_list[LRU_FREE]);
   1239 
   1240 	return vip;
   1241 }
   1242 
   1243 /*
   1244  * Deallocate a vcache node in state VS_LOADING.
   1245  *
   1246  * vcache_lock held on entry and released on return.
   1247  */
   1248 static void
   1249 vcache_dealloc(vnode_impl_t *vip)
   1250 {
   1251 	vnode_t *vp;
   1252 
   1253 	KASSERT(mutex_owned(&vcache_lock));
   1254 
   1255 	vp = VIMPL_TO_VNODE(vip);
   1256 	vfs_ref(dead_rootmount);
   1257 	vfs_insmntque(vp, dead_rootmount);
   1258 	mutex_enter(vp->v_interlock);
   1259 	vp->v_op = dead_vnodeop_p;
   1260 	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
   1261 	mutex_exit(&vcache_lock);
   1262 	vrelel(vp, 0, LK_NONE);
   1263 }
   1264 
   1265 /*
   1266  * Free an unused, unreferenced vcache node.
   1267  * v_interlock locked on entry.
   1268  */
   1269 static void
   1270 vcache_free(vnode_impl_t *vip)
   1271 {
   1272 	vnode_t *vp;
   1273 
   1274 	vp = VIMPL_TO_VNODE(vip);
   1275 	KASSERT(mutex_owned(vp->v_interlock));
   1276 
   1277 	KASSERT(vp->v_usecount == 0);
   1278 	KASSERT(vp->v_holdcnt == 0);
   1279 	KASSERT(vp->v_writecount == 0);
   1280 	lru_requeue(vp, NULL);
   1281 	mutex_exit(vp->v_interlock);
   1282 
   1283 	vfs_insmntque(vp, NULL);
   1284 	if (vp->v_type == VBLK || vp->v_type == VCHR)
   1285 		spec_node_destroy(vp);
   1286 
   1287 	mutex_obj_free(vp->v_interlock);
   1288 	rw_destroy(&vip->vi_lock);
   1289 	uvm_obj_destroy(&vp->v_uobj, true);
   1290 	cv_destroy(&vp->v_cv);
   1291 	cache_vnode_fini(vp);
   1292 	pool_cache_put(vcache_pool, vip);
   1293 }
   1294 
   1295 /*
   1296  * Try to get an initial reference on this cached vnode.
   1297  * Returns zero on success, ENOENT if the vnode has been reclaimed and
   1298  * EBUSY if the vnode state is unstable.
   1299  *
   1300  * v_interlock locked on entry and unlocked on exit.
   1301  */
   1302 int
   1303 vcache_tryvget(vnode_t *vp)
   1304 {
   1305 	int error = 0;
   1306 
   1307 	KASSERT(mutex_owned(vp->v_interlock));
   1308 
   1309 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED))
   1310 		error = ENOENT;
   1311 	else if (__predict_false(VSTATE_GET(vp) != VS_LOADED))
   1312 		error = EBUSY;
   1313 	else if (vp->v_usecount == 0)
   1314 		vp->v_usecount = 1;
   1315 	else
   1316 		atomic_inc_uint(&vp->v_usecount);
   1317 
   1318 	mutex_exit(vp->v_interlock);
   1319 
   1320 	return error;
   1321 }
   1322 
   1323 /*
   1324  * Try to get an initial reference on this cached vnode.
   1325  * Returns zero on success and ENOENT if the vnode has been reclaimed.
   1326  * Will wait for the vnode state to be stable.
   1327  *
   1328  * v_interlock locked on entry and unlocked on exit.
   1329  */
   1330 int
   1331 vcache_vget(vnode_t *vp)
   1332 {
   1333 
   1334 	KASSERT(mutex_owned(vp->v_interlock));
   1335 
   1336 	/* Increment hold count to prevent vnode from disappearing. */
   1337 	vp->v_holdcnt++;
   1338 	VSTATE_WAIT_STABLE(vp);
   1339 	vp->v_holdcnt--;
   1340 
   1341 	/* If this was the last reference to a reclaimed vnode free it now. */
   1342 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
   1343 		if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
   1344 			vcache_free(VNODE_TO_VIMPL(vp));
   1345 		else
   1346 			mutex_exit(vp->v_interlock);
   1347 		return ENOENT;
   1348 	}
   1349 	VSTATE_ASSERT(vp, VS_LOADED);
   1350 	if (vp->v_usecount == 0)
   1351 		vp->v_usecount = 1;
   1352 	else
   1353 		atomic_inc_uint(&vp->v_usecount);
   1354 	mutex_exit(vp->v_interlock);
   1355 
   1356 	return 0;
   1357 }
   1358 
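/*
 * Illustrative sketch (editorial addition): taking a reference on a
 * vnode found through some private index.  Both functions above are
 * entered with v_interlock held and release it.  vcache_tryvget()
 * never sleeps, so EBUSY means "retry or fall back to the sleeping
 * vcache_vget()"; ENOENT from either means the vnode was reclaimed
 * and must be looked up again.
 */
#if 0	/* example only */
static int
example_ref(vnode_t *vp)
{
	int error;

	mutex_enter(vp->v_interlock);
	error = vcache_tryvget(vp);		/* releases v_interlock */
	if (error == EBUSY) {
		mutex_enter(vp->v_interlock);
		error = vcache_vget(vp);	/* waits for a stable state */
	}
	return error;
}
#endif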
   1359 /*
   1360  * Get a vnode / fs node pair by key and return it referenced through vpp.
   1361  */
   1362 int
   1363 vcache_get(struct mount *mp, const void *key, size_t key_len,
   1364     struct vnode **vpp)
   1365 {
   1366 	int error;
   1367 	uint32_t hash;
   1368 	const void *new_key;
   1369 	struct vnode *vp;
   1370 	struct vcache_key vcache_key;
   1371 	vnode_impl_t *vip, *new_vip;
   1372 
   1373 	new_key = NULL;
   1374 	*vpp = NULL;
   1375 
   1376 	vcache_key.vk_mount = mp;
   1377 	vcache_key.vk_key = key;
   1378 	vcache_key.vk_key_len = key_len;
   1379 	hash = vcache_hash(&vcache_key);
   1380 
   1381 again:
   1382 	mutex_enter(&vcache_lock);
   1383 	vip = vcache_hash_lookup(&vcache_key, hash);
   1384 
   1385 	/* If found, take a reference or retry. */
   1386 	if (__predict_true(vip != NULL)) {
   1387 		/*
   1388 		 * If the vnode is loading we cannot take the v_interlock
   1389 		 * here as it might change during load (see uvm_obj_setlock()).
   1390 		 * As changing state from VS_LOADING requires both vcache_lock
   1391 		 * and v_interlock it is safe to test with vcache_lock held.
   1392 		 *
   1393 		 * Wait for vnodes changing state from VS_LOADING and retry.
   1394 		 */
   1395 		if (__predict_false(vip->vi_state == VS_LOADING)) {
   1396 			cv_wait(&vcache_cv, &vcache_lock);
   1397 			mutex_exit(&vcache_lock);
   1398 			goto again;
   1399 		}
   1400 		vp = VIMPL_TO_VNODE(vip);
   1401 		mutex_enter(vp->v_interlock);
   1402 		mutex_exit(&vcache_lock);
   1403 		error = vcache_vget(vp);
   1404 		if (error == ENOENT)
   1405 			goto again;
   1406 		if (error == 0)
   1407 			*vpp = vp;
   1408 		KASSERT((error != 0) == (*vpp == NULL));
   1409 		return error;
   1410 	}
   1411 	mutex_exit(&vcache_lock);
   1412 
   1413 	/* Allocate and initialize a new vcache / vnode pair. */
   1414 	error = vfs_busy(mp);
   1415 	if (error)
   1416 		return error;
   1417 	new_vip = vcache_alloc();
   1418 	new_vip->vi_key = vcache_key;
   1419 	vp = VIMPL_TO_VNODE(new_vip);
   1420 	mutex_enter(&vcache_lock);
   1421 	vip = vcache_hash_lookup(&vcache_key, hash);
   1422 	if (vip == NULL) {
   1423 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
   1424 		    new_vip, vi_hash);
   1425 		vip = new_vip;
   1426 	}
   1427 
   1428 	/* If another thread beat us inserting this node, retry. */
   1429 	if (vip != new_vip) {
   1430 		vcache_dealloc(new_vip);
   1431 		vfs_unbusy(mp);
   1432 		goto again;
   1433 	}
   1434 	mutex_exit(&vcache_lock);
   1435 
   1436 	/* Load the fs node.  Exclusive as new_vip is VS_LOADING. */
   1437 	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
   1438 	if (error) {
   1439 		mutex_enter(&vcache_lock);
   1440 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
   1441 		    new_vip, vnode_impl, vi_hash);
   1442 		vcache_dealloc(new_vip);
   1443 		vfs_unbusy(mp);
   1444 		KASSERT(*vpp == NULL);
   1445 		return error;
   1446 	}
   1447 	KASSERT(new_key != NULL);
   1448 	KASSERT(memcmp(key, new_key, key_len) == 0);
   1449 	KASSERT(vp->v_op != NULL);
   1450 	vfs_insmntque(vp, mp);
   1451 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
   1452 		vp->v_vflag |= VV_MPSAFE;
   1453 	vfs_ref(mp);
   1454 	vfs_unbusy(mp);
   1455 
   1456 	/* Finished loading, finalize node. */
   1457 	mutex_enter(&vcache_lock);
   1458 	new_vip->vi_key.vk_key = new_key;
   1459 	mutex_enter(vp->v_interlock);
   1460 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
   1461 	mutex_exit(vp->v_interlock);
   1462 	mutex_exit(&vcache_lock);
   1463 	*vpp = vp;
   1464 	return 0;
   1465 }
   1466 
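/*
 * Illustrative sketch (editorial addition): the contract vcache_get()
 * expects from a file system's loadvnode callback.  On success the
 * callback must fully initialise the vnode (v_op in particular) and
 * return, via new_key, a pointer to a stable copy of the key (same
 * length and contents) that lives as long as the fs node.  The
 * "myfs" names are hypothetical.
 */
#if 0	/* example only */
static int
myfs_loadvnode(struct mount *mp, struct vnode *vp,
    const void *key, size_t key_len, const void **new_key)
{
	struct myfs_node *np;

	KASSERT(key_len == sizeof(np->n_ino));

	np = myfs_node_alloc();
	memcpy(&np->n_ino, key, key_len);

	vp->v_op = myfs_vnodeop_p;	/* required: vcache_get() asserts it */
	vp->v_data = np;
	vp->v_type = VREG;		/* ... as read from the fs ... */

	*new_key = &np->n_ino;		/* stable copy of the key */
	return 0;
}
#endif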
   1467 /*
   1468  * Create a new vnode / fs node pair and return it referenced through vpp.
   1469  */
   1470 int
   1471 vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
   1472     kauth_cred_t cred, void *extra, struct vnode **vpp)
   1473 {
   1474 	int error;
   1475 	uint32_t hash;
   1476 	struct vnode *vp, *ovp;
   1477 	vnode_impl_t *vip, *ovip;
   1478 
   1479 	*vpp = NULL;
   1480 
   1481 	/* Allocate and initialize a new vcache / vnode pair. */
   1482 	error = vfs_busy(mp);
   1483 	if (error)
   1484 		return error;
   1485 	vip = vcache_alloc();
   1486 	vip->vi_key.vk_mount = mp;
   1487 	vp = VIMPL_TO_VNODE(vip);
   1488 
   1489 	/* Create and load the fs node. */
   1490 	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
   1491 	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
   1492 	if (error) {
   1493 		mutex_enter(&vcache_lock);
   1494 		vcache_dealloc(vip);
   1495 		vfs_unbusy(mp);
   1496 		KASSERT(*vpp == NULL);
   1497 		return error;
   1498 	}
   1499 	KASSERT(vp->v_op != NULL);
   1500 	KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
   1501 	if (vip->vi_key.vk_key_len > 0) {
   1502 		KASSERT(vip->vi_key.vk_key != NULL);
   1503 		hash = vcache_hash(&vip->vi_key);
   1504 
   1505 		/*
   1506 		 * Wait for previous instance to be reclaimed,
   1507 		 * then insert new node.
   1508 		 */
   1509 		mutex_enter(&vcache_lock);
   1510 		while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
   1511 			ovp = VIMPL_TO_VNODE(ovip);
   1512 			mutex_enter(ovp->v_interlock);
   1513 			mutex_exit(&vcache_lock);
   1514 			error = vcache_vget(ovp);
   1515 			KASSERT(error == ENOENT);
   1516 			mutex_enter(&vcache_lock);
   1517 		}
   1518 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
   1519 		    vip, vi_hash);
   1520 		mutex_exit(&vcache_lock);
   1521 	}
   1522 	vfs_insmntque(vp, mp);
   1523 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
   1524 		vp->v_vflag |= VV_MPSAFE;
   1525 	vfs_ref(mp);
   1526 	vfs_unbusy(mp);
   1527 
   1528 	/* Finished loading, finalize node. */
   1529 	mutex_enter(&vcache_lock);
   1530 	mutex_enter(vp->v_interlock);
   1531 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
   1532 	mutex_exit(&vcache_lock);
   1533 	mutex_exit(vp->v_interlock);
   1534 	*vpp = vp;
   1535 	return 0;
   1536 }
   1537 
   1538 /*
   1539  * Prepare key change: update the old cache node's key and lock the new node.
   1540  * Return an error if the new node already exists.
   1541  */
   1542 int
   1543 vcache_rekey_enter(struct mount *mp, struct vnode *vp,
   1544     const void *old_key, size_t old_key_len,
   1545     const void *new_key, size_t new_key_len)
   1546 {
   1547 	uint32_t old_hash, new_hash;
   1548 	struct vcache_key old_vcache_key, new_vcache_key;
   1549 	vnode_impl_t *vip, *new_vip;
   1550 
   1551 	old_vcache_key.vk_mount = mp;
   1552 	old_vcache_key.vk_key = old_key;
   1553 	old_vcache_key.vk_key_len = old_key_len;
   1554 	old_hash = vcache_hash(&old_vcache_key);
   1555 
   1556 	new_vcache_key.vk_mount = mp;
   1557 	new_vcache_key.vk_key = new_key;
   1558 	new_vcache_key.vk_key_len = new_key_len;
   1559 	new_hash = vcache_hash(&new_vcache_key);
   1560 
   1561 	new_vip = vcache_alloc();
   1562 	new_vip->vi_key = new_vcache_key;
   1563 
   1564 	/* Insert locked new node used as placeholder. */
   1565 	mutex_enter(&vcache_lock);
   1566 	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
   1567 	if (vip != NULL) {
   1568 		vcache_dealloc(new_vip);
   1569 		return EEXIST;
   1570 	}
   1571 	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
   1572 	    new_vip, vi_hash);
   1573 
   1574 	/* Replace the old node's key with the temporary copy. */
   1575 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
   1576 	KASSERT(vip != NULL);
   1577 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
   1578 	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
   1579 	vip->vi_key = old_vcache_key;
   1580 	mutex_exit(&vcache_lock);
   1581 	return 0;
   1582 }
   1583 
   1584 /*
   1585  * Key change complete: update old node and remove placeholder.
   1586  */
   1587 void
   1588 vcache_rekey_exit(struct mount *mp, struct vnode *vp,
   1589     const void *old_key, size_t old_key_len,
   1590     const void *new_key, size_t new_key_len)
   1591 {
   1592 	uint32_t old_hash, new_hash;
   1593 	struct vcache_key old_vcache_key, new_vcache_key;
   1594 	vnode_impl_t *vip, *new_vip;
   1595 	struct vnode *new_vp;
   1596 
   1597 	old_vcache_key.vk_mount = mp;
   1598 	old_vcache_key.vk_key = old_key;
   1599 	old_vcache_key.vk_key_len = old_key_len;
   1600 	old_hash = vcache_hash(&old_vcache_key);
   1601 
   1602 	new_vcache_key.vk_mount = mp;
   1603 	new_vcache_key.vk_key = new_key;
   1604 	new_vcache_key.vk_key_len = new_key_len;
   1605 	new_hash = vcache_hash(&new_vcache_key);
   1606 
   1607 	mutex_enter(&vcache_lock);
   1608 
   1609 	/* Lookup old and new node. */
   1610 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
   1611 	KASSERT(vip != NULL);
   1612 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
   1613 
   1614 	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
   1615 	KASSERT(new_vip != NULL);
   1616 	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
   1617 	new_vp = VIMPL_TO_VNODE(new_vip);
   1618 	mutex_enter(new_vp->v_interlock);
   1619 	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
   1620 	mutex_exit(new_vp->v_interlock);
   1621 
   1622 	/* Rekey old node and put it onto its new hashlist. */
   1623 	vip->vi_key = new_vcache_key;
   1624 	if (old_hash != new_hash) {
   1625 		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
   1626 		    vip, vnode_impl, vi_hash);
   1627 		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
   1628 		    vip, vi_hash);
   1629 	}
   1630 
   1631 	/* Remove new node used as placeholder. */
   1632 	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
   1633 	    new_vip, vnode_impl, vi_hash);
   1634 	vcache_dealloc(new_vip);
   1635 }
   1636 
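/*
 * Illustrative sketch (editorial addition): a file system changing a
 * vnode's cache key brackets its private update with the two calls
 * above.  The inode-number keys are hypothetical.
 */
#if 0	/* example only */
static int
example_rekey(struct mount *mp, struct vnode *vp,
    uint64_t *old_ino, uint64_t *new_ino)
{
	int error;

	error = vcache_rekey_enter(mp, vp, old_ino, sizeof(*old_ino),
	    new_ino, sizeof(*new_ino));
	if (error != 0)
		return error;	/* EEXIST: target key already cached */

	/* ... update the fs node here so it answers to *new_ino ... */

	vcache_rekey_exit(mp, vp, old_ino, sizeof(*old_ino),
	    new_ino, sizeof(*new_ino));
	return 0;
}
#endif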
   1637 /*
   1638  * Disassociate the underlying file system from a vnode.
   1639  *
   1640  * Must be called with vnode locked and will return unlocked.
   1641  * Must be called with the interlock held, and will return with it held.
   1642  */
   1643 static void
   1644 vcache_reclaim(vnode_t *vp)
   1645 {
   1646 	lwp_t *l = curlwp;
   1647 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
   1648 	struct mount *mp = vp->v_mount;
   1649 	uint32_t hash;
   1650 	uint8_t temp_buf[64], *temp_key;
   1651 	size_t temp_key_len;
   1652 	bool recycle, active;
   1653 	int error;
   1654 
   1655 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
   1656 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
   1657 	KASSERT(mutex_owned(vp->v_interlock));
   1658 	KASSERT(vp->v_usecount != 0);
   1659 
   1660 	active = (vp->v_usecount > 1);
   1661 	temp_key_len = vip->vi_key.vk_key_len;
   1662 	/*
   1663 	 * Prevent the vnode from being recycled or brought into use
   1664 	 * while we clean it out.
   1665 	 */
   1666 	VSTATE_CHANGE(vp, VS_LOADED, VS_RECLAIMING);
   1667 	mutex_exit(vp->v_interlock);
   1668 
   1669 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
   1670 	mutex_enter(vp->v_interlock);
   1671 	if ((vp->v_iflag & VI_EXECMAP) != 0 && vp->v_uobj.uo_npages != 0) {
   1672 		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
   1673 		cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
   1674 	}
   1675 	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
   1676 	mutex_exit(vp->v_interlock);
   1677 	rw_exit(vp->v_uobj.vmobjlock);
   1678 
   1679 	/*
   1680 	 * With vnode state set to reclaiming, purge name cache immediately
   1681 	 * to prevent new handles on vnode, and wait for existing threads
   1682 	 * trying to get a handle to notice VS_RECLAIMED status and abort.
   1683 	 */
   1684 	cache_purge(vp);
   1685 
   1686 	/* Replace the vnode key with a temporary copy. */
    1687 	if (temp_key_len > sizeof(temp_buf)) {
   1688 		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
   1689 	} else {
   1690 		temp_key = temp_buf;
   1691 	}
   1692 	if (vip->vi_key.vk_key_len > 0) {
   1693 		mutex_enter(&vcache_lock);
   1694 		memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
   1695 		vip->vi_key.vk_key = temp_key;
   1696 		mutex_exit(&vcache_lock);
   1697 	}
   1698 
   1699 	fstrans_start(mp);
   1700 
   1701 	/*
   1702 	 * Clean out any cached data associated with the vnode.
   1703 	 * If purging an active vnode, it must be closed and
   1704 	 * deactivated before being reclaimed.
   1705 	 */
   1706 	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
   1707 	if (error != 0) {
   1708 		if (wapbl_vphaswapbl(vp))
   1709 			WAPBL_DISCARD(wapbl_vptomp(vp));
   1710 		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
   1711 	}
   1712 	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
   1713 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1714 	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
    1715 		spec_node_revoke(vp);
   1716 	}
   1717 
   1718 	/*
   1719 	 * Disassociate the underlying file system from the vnode.
   1720 	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
   1721 	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
   1722 	 * would no longer function.
   1723 	 */
   1724 	VOP_INACTIVE(vp, &recycle);
   1725 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
   1726 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
   1727 	if (VOP_RECLAIM(vp)) {
   1728 		vnpanic(vp, "%s: cannot reclaim", __func__);
   1729 	}
   1730 
   1731 	KASSERT(vp->v_data == NULL);
   1732 	KASSERT((vp->v_iflag & VI_PAGES) == 0);
   1733 
   1734 	if (vp->v_type == VREG && vp->v_ractx != NULL) {
   1735 		uvm_ra_freectx(vp->v_ractx);
   1736 		vp->v_ractx = NULL;
   1737 	}
   1738 
    1739 	/* Remove from vnode cache. */
    1740 	if (vip->vi_key.vk_key_len > 0) {
   1741 		hash = vcache_hash(&vip->vi_key);
   1742 		mutex_enter(&vcache_lock);
   1743 		KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
   1744 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
   1745 		    vip, vnode_impl, vi_hash);
   1746 		mutex_exit(&vcache_lock);
   1747 	}
   1748 	if (temp_key != temp_buf)
   1749 		kmem_free(temp_key, temp_key_len);
   1750 
   1751 	/* Done with purge, notify sleepers of the grim news. */
   1752 	mutex_enter(vp->v_interlock);
   1753 	vp->v_op = dead_vnodeop_p;
   1754 	vp->v_vflag |= VV_LOCKSWORK;
   1755 	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
   1756 	vp->v_tag = VT_NON;
   1757 	KNOTE(&vp->v_klist, NOTE_REVOKE);
   1758 	mutex_exit(vp->v_interlock);
   1759 
   1760 	/*
   1761 	 * Move to dead mount.  Must be after changing the operations
   1762 	 * vector as vnode operations enter the mount before using the
   1763 	 * operations vector.  See sys/kern/vnode_if.c.
   1764 	 */
   1765 	vp->v_vflag &= ~VV_ROOT;
   1766 	vfs_ref(dead_rootmount);
   1767 	vfs_insmntque(vp, dead_rootmount);
   1768 
   1769 #ifdef PAX_SEGVGUARD
   1770 	pax_segvguard_cleanup(vp);
   1771 #endif /* PAX_SEGVGUARD */
   1772 
   1773 	mutex_enter(vp->v_interlock);
   1774 	fstrans_done(mp);
   1775 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1776 }
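
/*
 * A minimal sketch of how vcache_reclaim() is entered (the actual
 * callers appear earlier in this file): both the vnode lock and the
 * interlock are held going in, and per the contract above only the
 * interlock is still held on return.
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	mutex_enter(vp->v_interlock);
 *	vcache_reclaim(vp);
 *	mutex_exit(vp->v_interlock);
 */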
   1777 
   1778 /*
   1779  * Disassociate the underlying file system from an open device vnode
   1780  * and make it anonymous.
   1781  *
   1782  * Vnode unlocked on entry, drops a reference to the vnode.
   1783  */
   1784 void
   1785 vcache_make_anon(vnode_t *vp)
   1786 {
   1787 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
   1788 	uint32_t hash;
   1789 	bool recycle;
   1790 
   1791 	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
   1792 	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
   1793 	VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);
   1794 
   1795 	/* Remove from vnode cache. */
   1796 	hash = vcache_hash(&vip->vi_key);
   1797 	mutex_enter(&vcache_lock);
   1798 	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
   1799 	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
   1800 	    vip, vnode_impl, vi_hash);
   1801 	vip->vi_key.vk_mount = dead_rootmount;
   1802 	vip->vi_key.vk_key_len = 0;
   1803 	vip->vi_key.vk_key = NULL;
   1804 	mutex_exit(&vcache_lock);
   1805 
   1806 	/*
   1807 	 * Disassociate the underlying file system from the vnode.
   1808 	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
   1809 	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
   1810 	 * would no longer function.
   1811 	 */
   1812 	if (vn_lock(vp, LK_EXCLUSIVE)) {
   1813 		vnpanic(vp, "%s: cannot lock", __func__);
   1814 	}
   1815 	VOP_INACTIVE(vp, &recycle);
   1816 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
   1817 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
   1818 	if (VOP_RECLAIM(vp)) {
   1819 		vnpanic(vp, "%s: cannot reclaim", __func__);
   1820 	}
   1821 
   1822 	/* Purge name cache. */
   1823 	cache_purge(vp);
   1824 
   1825 	/* Done with purge, change operations vector. */
   1826 	mutex_enter(vp->v_interlock);
   1827 	vp->v_op = spec_vnodeop_p;
   1828 	vp->v_vflag |= VV_MPSAFE;
   1829 	vp->v_vflag &= ~VV_LOCKSWORK;
   1830 	mutex_exit(vp->v_interlock);
   1831 
   1832 	/*
   1833 	 * Move to dead mount.  Must be after changing the operations
   1834 	 * vector as vnode operations enter the mount before using the
   1835 	 * operations vector.  See sys/kern/vnode_if.c.
   1836 	 */
   1837 	vfs_ref(dead_rootmount);
   1838 	vfs_insmntque(vp, dead_rootmount);
   1839 
   1840 	vrele(vp);
   1841 }
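
/*
 * Hypothetical usage sketch for vcache_make_anon(), not taken from a
 * real caller: a referenced, open device vnode is detached from its
 * file system and left as an anonymous spec node.  The caller's
 * reference is consumed by the vrele() at the end of the function.
 *
 *	vref(devvp);
 *	vcache_make_anon(devvp);
 */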
   1842 
   1843 /*
    1844  * Update outstanding I/O count and wake waiters when it drops to zero.
   1845  */
   1846 void
   1847 vwakeup(struct buf *bp)
   1848 {
   1849 	vnode_t *vp;
   1850 
   1851 	if ((vp = bp->b_vp) == NULL)
   1852 		return;
   1853 
   1854 	KASSERT(bp->b_objlock == vp->v_interlock);
   1855 	KASSERT(mutex_owned(bp->b_objlock));
   1856 
   1857 	if (--vp->v_numoutput < 0)
   1858 		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
   1859 	if (vp->v_numoutput == 0)
   1860 		cv_broadcast(&vp->v_cv);
   1861 }
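
/*
 * Sketch of the assumed producer side of the v_numoutput protocol (not
 * part of this file): the count is raised under the vnode's interlock
 * before an async write is issued, and vwakeup() drops it again when
 * the buffer completes.
 *
 *	mutex_enter(vp->v_interlock);
 *	vp->v_numoutput++;
 *	mutex_exit(vp->v_interlock);
 *	VOP_STRATEGY(vp, bp);
 */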
   1862 
   1863 /*
   1864  * Test a vnode for being or becoming dead.  Returns one of:
   1865  * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
   1866  * ENOENT: vnode is dead.
   1867  * 0:      otherwise.
   1868  *
    1869  * Whenever this function returns a non-zero value, all future
   1870  * calls will also return a non-zero value.
   1871  */
   1872 int
   1873 vdead_check(struct vnode *vp, int flags)
   1874 {
   1875 
   1876 	KASSERT(mutex_owned(vp->v_interlock));
   1877 
   1878 	if (! ISSET(flags, VDEAD_NOWAIT))
   1879 		VSTATE_WAIT_STABLE(vp);
   1880 
   1881 	if (VSTATE_GET(vp) == VS_RECLAIMING) {
   1882 		KASSERT(ISSET(flags, VDEAD_NOWAIT));
   1883 		return EBUSY;
   1884 	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
   1885 		return ENOENT;
   1886 	}
   1887 
   1888 	return 0;
   1889 }
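
/*
 * A minimal usage sketch for vdead_check() (illustrative only): probe
 * for deadness without sleeping and hand the result back to the caller.
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		return error;	(EBUSY or ENOENT as described above)
 */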
   1890 
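/*
 * Try to shrink the vnode cache: wait for the vdrain thread to
 * complete two full generations of recycling, then fail with EBUSY
 * if the vnode count is still at or above the target.  On success,
 * resize the cache hash table if the target has changed.
 */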
   1891 int
   1892 vfs_drainvnodes(void)
   1893 {
   1894 	int i, gen;
   1895 
   1896 	mutex_enter(&vdrain_lock);
   1897 	for (i = 0; i < 2; i++) {
   1898 		gen = vdrain_gen;
   1899 		while (gen == vdrain_gen) {
   1900 			cv_broadcast(&vdrain_cv);
   1901 			cv_wait(&vdrain_gen_cv, &vdrain_lock);
   1902 		}
   1903 	}
   1904 	mutex_exit(&vdrain_lock);
   1905 
   1906 	if (numvnodes >= desiredvnodes)
   1907 		return EBUSY;
   1908 
   1909 	if (vcache_hashsize != desiredvnodes)
   1910 		vcache_reinit();
   1911 
   1912 	return 0;
   1913 }
   1914 
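/*
 * Panic with a vnode specific message; on DIAGNOSTIC kernels the
 * offending vnode is printed first.
 */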
   1915 void
   1916 vnpanic(vnode_t *vp, const char *fmt, ...)
   1917 {
   1918 	va_list ap;
   1919 
   1920 #ifdef DIAGNOSTIC
   1921 	vprint(NULL, vp);
   1922 #endif
   1923 	va_start(ap, fmt);
   1924 	vpanic(fmt, ap);
   1925 	va_end(ap);
   1926 }
   1927 
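/*
 * Make tvp share fvp's interlock: release tvp's own interlock and take
 * an extra hold on fvp's, so that both vnodes are then guarded by the
 * one mutex (useful e.g. where one vnode stacks above another).
 */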
   1928 void
   1929 vshareilock(vnode_t *tvp, vnode_t *fvp)
   1930 {
   1931 	kmutex_t *oldlock;
   1932 
   1933 	oldlock = tvp->v_interlock;
   1934 	mutex_obj_hold(fvp->v_interlock);
   1935 	tvp->v_interlock = fvp->v_interlock;
   1936 	mutex_obj_free(oldlock);
   1937 }
   1938