/*	$NetBSD: vfs_vnode.c,v 1.151 2023/11/22 13:19:50 riastradh Exp $	*/

/*-
 * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */
/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vcache_vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was another, traditional way.  Currently, only the draining thread
 *	recycles vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
 *	the file system that the vnode is inactive.  Via this call, the file
 *	system indicates whether the vnode can be recycled (usually, it
 *	checks its own references, e.g. the link count, or whether the file
 *	was removed).
 *
 *	Depending on that indication, the vnode can be put onto a free list
 *	(cache), or cleaned via vcache_reclaim, which calls VOP_RECLAIM(9)
 *	to disassociate the underlying file system from the vnode, and
 *	finally destroyed.
 *
 * Vnode state
 *
 *	A vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating with the underlying file system
 *			and is not yet ready to use.
 *	- LOADED	Vnode has associated with the underlying file system
 *			and is ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from the underlying file
 *			system and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> LOADED
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	BLOCKED -> RECLAIMING
 *			Vnode starts disassociation from the underlying file
 *			system in vcache_reclaim().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from the underlying file
 *			system in vcache_reclaim().
 *	LOADED -> BLOCKED
 *			Either vcache_rekey*() is changing the vnode key or
 *			vrelel() is about to call VOP_INACTIVE().
 *	BLOCKED -> LOADED
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system or vcache_rekey*()
 *			drops a vnode used as placeholder.
 *
 *	Of these states, LOADING, BLOCKED and RECLAIMING are intermediate,
 *	and it is possible to wait for a state change.
 *
 *	State is protected with v_interlock, with one exception:
 *	to change from LOADING both v_interlock and vcache_lock must be held,
 *	so it is possible to check "state == LOADING" while holding only
 *	vcache_lock.  See vcache_get() for details.
 *
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained using
 *	the vref(9), vrele(9) and vput(9) routines.  Common points holding
 *	references are e.g. open files, current working directories, mount
 *	points, etc.
 *
 *	v_usecount is adjusted with atomic operations; however, to change
 *	from a non-zero value to zero the interlock must also be held.
 */

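/*
 * Illustrative sketch (not part of this file): how a typical consumer
 * drives the life-cycle above.  example_vget() is hypothetical and
 * assumes a file system that keys its vnodes by inode number; it
 * obtains a referenced, LOADED vnode from the cache and later drops
 * the reference again.
 *
 *	static int
 *	example_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
 *	{
 *		int error;
 *
 *		// Look up or load the vnode; returned referenced.
 *		error = vcache_get(mp, &ino, sizeof(ino), vpp);
 *		if (error != 0)
 *			return error;
 *		// Most users lock before use; vput(9) then unlocks and
 *		// releases in one call, vrele(9) only releases.
 *		error = vn_lock(*vpp, LK_EXCLUSIVE);
 *		if (error != 0) {
 *			vrele(*vpp);
 *			return error;
 *		}
 *		vput(*vpp);
 *		return 0;
 *	}
 */
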
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.151 2023/11/22 13:19:50 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_pax.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pax.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode_impl.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <miscfs/deadfs/deadfs.h>
#include <miscfs/specfs/specdev.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_stat.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC	0x0001	/* Always defer to vrele thread. */

#define	LRU_VRELE	0
#define	LRU_FREE	1
#define	LRU_HOLD	2
#define	LRU_COUNT	3

/*
 * There are three lru lists: one holds vnodes waiting for async release,
 * one is for vnodes which have no buffer/page references and one for those
 * which do (i.e.  v_holdcnt is non-zero).  We put the lists into a single,
 * private cache line as vnodes migrate between them while under the same
 * lock (vdrain_lock).
 */
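/*
 * For illustration (hypothetical sequence, not code from this file):
 * an unreferenced vnode starts on lru_list[LRU_FREE]; the first
 * vhold(9) migrates it to lru_list[LRU_HOLD], the matching holdrele(9)
 * moves it back, and an asynchronous release of the last reference
 * queues it on lru_list[LRU_VRELE] for the vdrain thread:
 *
 *	vhold(vp);		// v_holdcnt 0 -> 1: FREE -> HOLD
 *	holdrele(vp);		// v_holdcnt 1 -> 0: HOLD -> FREE
 *	vrele_async(vp);	// last reference: queued on LRU_VRELE
 */
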
u_int			numvnodes		__cacheline_aligned;
static vnodelst_t	lru_list[LRU_COUNT]	__cacheline_aligned;
static kmutex_t		vdrain_lock		__cacheline_aligned;
static kcondvar_t	vdrain_cv;
static int		vdrain_gen;
static kcondvar_t	vdrain_gen_cv;
static bool		vdrain_retry;
static lwp_t *		vdrain_lwp;
SLIST_HEAD(hashhead, vnode_impl);
static kmutex_t		vcache_lock		__cacheline_aligned;
static kcondvar_t	vcache_cv;
static u_int		vcache_hashsize;
static u_long		vcache_hashmask;
static struct hashhead	*vcache_hashtab;
static pool_cache_t	vcache_pool;
static void		lru_requeue(vnode_t *, vnodelst_t *);
static vnodelst_t *	lru_which(vnode_t *);
static vnode_impl_t *	vcache_alloc(void);
static void		vcache_dealloc(vnode_impl_t *);
static void		vcache_free(vnode_impl_t *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vcache_reclaim(vnode_t *);
static void		vrelel(vnode_t *, int, int);
static void		vdrain_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */

/*
 * The high bit of v_usecount is a gate for vcache_tryvget().  It's set
 * only when the vnode state is LOADED.
 * The next bit of v_usecount is a flag for vrelel().  It's set
 * from vcache_vget() and vcache_tryvget() whenever the operation succeeds.
 */
#define	VUSECOUNT_MASK	0x3fffffff
#define	VUSECOUNT_GATE	0x80000000
#define	VUSECOUNT_VGET	0x40000000

/*
 * Return the current usecount of a vnode.
 */
inline int
vrefcnt(struct vnode *vp)
{

	return atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_MASK;
}
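
/*
 * Worked example (illustrative): a LOADED vnode holding three
 * references, at least one of them taken via vcache_tryvget(), has
 *
 *	v_usecount == VUSECOUNT_GATE | VUSECOUNT_VGET | 3 == 0xc0000003
 *
 * so vrefcnt() masks the two flag bits away and returns 3.
 */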

/* Vnode state operations and diagnostics. */

#if defined(DIAGNOSTIC)

#define VSTATE_VALID(state) \
	((state) != VS_ACTIVE && (state) != VS_MARKER)
#define VSTATE_GET(vp) \
	vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
	vstate_assert_wait_stable((vp), __func__, __LINE__)

void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	int refcnt = vrefcnt(vp);

	if (!has_lock) {
		enum vnode_state vstate = atomic_load_relaxed(&vip->vi_state);

		if (state == VS_ACTIVE && refcnt > 0 &&
		    (vstate == VS_LOADED || vstate == VS_BLOCKED))
			return;
		if (vstate == state)
			return;
		mutex_enter((vp)->v_interlock);
	}

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if ((state == VS_ACTIVE && refcnt > 0 &&
	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
	    vip->vi_state == state) {
		if (!has_lock)
			mutex_exit((vp)->v_interlock);
		return;
	}
	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
	    vstate_name(vip->vi_state), refcnt,
	    vstate_name(state), func, line);
}

static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	return vip->vi_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);
}

static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	bool gated = (atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_GATE);
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);

	if (! VSTATE_VALID(from))
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (! VSTATE_VALID(to))
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (vip->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(vip->vi_state), vstate_name(from), func, line);
	if ((from == VS_LOADED) != gated)
		vnpanic(vp, "state is %s, gate %d does not match at %s:%d\n",
		    vstate_name(vip->vi_state), gated, func, line);

	/* Open/close the gate for vcache_tryvget(). */
	if (to == VS_LOADED) {
		membar_release();
		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
	} else {
		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
	}

	atomic_store_relaxed(&vip->vi_state, to);
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
	(VNODE_TO_VIMPL((vp))->vi_state)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
	vstate_wait_stable((vp))
void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{

}

static void
vstate_wait_stable(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	/* Open/close the gate for vcache_tryvget(). */
	if (to == VS_LOADED) {
		membar_release();
		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
	} else {
		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
	}

	atomic_store_relaxed(&vip->vi_state, to);
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */

void
vfs_vnode_sysinit(void)
{
	int error __diagused, i;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag |= IMNT_MPSAFE;

	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
	for (i = 0; i < LRU_COUNT; i++) {
		TAILQ_INIT(&lru_list[i]);
	}
	vcache_init();

	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vdrain_gen_cv, "vdrainwt");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, &vdrain_lwp, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	memset(vip, 0, sizeof(*vip));
	vp = VIMPL_TO_VNODE(vip);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	klist_init(&vip->vi_klist.vk_klist);
	vp->v_klist = &vip->vi_klist;
	vip->vi_state = VS_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	vnode_impl_t *vip;

	vip = VNODE_TO_VIMPL(vp);
	KASSERT(vip->vi_state == VS_MARKER);
	mutex_obj_free(vp->v_interlock);
	uvm_obj_destroy(&vp->v_uobj, true);
	klist_fini(&vip->vi_klist.vk_klist);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
}

/*
 * Return the lru list this node should be on.
 */
static vnodelst_t *
lru_which(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt > 0)
		return &lru_list[LRU_HOLD];
	else
		return &lru_list[LRU_FREE];
}

/*
 * Put the vnode at the end of the given list.
 * Both the current and the new list may be NULL, used on vnode alloc/free.
 * Adjust numvnodes and signal the vdrain thread if there is work.
 */
static void
lru_requeue(vnode_t *vp, vnodelst_t *listhd)
{
	vnode_impl_t *vip;
	int d;

	/*
	 * If the vnode is on the correct list, and was put there recently,
	 * then leave it be, thus avoiding huge cache and lock contention.
	 */
	vip = VNODE_TO_VIMPL(vp);
	if (listhd == vip->vi_lrulisthd &&
	    (getticks() - vip->vi_lrulisttm) < hz) {
		return;
	}

	mutex_enter(&vdrain_lock);
	d = 0;
	if (vip->vi_lrulisthd != NULL)
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d++;
	vip->vi_lrulisthd = listhd;
	vip->vi_lrulisttm = getticks();
	if (vip->vi_lrulisthd != NULL)
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d--;
	if (d != 0) {
		/*
		 * Looks strange?  This is not a bug.  Don't store
		 * numvnodes unless there is a change - avoid false
		 * sharing on MP.
		 */
		numvnodes += d;
	}
	if ((d > 0 && numvnodes > desiredvnodes) ||
	    listhd == &lru_list[LRU_VRELE])
		cv_signal(&vdrain_cv);
	if (d > 0 && numvnodes > desiredvnodes + desiredvnodes / 16)
		kpause("vnfull", false, MAX(1, mstohz(10)), &vdrain_lock);
	mutex_exit(&vdrain_lock);
}

/*
 * Release deferred vrele vnodes for this mount.
 * Called with file system suspended.
 */
void
vrele_flush(struct mount *mp)
{
	vnode_impl_t *vip, *marker;
	vnode_t *vp;
	int when = 0;

	KASSERT(fstrans_is_owner(mp));

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);
	TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);

	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
		TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
		TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
		    vi_lrulist);
		vp = VIMPL_TO_VNODE(vip);
		if (vnis_marker(vp))
			continue;

		KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
		vip->vi_lrulisthd = &lru_list[LRU_HOLD];
		vip->vi_lrulisttm = getticks();
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
		mutex_exit(&vdrain_lock);

		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		mutex_enter(vp->v_interlock);
		vrelel(vp, 0, LK_EXCLUSIVE);

		if (getticks() > when) {
			yield();
			when = getticks() + hz / 10;
		}

		mutex_enter(&vdrain_lock);
	}

	TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
	mutex_exit(&vdrain_lock);

	vnfree_marker(VIMPL_TO_VNODE(marker));
}
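
/*
 * Illustrative caller (hypothetical, not from this file): vrele_flush()
 * requires the file system to be suspended, so a typical sequence
 * brackets it with vfs_suspend()/vfs_resume():
 *
 *	if (vfs_suspend(mp, 0) == 0) {
 *		vrele_flush(mp);	// drain deferred releases for mp
 *		vfs_resume(mp);
 *	}
 */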

/*
 * Reclaim a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_remove(vnode_t *vp)
{
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	/* Probe usecount (unlocked). */
	if (vrefcnt(vp) > 0)
		return;
	/* Try v_interlock -- we lock the wrong direction! */
	if (!mutex_tryenter(vp->v_interlock))
		return;
	/* Probe usecount and state. */
	if (vrefcnt(vp) > 0 || VSTATE_GET(vp) != VS_LOADED) {
		mutex_exit(vp->v_interlock);
		return;
	}
	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0) {
		mutex_exit(vp->v_interlock);
		return;
	}
	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	if (vcache_vget(vp) == 0) {
		if (!vrecycle(vp)) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			mutex_enter(vp->v_interlock);
			vrelel(vp, 0, LK_EXCLUSIVE);
		}
	}
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Release a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_vrele(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0)
		return;

	/*
	 * First remove the vnode from the vrele list.
	 * Put it on the last lru list; the last vrele()
	 * will put it back onto the right list before
	 * its usecount reaches zero.
	 */
	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	vip->vi_lrulisthd = &lru_list[LRU_HOLD];
	vip->vi_lrulisttm = getticks();
	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);

	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, LK_EXCLUSIVE);
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes
 * and release vnodes from asynchronous vrele.
 */
static void
vdrain_thread(void *cookie)
{
	int i;
	u_int target;
	vnode_impl_t *vip, *marker;

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);

	for (;;) {
		vdrain_retry = false;
		target = desiredvnodes - desiredvnodes / 16;

		for (i = 0; i < LRU_COUNT; i++) {
			TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
				TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
				TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
				    vi_lrulist);
				if (vnis_marker(VIMPL_TO_VNODE(vip)))
					continue;
				if (i == LRU_VRELE)
					vdrain_vrele(VIMPL_TO_VNODE(vip));
				else if (numvnodes < target)
					break;
				else
					vdrain_remove(VIMPL_TO_VNODE(vip));
			}
			TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
		}

		if (vdrain_retry) {
			kpause("vdrainrt", false, 1, &vdrain_lock);
		} else {
			vdrain_gen++;
			cv_broadcast(&vdrain_gen_cv);
			cv_wait(&vdrain_cv, &vdrain_lock);
		}
	}
}

/*
 * Try to drop a reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	membar_release();
	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
		if (__predict_false((use & VUSECOUNT_MASK) == 1)) {
			return false;
		}
		KASSERT((use & VUSECOUNT_MASK) > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{
	int lktype;

	/*
	 * Do an unlocked check of the usecount.  If it looks like we're not
	 * about to drop the last reference, then unlock the vnode and try
	 * to drop the reference.  If it ends up being the last reference
	 * after all, vrelel() can fix it all up.  Most of the time this
	 * will all go to plan.
	 */
	if (vrefcnt(vp) > 1) {
		VOP_UNLOCK(vp);
		if (vtryrele(vp)) {
			return;
		}
		lktype = LK_NONE;
	} else {
		lktype = VOP_ISLOCKED(vp);
		KASSERT(lktype != LK_NONE);
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, lktype);
}

/*
 * Vnode release.  If reference count drops to zero, call inactive
 * routine and either return to freelist or free to the pool.
 */
static void
vrelel(vnode_t *vp, int flags, int lktype)
{
	const bool async = ((flags & VRELEL_ASYNC) != 0);
	bool recycle, defer, objlock_held;
	u_int use, next;
	int error;

	objlock_held = false;

retry:
	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VS_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just unlock and drop the reference
	 * count.
	 *
	 * Otherwise make sure we pass a point in time where we hold the
	 * last reference with the VGET flag unset.
	 */
	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
		if (__predict_false((use & VUSECOUNT_MASK) > 1)) {
			if (objlock_held) {
				objlock_held = false;
				rw_exit(vp->v_uobj.vmobjlock);
			}
			if (lktype != LK_NONE) {
				mutex_exit(vp->v_interlock);
				lktype = LK_NONE;
				VOP_UNLOCK(vp);
				mutex_enter(vp->v_interlock);
			}
			if (vtryrele(vp)) {
				mutex_exit(vp->v_interlock);
				return;
			}
			next = atomic_load_relaxed(&vp->v_usecount);
			continue;
		}
		KASSERT((use & VUSECOUNT_MASK) == 1);
		next = use & ~VUSECOUNT_VGET;
		if (next != use) {
			next = atomic_cas_uint(&vp->v_usecount, use, next);
		}
		if (__predict_true(next == use)) {
			break;
		}
	}
	membar_acquire();
	if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * If already clean there is no need to lock, defer or
	 * deactivate this node.
	 */
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		if (objlock_held) {
			objlock_held = false;
			rw_exit(vp->v_uobj.vmobjlock);
		}
		if (lktype != LK_NONE) {
			mutex_exit(vp->v_interlock);
			lktype = LK_NONE;
			VOP_UNLOCK(vp);
			mutex_enter(vp->v_interlock);
		}
		goto out;
	}

	/*
	 * First try to get the vnode locked for VOP_INACTIVE().
	 * Defer the release to the vdrain thread if the caller requests
	 * it explicitly, is the pagedaemon, or if the lock attempt failed.
	 */
	defer = false;
	if ((curlwp == uvm.pagedaemon_lwp) || async) {
		defer = true;
	} else if (lktype == LK_SHARED) {
		/* Excellent chance of getting, if the last ref. */
		error = vn_lock(vp, LK_UPGRADE | LK_RETRY | LK_NOWAIT);
		if (error != 0) {
			defer = true;
		} else {
			lktype = LK_EXCLUSIVE;
		}
	} else if (lktype == LK_NONE) {
		/* Excellent chance of getting, if the last ref. */
		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
		if (error != 0) {
			defer = true;
		} else {
			lktype = LK_EXCLUSIVE;
		}
	}
	KASSERT(mutex_owned(vp->v_interlock));
	if (defer) {
		/*
		 * Defer reclaim to the kthread; it's not safe to
		 * clean it here.  We donate it our last reference.
		 */
		if (lktype != LK_NONE) {
			mutex_exit(vp->v_interlock);
			VOP_UNLOCK(vp);
			mutex_enter(vp->v_interlock);
		}
		lru_requeue(vp, &lru_list[LRU_VRELE]);
		mutex_exit(vp->v_interlock);
		return;
	}
	KASSERT(lktype == LK_EXCLUSIVE);

	/* If the node gained another reference, retry. */
	use = atomic_load_relaxed(&vp->v_usecount);
	if ((use & VUSECOUNT_VGET) != 0) {
		goto retry;
	}
	KASSERT((use & VUSECOUNT_MASK) == 1);

	if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP|VI_WRMAP)) != 0 ||
	    (vp->v_vflag & VV_MAPPED) != 0) {
		/* Take care of space accounting. */
		if (!objlock_held) {
			objlock_held = true;
			if (!rw_tryenter(vp->v_uobj.vmobjlock, RW_WRITER)) {
				mutex_exit(vp->v_interlock);
				rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
				mutex_enter(vp->v_interlock);
				goto retry;
			}
		}
		if ((vp->v_iflag & VI_EXECMAP) != 0) {
			cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;
	}
	if (objlock_held) {
		objlock_held = false;
		rw_exit(vp->v_uobj.vmobjlock);
	}

	/*
	 * Deactivate the vnode, but preserve our reference across
	 * the call to VOP_INACTIVE().
	 *
	 * If VOP_INACTIVE() indicates that the file has been
	 * deleted, then recycle the vnode.
	 *
	 * Note that VOP_INACTIVE() will not drop the vnode lock.
	 */
	mutex_exit(vp->v_interlock);
	recycle = false;
	VOP_INACTIVE(vp, &recycle);
	if (!recycle) {
		lktype = LK_NONE;
		VOP_UNLOCK(vp);
	}
	mutex_enter(vp->v_interlock);

	/*
	 * Block new references then check again to see if a
	 * new reference was acquired in the meantime.  If
	 * it was, restore the vnode state and try again.
	 */
	if (recycle) {
		VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
		use = atomic_load_relaxed(&vp->v_usecount);
		if ((use & VUSECOUNT_VGET) != 0) {
			VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
			goto retry;
		}
		KASSERT((use & VUSECOUNT_MASK) == 1);
	}

	/*
	 * Recycle the vnode if the file is now unused (unlinked).
	 */
	if (recycle) {
		VSTATE_ASSERT(vp, VS_BLOCKED);
		KASSERT(lktype == LK_EXCLUSIVE);
		/* vcache_reclaim drops the lock. */
		lktype = LK_NONE;
		vcache_reclaim(vp);
	}
	KASSERT(vrefcnt(vp) > 0);
	KASSERT(lktype == LK_NONE);

out:
	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
		if (__predict_false((use & VUSECOUNT_VGET) != 0 &&
		    (use & VUSECOUNT_MASK) == 1)) {
			/* Gained and released another reference, retry. */
			goto retry;
		}
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			if (__predict_false((use & VUSECOUNT_MASK) != 1)) {
				/* Gained another reference. */
				mutex_exit(vp->v_interlock);
				return;
			}
			break;
		}
	}
	membar_acquire();

	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		vcache_free(VNODE_TO_VIMPL(vp));
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		lru_requeue(vp, lru_which(vp));
		mutex_exit(vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, LK_NONE);
}

/*
 * Asynchronous vnode release: the vnode is released in a different
 * context.
 */
void
vrele_async(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC, LK_NONE);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 *
 * NB: lockless code sequences may rely on this not blocking.
 */
void
vref(vnode_t *vp)
{

	KASSERT(vrefcnt(vp) > 0);

	atomic_inc_uint(&vp->v_usecount);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt++ == 0 && vrefcnt(vp) == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vnode_t *vp)
{

	mutex_enter(vp->v_interlock);
	vholdl(vp);
	mutex_exit(vp->v_interlock);
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vnode_t *vp)
{

	mutex_enter(vp->v_interlock);
	holdrelel(vp);
	mutex_exit(vp->v_interlock);
}

/*
 * Recycle an unused vnode if the caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{
	int error __diagused;

	mutex_enter(vp->v_interlock);

	/* If the vnode is already clean we're done. */
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) != VS_LOADED) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrelel(vp, 0, LK_NONE);
		return true;
	}

	/* Prevent further references until the vnode is locked. */
	VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);

	/* Make sure we hold the last reference. */
	if (vrefcnt(vp) != 1) {
		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
		mutex_exit(vp->v_interlock);
		return false;
	}

	mutex_exit(vp->v_interlock);

	/*
	 * On a leaf file system this lock will always succeed as we hold
	 * the last reference and prevent further references.
	 * On layered file systems waiting for the lock would open a can of
	 * deadlocks as the lower vnodes may have other active references.
	 */
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);

	mutex_enter(vp->v_interlock);
	if (error) {
		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
		mutex_exit(vp->v_interlock);
		return false;
	}

	KASSERT(vrefcnt(vp) == 1);
	vcache_reclaim(vp);
	vrelel(vp, 0, LK_NONE);

	return true;
}
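
/*
 * Illustrative caller (hypothetical): a file system that holds the
 * last reference to a vnode it knows to be unused can try to recycle
 * it eagerly; on failure the reference is simply dropped, similar to
 * what vdrain_remove() above does:
 *
 *	if (!vrecycle(vp))	// on success vrecycle() consumed the ref
 *		vrele(vp);
 */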

/*
 * Helper for vrevoke() to propagate suspension from lastmp
 * to thismp.  Both args may be NULL.
 * Returns the currently suspended file system or NULL.
 */
static struct mount *
vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
{
	int error;

	if (lastmp == thismp)
		return thismp;

	if (lastmp != NULL)
		vfs_resume(lastmp);

	if (thismp == NULL)
		return NULL;

	do {
		error = vfs_suspend(thismp, 0);
	} while (error == EINTR || error == ERESTART);

	if (error == 0)
		return thismp;

	KASSERT(error == EOPNOTSUPP || error == ENOENT);
	return NULL;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	struct mount *mp;
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vrefcnt(vp) > 0);

	mp = vrevoke_suspend_next(NULL, vp->v_mount);

	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		mutex_exit(vp->v_interlock);
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		mutex_exit(vp->v_interlock);
		vgone(vp);
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);

		while (spec_node_lookup_by_dev(type, dev, VDEAD_NOWAIT, &vq)
		    == 0) {
			mp = vrevoke_suspend_next(mp, vq->v_mount);
			vgone(vq);
		}
	}
	vrevoke_suspend_next(mp, NULL);
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{
	int lktype;

	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	lktype = LK_EXCLUSIVE;
	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_LOADED) {
		VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
		vcache_reclaim(vp);
		lktype = LK_NONE;
	}
	VSTATE_ASSERT(vp, VS_RECLAIMED);
	vrelel(vp, 0, lktype);
}

static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
	uint32_t hash = HASH32_BUF_INIT;

	KASSERT(key->vk_key_len > 0);

	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
	return hash;
}

static int
vcache_stats(struct hashstat_sysctl *hs, bool fill)
{
	vnode_impl_t *vip;
	uint64_t chain;

	strlcpy(hs->hash_name, "vcache", sizeof(hs->hash_name));
	strlcpy(hs->hash_desc, "vnode cache hash", sizeof(hs->hash_desc));
	if (!fill)
		return 0;

	hs->hash_size = vcache_hashmask + 1;

	for (size_t i = 0; i < hs->hash_size; i++) {
		chain = 0;
		mutex_enter(&vcache_lock);
		SLIST_FOREACH(vip, &vcache_hashtab[i], vi_hash) {
			chain++;
		}
		mutex_exit(&vcache_lock);
		if (chain > 0) {
			hs->hash_used++;
			hs->hash_items += chain;
			if (chain > hs->hash_maxchain)
				hs->hash_maxchain = chain;
		}
		preempt_point();
	}

	return 0;
}

static void
vcache_init(void)
{

	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
	    0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vcache_pool != NULL);
	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vcache_cv, "vcache");
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
	    &vcache_hashmask);
	hashstat_register("vcache", vcache_stats);
}

static void
vcache_reinit(void)
{
	int i;
	uint32_t hash;
	u_long oldmask, newmask;
	struct hashhead *oldtab, *newtab;
	vnode_impl_t *vip;

	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
	mutex_enter(&vcache_lock);
	oldtab = vcache_hashtab;
	oldmask = vcache_hashmask;
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = newtab;
	vcache_hashmask = newmask;
	for (i = 0; i <= oldmask; i++) {
		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
			hash = vcache_hash(&vip->vi_key);
			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
			    vip, vi_hash);
		}
	}
	mutex_exit(&vcache_lock);
	hashdone(oldtab, HASH_SLIST, oldmask);
}

static inline vnode_impl_t *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
	struct hashhead *hashp;
	vnode_impl_t *vip;

	KASSERT(mutex_owned(&vcache_lock));

	hashp = &vcache_hashtab[hash & vcache_hashmask];
	SLIST_FOREACH(vip, hashp, vi_hash) {
		if (key->vk_mount != vip->vi_key.vk_mount)
			continue;
		if (key->vk_key_len != vip->vi_key.vk_key_len)
			continue;
		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
			continue;
		return vip;
	}
	return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static vnode_impl_t *
vcache_alloc(void)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	vp = VIMPL_TO_VNODE(vip);
	memset(vip, 0, sizeof(*vip));

	rw_init(&vip->vi_lock);
	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
	klist_init(&vip->vi_klist.vk_klist);
	vp->v_klist = &vip->vi_klist;
	cv_init(&vp->v_cv, "vnode");
	cache_vnode_init(vp);

	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	vip->vi_state = VS_LOADING;

	lru_requeue(vp, &lru_list[LRU_FREE]);

	return vip;
}

/*
 * Deallocate a vcache node in state VS_LOADING.
 *
 * vcache_lock held on entry and released on return.
 */
static void
vcache_dealloc(vnode_impl_t *vip)
{
	vnode_t *vp;

	KASSERT(mutex_owned(&vcache_lock));

	vp = VIMPL_TO_VNODE(vip);
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
	mutex_exit(&vcache_lock);
	vrelel(vp, 0, LK_NONE);
}

/*
 * Free an unused, unreferenced vcache node.
 * v_interlock locked on entry.
 */
static void
vcache_free(vnode_impl_t *vip)
{
	vnode_t *vp;

	vp = VIMPL_TO_VNODE(vip);
	KASSERT(mutex_owned(vp->v_interlock));

	KASSERT(vrefcnt(vp) == 0);
	KASSERT(vp->v_holdcnt == 0);
	KASSERT(vp->v_writecount == 0);
	lru_requeue(vp, NULL);
	mutex_exit(vp->v_interlock);

	vfs_insmntque(vp, NULL);
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		spec_node_destroy(vp);

	mutex_obj_free(vp->v_interlock);
	rw_destroy(&vip->vi_lock);
	uvm_obj_destroy(&vp->v_uobj, true);
	KASSERT(vp->v_klist == &vip->vi_klist);
	klist_fini(&vip->vi_klist.vk_klist);
	cv_destroy(&vp->v_cv);
	cache_vnode_fini(vp);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success or EBUSY if the vnode state is not LOADED.
 *
 * NB: lockless code sequences may rely on this not blocking.
 */
int
vcache_tryvget(vnode_t *vp)
{
	u_int use, next;

	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
		if (__predict_false((use & VUSECOUNT_GATE) == 0)) {
			return EBUSY;
		}
		next = atomic_cas_uint(&vp->v_usecount,
		    use, (use + 1) | VUSECOUNT_VGET);
		if (__predict_true(next == use)) {
			membar_acquire();
			return 0;
		}
	}
}
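
/*
 * Illustrative usage (hypothetical): lockless lookups first try this
 * cheap, non-blocking gate and only fall back to the blocking
 * vcache_vget() under v_interlock when the gate is closed:
 *
 *	if (vcache_tryvget(vp) != 0) {
 *		mutex_enter(vp->v_interlock);
 *		error = vcache_vget(vp);  // may sleep; ENOENT if reclaimed
 *	}
 */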

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success or ENOENT if the vnode has been reclaimed.
 * Will wait for the vnode state to be stable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_vget(vnode_t *vp)
{
	int error;

	KASSERT(mutex_owned(vp->v_interlock));

	/* Increment hold count to prevent vnode from disappearing. */
	vp->v_holdcnt++;
	VSTATE_WAIT_STABLE(vp);
	vp->v_holdcnt--;

	/* If this was the last reference to a reclaimed vnode free it now. */
	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
		if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
			vcache_free(VNODE_TO_VIMPL(vp));
		else
			mutex_exit(vp->v_interlock);
		return ENOENT;
	}
	VSTATE_ASSERT(vp, VS_LOADED);
	error = vcache_tryvget(vp);
	KASSERT(error == 0);
	mutex_exit(vp->v_interlock);

	return 0;
}

/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
	int error;
	uint32_t hash;
	const void *new_key;
	struct vnode *vp;
	struct vcache_key vcache_key;
	vnode_impl_t *vip, *new_vip;

	new_key = NULL;
	*vpp = NULL;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

again:
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);

	/* If found, take a reference or retry. */
	if (__predict_true(vip != NULL)) {
		/*
		 * If the vnode is loading we cannot take the v_interlock
		 * here as it might change during load (see uvm_obj_setlock()).
		 * As changing state from VS_LOADING requires both vcache_lock
		 * and v_interlock it is safe to test with vcache_lock held.
		 *
		 * Wait for vnodes changing state from VS_LOADING and retry.
		 */
		if (__predict_false(vip->vi_state == VS_LOADING)) {
			cv_wait(&vcache_cv, &vcache_lock);
			mutex_exit(&vcache_lock);
			goto again;
		}
		vp = VIMPL_TO_VNODE(vip);
		mutex_enter(vp->v_interlock);
		mutex_exit(&vcache_lock);
		error = vcache_vget(vp);
		if (error == ENOENT)
			goto again;
		if (error == 0)
			*vpp = vp;
		KASSERT((error != 0) == (*vpp == NULL));
		return error;
	}
	mutex_exit(&vcache_lock);

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp);
	if (error)
		return error;
	new_vip = vcache_alloc();
	new_vip->vi_key = vcache_key;
	vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);
	if (vip == NULL) {
		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vi_hash);
		vip = new_vip;
	}

	/* If another thread beat us inserting this node, retry. */
	if (vip != new_vip) {
		vcache_dealloc(new_vip);
		vfs_unbusy(mp);
		goto again;
	}
	mutex_exit(&vcache_lock);

	/* Load the fs node.  Exclusive as new_vip is VS_LOADING. */
   1588 	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
   1589 	if (error) {
   1590 		mutex_enter(&vcache_lock);
   1591 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
   1592 		    new_vip, vnode_impl, vi_hash);
   1593 		vcache_dealloc(new_vip);
   1594 		vfs_unbusy(mp);
   1595 		KASSERT(*vpp == NULL);
   1596 		return error;
   1597 	}
   1598 	KASSERT(new_key != NULL);
   1599 	KASSERT(memcmp(key, new_key, key_len) == 0);
   1600 	KASSERT(vp->v_op != NULL);
   1601 	vfs_insmntque(vp, mp);
   1602 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
   1603 		vp->v_vflag |= VV_MPSAFE;
   1604 	vfs_ref(mp);
   1605 	vfs_unbusy(mp);
   1606 
   1607 	/* Finished loading, finalize node. */
   1608 	mutex_enter(&vcache_lock);
   1609 	new_vip->vi_key.vk_key = new_key;
   1610 	mutex_enter(vp->v_interlock);
   1611 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
   1612 	mutex_exit(vp->v_interlock);
   1613 	mutex_exit(&vcache_lock);
   1614 	*vpp = vp;
   1615 	return 0;
   1616 }
   1617 
   1618 /*
   1619  * Create a new vnode / fs node pair and return it referenced through vpp.
   1620  */
   1621 int
   1622 vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
   1623     kauth_cred_t cred, void *extra, struct vnode **vpp)
   1624 {
   1625 	int error;
   1626 	uint32_t hash;
   1627 	struct vnode *vp, *ovp;
   1628 	vnode_impl_t *vip, *ovip;
   1629 
   1630 	*vpp = NULL;
   1631 
   1632 	/* Allocate and initialize a new vcache / vnode pair. */
   1633 	error = vfs_busy(mp);
   1634 	if (error)
   1635 		return error;
   1636 	vip = vcache_alloc();
   1637 	vip->vi_key.vk_mount = mp;
   1638 	vp = VIMPL_TO_VNODE(vip);
   1639 
   1640 	/* Create and load the fs node. */
   1641 	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
   1642 	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
   1643 	if (error) {
   1644 		mutex_enter(&vcache_lock);
   1645 		vcache_dealloc(vip);
   1646 		vfs_unbusy(mp);
   1647 		KASSERT(*vpp == NULL);
   1648 		return error;
   1649 	}
   1650 	KASSERT(vp->v_op != NULL);
   1651 	KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
   1652 	if (vip->vi_key.vk_key_len > 0) {
   1653 		KASSERT(vip->vi_key.vk_key != NULL);
   1654 		hash = vcache_hash(&vip->vi_key);
   1655 
   1656 		/*
   1657 		 * Wait for previous instance to be reclaimed,
   1658 		 * then insert new node.
   1659 		 */
   1660 		mutex_enter(&vcache_lock);
   1661 		while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
   1662 			ovp = VIMPL_TO_VNODE(ovip);
   1663 			mutex_enter(ovp->v_interlock);
   1664 			mutex_exit(&vcache_lock);
   1665 			error = vcache_vget(ovp);
   1666 			KASSERT(error == ENOENT);
   1667 			mutex_enter(&vcache_lock);
   1668 		}
   1669 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
   1670 		    vip, vi_hash);
   1671 		mutex_exit(&vcache_lock);
   1672 	}
   1673 	vfs_insmntque(vp, mp);
   1674 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
   1675 		vp->v_vflag |= VV_MPSAFE;
   1676 	vfs_ref(mp);
   1677 	vfs_unbusy(mp);
   1678 
   1679 	/* Finished loading, finalize node. */
   1680 	mutex_enter(&vcache_lock);
   1681 	mutex_enter(vp->v_interlock);
   1682 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
   1683 	mutex_exit(vp->v_interlock);
   1684 	mutex_exit(&vcache_lock);
   1685 	*vpp = vp;
   1686 	return 0;
   1687 }
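
        /*
         * Illustrative sketch, not part of this file: a file system's
         * create path would typically obtain the new vnode as
         *
         *	error = vcache_new(dvp->v_mount, dvp, vap, cred, NULL, &vp);
         *
         * letting its VFS_NEWVNODE() allocate the fs node and derive the
         * cache key.  As with vcache_get() the result is returned
         * referenced and unlocked.
         */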
   1688 
   1689 /*
   1690  * Prepare key change: update the old cache node's key and lock the new node.
   1691  * Return an error if the new node already exists.
   1692  */
   1693 int
   1694 vcache_rekey_enter(struct mount *mp, struct vnode *vp,
   1695     const void *old_key, size_t old_key_len,
   1696     const void *new_key, size_t new_key_len)
   1697 {
   1698 	uint32_t old_hash, new_hash;
   1699 	struct vcache_key old_vcache_key, new_vcache_key;
   1700 	vnode_impl_t *vip, *new_vip;
   1701 
   1702 	old_vcache_key.vk_mount = mp;
   1703 	old_vcache_key.vk_key = old_key;
   1704 	old_vcache_key.vk_key_len = old_key_len;
   1705 	old_hash = vcache_hash(&old_vcache_key);
   1706 
   1707 	new_vcache_key.vk_mount = mp;
   1708 	new_vcache_key.vk_key = new_key;
   1709 	new_vcache_key.vk_key_len = new_key_len;
   1710 	new_hash = vcache_hash(&new_vcache_key);
   1711 
   1712 	new_vip = vcache_alloc();
   1713 	new_vip->vi_key = new_vcache_key;
   1714 
   1715 	/* Insert locked new node used as placeholder. */
   1716 	mutex_enter(&vcache_lock);
   1717 	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
   1718 	if (vip != NULL) {
   1719 		vcache_dealloc(new_vip);
   1720 		return EEXIST;
   1721 	}
   1722 	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
   1723 	    new_vip, vi_hash);
   1724 
   1725 	/* Replace the old node's key with the temporary copy. */
   1726 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
   1727 	KASSERT(vip != NULL);
   1728 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
   1729 	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
   1730 	vip->vi_key = old_vcache_key;
   1731 	mutex_exit(&vcache_lock);
   1732 	return 0;
   1733 }
   1734 
   1735 /*
   1736  * Key change complete: update old node and remove placeholder.
   1737  */
   1738 void
   1739 vcache_rekey_exit(struct mount *mp, struct vnode *vp,
   1740     const void *old_key, size_t old_key_len,
   1741     const void *new_key, size_t new_key_len)
   1742 {
   1743 	uint32_t old_hash, new_hash;
   1744 	struct vcache_key old_vcache_key, new_vcache_key;
   1745 	vnode_impl_t *vip, *new_vip;
   1746 	struct vnode *new_vp;
   1747 
   1748 	old_vcache_key.vk_mount = mp;
   1749 	old_vcache_key.vk_key = old_key;
   1750 	old_vcache_key.vk_key_len = old_key_len;
   1751 	old_hash = vcache_hash(&old_vcache_key);
   1752 
   1753 	new_vcache_key.vk_mount = mp;
   1754 	new_vcache_key.vk_key = new_key;
   1755 	new_vcache_key.vk_key_len = new_key_len;
   1756 	new_hash = vcache_hash(&new_vcache_key);
   1757 
   1758 	mutex_enter(&vcache_lock);
   1759 
   1760 	/* Look up the old and new nodes. */
   1761 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
   1762 	KASSERT(vip != NULL);
   1763 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
   1764 
   1765 	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
   1766 	KASSERT(new_vip != NULL);
   1767 	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
   1768 	new_vp = VIMPL_TO_VNODE(new_vip);
   1769 	mutex_enter(new_vp->v_interlock);
   1770 	VSTATE_ASSERT(new_vp, VS_LOADING);
   1771 	mutex_exit(new_vp->v_interlock);
   1772 
   1773 	/* Rekey old node and put it onto its new hashlist. */
   1774 	vip->vi_key = new_vcache_key;
   1775 	if (old_hash != new_hash) {
   1776 		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
   1777 		    vip, vnode_impl, vi_hash);
   1778 		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
   1779 		    vip, vi_hash);
   1780 	}
   1781 
   1782 	/* Remove new node used as placeholder. */
   1783 	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
   1784 	    new_vip, vnode_impl, vi_hash);
   1785 	vcache_dealloc(new_vip);
   1786 }
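
        /*
         * Illustrative sketch, not part of this file: a file system whose
         * cache key encodes a node's location (msdosfs, for example, keys
         * on the directory entry position) brackets an identity change as
         *
         *	error = vcache_rekey_enter(mp, vp, &old_loc, sizeof(old_loc),
         *	    &new_loc, sizeof(new_loc));
         *	if (error)
         *		return error;
         *	... move the node to its new location ...
         *	vcache_rekey_exit(mp, vp, &old_loc, sizeof(old_loc),
         *	    &new_loc, sizeof(new_loc));
         *
         * so that the node is findable under exactly one key at any time.
         * "old_loc" and "new_loc" are placeholder identifiers.
         */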
   1787 
   1788 /*
   1789  * Disassociate the underlying file system from a vnode.
   1790  *
   1791  * Must be called with vnode locked and will return unlocked.
   1792  * Must be called with the interlock held, and will return with it held.
   1793  */
   1794 static void
   1795 vcache_reclaim(vnode_t *vp)
   1796 {
   1797 	lwp_t *l = curlwp;
   1798 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
   1799 	struct mount *mp = vp->v_mount;
   1800 	uint32_t hash;
   1801 	uint8_t temp_buf[64], *temp_key;
   1802 	size_t temp_key_len;
   1803 	bool recycle;
   1804 	int error;
   1805 
   1806 	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
   1807 	KASSERT(mutex_owned(vp->v_interlock));
   1808 	KASSERT(vrefcnt(vp) != 0);
   1809 
   1810 	temp_key_len = vip->vi_key.vk_key_len;
   1811 	/*
   1812 	 * Prevent the vnode from being recycled or brought into use
   1813 	 * while we clean it out.
   1814 	 */
   1815 	VSTATE_CHANGE(vp, VS_BLOCKED, VS_RECLAIMING);
   1816 
   1817 	/*
   1818 	 * Send NOTE_REVOKE now, before we call VOP_RECLAIM(),
   1819 	 * because VOP_RECLAIM() could cause vp->v_klist to
   1820 	 * become invalid.  Don't check for interest in NOTE_REVOKE
   1821 	 * here; it's always posted because it sets EV_EOF.
   1822 	 *
   1823 	 * Once it's been posted, reset vp->v_klist to point to
   1824 	 * our own local storage, in case we were sharing with
   1825 	 * someone else.
   1826 	 */
   1827 	KNOTE(&vp->v_klist->vk_klist, NOTE_REVOKE);
   1828 	vp->v_klist = &vip->vi_klist;
   1829 	mutex_exit(vp->v_interlock);
   1830 
   1831 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
   1832 	mutex_enter(vp->v_interlock);
   1833 	if ((vp->v_iflag & VI_EXECMAP) != 0) {
   1834 		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
   1835 	}
   1836 	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
   1837 	vp->v_iflag |= VI_DEADCHECK; /* for genfs_getpages() */
   1838 	mutex_exit(vp->v_interlock);
   1839 	rw_exit(vp->v_uobj.vmobjlock);
   1840 
   1841 	/*
   1842 	 * With the vnode state set to VS_RECLAIMING, purge the name cache
   1843 	 * at once to prevent new handles on the vnode; existing threads
   1844 	 * trying to get a handle will see VS_RECLAIMED and abort.
   1845 	 */
   1846 	cache_purge(vp);
   1847 
   1848 	/* Replace the vnode key with a temporary copy. */
   1849 	if (temp_key_len > sizeof(temp_buf)) {
   1850 		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
   1851 	} else {
   1852 		temp_key = temp_buf;
   1853 	}
   1854 	if (vip->vi_key.vk_key_len > 0) {
   1855 		mutex_enter(&vcache_lock);
   1856 		memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
   1857 		vip->vi_key.vk_key = temp_key;
   1858 		mutex_exit(&vcache_lock);
   1859 	}
   1860 
   1861 	fstrans_start(mp);
   1862 
   1863 	/*
   1864 	 * Clean out any cached data associated with the vnode.
   1865 	 */
   1866 	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
   1867 	if (error != 0) {
   1868 		if (wapbl_vphaswapbl(vp))
   1869 			WAPBL_DISCARD(wapbl_vptomp(vp));
   1870 		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
   1871 	}
   1872 	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
   1873 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1874 	if (vp->v_type == VBLK || vp->v_type == VCHR) {
   1875 		spec_node_revoke(vp);
   1876 	}
   1877 
   1878 	/*
   1879 	 * Disassociate the underlying file system from the vnode.
   1880 	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
   1881 	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
   1882 	 * would no longer function.
   1883 	 */
   1884 	VOP_INACTIVE(vp, &recycle);
   1885 	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
   1886 	if (VOP_RECLAIM(vp)) {
   1887 		vnpanic(vp, "%s: cannot reclaim", __func__);
   1888 	}
   1889 
   1890 	KASSERT(vp->v_data == NULL);
   1891 	KASSERT((vp->v_iflag & VI_PAGES) == 0);
   1892 
   1893 	if (vp->v_type == VREG && vp->v_ractx != NULL) {
   1894 		uvm_ra_freectx(vp->v_ractx);
   1895 		vp->v_ractx = NULL;
   1896 	}
   1897 
   1898 	/* Remove from vnode cache. */
   1899 	if (vip->vi_key.vk_key_len > 0) {
   1900 		hash = vcache_hash(&vip->vi_key);
   1901 		mutex_enter(&vcache_lock);
   1902 		KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
   1903 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
   1904 		    vip, vnode_impl, vi_hash);
   1905 		mutex_exit(&vcache_lock);
   1906 	}
   1907 	if (temp_key != temp_buf)
   1908 		kmem_free(temp_key, temp_key_len);
   1909 
   1910 	/* Done with purge, notify sleepers of the grim news. */
   1911 	mutex_enter(vp->v_interlock);
   1912 	vp->v_op = dead_vnodeop_p;
   1913 	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
   1914 	vp->v_tag = VT_NON;
   1915 	mutex_exit(vp->v_interlock);
   1916 
   1917 	/*
   1918 	 * Move to dead mount.  Must be after changing the operations
   1919 	 * vector as vnode operations enter the mount before using the
   1920 	 * operations vector.  See sys/kern/vnode_if.c.
   1921 	 */
   1922 	vp->v_vflag &= ~VV_ROOT;
   1923 	vfs_ref(dead_rootmount);
   1924 	vfs_insmntque(vp, dead_rootmount);
   1925 
   1926 #ifdef PAX_SEGVGUARD
   1927 	pax_segvguard_cleanup(vp);
   1928 #endif /* PAX_SEGVGUARD */
   1929 
   1930 	mutex_enter(vp->v_interlock);
   1931 	fstrans_done(mp);
   1932 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1933 }
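
        /*
         * For reference, the state sequence driven above is
         *
         *	VS_BLOCKED -> VS_RECLAIMING -> VS_RECLAIMED
         *
         * with the caller expected to have moved the node from VS_LOADED
         * to VS_BLOCKED beforehand.  Once reclaimed, the vnode stays on
         * dead_rootmount with dead_vnodeop_p until its last reference is
         * dropped.
         */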
   1934 
   1935 /*
   1936  * Disassociate the underlying file system from an open device vnode
   1937  * and make it anonymous.
   1938  *
   1939  * Vnode unlocked on entry, drops a reference to the vnode.
   1940  */
   1941 void
   1942 vcache_make_anon(vnode_t *vp)
   1943 {
   1944 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
   1945 	uint32_t hash;
   1946 	bool recycle;
   1947 
   1948 	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
   1949 	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
   1950 	VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);
   1951 
   1952 	/* Remove from vnode cache. */
   1953 	hash = vcache_hash(&vip->vi_key);
   1954 	mutex_enter(&vcache_lock);
   1955 	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
   1956 	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
   1957 	    vip, vnode_impl, vi_hash);
   1958 	vip->vi_key.vk_mount = dead_rootmount;
   1959 	vip->vi_key.vk_key_len = 0;
   1960 	vip->vi_key.vk_key = NULL;
   1961 	mutex_exit(&vcache_lock);
   1962 
   1963 	/*
   1964 	 * Disassociate the underlying file system from the vnode.
   1965 	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
   1966 	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
   1967 	 * would no longer function.
   1968 	 */
   1969 	if (vn_lock(vp, LK_EXCLUSIVE)) {
   1970 		vnpanic(vp, "%s: cannot lock", __func__);
   1971 	}
   1972 	VOP_INACTIVE(vp, &recycle);
   1973 	KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
   1974 	if (VOP_RECLAIM(vp)) {
   1975 		vnpanic(vp, "%s: cannot reclaim", __func__);
   1976 	}
   1977 
   1978 	/* Purge name cache. */
   1979 	cache_purge(vp);
   1980 
   1981 	/* Done with purge, change operations vector. */
   1982 	mutex_enter(vp->v_interlock);
   1983 	vp->v_op = spec_vnodeop_p;
   1984 	vp->v_vflag |= VV_MPSAFE;
   1985 	mutex_exit(vp->v_interlock);
   1986 
   1987 	/*
   1988 	 * Move to dead mount.  Must be after changing the operations
   1989 	 * vector as vnode operations enter the mount before using the
   1990 	 * operations vector.  See sys/kern/vnode_if.c.
   1991 	 */
   1992 	vfs_ref(dead_rootmount);
   1993 	vfs_insmntque(vp, dead_rootmount);
   1994 
   1995 	vrele(vp);
   1996 }
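
        /*
         * Illustrative note: after vcache_make_anon() the vnode is
         * equivalent to an anonymous device vnode as created by, e.g.,
         * bdevvp(): it lives on dead_rootmount, carries no cache key and
         * uses spec_vnodeop_p.
         */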
   1997 
   1998 /*
   1999  * Update outstanding I/O count and do wakeup if requested.
   2000  */
   2001 void
   2002 vwakeup(struct buf *bp)
   2003 {
   2004 	vnode_t *vp;
   2005 
   2006 	if ((vp = bp->b_vp) == NULL)
   2007 		return;
   2008 
   2009 	KASSERT(bp->b_objlock == vp->v_interlock);
   2010 	KASSERT(mutex_owned(bp->b_objlock));
   2011 
   2012 	if (--vp->v_numoutput < 0)
   2013 		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
   2014 	if (vp->v_numoutput == 0)
   2015 		cv_broadcast(&vp->v_cv);
   2016 }
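
        /*
         * Illustrative sketch, not part of this file: the counter that
         * vwakeup() decrements is raised under the same lock when a write
         * is issued, typically as
         *
         *	mutex_enter(vp->v_interlock);
         *	vp->v_numoutput++;
         *	mutex_exit(vp->v_interlock);
         *
         * with the completion side reaching vwakeup() through biodone().
         */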
   2017 
   2018 /*
   2019  * Test a vnode for being or becoming dead.  Returns one of:
   2020  * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
   2021  * ENOENT: vnode is dead.
   2022  * 0:      otherwise.
   2023  *
   2024  * Whenever this function returns a non-zero value, all future
   2025  * calls will also return a non-zero value.
   2026  */
   2027 int
   2028 vdead_check(struct vnode *vp, int flags)
   2029 {
   2030 
   2031 	KASSERT(mutex_owned(vp->v_interlock));
   2032 
   2033 	if (! ISSET(flags, VDEAD_NOWAIT))
   2034 		VSTATE_WAIT_STABLE(vp);
   2035 
   2036 	if (VSTATE_GET(vp) == VS_RECLAIMING) {
   2037 		KASSERT(ISSET(flags, VDEAD_NOWAIT));
   2038 		return EBUSY;
   2039 	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
   2040 		return ENOENT;
   2041 	}
   2042 
   2043 	return 0;
   2044 }
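
        /*
         * Illustrative sketch, not part of this file: a caller that must
         * not sleep checks under the interlock as
         *
         *	mutex_enter(vp->v_interlock);
         *	error = vdead_check(vp, VDEAD_NOWAIT);
         *	mutex_exit(vp->v_interlock);
         *	if (error)
         *		return error;
         *
         * getting EBUSY while the vnode is dying and ENOENT once it is
         * dead.  Without VDEAD_NOWAIT the call waits for the state to
         * settle first, so only ENOENT or 0 are possible.
         */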
   2045 
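        /*
         * Shrink the vnode cache towards desiredvnodes.  Waiting for two
         * vdrain generations guarantees that at least one full pass of
         * the vdrain thread starts and completes after we arrive here.
         * Returns EBUSY if the cache could not be drained below the
         * target.
         */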
   2046 int
   2047 vfs_drainvnodes(void)
   2048 {
   2049 	int i, gen;
   2050 
   2051 	mutex_enter(&vdrain_lock);
   2052 	for (i = 0; i < 2; i++) {
   2053 		gen = vdrain_gen;
   2054 		while (gen == vdrain_gen) {
   2055 			cv_broadcast(&vdrain_cv);
   2056 			cv_wait(&vdrain_gen_cv, &vdrain_lock);
   2057 		}
   2058 	}
   2059 	mutex_exit(&vdrain_lock);
   2060 
   2061 	if (numvnodes >= desiredvnodes)
   2062 		return EBUSY;
   2063 
   2064 	if (vcache_hashsize != desiredvnodes)
   2065 		vcache_reinit();
   2066 
   2067 	return 0;
   2068 }
   2069 
   2070 void
   2071 vnpanic(vnode_t *vp, const char *fmt, ...)
   2072 {
   2073 	va_list ap;
   2074 
   2075 #ifdef DIAGNOSTIC
   2076 	vprint(NULL, vp);
   2077 #endif
   2078 	va_start(ap, fmt);
   2079 	vpanic(fmt, ap);
   2080 	va_end(ap);
   2081 }
   2082 
   2083 void
   2084 vshareilock(vnode_t *tvp, vnode_t *fvp)
   2085 {
   2086 	kmutex_t *oldlock;
   2087 
   2088 	oldlock = tvp->v_interlock;
   2089 	mutex_obj_hold(fvp->v_interlock);
   2090 	tvp->v_interlock = fvp->v_interlock;
   2091 	mutex_obj_free(oldlock);
   2092 }
   2093 
   2094 void
   2095 vshareklist(vnode_t *tvp, vnode_t *fvp)
   2096 {
   2097 	/*
   2098 	 * If two vnodes share klist state, they must also share
   2099 	 * an interlock.
   2100 	 */
   2101 	KASSERT(tvp->v_interlock == fvp->v_interlock);
   2102 
   2103 	/*
   2104 	 * We make the following assumptions:
   2105 	 *
   2106 	 * ==> Some other synchronization is happening outside of
   2107 	 *     our view to make this safe.
   2108 	 *
   2109 	 * ==> The "to" vnode will have the necessary references
   2110 	 *     on the "from" vnode so that the storage for the klist
   2111 	 *     won't be yanked out from beneath us (the vnode_impl).
   2112 	 *
   2113 	 * ==> If "from" is also sharing, we then assume that "from"
   2114 	 *     has the necessary references, and so on.
   2115 	 */
   2116 	tvp->v_klist = fvp->v_klist;
   2117 }
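
        /*
         * Illustrative note: interlock and klist sharing are used so that
         * vnodes aliasing the same underlying object, e.g. device special
         * files, observe one interlock and one set of knotes:
         *
         *	vshareilock(vp, mastervp);
         *	vshareklist(vp, mastervp);
         *
         * with the interlock shared first, as vshareklist() asserts.
         * "mastervp" is a placeholder for the vnode owning the shared
         * state.
         */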
   2118