vfs_vnode.c revision 1.131
      1 /*	$NetBSD: vfs_vnode.c,v 1.131 2022/02/17 14:38:06 hannken Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 /*
     34  * Copyright (c) 1989, 1993
     35  *	The Regents of the University of California.  All rights reserved.
     36  * (c) UNIX System Laboratories, Inc.
     37  * All or some portions of this file are derived from material licensed
     38  * to the University of California by American Telephone and Telegraph
     39  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     40  * the permission of UNIX System Laboratories, Inc.
     41  *
     42  * Redistribution and use in source and binary forms, with or without
     43  * modification, are permitted provided that the following conditions
     44  * are met:
     45  * 1. Redistributions of source code must retain the above copyright
     46  *    notice, this list of conditions and the following disclaimer.
     47  * 2. Redistributions in binary form must reproduce the above copyright
     48  *    notice, this list of conditions and the following disclaimer in the
     49  *    documentation and/or other materials provided with the distribution.
     50  * 3. Neither the name of the University nor the names of its contributors
     51  *    may be used to endorse or promote products derived from this software
     52  *    without specific prior written permission.
     53  *
     54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     64  * SUCH DAMAGE.
     65  *
     66  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
     67  */
     68 
     69 /*
     70  * The vnode cache subsystem.
     71  *
     72  * Life-cycle
     73  *
     74  *	Normally, there are two points where new vnodes are created:
     75  *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
     76  *	starts in one of the following ways:
     77  *
     78  *	- Allocation, via vcache_get(9) or vcache_new(9).
     79  *	- Reclamation of inactive vnode, via vcache_vget(9).
     80  *
     81  *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
     82  *	was another, traditional way.  Currently, only the draining thread
     83  *	recycles vnodes.  This behaviour might be revisited.
     84  *
     85  *	The life-cycle ends when the last reference is dropped, usually
     86  *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
     87  *	the file system that the vnode is inactive.  Via this call, the file
     88  *	system indicates whether the vnode can be recycled (usually, it
     89  *	checks its own references, e.g. the link count, or whether the file
     90  *	was removed).
     91  *
     92  *	Depending on that indication, the vnode can be put onto a free list
     93  *	(cache), or cleaned via vcache_reclaim(), which calls VOP_RECLAIM(9)
     94  *	to disassociate the underlying file system, and finally destroyed.
     95  *
     96  * Vnode state
     97  *
     98  *	A vnode is always in one of six states:
     99  *	- MARKER	This is a marker vnode to help list traversal.  It
    100  *			will never change its state.
    101  *	- LOADING	Vnode is associating with the underlying file system
    102  *			and is not yet ready to use.
    103  *	- LOADED	Vnode has an associated underlying file system and is
    104  *			ready to use.
    105  *	- BLOCKED	Vnode is active but cannot get new references.
    106  *	- RECLAIMING	Vnode is disassociating from the underlying file
    107  *			system.
    108  *	- RECLAIMED	Vnode has disassociated from the underlying file
    109  *			system and is dead.
    110  *
    111  *	Valid state changes are:
    112  *	LOADING -> LOADED
    113  *			Vnode has been initialised in vcache_get() or
    114  *			vcache_new() and is ready to use.
    115  *	BLOCKED -> RECLAIMING
    116  *			Vnode starts disassociation from underlying file
    117  *			system in vcache_reclaim().
    118  *	RECLAIMING -> RECLAIMED
    119  *			Vnode finished disassociation from underlying file
    120  *			system in vcache_reclaim().
    121  *	LOADED -> BLOCKED
    122  *			Either vcache_rekey*() is changing the vnode key or
    123  *			vrelel() is about to call VOP_INACTIVE().
    124  *	BLOCKED -> LOADED
    125  *			The block condition is over.
    126  *	LOADING -> RECLAIMED
    127  *			Either vcache_get() or vcache_new() failed to
    128  *			associate the underlying file system or vcache_rekey*()
    129  *			drops a vnode used as placeholder.
    130  *
    131  *	Of these states, LOADING, BLOCKED and RECLAIMING are intermediate,
    132  *	and it is possible to wait for a state change.
    133  *
    134  *	State is protected by v_interlock, with one exception: to change from
    135  *	LOADING both v_interlock and vcache_lock must be held, so it is
    136  *	possible to check "state == LOADING" while holding only vcache_lock,
    137  *	without taking v_interlock.  See vcache_get() for details.
    138  *
    139  * Reference counting
    140  *
    141  *	A vnode is considered active if its reference count
    142  *	(vnode_t::v_usecount) is non-zero.  The count is maintained with the
    143  *	vref(9), vrele(9) and vput(9) routines.  Typical reference holders
    144  *	are open files, current working directories and mount points.
    145  *
    146  *	v_usecount is adjusted with atomic operations; however, to change it
    147  *	from a non-zero value to zero, v_interlock must also be held.
    148  */
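
        /*
         * Illustrative sketch (editor's addition, not part of the original
         * source): a typical reference life-cycle as seen by a caller,
         * assuming a mount "mp" and a file system specific key
         * "key"/"key_len" (hypothetical values):
         *
         *	struct vnode *vp;
         *
         *	if (vcache_get(mp, key, key_len, &vp) == 0) {
         *		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
         *		... operate on the vnode ...
         *		vput(vp);	unlocks and drops the reference
         *	}
         */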
    149 
    150 #include <sys/cdefs.h>
    151 __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.131 2022/02/17 14:38:06 hannken Exp $");
    152 
    153 #ifdef _KERNEL_OPT
    154 #include "opt_pax.h"
    155 #endif
    156 
    157 #include <sys/param.h>
    158 #include <sys/kernel.h>
    159 
    160 #include <sys/atomic.h>
    161 #include <sys/buf.h>
    162 #include <sys/conf.h>
    163 #include <sys/device.h>
    164 #include <sys/hash.h>
    165 #include <sys/kauth.h>
    166 #include <sys/kmem.h>
    167 #include <sys/kthread.h>
    168 #include <sys/module.h>
    169 #include <sys/mount.h>
    170 #include <sys/namei.h>
    171 #include <sys/pax.h>
    172 #include <sys/syscallargs.h>
    173 #include <sys/sysctl.h>
    174 #include <sys/systm.h>
    175 #include <sys/vnode_impl.h>
    176 #include <sys/wapbl.h>
    177 #include <sys/fstrans.h>
    178 
    179 #include <uvm/uvm.h>
    180 #include <uvm/uvm_readahead.h>
    181 #include <uvm/uvm_stat.h>
    182 
    183 /* Flags to vrelel. */
    184 #define	VRELEL_ASYNC	0x0001	/* Always defer to vrele thread. */
    185 
    186 #define	LRU_VRELE	0
    187 #define	LRU_FREE	1
    188 #define	LRU_HOLD	2
    189 #define	LRU_COUNT	3
    190 
    191 /*
    192  * There are three lru lists: one holds vnodes waiting for async release,
    193  * one is for vnodes which have no buffer/page references, and one for those
    194  * which do (i.e. v_holdcnt is non-zero).  We put the lists into a single,
    195  * private cache line as vnodes migrate between them while under the same
    196  * lock (vdrain_lock).
    197  */
    198 u_int			numvnodes		__cacheline_aligned;
    199 static vnodelst_t	lru_list[LRU_COUNT]	__cacheline_aligned;
    200 static kmutex_t		vdrain_lock		__cacheline_aligned;
    201 static kcondvar_t	vdrain_cv;
    202 static int		vdrain_gen;
    203 static kcondvar_t	vdrain_gen_cv;
    204 static bool		vdrain_retry;
    205 static lwp_t *		vdrain_lwp;
    206 SLIST_HEAD(hashhead, vnode_impl);
    207 static kmutex_t		vcache_lock		__cacheline_aligned;
    208 static kcondvar_t	vcache_cv;
    209 static u_int		vcache_hashsize;
    210 static u_long		vcache_hashmask;
    211 static struct hashhead	*vcache_hashtab;
    212 static pool_cache_t	vcache_pool;
    213 static void		lru_requeue(vnode_t *, vnodelst_t *);
    214 static vnodelst_t *	lru_which(vnode_t *);
    215 static vnode_impl_t *	vcache_alloc(void);
    216 static void		vcache_dealloc(vnode_impl_t *);
    217 static void		vcache_free(vnode_impl_t *);
    218 static void		vcache_init(void);
    219 static void		vcache_reinit(void);
    220 static void		vcache_reclaim(vnode_t *);
    221 static void		vrelel(vnode_t *, int, int);
    222 static void		vdrain_thread(void *);
    223 static void		vnpanic(vnode_t *, const char *, ...)
    224     __printflike(2, 3);
    225 
    226 /* Routines having to do with the management of the vnode table. */
    227 extern struct mount	*dead_rootmount;
    228 extern int		(**dead_vnodeop_p)(void *);
    229 extern int		(**spec_vnodeop_p)(void *);
    230 extern struct vfsops	dead_vfsops;
    231 
    232 /*
    233  * The high bit of v_usecount is a gate for vcache_tryvget().  It's set
    234  * only when the vnode state is LOADED.
    235  */
    236 #define	VUSECOUNT_MASK	0x7fffffff
    237 #define	VUSECOUNT_GATE	0x80000000
    238 
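        /*
         * Illustrative sketch (editor's addition, not part of the original
         * source): the gate bit lets vcache_tryvget() take an initial
         * reference with a single atomic compare-and-swap, succeeding only
         * while the vnode is LOADED:
         *
         *	u_int use = atomic_load_relaxed(&vp->v_usecount);
         *	if ((use & VUSECOUNT_GATE) == 0)
         *		return EBUSY;
         *	atomic_cas_uint(&vp->v_usecount, use, use + 1);
         *
         * See vcache_tryvget() below for the real, retrying version.
         */
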
    239 /*
    240  * Return the current usecount of a vnode.
    241  */
    242 inline int
    243 vrefcnt(struct vnode *vp)
    244 {
    245 
    246 	return atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_MASK;
    247 }
    248 
    249 /* Vnode state operations and diagnostics. */
    250 
    251 #if defined(DIAGNOSTIC)
    252 
    253 #define VSTATE_VALID(state) \
    254 	((state) != VS_ACTIVE && (state) != VS_MARKER)
    255 #define VSTATE_GET(vp) \
    256 	vstate_assert_get((vp), __func__, __LINE__)
    257 #define VSTATE_CHANGE(vp, from, to) \
    258 	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
    259 #define VSTATE_WAIT_STABLE(vp) \
    260 	vstate_assert_wait_stable((vp), __func__, __LINE__)
    261 
    262 void
    263 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    264     bool has_lock)
    265 {
    266 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    267 	int refcnt = vrefcnt(vp);
    268 
    269 	if (!has_lock) {
    270 		/*
    271 		 * Prevent predictive loads from the CPU, but check the state
    272 		 * without locking first.
    273 		 */
    274 		membar_enter();
    275 		if (state == VS_ACTIVE && refcnt > 0 &&
    276 		    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
    277 			return;
    278 		if (vip->vi_state == state)
    279 			return;
    280 		mutex_enter((vp)->v_interlock);
    281 	}
    282 
    283 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    284 
    285 	if ((state == VS_ACTIVE && refcnt > 0 &&
    286 	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
    287 	    vip->vi_state == state) {
    288 		if (!has_lock)
    289 			mutex_exit((vp)->v_interlock);
    290 		return;
    291 	}
    292 	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
    293 	    vstate_name(vip->vi_state), refcnt,
    294 	    vstate_name(state), func, line);
    295 }
    296 
    297 static enum vnode_state
    298 vstate_assert_get(vnode_t *vp, const char *func, int line)
    299 {
    300 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    301 
    302 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    303 	if (! VSTATE_VALID(vip->vi_state))
    304 		vnpanic(vp, "state is %s at %s:%d",
    305 		    vstate_name(vip->vi_state), func, line);
    306 
    307 	return vip->vi_state;
    308 }
    309 
    310 static void
    311 vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
    312 {
    313 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    314 
    315 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    316 	if (! VSTATE_VALID(vip->vi_state))
    317 		vnpanic(vp, "state is %s at %s:%d",
    318 		    vstate_name(vip->vi_state), func, line);
    319 
    320 	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
    321 		cv_wait(&vp->v_cv, vp->v_interlock);
    322 
    323 	if (! VSTATE_VALID(vip->vi_state))
    324 		vnpanic(vp, "state is %s at %s:%d",
    325 		    vstate_name(vip->vi_state), func, line);
    326 }
    327 
    328 static void
    329 vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    330     const char *func, int line)
    331 {
    332 	bool gated = (atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_GATE);
    333 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    334 
    335 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    336 	if (from == VS_LOADING)
    337 		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);
    338 
    339 	if (! VSTATE_VALID(from))
    340 		vnpanic(vp, "from is %s at %s:%d",
    341 		    vstate_name(from), func, line);
    342 	if (! VSTATE_VALID(to))
    343 		vnpanic(vp, "to is %s at %s:%d",
    344 		    vstate_name(to), func, line);
    345 	if (vip->vi_state != from)
    346 		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
    347 		    vstate_name(vip->vi_state), vstate_name(from), func, line);
    348 	if ((from == VS_LOADED) != gated)
    349 		vnpanic(vp, "state is %s, gate %d does not match at %s:%d\n",
    350 		    vstate_name(vip->vi_state), gated, func, line);
    351 
    352 	/* Open/close the gate for vcache_tryvget(). */
    353 	if (to == VS_LOADED)
    354 		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
    355 	else
    356 		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
    357 
    358 	vip->vi_state = to;
    359 	if (from == VS_LOADING)
    360 		cv_broadcast(&vcache_cv);
    361 	if (to == VS_LOADED || to == VS_RECLAIMED)
    362 		cv_broadcast(&vp->v_cv);
    363 }
    364 
    365 #else /* defined(DIAGNOSTIC) */
    366 
    367 #define VSTATE_GET(vp) \
    368 	(VNODE_TO_VIMPL((vp))->vi_state)
    369 #define VSTATE_CHANGE(vp, from, to) \
    370 	vstate_change((vp), (from), (to))
    371 #define VSTATE_WAIT_STABLE(vp) \
    372 	vstate_wait_stable((vp))
    373 void
    374 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    375     bool has_lock)
    376 {
    377 
    378 }
    379 
    380 static void
    381 vstate_wait_stable(vnode_t *vp)
    382 {
    383 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    384 
    385 	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
    386 		cv_wait(&vp->v_cv, vp->v_interlock);
    387 }
    388 
    389 static void
    390 vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
    391 {
    392 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    393 
    394 	/* Open/close the gate for vcache_tryvget(). */
    395 	if (to == VS_LOADED)
    396 		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
    397 	else
    398 		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
    399 
    400 	vip->vi_state = to;
    401 	if (from == VS_LOADING)
    402 		cv_broadcast(&vcache_cv);
    403 	if (to == VS_LOADED || to == VS_RECLAIMED)
    404 		cv_broadcast(&vp->v_cv);
    405 }
    406 
    407 #endif /* defined(DIAGNOSTIC) */
    408 
    409 void
    410 vfs_vnode_sysinit(void)
    411 {
    412 	int error __diagused, i;
    413 
    414 	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
    415 	KASSERT(dead_rootmount != NULL);
    416 	dead_rootmount->mnt_iflag |= IMNT_MPSAFE;
    417 
    418 	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
    419 	for (i = 0; i < LRU_COUNT; i++) {
    420 		TAILQ_INIT(&lru_list[i]);
    421 	}
    422 	vcache_init();
    423 
    424 	cv_init(&vdrain_cv, "vdrain");
    425 	cv_init(&vdrain_gen_cv, "vdrainwt");
    426 	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
    427 	    NULL, &vdrain_lwp, "vdrain");
    428 	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
    429 }
    430 
    431 /*
    432  * Allocate a new marker vnode.
    433  */
    434 vnode_t *
    435 vnalloc_marker(struct mount *mp)
    436 {
    437 	vnode_impl_t *vip;
    438 	vnode_t *vp;
    439 
    440 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
    441 	memset(vip, 0, sizeof(*vip));
    442 	vp = VIMPL_TO_VNODE(vip);
    443 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
    444 	vp->v_mount = mp;
    445 	vp->v_type = VBAD;
    446 	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
    447 	klist_init(&vp->v_klist);
    448 	vip->vi_state = VS_MARKER;
    449 
    450 	return vp;
    451 }
    452 
    453 /*
    454  * Free a marker vnode.
    455  */
    456 void
    457 vnfree_marker(vnode_t *vp)
    458 {
    459 	vnode_impl_t *vip;
    460 
    461 	vip = VNODE_TO_VIMPL(vp);
    462 	KASSERT(vip->vi_state == VS_MARKER);
    463 	mutex_obj_free(vp->v_interlock);
    464 	uvm_obj_destroy(&vp->v_uobj, true);
    465 	klist_fini(&vp->v_klist);
    466 	pool_cache_put(vcache_pool, vip);
    467 }
    468 
    469 /*
    470  * Test a vnode for being a marker vnode.
    471  */
    472 bool
    473 vnis_marker(vnode_t *vp)
    474 {
    475 
    476 	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
    477 }
    478 
    479 /*
    480  * Return the lru list this node should be on.
    481  */
    482 static vnodelst_t *
    483 lru_which(vnode_t *vp)
    484 {
    485 
    486 	KASSERT(mutex_owned(vp->v_interlock));
    487 
    488 	if (vp->v_holdcnt > 0)
    489 		return &lru_list[LRU_HOLD];
    490 	else
    491 		return &lru_list[LRU_FREE];
    492 }
    493 
    494 /*
    495  * Put the vnode at the end of the given list.
    496  * Both the current and the new list may be NULL, as on vnode alloc/free.
    497  * Adjust numvnodes and signal the vdrain thread if there is work.
    498  */
    499 static void
    500 lru_requeue(vnode_t *vp, vnodelst_t *listhd)
    501 {
    502 	vnode_impl_t *vip;
    503 	int d;
    504 
    505 	/*
    506 	 * If the vnode is on the correct list, and was put there recently,
    507 	 * then leave it be, thus avoiding huge cache and lock contention.
    508 	 */
    509 	vip = VNODE_TO_VIMPL(vp);
    510 	if (listhd == vip->vi_lrulisthd &&
    511 	    (getticks() - vip->vi_lrulisttm) < hz) {
    512 		return;
    513 	}
    514 
    515 	mutex_enter(&vdrain_lock);
    516 	d = 0;
    517 	if (vip->vi_lrulisthd != NULL)
    518 		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
    519 	else
    520 		d++;
    521 	vip->vi_lrulisthd = listhd;
    522 	vip->vi_lrulisttm = getticks();
    523 	if (vip->vi_lrulisthd != NULL)
    524 		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
    525 	else
    526 		d--;
    527 	if (d != 0) {
    528 		/*
    529 		 * Looks strange?  This is not a bug.  Don't store
    530 		 * numvnodes unless there is a change - avoid false
    531 		 * sharing on MP.
    532 		 */
    533 		numvnodes += d;
    534 	}
    535 	if ((d > 0 && numvnodes > desiredvnodes) ||
    536 	    listhd == &lru_list[LRU_VRELE])
    537 		cv_signal(&vdrain_cv);
    538 	mutex_exit(&vdrain_lock);
    539 }
    540 
    541 /*
    542  * Release deferred vrele vnodes for this mount.
    543  * Called with file system suspended.
    544  */
    545 void
    546 vrele_flush(struct mount *mp)
    547 {
    548 	vnode_impl_t *vip, *marker;
    549 	vnode_t *vp;
    550 	int when = 0;
    551 
    552 	KASSERT(fstrans_is_owner(mp));
    553 
    554 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
    555 
    556 	mutex_enter(&vdrain_lock);
    557 	TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);
    558 
    559 	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
    560 		TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
    561 		TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
    562 		    vi_lrulist);
    563 		vp = VIMPL_TO_VNODE(vip);
    564 		if (vnis_marker(vp))
    565 			continue;
    566 
    567 		KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
    568 		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
    569 		vip->vi_lrulisthd = &lru_list[LRU_HOLD];
    570 		vip->vi_lrulisttm = getticks();
    571 		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
    572 		mutex_exit(&vdrain_lock);
    573 
    574 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    575 		mutex_enter(vp->v_interlock);
    576 		vrelel(vp, 0, LK_EXCLUSIVE);
    577 
    578 		if (getticks() > when) {
    579 			yield();
    580 			when = getticks() + hz / 10;
    581 		}
    582 
    583 		mutex_enter(&vdrain_lock);
    584 	}
    585 
    586 	TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
    587 	mutex_exit(&vdrain_lock);
    588 
    589 	vnfree_marker(VIMPL_TO_VNODE(marker));
    590 }
    591 
    592 /*
    593  * Reclaim a cached vnode.  Used from vdrain_thread only.
    594  */
    595 static __inline void
    596 vdrain_remove(vnode_t *vp)
    597 {
    598 	struct mount *mp;
    599 
    600 	KASSERT(mutex_owned(&vdrain_lock));
    601 
    602 	/* Probe usecount (unlocked). */
    603 	if (vrefcnt(vp) > 0)
    604 		return;
    605 	/* Try v_interlock -- we are locking in the wrong direction! */
    606 	if (!mutex_tryenter(vp->v_interlock))
    607 		return;
    608 	/* Probe usecount and state. */
    609 	if (vrefcnt(vp) > 0 || VSTATE_GET(vp) != VS_LOADED) {
    610 		mutex_exit(vp->v_interlock);
    611 		return;
    612 	}
    613 	mp = vp->v_mount;
    614 	if (fstrans_start_nowait(mp) != 0) {
    615 		mutex_exit(vp->v_interlock);
    616 		return;
    617 	}
    618 	vdrain_retry = true;
    619 	mutex_exit(&vdrain_lock);
    620 
    621 	if (vcache_vget(vp) == 0) {
    622 		if (!vrecycle(vp)) {
    623 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    624 			mutex_enter(vp->v_interlock);
    625 			vrelel(vp, 0, LK_EXCLUSIVE);
    626 		}
    627 	}
    628 	fstrans_done(mp);
    629 
    630 	mutex_enter(&vdrain_lock);
    631 }
    632 
    633 /*
    634  * Release a cached vnode.  Used from vdrain_thread only.
    635  */
    636 static __inline void
    637 vdrain_vrele(vnode_t *vp)
    638 {
    639 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    640 	struct mount *mp;
    641 
    642 	KASSERT(mutex_owned(&vdrain_lock));
    643 
    644 	mp = vp->v_mount;
    645 	if (fstrans_start_nowait(mp) != 0)
    646 		return;
    647 
    648 	/*
    649 	 * First remove the vnode from the vrele list.
    650 	 * Put it on the last lru list; the last vrele()
    651 	 * will put it back onto the right list before
    652 	 * its usecount reaches zero.
    653 	 */
    654 	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
    655 	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
    656 	vip->vi_lrulisthd = &lru_list[LRU_HOLD];
    657 	vip->vi_lrulisttm = getticks();
    658 	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
    659 
    660 	vdrain_retry = true;
    661 	mutex_exit(&vdrain_lock);
    662 
    663 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    664 	mutex_enter(vp->v_interlock);
    665 	vrelel(vp, 0, LK_EXCLUSIVE);
    666 	fstrans_done(mp);
    667 
    668 	mutex_enter(&vdrain_lock);
    669 }
    670 
    671 /*
    672  * Helper thread to keep the number of vnodes below desiredvnodes
    673  * and release vnodes from asynchronous vrele.
    674  */
    675 static void
    676 vdrain_thread(void *cookie)
    677 {
    678 	int i;
    679 	u_int target;
    680 	vnode_impl_t *vip, *marker;
    681 
    682 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
    683 
    684 	mutex_enter(&vdrain_lock);
    685 
    686 	for (;;) {
    687 		vdrain_retry = false;
    688 		target = desiredvnodes - desiredvnodes/10;
    689 
    690 		for (i = 0; i < LRU_COUNT; i++) {
    691 			TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
    692 			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
    693 				TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
    694 				TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
    695 				    vi_lrulist);
    696 				if (vnis_marker(VIMPL_TO_VNODE(vip)))
    697 					continue;
    698 				if (i == LRU_VRELE)
    699 					vdrain_vrele(VIMPL_TO_VNODE(vip));
    700 				else if (numvnodes < target)
    701 					break;
    702 				else
    703 					vdrain_remove(VIMPL_TO_VNODE(vip));
    704 			}
    705 			TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
    706 		}
    707 
    708 		if (vdrain_retry) {
    709 			kpause("vdrainrt", false, 1, &vdrain_lock);
    710 		} else {
    711 			vdrain_gen++;
    712 			cv_broadcast(&vdrain_gen_cv);
    713 			cv_wait(&vdrain_cv, &vdrain_lock);
    714 		}
    715 	}
    716 }
    717 
    718 /*
    719  * Try to drop a reference on a vnode.  Abort if we are releasing the
    720  * last reference.  Note: this _must_ succeed if not the last reference.
    721  */
    722 static bool
    723 vtryrele(vnode_t *vp)
    724 {
    725 	u_int use, next;
    726 
    727 	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
    728 		if (__predict_false((use & VUSECOUNT_MASK) == 1)) {
    729 			return false;
    730 		}
    731 		KASSERT((use & VUSECOUNT_MASK) > 1);
    732 		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
    733 		if (__predict_true(next == use)) {
    734 			return true;
    735 		}
    736 	}
    737 }
    738 
    739 /*
    740  * vput: unlock and release the reference.
    741  */
    742 void
    743 vput(vnode_t *vp)
    744 {
    745 	int lktype;
    746 
    747 	/*
    748 	 * Do an unlocked check of the usecount.  If it looks like we're not
    749 	 * about to drop the last reference, then unlock the vnode and try
    750 	 * to drop the reference.  If it ends up being the last reference
    751 	 * after all, vrelel() can fix it all up.  Most of the time this
    752 	 * will all go to plan.
    753 	 */
    754 	if (vrefcnt(vp) > 1) {
    755 		VOP_UNLOCK(vp);
    756 		if (vtryrele(vp)) {
    757 			return;
    758 		}
    759 		lktype = LK_NONE;
    760 	} else if ((vp->v_vflag & VV_LOCKSWORK) == 0) {
    761 		VOP_UNLOCK(vp);
    762 		lktype = LK_NONE;
    763 	} else {
    764 		lktype = VOP_ISLOCKED(vp);
    765 		KASSERT(lktype != LK_NONE);
    766 	}
    767 	mutex_enter(vp->v_interlock);
    768 	vrelel(vp, 0, lktype);
    769 }
    770 
    771 /*
    772  * Vnode release.  If reference count drops to zero, call inactive
    773  * routine and either return to freelist or free to the pool.
    774  */
    775 static void
    776 vrelel(vnode_t *vp, int flags, int lktype)
    777 {
    778 	const bool async = ((flags & VRELEL_ASYNC) != 0);
    779 	bool recycle, defer;
    780 	int error;
    781 
    782 	KASSERT(mutex_owned(vp->v_interlock));
    783 
    784 	if (__predict_false(vp->v_op == dead_vnodeop_p &&
    785 	    VSTATE_GET(vp) != VS_RECLAIMED)) {
    786 		vnpanic(vp, "dead but not clean");
    787 	}
    788 
    789 	/*
    790 	 * If not the last reference, just drop the reference count and
    791 	 * unlock.  VOP_UNLOCK() is called here without a vnode reference
    792 	 * held, but that is OK as holding v_interlock stops the vnode
    793 	 * from disappearing.
    794 	 */
    795 	if (vtryrele(vp)) {
    796 		if (lktype != LK_NONE) {
    797 			VOP_UNLOCK(vp);
    798 		}
    799 		mutex_exit(vp->v_interlock);
    800 		return;
    801 	}
    802 	if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) {
    803 		vnpanic(vp, "%s: bad ref count", __func__);
    804 	}
    805 
    806 #ifdef DIAGNOSTIC
    807 	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
    808 	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
    809 		vprint("vrelel: missing VOP_CLOSE()", vp);
    810 	}
    811 #endif
    812 
    813 	/*
    814 	 * If already clean there is no need to lock, defer or
    815 	 * deactivate this node.
    816 	 */
    817 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
    818 		if (lktype != LK_NONE) {
    819 			mutex_exit(vp->v_interlock);
    820 			lktype = LK_NONE;
    821 			VOP_UNLOCK(vp);
    822 			mutex_enter(vp->v_interlock);
    823 		}
    824 		goto out;
    825 	}
    826 
    827 	/*
    828 	 * First try to get the vnode locked for VOP_INACTIVE().
    829 	 * Defer vnode release to the vdrain_thread if the caller requests
    830 	 * it explicitly, is the pagedaemon, or if taking the lock failed.
    831 	 */
    832 	defer = false;
    833 	if ((curlwp == uvm.pagedaemon_lwp) || async) {
    834 		defer = true;
    835 	} else if (lktype == LK_SHARED) {
    836 		/* Excellent chance of getting the lock, if this is the last ref. */
    837 		error = vn_lock(vp, LK_UPGRADE | LK_RETRY | LK_NOWAIT);
    838 		if (error != 0) {
    839 			defer = true;
    840 		} else {
    841 			lktype = LK_EXCLUSIVE;
    842 		}
    843 	} else if (lktype == LK_NONE) {
    844 		/* Excellent chance of getting the lock, if this is the last ref. */
    845 		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
    846 		if (error != 0) {
    847 			defer = true;
    848 		} else {
    849 			lktype = LK_EXCLUSIVE;
    850 		}
    851 	}
    852 	KASSERT(mutex_owned(vp->v_interlock));
    853 	if (defer) {
    854 		/*
    855 		 * Defer reclaim to the kthread; it's not safe to
    856 		 * clean it here.  We donate it our last reference.
    857 		 */
    858 		if (lktype != LK_NONE) {
    859 			VOP_UNLOCK(vp);
    860 		}
    861 		lru_requeue(vp, &lru_list[LRU_VRELE]);
    862 		mutex_exit(vp->v_interlock);
    863 		return;
    864 	}
    865 	KASSERT(lktype == LK_EXCLUSIVE);
    866 
    867 	/*
    868 	 * Deactivate the vnode, but preserve our reference across
    869 	 * the call to VOP_INACTIVE().
    870 	 *
    871 	 * If VOP_INACTIVE() indicates that the file has been
    872 	 * deleted, then recycle the vnode.
    873 	 *
    874 	 * Note that VOP_INACTIVE() will not drop the vnode lock.
    875 	 */
    876 	mutex_exit(vp->v_interlock);
    877 	recycle = false;
    878 	VOP_INACTIVE(vp, &recycle);
    879 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
    880 	mutex_enter(vp->v_interlock);
    881 
    882 	for (;;) {
    883 		/*
    884 		 * If no longer the last reference, try to shed it.
    885 		 * On success, drop the interlock last, thereby
    886 		 * preventing the vnode from being freed behind us.
    887 		 */
    888 		if (vtryrele(vp)) {
    889 			VOP_UNLOCK(vp);
    890 			rw_exit(vp->v_uobj.vmobjlock);
    891 			mutex_exit(vp->v_interlock);
    892 			return;
    893 		}
    894 		/*
    895 		 * Block new references, then check again to see if a
    896 		 * new reference was acquired in the meantime.  If
    897 		 * it was, restore the vnode state and try again.
    898 		 */
    899 		if (recycle) {
    900 			VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
    901 			if (vrefcnt(vp) != 1) {
    902 				VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
    903 				continue;
    904 			}
    905 		}
    906 		break;
    907 	}
    908 
    909 	/* Take care of space accounting. */
    910 	if ((vp->v_iflag & VI_EXECMAP) != 0) {
    911 		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
    912 	}
    913 	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
    914 	vp->v_vflag &= ~VV_MAPPED;
    915 	rw_exit(vp->v_uobj.vmobjlock);
    916 
    917 	/*
    918 	 * Recycle the vnode if the file is now unused (unlinked),
    919 	 * otherwise just free it.
    920 	 */
    921 	if (recycle) {
    922 		VSTATE_ASSERT(vp, VS_BLOCKED);
    923 		/* vcache_reclaim drops the lock. */
    924 		vcache_reclaim(vp);
    925 	} else {
    926 		VOP_UNLOCK(vp);
    927 	}
    928 	KASSERT(vrefcnt(vp) > 0);
    929 
    930 out:
    931 	if ((atomic_dec_uint_nv(&vp->v_usecount) & VUSECOUNT_MASK) != 0) {
    932 		/* Gained another reference while being reclaimed. */
    933 		mutex_exit(vp->v_interlock);
    934 		return;
    935 	}
    936 
    937 	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
    938 		/*
    939 		 * It's clean so destroy it.  It isn't referenced
    940 		 * anywhere since it has been reclaimed.
    941 		 */
    942 		vcache_free(VNODE_TO_VIMPL(vp));
    943 	} else {
    944 		/*
    945 		 * Otherwise, put it back onto the freelist.  It
    946 		 * can't be destroyed while still associated with
    947 		 * a file system.
    948 		 */
    949 		lru_requeue(vp, lru_which(vp));
    950 		mutex_exit(vp->v_interlock);
    951 	}
    952 }
    953 
    954 void
    955 vrele(vnode_t *vp)
    956 {
    957 
    958 	if (vtryrele(vp)) {
    959 		return;
    960 	}
    961 	mutex_enter(vp->v_interlock);
    962 	vrelel(vp, 0, LK_NONE);
    963 }
    964 
    965 /*
    966  * Asynchronous vnode release: the vnode is released in a different context.
    967  */
    968 void
    969 vrele_async(vnode_t *vp)
    970 {
    971 
    972 	if (vtryrele(vp)) {
    973 		return;
    974 	}
    975 	mutex_enter(vp->v_interlock);
    976 	vrelel(vp, VRELEL_ASYNC, LK_NONE);
    977 }
    978 
    979 /*
    980  * Vnode reference, where a reference is already held by some other
    981  * object (for example, a file structure).
    982  *
    983  * NB: lockless code sequences may rely on this not blocking.
    984  */
    985 void
    986 vref(vnode_t *vp)
    987 {
    988 
    989 	KASSERT(vrefcnt(vp) > 0);
    990 
    991 	atomic_inc_uint(&vp->v_usecount);
    992 }
    993 
    994 /*
    995  * Page or buffer structure gets a reference.
    996  * Called with v_interlock held.
    997  */
    998 void
    999 vholdl(vnode_t *vp)
   1000 {
   1001 
   1002 	KASSERT(mutex_owned(vp->v_interlock));
   1003 
   1004 	if (vp->v_holdcnt++ == 0 && vrefcnt(vp) == 0)
   1005 		lru_requeue(vp, lru_which(vp));
   1006 }
   1007 
   1008 /*
   1009  * Page or buffer structure gets a reference.
   1010  */
   1011 void
   1012 vhold(vnode_t *vp)
   1013 {
   1014 
   1015 	mutex_enter(vp->v_interlock);
   1016 	vholdl(vp);
   1017 	mutex_exit(vp->v_interlock);
   1018 }
   1019 
   1020 /*
   1021  * Page or buffer structure frees a reference.
   1022  * Called with v_interlock held.
   1023  */
   1024 void
   1025 holdrelel(vnode_t *vp)
   1026 {
   1027 
   1028 	KASSERT(mutex_owned(vp->v_interlock));
   1029 
   1030 	if (vp->v_holdcnt <= 0) {
   1031 		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
   1032 	}
   1033 
   1034 	vp->v_holdcnt--;
   1035 	if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
   1036 		lru_requeue(vp, lru_which(vp));
   1037 }
   1038 
   1039 /*
   1040  * Page or buffer structure frees a reference.
   1041  */
   1042 void
   1043 holdrele(vnode_t *vp)
   1044 {
   1045 
   1046 	mutex_enter(vp->v_interlock);
   1047 	holdrelel(vp);
   1048 	mutex_exit(vp->v_interlock);
   1049 }
   1050 
   1051 /*
   1052  * Recycle an unused vnode if the caller holds the last reference.
   1053  */
   1054 bool
   1055 vrecycle(vnode_t *vp)
   1056 {
   1057 	int error __diagused;
   1058 
   1059 	mutex_enter(vp->v_interlock);
   1060 
   1061 	/* If the vnode is already clean we're done. */
   1062 	VSTATE_WAIT_STABLE(vp);
   1063 	if (VSTATE_GET(vp) != VS_LOADED) {
   1064 		VSTATE_ASSERT(vp, VS_RECLAIMED);
   1065 		vrelel(vp, 0, LK_NONE);
   1066 		return true;
   1067 	}
   1068 
   1069 	/* Prevent further references until the vnode is locked. */
   1070 	VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
   1071 
   1072 	/* Make sure we hold the last reference. */
   1073 	if (vrefcnt(vp) != 1) {
   1074 		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
   1075 		mutex_exit(vp->v_interlock);
   1076 		return false;
   1077 	}
   1078 
   1079 	mutex_exit(vp->v_interlock);
   1080 
   1081 	/*
   1082 	 * On a leaf file system this lock will always succeed as we hold
   1083 	 * the last reference and prevent further references.
   1084 	 * On layered file systems, waiting for the lock would open a can of
   1085 	 * deadlocks, as the lower vnodes may have other active references.
   1086 	 */
   1087 	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
   1088 
   1089 	mutex_enter(vp->v_interlock);
   1090 	if (error) {
   1091 		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
   1092 		mutex_exit(vp->v_interlock);
   1093 		return false;
   1094 	}
   1095 
   1096 	KASSERT(vrefcnt(vp) == 1);
   1097 	vcache_reclaim(vp);
   1098 	vrelel(vp, 0, LK_NONE);
   1099 
   1100 	return true;
   1101 }
   1102 
   1103 /*
   1104  * Helper for vrevoke() to propagate suspension from lastmp
   1105  * to thismp.  Both args may be NULL.
   1106  * Returns the currently suspended file system or NULL.
   1107  */
   1108 static struct mount *
   1109 vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
   1110 {
   1111 	int error;
   1112 
   1113 	if (lastmp == thismp)
   1114 		return thismp;
   1115 
   1116 	if (lastmp != NULL)
   1117 		vfs_resume(lastmp);
   1118 
   1119 	if (thismp == NULL)
   1120 		return NULL;
   1121 
   1122 	do {
   1123 		error = vfs_suspend(thismp, 0);
   1124 	} while (error == EINTR || error == ERESTART);
   1125 
   1126 	if (error == 0)
   1127 		return thismp;
   1128 
   1129 	KASSERT(error == EOPNOTSUPP || error == ENOENT);
   1130 	return NULL;
   1131 }
   1132 
   1133 /*
   1134  * Eliminate all activity associated with the requested vnode
   1135  * and with all vnodes aliased to the requested vnode.
   1136  */
   1137 void
   1138 vrevoke(vnode_t *vp)
   1139 {
   1140 	struct mount *mp;
   1141 	vnode_t *vq;
   1142 	enum vtype type;
   1143 	dev_t dev;
   1144 
   1145 	KASSERT(vrefcnt(vp) > 0);
   1146 
   1147 	mp = vrevoke_suspend_next(NULL, vp->v_mount);
   1148 
   1149 	mutex_enter(vp->v_interlock);
   1150 	VSTATE_WAIT_STABLE(vp);
   1151 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
   1152 		mutex_exit(vp->v_interlock);
   1153 	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
   1154 		atomic_inc_uint(&vp->v_usecount);
   1155 		mutex_exit(vp->v_interlock);
   1156 		vgone(vp);
   1157 	} else {
   1158 		dev = vp->v_rdev;
   1159 		type = vp->v_type;
   1160 		mutex_exit(vp->v_interlock);
   1161 
   1162 		while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
   1163 			mp = vrevoke_suspend_next(mp, vq->v_mount);
   1164 			vgone(vq);
   1165 		}
   1166 	}
   1167 	vrevoke_suspend_next(mp, NULL);
   1168 }
   1169 
   1170 /*
   1171  * Eliminate all activity associated with a vnode in preparation for
   1172  * reuse.  Drops a reference from the vnode.
   1173  */
   1174 void
   1175 vgone(vnode_t *vp)
   1176 {
   1177 	int lktype;
   1178 
   1179 	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
   1180 
   1181 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   1182 	lktype = LK_EXCLUSIVE;
   1183 	mutex_enter(vp->v_interlock);
   1184 	VSTATE_WAIT_STABLE(vp);
   1185 	if (VSTATE_GET(vp) == VS_LOADED) {
   1186 		VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
   1187 		vcache_reclaim(vp);
   1188 		lktype = LK_NONE;
   1189 	}
   1190 	VSTATE_ASSERT(vp, VS_RECLAIMED);
   1191 	vrelel(vp, 0, lktype);
   1192 }
   1193 
   1194 static inline uint32_t
   1195 vcache_hash(const struct vcache_key *key)
   1196 {
   1197 	uint32_t hash = HASH32_BUF_INIT;
   1198 
   1199 	KASSERT(key->vk_key_len > 0);
   1200 
   1201 	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
   1202 	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
   1203 	return hash;
   1204 }
   1205 
   1206 static int
   1207 vcache_stats(struct hashstat_sysctl *hs, bool fill)
   1208 {
   1209 	vnode_impl_t *vip;
   1210 	uint64_t chain;
   1211 
   1212 	strlcpy(hs->hash_name, "vcache", sizeof(hs->hash_name));
   1213 	strlcpy(hs->hash_desc, "vnode cache hash", sizeof(hs->hash_desc));
   1214 	if (!fill)
   1215 		return 0;
   1216 
   1217 	hs->hash_size = vcache_hashmask + 1;
   1218 
   1219 	for (size_t i = 0; i < hs->hash_size; i++) {
   1220 		chain = 0;
   1221 		mutex_enter(&vcache_lock);
   1222 		SLIST_FOREACH(vip, &vcache_hashtab[i], vi_hash) {
   1223 			chain++;
   1224 		}
   1225 		mutex_exit(&vcache_lock);
   1226 		if (chain > 0) {
   1227 			hs->hash_used++;
   1228 			hs->hash_items += chain;
   1229 			if (chain > hs->hash_maxchain)
   1230 				hs->hash_maxchain = chain;
   1231 		}
   1232 		preempt_point();
   1233 	}
   1234 
   1235 	return 0;
   1236 }
   1237 
   1238 static void
   1239 vcache_init(void)
   1240 {
   1241 
   1242 	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
   1243 	    0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
   1244 	KASSERT(vcache_pool != NULL);
   1245 	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
   1246 	cv_init(&vcache_cv, "vcache");
   1247 	vcache_hashsize = desiredvnodes;
   1248 	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
   1249 	    &vcache_hashmask);
   1250 	hashstat_register("vcache", vcache_stats);
   1251 }
   1252 
   1253 static void
   1254 vcache_reinit(void)
   1255 {
   1256 	int i;
   1257 	uint32_t hash;
   1258 	u_long oldmask, newmask;
   1259 	struct hashhead *oldtab, *newtab;
   1260 	vnode_impl_t *vip;
   1261 
   1262 	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
   1263 	mutex_enter(&vcache_lock);
   1264 	oldtab = vcache_hashtab;
   1265 	oldmask = vcache_hashmask;
   1266 	vcache_hashsize = desiredvnodes;
   1267 	vcache_hashtab = newtab;
   1268 	vcache_hashmask = newmask;
   1269 	for (i = 0; i <= oldmask; i++) {
   1270 		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
   1271 			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
   1272 			hash = vcache_hash(&vip->vi_key);
   1273 			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
   1274 			    vip, vi_hash);
   1275 		}
   1276 	}
   1277 	mutex_exit(&vcache_lock);
   1278 	hashdone(oldtab, HASH_SLIST, oldmask);
   1279 }
   1280 
   1281 static inline vnode_impl_t *
   1282 vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
   1283 {
   1284 	struct hashhead *hashp;
   1285 	vnode_impl_t *vip;
   1286 
   1287 	KASSERT(mutex_owned(&vcache_lock));
   1288 
   1289 	hashp = &vcache_hashtab[hash & vcache_hashmask];
   1290 	SLIST_FOREACH(vip, hashp, vi_hash) {
   1291 		if (key->vk_mount != vip->vi_key.vk_mount)
   1292 			continue;
   1293 		if (key->vk_key_len != vip->vi_key.vk_key_len)
   1294 			continue;
   1295 		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
   1296 			continue;
   1297 		return vip;
   1298 	}
   1299 	return NULL;
   1300 }
   1301 
   1302 /*
   1303  * Allocate a new, uninitialized vcache node.
   1304  */
   1305 static vnode_impl_t *
   1306 vcache_alloc(void)
   1307 {
   1308 	vnode_impl_t *vip;
   1309 	vnode_t *vp;
   1310 
   1311 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
   1312 	vp = VIMPL_TO_VNODE(vip);
   1313 	memset(vip, 0, sizeof(*vip));
   1314 
   1315 	rw_init(&vip->vi_lock);
   1316 	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
   1317 
   1318 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
   1319 	klist_init(&vp->v_klist);
   1320 	cv_init(&vp->v_cv, "vnode");
   1321 	cache_vnode_init(vp);
   1322 
   1323 	vp->v_usecount = 1;
   1324 	vp->v_type = VNON;
   1325 	vp->v_size = vp->v_writesize = VSIZENOTSET;
   1326 
   1327 	vip->vi_state = VS_LOADING;
   1328 
   1329 	lru_requeue(vp, &lru_list[LRU_FREE]);
   1330 
   1331 	return vip;
   1332 }
   1333 
   1334 /*
   1335  * Deallocate a vcache node in state VS_LOADING.
   1336  *
   1337  * vcache_lock held on entry and released on return.
   1338  */
   1339 static void
   1340 vcache_dealloc(vnode_impl_t *vip)
   1341 {
   1342 	vnode_t *vp;
   1343 
   1344 	KASSERT(mutex_owned(&vcache_lock));
   1345 
   1346 	vp = VIMPL_TO_VNODE(vip);
   1347 	vfs_ref(dead_rootmount);
   1348 	vfs_insmntque(vp, dead_rootmount);
   1349 	mutex_enter(vp->v_interlock);
   1350 	vp->v_op = dead_vnodeop_p;
   1351 	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
   1352 	mutex_exit(&vcache_lock);
   1353 	vrelel(vp, 0, LK_NONE);
   1354 }
   1355 
   1356 /*
   1357  * Free an unused, unreferenced vcache node.
   1358  * v_interlock locked on entry.
   1359  */
   1360 static void
   1361 vcache_free(vnode_impl_t *vip)
   1362 {
   1363 	vnode_t *vp;
   1364 
   1365 	vp = VIMPL_TO_VNODE(vip);
   1366 	KASSERT(mutex_owned(vp->v_interlock));
   1367 
   1368 	KASSERT(vrefcnt(vp) == 0);
   1369 	KASSERT(vp->v_holdcnt == 0);
   1370 	KASSERT(vp->v_writecount == 0);
   1371 	lru_requeue(vp, NULL);
   1372 	mutex_exit(vp->v_interlock);
   1373 
   1374 	vfs_insmntque(vp, NULL);
   1375 	if (vp->v_type == VBLK || vp->v_type == VCHR)
   1376 		spec_node_destroy(vp);
   1377 
   1378 	mutex_obj_free(vp->v_interlock);
   1379 	rw_destroy(&vip->vi_lock);
   1380 	uvm_obj_destroy(&vp->v_uobj, true);
   1381 	klist_fini(&vp->v_klist);
   1382 	cv_destroy(&vp->v_cv);
   1383 	cache_vnode_fini(vp);
   1384 	pool_cache_put(vcache_pool, vip);
   1385 }
   1386 
   1387 /*
   1388  * Try to get an initial reference on this cached vnode.
   1389  * Returns zero on success or EBUSY if the vnode state is not LOADED.
   1390  *
   1391  * NB: lockless code sequences may rely on this not blocking.
   1392  */
   1393 int
   1394 vcache_tryvget(vnode_t *vp)
   1395 {
   1396 	u_int use, next;
   1397 
   1398 	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
   1399 		if (__predict_false((use & VUSECOUNT_GATE) == 0)) {
   1400 			return EBUSY;
   1401 		}
   1402 		next = atomic_cas_uint(&vp->v_usecount, use, use + 1);
   1403 		if (__predict_true(next == use)) {
   1404 			return 0;
   1405 		}
   1406 	}
   1407 }
   1408 
   1409 /*
   1410  * Try to get an initial reference on this cached vnode.
   1411  * Returns zero on success or ENOENT if the vnode has been reclaimed.
   1412  * Will wait for the vnode state to be stable.
   1413  *
   1414  * v_interlock locked on entry and unlocked on exit.
   1415  */
   1416 int
   1417 vcache_vget(vnode_t *vp)
   1418 {
   1419 
   1420 	KASSERT(mutex_owned(vp->v_interlock));
   1421 
   1422 	/* Increment the hold count to prevent the vnode from disappearing. */
   1423 	vp->v_holdcnt++;
   1424 	VSTATE_WAIT_STABLE(vp);
   1425 	vp->v_holdcnt--;
   1426 
   1427 	/* If this was the last reference to a reclaimed vnode, free it now. */
   1428 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
   1429 		if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
   1430 			vcache_free(VNODE_TO_VIMPL(vp));
   1431 		else
   1432 			mutex_exit(vp->v_interlock);
   1433 		return ENOENT;
   1434 	}
   1435 	VSTATE_ASSERT(vp, VS_LOADED);
   1436 	atomic_inc_uint(&vp->v_usecount);
   1437 	mutex_exit(vp->v_interlock);
   1438 
   1439 	return 0;
   1440 }
   1441 
   1442 /*
   1443  * Get a vnode / fs node pair by key and return it referenced through vpp.
   1444  */
   1445 int
   1446 vcache_get(struct mount *mp, const void *key, size_t key_len,
   1447     struct vnode **vpp)
   1448 {
   1449 	int error;
   1450 	uint32_t hash;
   1451 	const void *new_key;
   1452 	struct vnode *vp;
   1453 	struct vcache_key vcache_key;
   1454 	vnode_impl_t *vip, *new_vip;
   1455 
   1456 	new_key = NULL;
   1457 	*vpp = NULL;
   1458 
   1459 	vcache_key.vk_mount = mp;
   1460 	vcache_key.vk_key = key;
   1461 	vcache_key.vk_key_len = key_len;
   1462 	hash = vcache_hash(&vcache_key);
   1463 
   1464 again:
   1465 	mutex_enter(&vcache_lock);
   1466 	vip = vcache_hash_lookup(&vcache_key, hash);
   1467 
   1468 	/* If found, take a reference or retry. */
   1469 	if (__predict_true(vip != NULL)) {
   1470 		/*
   1471 		 * If the vnode is loading we cannot take the v_interlock
   1472 		 * here as it might change during load (see uvm_obj_setlock()).
   1473 		 * As changing state from VS_LOADING requires both vcache_lock
   1474 		 * and v_interlock it is safe to test with vcache_lock held.
   1475 		 *
   1476 		 * Wait for vnodes changing state from VS_LOADING and retry.
   1477 		 */
   1478 		if (__predict_false(vip->vi_state == VS_LOADING)) {
   1479 			cv_wait(&vcache_cv, &vcache_lock);
   1480 			mutex_exit(&vcache_lock);
   1481 			goto again;
   1482 		}
   1483 		vp = VIMPL_TO_VNODE(vip);
   1484 		mutex_enter(vp->v_interlock);
   1485 		mutex_exit(&vcache_lock);
   1486 		error = vcache_vget(vp);
   1487 		if (error == ENOENT)
   1488 			goto again;
   1489 		if (error == 0)
   1490 			*vpp = vp;
   1491 		KASSERT((error != 0) == (*vpp == NULL));
   1492 		return error;
   1493 	}
   1494 	mutex_exit(&vcache_lock);
   1495 
   1496 	/* Allocate and initialize a new vcache / vnode pair. */
   1497 	error = vfs_busy(mp);
   1498 	if (error)
   1499 		return error;
   1500 	new_vip = vcache_alloc();
   1501 	new_vip->vi_key = vcache_key;
   1502 	vp = VIMPL_TO_VNODE(new_vip);
   1503 	mutex_enter(&vcache_lock);
   1504 	vip = vcache_hash_lookup(&vcache_key, hash);
   1505 	if (vip == NULL) {
   1506 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
   1507 		    new_vip, vi_hash);
   1508 		vip = new_vip;
   1509 	}
   1510 
   1511 	/* If another thread beat us to inserting this node, retry. */
   1512 	if (vip != new_vip) {
   1513 		vcache_dealloc(new_vip);
   1514 		vfs_unbusy(mp);
   1515 		goto again;
   1516 	}
   1517 	mutex_exit(&vcache_lock);
   1518 
   1519 	/* Load the fs node.  Exclusive as new_vip is VS_LOADING. */
   1520 	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
   1521 	if (error) {
   1522 		mutex_enter(&vcache_lock);
   1523 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
   1524 		    new_vip, vnode_impl, vi_hash);
   1525 		vcache_dealloc(new_vip);
   1526 		vfs_unbusy(mp);
   1527 		KASSERT(*vpp == NULL);
   1528 		return error;
   1529 	}
   1530 	KASSERT(new_key != NULL);
   1531 	KASSERT(memcmp(key, new_key, key_len) == 0);
   1532 	KASSERT(vp->v_op != NULL);
   1533 	vfs_insmntque(vp, mp);
   1534 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
   1535 		vp->v_vflag |= VV_MPSAFE;
   1536 	vfs_ref(mp);
   1537 	vfs_unbusy(mp);
   1538 
   1539 	/* Finished loading, finalize node. */
   1540 	mutex_enter(&vcache_lock);
   1541 	new_vip->vi_key.vk_key = new_key;
   1542 	mutex_enter(vp->v_interlock);
   1543 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
   1544 	mutex_exit(vp->v_interlock);
   1545 	mutex_exit(&vcache_lock);
   1546 	*vpp = vp;
   1547 	return 0;
   1548 }
   1549 
   1550 /*
   1551  * Create a new vnode / fs node pair and return it referenced through vpp.
   1552  */
   1553 int
   1554 vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
   1555     kauth_cred_t cred, void *extra, struct vnode **vpp)
   1556 {
   1557 	int error;
   1558 	uint32_t hash;
   1559 	struct vnode *vp, *ovp;
   1560 	vnode_impl_t *vip, *ovip;
   1561 
   1562 	*vpp = NULL;
   1563 
   1564 	/* Allocate and initialize a new vcache / vnode pair. */
   1565 	error = vfs_busy(mp);
   1566 	if (error)
   1567 		return error;
   1568 	vip = vcache_alloc();
   1569 	vip->vi_key.vk_mount = mp;
   1570 	vp = VIMPL_TO_VNODE(vip);
   1571 
   1572 	/* Create and load the fs node. */
   1573 	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
   1574 	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
   1575 	if (error) {
   1576 		mutex_enter(&vcache_lock);
   1577 		vcache_dealloc(vip);
   1578 		vfs_unbusy(mp);
   1579 		KASSERT(*vpp == NULL);
   1580 		return error;
   1581 	}
   1582 	KASSERT(vp->v_op != NULL);
   1583 	KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
   1584 	if (vip->vi_key.vk_key_len > 0) {
   1585 		KASSERT(vip->vi_key.vk_key != NULL);
   1586 		hash = vcache_hash(&vip->vi_key);
   1587 
   1588 		/*
   1589 		 * Wait for any previous instance to be reclaimed,
   1590 		 * then insert the new node.
   1591 		 */
   1592 		mutex_enter(&vcache_lock);
   1593 		while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
   1594 			ovp = VIMPL_TO_VNODE(ovip);
   1595 			mutex_enter(ovp->v_interlock);
   1596 			mutex_exit(&vcache_lock);
   1597 			error = vcache_vget(ovp);
   1598 			KASSERT(error == ENOENT);
   1599 			mutex_enter(&vcache_lock);
   1600 		}
   1601 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
   1602 		    vip, vi_hash);
   1603 		mutex_exit(&vcache_lock);
   1604 	}
   1605 	vfs_insmntque(vp, mp);
   1606 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
   1607 		vp->v_vflag |= VV_MPSAFE;
   1608 	vfs_ref(mp);
   1609 	vfs_unbusy(mp);
   1610 
   1611 	/* Finished loading, finalize node. */
   1612 	mutex_enter(&vcache_lock);
   1613 	mutex_enter(vp->v_interlock);
   1614 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
   1615 	mutex_exit(&vcache_lock);
   1616 	mutex_exit(vp->v_interlock);
   1617 	*vpp = vp;
   1618 	return 0;
   1619 }
   1620 
   1621 /*
   1622  * Prepare key change: update the old cache node's key and lock the
   1623  * new cache node.  Return an error if the new node already exists.
   1624  */
   1625 int
   1626 vcache_rekey_enter(struct mount *mp, struct vnode *vp,
   1627     const void *old_key, size_t old_key_len,
   1628     const void *new_key, size_t new_key_len)
   1629 {
   1630 	uint32_t old_hash, new_hash;
   1631 	struct vcache_key old_vcache_key, new_vcache_key;
   1632 	vnode_impl_t *vip, *new_vip;
   1633 
   1634 	old_vcache_key.vk_mount = mp;
   1635 	old_vcache_key.vk_key = old_key;
   1636 	old_vcache_key.vk_key_len = old_key_len;
   1637 	old_hash = vcache_hash(&old_vcache_key);
   1638 
   1639 	new_vcache_key.vk_mount = mp;
   1640 	new_vcache_key.vk_key = new_key;
   1641 	new_vcache_key.vk_key_len = new_key_len;
   1642 	new_hash = vcache_hash(&new_vcache_key);
   1643 
   1644 	new_vip = vcache_alloc();
   1645 	new_vip->vi_key = new_vcache_key;
   1646 
   1647 	/* Insert the locked new node to use as a placeholder. */
   1648 	mutex_enter(&vcache_lock);
   1649 	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
   1650 	if (vip != NULL) {
   1651 		vcache_dealloc(new_vip);
   1652 		return EEXIST;
   1653 	}
   1654 	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
   1655 	    new_vip, vi_hash);
   1656 
   1657 	/* Replace the old node's key with the temporary copy. */
   1658 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
   1659 	KASSERT(vip != NULL);
   1660 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
   1661 	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
   1662 	vip->vi_key = old_vcache_key;
   1663 	mutex_exit(&vcache_lock);
   1664 	return 0;
   1665 }
   1666 
   1667 /*
   1668  * Key change complete: update old node and remove placeholder.
   1669  */
   1670 void
   1671 vcache_rekey_exit(struct mount *mp, struct vnode *vp,
   1672     const void *old_key, size_t old_key_len,
   1673     const void *new_key, size_t new_key_len)
   1674 {
   1675 	uint32_t old_hash, new_hash;
   1676 	struct vcache_key old_vcache_key, new_vcache_key;
   1677 	vnode_impl_t *vip, *new_vip;
   1678 	struct vnode *new_vp;
   1679 
   1680 	old_vcache_key.vk_mount = mp;
   1681 	old_vcache_key.vk_key = old_key;
   1682 	old_vcache_key.vk_key_len = old_key_len;
   1683 	old_hash = vcache_hash(&old_vcache_key);
   1684 
   1685 	new_vcache_key.vk_mount = mp;
   1686 	new_vcache_key.vk_key = new_key;
   1687 	new_vcache_key.vk_key_len = new_key_len;
   1688 	new_hash = vcache_hash(&new_vcache_key);
   1689 
   1690 	mutex_enter(&vcache_lock);
   1691 
   1692 	/* Lookup old and new node. */
   1693 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
   1694 	KASSERT(vip != NULL);
   1695 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
   1696 
   1697 	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
   1698 	KASSERT(new_vip != NULL);
   1699 	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
   1700 	new_vp = VIMPL_TO_VNODE(new_vip);
   1701 	mutex_enter(new_vp->v_interlock);
   1702 	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
   1703 	mutex_exit(new_vp->v_interlock);
   1704 
   1705 	/* Rekey old node and put it onto its new hashlist. */
   1706 	vip->vi_key = new_vcache_key;
   1707 	if (old_hash != new_hash) {
   1708 		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
   1709 		    vip, vnode_impl, vi_hash);
   1710 		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
   1711 		    vip, vi_hash);
   1712 	}
   1713 
   1714 	/* Remove new node used as placeholder. */
   1715 	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
   1716 	    new_vip, vnode_impl, vi_hash);
   1717 	vcache_dealloc(new_vip);
   1718 }
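
/*
 * Illustrative sketch, not part of this file: a file system whose
 * cache key changes during an operation such as rename would bracket
 * the update of its private key storage with the two calls above.
 * The myfs_set_ino() helper and the ino variables below are
 * hypothetical.  If vcache_rekey_enter() fails, the new key is
 * already cached and the operation must be aborted.
 *
 *	error = vcache_rekey_enter(mp, vp, &old_ino, sizeof(old_ino),
 *	    &new_ino, sizeof(new_ino));
 *	if (error != 0)
 *		return error;
 *	myfs_set_ino(vp, new_ino);
 *	vcache_rekey_exit(mp, vp, &old_ino, sizeof(old_ino),
 *	    &new_ino, sizeof(new_ino));
 */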

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with the vnode locked and will return unlocked.
 * Must be called with the interlock held, and will return with it held.
 */
static void
vcache_reclaim(vnode_t *vp)
{
	lwp_t *l = curlwp;
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp = vp->v_mount;
	uint32_t hash;
	uint8_t temp_buf[64], *temp_key;
	size_t temp_key_len;
	bool recycle, active;
	int error;

	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vrefcnt(vp) != 0);

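	/*
	 * A reference count above one means the vnode is still in use;
	 * remember that so an active device node can be revoked below.
	 */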
	active = (vrefcnt(vp) > 1);
	temp_key_len = vip->vi_key.vk_key_len;
	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_RECLAIMING);
	mutex_exit(vp->v_interlock);

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_EXECMAP) != 0) {
		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	vp->v_iflag |= VI_DEADCHECK; /* for genfs_getpages() */
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);

	/*
	 * With the vnode state set to reclaiming, purge the name cache
	 * immediately to prevent new handles on the vnode; existing
	 * threads trying to get a handle will notice the changed state
	 * and abort.
	 */
	cache_purge(vp);

	/*
	 * Replace the vnode key with a temporary copy, so that hash
	 * lookups stay consistent while VOP_RECLAIM frees the file
	 * system private data holding the original key.
	 */
	if (temp_key_len > sizeof(temp_buf)) {
		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
	} else {
		temp_key = temp_buf;
	}
	if (vip->vi_key.vk_key_len > 0) {
		mutex_enter(&vcache_lock);
		memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
		vip->vi_key.vk_key = temp_key;
		mutex_exit(&vcache_lock);
	}

	fstrans_start(mp);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.
	 */
	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
	if (error != 0) {
		if (wapbl_vphaswapbl(vp))
			WAPBL_DISCARD(wapbl_vptomp(vp));
		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
	}
	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
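	/*
	 * A device vnode that is still in use must have its special
	 * node revoked so remaining opens of the device are cut off.
	 */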
	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
		spec_node_revoke(vp);
	}

	/*
	 * Disassociate the underlying file system from the vnode.
	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
	 * would no longer function.
	 */
	VOP_INACTIVE(vp, &recycle);
	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	KASSERT(vp->v_data == NULL);
	KASSERT((vp->v_iflag & VI_PAGES) == 0);

	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}

	/* Remove from vnode cache. */
	if (vip->vi_key.vk_key_len > 0) {
		hash = vcache_hash(&vip->vi_key);
		mutex_enter(&vcache_lock);
		KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
		    vip, vnode_impl, vi_hash);
		mutex_exit(&vcache_lock);
	}
	if (temp_key != temp_buf)
		kmem_free(temp_key, temp_key_len);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_vflag |= VV_LOCKSWORK;
	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
	vp->v_tag = VT_NON;
	/*
	 * Don't check for interest in NOTE_REVOKE; it's always posted
	 * because it sets EV_EOF.
	 */
	KNOTE(&vp->v_klist, NOTE_REVOKE);
	mutex_exit(vp->v_interlock);

	/*
	 * Move to dead mount.  Must be after changing the operations
	 * vector as vnode operations enter the mount before using the
	 * operations vector.  See sys/kern/vnode_if.c.
	 */
	vp->v_vflag &= ~VV_ROOT;
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);

#ifdef PAX_SEGVGUARD
	pax_segvguard_cleanup(vp);
#endif /* PAX_SEGVGUARD */

	mutex_enter(vp->v_interlock);
	fstrans_done(mp);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}

/*
 * Disassociate the underlying file system from an open device vnode
 * and make it anonymous.
 *
 * Vnode unlocked on entry, drops a reference to the vnode.
 */
void
vcache_make_anon(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	uint32_t hash;
	bool recycle;

	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
	KASSERT(vp->v_mount == dead_rootmount ||
	    fstrans_is_owner(vp->v_mount));
	VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);

	/* Remove from vnode cache. */
	hash = vcache_hash(&vip->vi_key);
	mutex_enter(&vcache_lock);
	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
	    vip, vnode_impl, vi_hash);
	vip->vi_key.vk_mount = dead_rootmount;
	vip->vi_key.vk_key_len = 0;
	vip->vi_key.vk_key = NULL;
	mutex_exit(&vcache_lock);

	/*
	 * Disassociate the underlying file system from the vnode.
	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
	 * would no longer function.
	 */
	if (vn_lock(vp, LK_EXCLUSIVE)) {
		vnpanic(vp, "%s: cannot lock", __func__);
	}
	VOP_INACTIVE(vp, &recycle);
	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	/* Purge name cache. */
	cache_purge(vp);

	/* Done with purge, change operations vector. */
	mutex_enter(vp->v_interlock);
	vp->v_op = spec_vnodeop_p;
	vp->v_vflag |= VV_MPSAFE;
	vp->v_vflag &= ~VV_LOCKSWORK;
	mutex_exit(vp->v_interlock);

	/*
	 * Move to dead mount.  Must be after changing the operations
	 * vector as vnode operations enter the mount before using the
	 * operations vector.  See sys/kern/vnode_if.c.
	 */
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);

	vrele(vp);
}
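
/*
 * Illustrative sketch, not part of this file: a caller that wants to
 * keep an open device usable after detaching it from its file system
 * takes an extra reference (consumed above) before the call, roughly
 * (devvp is hypothetical):
 *
 *	vref(devvp);
 *	vcache_make_anon(devvp);
 */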

/*
 * Update the outstanding I/O count and wake up waiters once the
 * count drains to zero.
 */
void
vwakeup(struct buf *bp)
{
	vnode_t *vp;

	if ((vp = bp->b_vp) == NULL)
		return;

	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));

	if (--vp->v_numoutput < 0)
		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
	if (vp->v_numoutput == 0)
		cv_broadcast(&vp->v_cv);
}
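
/*
 * Illustrative sketch, not part of this file: the count balanced
 * above is raised under the interlock when output is started on a
 * vnode, roughly:
 *
 *	mutex_enter(vp->v_interlock);
 *	vp->v_numoutput++;
 *	mutex_exit(vp->v_interlock);
 *
 * and vwakeup() is then called when the buffer's I/O completes.
 */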

/*
 * Test a vnode for being or becoming dead.  Returns one of:
 * EBUSY:  vnode is becoming dead; only returned with VDEAD_NOWAIT set.
 * ENOENT: vnode is dead.
 * 0:      otherwise.
 *
 * Whenever this function returns a non-zero value, all future
 * calls will also return a non-zero value.
 */
int
vdead_check(struct vnode *vp, int flags)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (!ISSET(flags, VDEAD_NOWAIT))
		VSTATE_WAIT_STABLE(vp);

	if (VSTATE_GET(vp) == VS_RECLAIMING) {
		KASSERT(ISSET(flags, VDEAD_NOWAIT));
		return EBUSY;
	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
		return ENOENT;
	}

	return 0;
}

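/*
 * Illustrative sketch, not part of this file: a caller that must not
 * sleep probes for a dying vnode like this:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		return error;
 *
 * where EBUSY means the vnode is becoming dead and ENOENT means it
 * is already dead.
 */

/*
 * Try to drain the vnode cache: wake the vdrain thread and wait for
 * the drain generation to advance twice, guaranteeing at least one
 * complete pass after this call.  Returns EBUSY if the number of
 * vnodes could not be brought below the desired target.
 */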
int
vfs_drainvnodes(void)
{
	int i, gen;

	mutex_enter(&vdrain_lock);
	for (i = 0; i < 2; i++) {
		gen = vdrain_gen;
		while (gen == vdrain_gen) {
			cv_broadcast(&vdrain_cv);
			cv_wait(&vdrain_gen_cv, &vdrain_lock);
		}
	}
	mutex_exit(&vdrain_lock);

	if (numvnodes >= desiredvnodes)
		return EBUSY;

	if (vcache_hashsize != desiredvnodes)
		vcache_reinit();

	return 0;
}

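/*
 * Print a description of the vnode on DIAGNOSTIC kernels, then
 * panic with the given message.
 */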
void
vnpanic(vnode_t *vp, const char *fmt, ...)
{
	va_list ap;

#ifdef DIAGNOSTIC
	vprint(NULL, vp);
#endif
	va_start(ap, fmt);
	vpanic(fmt, ap);
	va_end(ap);
}

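/*
 * Let tvp share fvp's vnode interlock: take a hold on fvp's lock
 * object, install it as tvp's interlock and free tvp's old one.
 */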
void
vshareilock(vnode_t *tvp, vnode_t *fvp)
{
	kmutex_t *oldlock;

	oldlock = tvp->v_interlock;
	mutex_obj_hold(fvp->v_interlock);
	tvp->v_interlock = fvp->v_interlock;
	mutex_obj_free(oldlock);
}