      1 /*	$NetBSD: vfs_vnode.c,v 1.53.2.3 2017/03/20 06:57:48 pgoyette Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 /*
     34  * Copyright (c) 1989, 1993
     35  *	The Regents of the University of California.  All rights reserved.
     36  * (c) UNIX System Laboratories, Inc.
     37  * All or some portions of this file are derived from material licensed
     38  * to the University of California by American Telephone and Telegraph
     39  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     40  * the permission of UNIX System Laboratories, Inc.
     41  *
     42  * Redistribution and use in source and binary forms, with or without
     43  * modification, are permitted provided that the following conditions
     44  * are met:
     45  * 1. Redistributions of source code must retain the above copyright
     46  *    notice, this list of conditions and the following disclaimer.
     47  * 2. Redistributions in binary form must reproduce the above copyright
     48  *    notice, this list of conditions and the following disclaimer in the
     49  *    documentation and/or other materials provided with the distribution.
     50  * 3. Neither the name of the University nor the names of its contributors
     51  *    may be used to endorse or promote products derived from this software
     52  *    without specific prior written permission.
     53  *
     54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     64  * SUCH DAMAGE.
     65  *
     66  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
     67  */
     68 
     69 /*
     70  * The vnode cache subsystem.
     71  *
     72  * Life-cycle
     73  *
     74  *	Normally, there are two points where new vnodes are created:
     75  *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
     76  *	starts in one of the following ways:
     77  *
     78  *	- Allocation, via vcache_get(9) or vcache_new(9).
     79  *	- Reclamation of inactive vnode, via vcache_vget(9).
     80  *
      81  *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
      82  *	was another, traditional way.  Currently, only the draining thread
      83  *	recycles vnodes.  This behaviour might be revisited.
     84  *
      85  *	The life-cycle ends when the last reference is dropped, usually in
      86  *	VOP_REMOVE(9).  In such a case, VOP_INACTIVE(9) is called to inform the
      87  *	file system that the vnode is inactive; via this call the file system
      88  *	indicates whether the vnode can be recycled (usually by checking its
      89  *	own references, e.g. the link count, or whether the file was removed).
      90  *
      91  *	Depending on the indication, the vnode is put onto a free list (cache),
      92  *	or cleaned via vcache_reclaim(), which calls VOP_RECLAIM(9) to
      93  *	disassociate the underlying file system; the vnode is then destroyed.
     94  *
     95  * Vnode state
     96  *
     97  *	Vnode is always in one of six states:
     98  *	- MARKER	This is a marker vnode to help list traversal.  It
     99  *			will never change its state.
     100  *	- LOADING	Vnode is associating with the underlying file
     101  *			system and is not yet ready to use.
     102  *	- ACTIVE	Vnode has an associated underlying file system
     103  *			and is ready to use.
     104  *	- BLOCKED	Vnode is active but cannot get new references.
     105  *	- RECLAIMING	Vnode is disassociating from the underlying file
     106  *			system.
     107  *	- RECLAIMED	Vnode has disassociated from the underlying file
     108  *			system and is dead.
    109  *
    110  *	Valid state changes are:
    111  *	LOADING -> ACTIVE
    112  *			Vnode has been initialised in vcache_get() or
    113  *			vcache_new() and is ready to use.
    114  *	ACTIVE -> RECLAIMING
    115  *			Vnode starts disassociation from underlying file
     116  *			system in vcache_reclaim().
    117  *	RECLAIMING -> RECLAIMED
    118  *			Vnode finished disassociation from underlying file
     119  *			system in vcache_reclaim().
    120  *	ACTIVE -> BLOCKED
    121  *			Either vcache_rekey*() is changing the vnode key or
    122  *			vrelel() is about to call VOP_INACTIVE().
    123  *	BLOCKED -> ACTIVE
    124  *			The block condition is over.
    125  *	LOADING -> RECLAIMED
    126  *			Either vcache_get() or vcache_new() failed to
    127  *			associate the underlying file system or vcache_rekey*()
    128  *			drops a vnode used as placeholder.
    129  *
     130  *	Of these states LOADING, BLOCKED and RECLAIMING are intermediate,
     131  *	and it is possible to wait for a state change.
    132  *
    133  *	State is protected with v_interlock with one exception:
    134  *	to change from LOADING both v_interlock and vcache_lock must be held
    135  *	so it is possible to check "state == LOADING" without holding
    136  *	v_interlock.  See vcache_get() for details.
    137  *
    138  * Reference counting
    139  *
     140  *	A vnode is considered active if its reference count
     141  *	(vnode_t::v_usecount) is non-zero.  The count is maintained using the
     142  *	vref(9), vrele(9) and vput(9) routines.  Common holders of references
     143  *	are open files, current working directories and mount points.
    144  *
    145  * Note on v_usecount and its locking
    146  *
     147  *	At nearly all points where it is known that v_usecount could be
     148  *	zero, vnode_t::v_interlock will be held.  To change v_usecount away
     149  *	from zero, the interlock must be held.  To change it from a non-zero
     150  *	value to zero, the interlock must again be held.
    151  *
    152  *	Changing the usecount from a non-zero value to a non-zero value can
    153  *	safely be done using atomic operations, without the interlock held.
    154  *
    155  */
    156 
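         /*
          * Illustrative sketch of the usual consumer-side life-cycle.  This
          * is an editorial example, not code from this file; "key" stands
          * for a file system specific vcache key (e.g. an inode number):
          *
          *	struct vnode *vp;
          *	int error;
          *
          *	error = vcache_get(mp, &key, sizeof(key), &vp);
          *	if (error != 0)
          *		return error;
          *	error = vn_lock(vp, LK_SHARED);
          *	if (error != 0) {
          *		vrele(vp);
          *		return error;
          *	}
          *	... VOP_*() calls on the locked, referenced vnode ...
          *	vput(vp);	vput(9) unlocks and drops the reference
          */
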
    157 #include <sys/cdefs.h>
    158 __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.53.2.3 2017/03/20 06:57:48 pgoyette Exp $");
    159 
    160 #include <sys/param.h>
    161 #include <sys/kernel.h>
    162 
    163 #include <sys/atomic.h>
    164 #include <sys/buf.h>
    165 #include <sys/conf.h>
    166 #include <sys/device.h>
    167 #include <sys/hash.h>
    168 #include <sys/kauth.h>
    169 #include <sys/kmem.h>
    170 #include <sys/kthread.h>
    171 #include <sys/module.h>
    172 #include <sys/mount.h>
    173 #include <sys/namei.h>
    174 #include <sys/syscallargs.h>
    175 #include <sys/sysctl.h>
    176 #include <sys/systm.h>
    177 #include <sys/vnode_impl.h>
    178 #include <sys/wapbl.h>
    179 #include <sys/fstrans.h>
    180 
    181 #include <uvm/uvm.h>
    182 #include <uvm/uvm_readahead.h>
    183 
    184 /* Flags to vrelel. */
    185 #define	VRELEL_ASYNC_RELE	0x0001	/* Always defer to vrele thread. */
    186 
    187 u_int			numvnodes		__cacheline_aligned;
    188 
    189 /*
    190  * There are three lru lists: one holds vnodes waiting for async release,
    191  * one is for vnodes which have no buffer/page references and
    192  * one for those which do (i.e. v_holdcnt is non-zero).
    193  */
    194 static vnodelst_t	lru_vrele_list		__cacheline_aligned;
    195 static vnodelst_t	lru_free_list		__cacheline_aligned;
    196 static vnodelst_t	lru_hold_list		__cacheline_aligned;
    197 static kmutex_t		vdrain_lock		__cacheline_aligned;
    198 static kcondvar_t	vdrain_cv		__cacheline_aligned;
    199 static int		vdrain_gen;
    200 static kcondvar_t	vdrain_gen_cv;
    201 static bool		vdrain_retry;
    202 static lwp_t *		vdrain_lwp;
    203 SLIST_HEAD(hashhead, vnode_impl);
    204 static kmutex_t		vcache_lock		__cacheline_aligned;
    205 static kcondvar_t	vcache_cv		__cacheline_aligned;
    206 static u_int		vcache_hashsize;
    207 static u_long		vcache_hashmask;
    208 static struct hashhead	*vcache_hashtab		__cacheline_aligned;
    209 static pool_cache_t	vcache_pool;
    210 static void		lru_requeue(vnode_t *, vnodelst_t *);
    211 static vnodelst_t *	lru_which(vnode_t *);
    212 static vnode_impl_t *	vcache_alloc(void);
    213 static void		vcache_free(vnode_impl_t *);
    214 static void		vcache_init(void);
    215 static void		vcache_reinit(void);
     216 static void		vcache_reclaim(vnode_t *);
    217 static void		vrelel(vnode_t *, int);
    218 static void		vdrain_thread(void *);
    219 static void		vnpanic(vnode_t *, const char *, ...)
    220     __printflike(2, 3);
    221 
    222 /* Routines having to do with the management of the vnode table. */
    223 extern struct mount	*dead_rootmount;
    224 extern int		(**dead_vnodeop_p)(void *);
    225 extern struct vfsops	dead_vfsops;
    226 
    227 /* Vnode state operations and diagnostics. */
    228 
    229 #if defined(DIAGNOSTIC)
    230 
    231 #define VSTATE_GET(vp) \
    232 	vstate_assert_get((vp), __func__, __LINE__)
    233 #define VSTATE_CHANGE(vp, from, to) \
    234 	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
    235 #define VSTATE_WAIT_STABLE(vp) \
    236 	vstate_assert_wait_stable((vp), __func__, __LINE__)
    237 #define VSTATE_ASSERT(vp, state) \
    238 	vstate_assert((vp), (state), __func__, __LINE__)
    239 
    240 static void
    241 vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line)
    242 {
    243 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    244 
    245 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    246 
    247 	if (__predict_true(vip->vi_state == state))
    248 		return;
    249 	vnpanic(vp, "state is %s, expected %s at %s:%d",
    250 	    vstate_name(vip->vi_state), vstate_name(state), func, line);
    251 }
    252 
    253 static enum vnode_state
    254 vstate_assert_get(vnode_t *vp, const char *func, int line)
    255 {
    256 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    257 
    258 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    259 	if (vip->vi_state == VS_MARKER)
    260 		vnpanic(vp, "state is %s at %s:%d",
    261 		    vstate_name(vip->vi_state), func, line);
    262 
    263 	return vip->vi_state;
    264 }
    265 
    266 static void
    267 vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
    268 {
    269 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    270 
    271 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    272 	if (vip->vi_state == VS_MARKER)
    273 		vnpanic(vp, "state is %s at %s:%d",
    274 		    vstate_name(vip->vi_state), func, line);
    275 
    276 	while (vip->vi_state != VS_ACTIVE && vip->vi_state != VS_RECLAIMED)
    277 		cv_wait(&vp->v_cv, vp->v_interlock);
    278 
    279 	if (vip->vi_state == VS_MARKER)
    280 		vnpanic(vp, "state is %s at %s:%d",
    281 		    vstate_name(vip->vi_state), func, line);
    282 }
    283 
    284 static void
    285 vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    286     const char *func, int line)
    287 {
    288 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    289 
    290 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    291 	if (from == VS_LOADING)
    292 		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);
    293 
    294 	if (from == VS_MARKER)
    295 		vnpanic(vp, "from is %s at %s:%d",
    296 		    vstate_name(from), func, line);
    297 	if (to == VS_MARKER)
    298 		vnpanic(vp, "to is %s at %s:%d",
    299 		    vstate_name(to), func, line);
    300 	if (vip->vi_state != from)
    301 		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
    302 		    vstate_name(vip->vi_state), vstate_name(from), func, line);
    303 	if ((from == VS_BLOCKED || to == VS_BLOCKED) && vp->v_usecount != 1)
    304 		vnpanic(vp, "%s to %s with usecount %d at %s:%d",
    305 		    vstate_name(from), vstate_name(to), vp->v_usecount,
    306 		    func, line);
    307 
    308 	vip->vi_state = to;
    309 	if (from == VS_LOADING)
    310 		cv_broadcast(&vcache_cv);
    311 	if (to == VS_ACTIVE || to == VS_RECLAIMED)
    312 		cv_broadcast(&vp->v_cv);
    313 }
    314 
    315 #else /* defined(DIAGNOSTIC) */
    316 
    317 #define VSTATE_GET(vp) \
    318 	(VNODE_TO_VIMPL((vp))->vi_state)
    319 #define VSTATE_CHANGE(vp, from, to) \
    320 	vstate_change((vp), (from), (to))
    321 #define VSTATE_WAIT_STABLE(vp) \
    322 	vstate_wait_stable((vp))
    323 #define VSTATE_ASSERT(vp, state)
    324 
    325 static void
    326 vstate_wait_stable(vnode_t *vp)
    327 {
    328 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    329 
    330 	while (vip->vi_state != VS_ACTIVE && vip->vi_state != VS_RECLAIMED)
    331 		cv_wait(&vp->v_cv, vp->v_interlock);
    332 }
    333 
    334 static void
    335 vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
    336 {
    337 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    338 
    339 	vip->vi_state = to;
    340 	if (from == VS_LOADING)
    341 		cv_broadcast(&vcache_cv);
    342 	if (to == VS_ACTIVE || to == VS_RECLAIMED)
    343 		cv_broadcast(&vp->v_cv);
    344 }
    345 
    346 #endif /* defined(DIAGNOSTIC) */
    347 
    348 void
    349 vfs_vnode_sysinit(void)
    350 {
    351 	int error __diagused;
    352 
    353 	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
    354 	KASSERT(dead_rootmount != NULL);
    355 	dead_rootmount->mnt_iflag = IMNT_MPSAFE;
    356 
    357 	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
    358 	TAILQ_INIT(&lru_free_list);
    359 	TAILQ_INIT(&lru_hold_list);
    360 	TAILQ_INIT(&lru_vrele_list);
    361 
    362 	vcache_init();
    363 
    364 	cv_init(&vdrain_cv, "vdrain");
    365 	cv_init(&vdrain_gen_cv, "vdrainwt");
    366 	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
    367 	    NULL, &vdrain_lwp, "vdrain");
    368 	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
    369 }
    370 
    371 /*
    372  * Allocate a new marker vnode.
    373  */
    374 vnode_t *
    375 vnalloc_marker(struct mount *mp)
    376 {
    377 	vnode_impl_t *vip;
    378 	vnode_t *vp;
    379 
    380 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
    381 	memset(vip, 0, sizeof(*vip));
    382 	vp = VIMPL_TO_VNODE(vip);
    383 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
    384 	vp->v_mount = mp;
    385 	vp->v_type = VBAD;
    386 	vip->vi_state = VS_MARKER;
    387 
    388 	return vp;
    389 }
    390 
    391 /*
    392  * Free a marker vnode.
    393  */
    394 void
    395 vnfree_marker(vnode_t *vp)
    396 {
    397 	vnode_impl_t *vip;
    398 
    399 	vip = VNODE_TO_VIMPL(vp);
    400 	KASSERT(vip->vi_state == VS_MARKER);
    401 	uvm_obj_destroy(&vp->v_uobj, true);
    402 	pool_cache_put(vcache_pool, vip);
    403 }
    404 
    405 /*
    406  * Test a vnode for being a marker vnode.
    407  */
    408 bool
    409 vnis_marker(vnode_t *vp)
    410 {
    411 
    412 	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
    413 }
    414 
    415 /*
    416  * Return the lru list this node should be on.
    417  */
    418 static vnodelst_t *
    419 lru_which(vnode_t *vp)
    420 {
    421 
    422 	KASSERT(mutex_owned(vp->v_interlock));
    423 
    424 	if (vp->v_holdcnt > 0)
    425 		return &lru_hold_list;
    426 	else
    427 		return &lru_free_list;
    428 }
    429 
    430 /*
     431  * Put the vnode at the end of the given list.  Either the current or the
     432  * new list may be NULL, as happens on vnode allocation and freeing.
     433  * Adjust numvnodes and signal the vdrain thread if there is work.
    434  */
    435 static void
    436 lru_requeue(vnode_t *vp, vnodelst_t *listhd)
    437 {
    438 	vnode_impl_t *vip;
    439 
    440 	mutex_enter(&vdrain_lock);
    441 	vip = VNODE_TO_VIMPL(vp);
    442 	if (vip->vi_lrulisthd != NULL)
    443 		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
    444 	else
    445 		numvnodes++;
    446 	vip->vi_lrulisthd = listhd;
    447 	if (vip->vi_lrulisthd != NULL)
    448 		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
    449 	else
    450 		numvnodes--;
    451 	if (numvnodes > desiredvnodes || listhd == &lru_vrele_list)
    452 		cv_broadcast(&vdrain_cv);
    453 	mutex_exit(&vdrain_lock);
    454 }
    455 
    456 /*
    457  * Release deferred vrele vnodes for this mount.
    458  * Called with file system suspended.
    459  */
    460 void
    461 vrele_flush(struct mount *mp)
    462 {
    463 	vnode_impl_t *vip, *marker;
    464 
    465 	KASSERT(fstrans_is_owner(mp));
    466 
    467 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
    468 
    469 	mutex_enter(&vdrain_lock);
    470 	TAILQ_INSERT_HEAD(&lru_vrele_list, marker, vi_lrulist);
    471 
    472 	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
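         	/*
         	 * Iterate with a marker vnode: the marker is re-inserted just
         	 * after each vnode we visit, so the scan position is preserved
         	 * while vdrain_lock is dropped and re-taken around the vrele()
         	 * call below.
         	 */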
    473 		TAILQ_REMOVE(&lru_vrele_list, marker, vi_lrulist);
    474 		TAILQ_INSERT_AFTER(&lru_vrele_list, vip, marker, vi_lrulist);
    475 		if (vnis_marker(VIMPL_TO_VNODE(vip)))
    476 			continue;
    477 
    478 		KASSERT(vip->vi_lrulisthd == &lru_vrele_list);
    479 		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
    480 		vip->vi_lrulisthd = &lru_hold_list;
    481 		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
    482 		mutex_exit(&vdrain_lock);
    483 
    484 		vrele(VIMPL_TO_VNODE(vip));
    485 
    486 		mutex_enter(&vdrain_lock);
    487 	}
    488 
    489 	TAILQ_REMOVE(&lru_vrele_list, marker, vi_lrulist);
    490 	mutex_exit(&vdrain_lock);
    491 
    492 	vnfree_marker(VIMPL_TO_VNODE(marker));
    493 }
    494 
    495 /*
    496  * Reclaim a cached vnode.  Used from vdrain_thread only.
    497  */
    498 static __inline void
    499 vdrain_remove(vnode_t *vp)
    500 {
    501 	struct mount *mp;
    502 
    503 	KASSERT(mutex_owned(&vdrain_lock));
    504 
    505 	/* Probe usecount (unlocked). */
    506 	if (vp->v_usecount > 0)
    507 		return;
    508 	/* Try v_interlock -- we lock the wrong direction! */
    509 	if (!mutex_tryenter(vp->v_interlock))
    510 		return;
    511 	/* Probe usecount and state. */
    512 	if (vp->v_usecount > 0 || VSTATE_GET(vp) != VS_ACTIVE) {
    513 		mutex_exit(vp->v_interlock);
    514 		return;
    515 	}
    516 	mp = vp->v_mount;
    517 	if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
    518 		mutex_exit(vp->v_interlock);
    519 		return;
    520 	}
    521 	vdrain_retry = true;
    522 	mutex_exit(&vdrain_lock);
    523 
    524 	if (vcache_vget(vp) == 0) {
    525 		if (!vrecycle(vp))
    526 			vrele(vp);
    527 	}
    528 	fstrans_done(mp);
    529 
    530 	mutex_enter(&vdrain_lock);
    531 }
    532 
    533 /*
    534  * Release a cached vnode.  Used from vdrain_thread only.
    535  */
    536 static __inline void
    537 vdrain_vrele(vnode_t *vp)
    538 {
    539 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    540 	struct mount *mp;
    541 
    542 	KASSERT(mutex_owned(&vdrain_lock));
    543 
    544 	mp = vp->v_mount;
    545 	if (fstrans_start_nowait(mp, FSTRANS_LAZY) != 0)
    546 		return;
    547 
     548 	/*
     549 	 * First remove the vnode from the vrele list.
     550 	 * Put it on the hold list; the last vrele()
     551 	 * will put it back onto the right list before
     552 	 * its v_usecount reaches zero.
     553 	 */
    554 	KASSERT(vip->vi_lrulisthd == &lru_vrele_list);
    555 	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
    556 	vip->vi_lrulisthd = &lru_hold_list;
    557 	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
    558 
    559 	vdrain_retry = true;
    560 	mutex_exit(&vdrain_lock);
    561 
    562 	mutex_enter(vp->v_interlock);
    563 	vrelel(vp, 0);
    564 	fstrans_done(mp);
    565 
    566 	mutex_enter(&vdrain_lock);
    567 }
    568 
    569 /*
    570  * Helper thread to keep the number of vnodes below desiredvnodes
    571  * and release vnodes from asynchronous vrele.
    572  */
    573 static void
    574 vdrain_thread(void *cookie)
    575 {
    576 	vnodelst_t *listhd[] = {
    577 	    &lru_vrele_list, &lru_free_list, &lru_hold_list
    578 	};
    579 	int i;
    580 	u_int target;
    581 	vnode_impl_t *vip, *marker;
    582 
    583 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
    584 
    585 	mutex_enter(&vdrain_lock);
    586 
    587 	for (;;) {
    588 		vdrain_retry = false;
    589 		target = desiredvnodes - desiredvnodes/10;
    590 
    591 		for (i = 0; i < __arraycount(listhd); i++) {
    592 			TAILQ_INSERT_HEAD(listhd[i], marker, vi_lrulist);
    593 			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
    594 				TAILQ_REMOVE(listhd[i], marker, vi_lrulist);
    595 				TAILQ_INSERT_AFTER(listhd[i], vip, marker,
    596 				    vi_lrulist);
    597 				if (vnis_marker(VIMPL_TO_VNODE(vip)))
    598 					continue;
    599 				if (listhd[i] == &lru_vrele_list)
    600 					vdrain_vrele(VIMPL_TO_VNODE(vip));
    601 				else if (numvnodes < target)
    602 					break;
    603 				else
    604 					vdrain_remove(VIMPL_TO_VNODE(vip));
    605 			}
    606 			TAILQ_REMOVE(listhd[i], marker, vi_lrulist);
    607 		}
    608 
    609 		if (vdrain_retry) {
    610 			mutex_exit(&vdrain_lock);
    611 			yield();
    612 			mutex_enter(&vdrain_lock);
    613 		} else {
    614 			vdrain_gen++;
    615 			cv_broadcast(&vdrain_gen_cv);
    616 			cv_wait(&vdrain_cv, &vdrain_lock);
    617 		}
    618 	}
    619 }
    620 
    621 /*
    622  * vput: unlock and release the reference.
    623  */
    624 void
    625 vput(vnode_t *vp)
    626 {
    627 
    628 	VOP_UNLOCK(vp);
    629 	vrele(vp);
    630 }
    631 
    632 /*
    633  * Try to drop reference on a vnode.  Abort if we are releasing the
    634  * last reference.  Note: this _must_ succeed if not the last reference.
    635  */
    636 static inline bool
    637 vtryrele(vnode_t *vp)
    638 {
    639 	u_int use, next;
    640 
    641 	for (use = vp->v_usecount;; use = next) {
    642 		if (use == 1) {
    643 			return false;
    644 		}
    645 		KASSERT(use > 1);
    646 		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
    647 		if (__predict_true(next == use)) {
    648 			return true;
    649 		}
    650 	}
    651 }
    652 
    653 /*
     654  * Vnode release.  If the reference count drops to zero, call the inactive
     655  * routine and either return the vnode to the freelist or free it to the pool.
    656  */
    657 static void
    658 vrelel(vnode_t *vp, int flags)
    659 {
    660 	bool recycle, defer;
    661 	int error;
    662 
    663 	KASSERT(mutex_owned(vp->v_interlock));
    664 
    665 	if (__predict_false(vp->v_op == dead_vnodeop_p &&
    666 	    VSTATE_GET(vp) != VS_RECLAIMED)) {
    667 		vnpanic(vp, "dead but not clean");
    668 	}
    669 
    670 	/*
    671 	 * If not the last reference, just drop the reference count
    672 	 * and unlock.
    673 	 */
    674 	if (vtryrele(vp)) {
    675 		mutex_exit(vp->v_interlock);
    676 		return;
    677 	}
    678 	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
    679 		vnpanic(vp, "%s: bad ref count", __func__);
    680 	}
    681 
    682 #ifdef DIAGNOSTIC
    683 	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
    684 	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
    685 		vprint("vrelel: missing VOP_CLOSE()", vp);
    686 	}
    687 #endif
    688 
    689 	/*
    690 	 * If not clean, deactivate the vnode, but preserve
    691 	 * our reference across the call to VOP_INACTIVE().
    692 	 */
    693 	if (VSTATE_GET(vp) != VS_RECLAIMED) {
    694 		recycle = false;
    695 
    696 		/*
    697 		 * XXX This ugly block can be largely eliminated if
    698 		 * locking is pushed down into the file systems.
    699 		 *
    700 		 * Defer vnode release to vdrain_thread if caller
    701 		 * requests it explicitly or is the pagedaemon.
    702 		 */
    703 		if ((curlwp == uvm.pagedaemon_lwp) ||
    704 		    (flags & VRELEL_ASYNC_RELE) != 0) {
    705 			defer = true;
    706 		} else if (curlwp == vdrain_lwp) {
    707 			/*
    708 			 * We have to try harder.
    709 			 */
    710 			mutex_exit(vp->v_interlock);
    711 			error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    712 			KASSERTMSG((error == 0), "vn_lock failed: %d", error);
    713 			mutex_enter(vp->v_interlock);
    714 			defer = false;
    715 		} else {
    716 			/* If we can't acquire the lock, then defer. */
    717 			mutex_exit(vp->v_interlock);
    718 			error = vn_lock(vp,
    719 			    LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
    720 			defer = (error != 0);
    721 			mutex_enter(vp->v_interlock);
    722 		}
    723 
    724 		KASSERT(mutex_owned(vp->v_interlock));
    725 		KASSERT(! (curlwp == vdrain_lwp && defer));
    726 
    727 		if (defer) {
    728 			/*
    729 			 * Defer reclaim to the kthread; it's not safe to
    730 			 * clean it here.  We donate it our last reference.
    731 			 */
    732 			lru_requeue(vp, &lru_vrele_list);
    733 			mutex_exit(vp->v_interlock);
    734 			return;
    735 		}
    736 
    737 		/*
    738 		 * If the node got another reference while we
    739 		 * released the interlock, don't try to inactivate it yet.
    740 		 */
    741 		if (__predict_false(vtryrele(vp))) {
    742 			VOP_UNLOCK(vp);
    743 			mutex_exit(vp->v_interlock);
    744 			return;
    745 		}
    746 		VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
    747 		mutex_exit(vp->v_interlock);
    748 
    749 		/*
    750 		 * The vnode must not gain another reference while being
    751 		 * deactivated.  If VOP_INACTIVE() indicates that
    752 		 * the described file has been deleted, then recycle
    753 		 * the vnode.
    754 		 *
    755 		 * Note that VOP_INACTIVE() will drop the vnode lock.
    756 		 */
    757 		VOP_INACTIVE(vp, &recycle);
    758 		if (recycle) {
     759 			/* vcache_reclaim() below will drop the lock. */
    760 			if (vn_lock(vp, LK_EXCLUSIVE) != 0)
    761 				recycle = false;
    762 		}
    763 		mutex_enter(vp->v_interlock);
    764 		VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
    765 		if (!recycle) {
    766 			if (vtryrele(vp)) {
    767 				mutex_exit(vp->v_interlock);
    768 				return;
    769 			}
    770 		}
    771 
    772 		/* Take care of space accounting. */
    773 		if (vp->v_iflag & VI_EXECMAP) {
    774 			atomic_add_int(&uvmexp.execpages,
    775 			    -vp->v_uobj.uo_npages);
    776 			atomic_add_int(&uvmexp.filepages,
    777 			    vp->v_uobj.uo_npages);
    778 		}
    779 		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
    780 		vp->v_vflag &= ~VV_MAPPED;
    781 
    782 		/*
    783 		 * Recycle the vnode if the file is now unused (unlinked),
    784 		 * otherwise just free it.
    785 		 */
    786 		if (recycle) {
    787 			VSTATE_ASSERT(vp, VS_ACTIVE);
    788 			vcache_reclaim(vp);
    789 		}
    790 		KASSERT(vp->v_usecount > 0);
    791 	}
    792 
    793 	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
    794 		/* Gained another reference while being reclaimed. */
    795 		mutex_exit(vp->v_interlock);
    796 		return;
    797 	}
    798 
    799 	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
    800 		/*
    801 		 * It's clean so destroy it.  It isn't referenced
    802 		 * anywhere since it has been reclaimed.
    803 		 */
    804 		vcache_free(VNODE_TO_VIMPL(vp));
    805 	} else {
    806 		/*
    807 		 * Otherwise, put it back onto the freelist.  It
    808 		 * can't be destroyed while still associated with
    809 		 * a file system.
    810 		 */
    811 		lru_requeue(vp, lru_which(vp));
    812 		mutex_exit(vp->v_interlock);
    813 	}
    814 }
    815 
    816 void
    817 vrele(vnode_t *vp)
    818 {
    819 
    820 	if (vtryrele(vp)) {
    821 		return;
    822 	}
    823 	mutex_enter(vp->v_interlock);
    824 	vrelel(vp, 0);
    825 }
    826 
    827 /*
     828  * Asynchronous vnode release: the vnode is released in a different context.
    829  */
    830 void
    831 vrele_async(vnode_t *vp)
    832 {
    833 
    834 	if (vtryrele(vp)) {
    835 		return;
    836 	}
    837 	mutex_enter(vp->v_interlock);
    838 	vrelel(vp, VRELEL_ASYNC_RELE);
    839 }
    840 
    841 /*
    842  * Vnode reference, where a reference is already held by some other
    843  * object (for example, a file structure).
    844  */
    845 void
    846 vref(vnode_t *vp)
    847 {
    848 
    849 	KASSERT(vp->v_usecount != 0);
    850 
    851 	atomic_inc_uint(&vp->v_usecount);
    852 }
    853 
    854 /*
    855  * Page or buffer structure gets a reference.
    856  * Called with v_interlock held.
    857  */
    858 void
    859 vholdl(vnode_t *vp)
    860 {
    861 
    862 	KASSERT(mutex_owned(vp->v_interlock));
    863 
    864 	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0)
    865 		lru_requeue(vp, lru_which(vp));
    866 }
    867 
    868 /*
    869  * Page or buffer structure frees a reference.
    870  * Called with v_interlock held.
    871  */
    872 void
    873 holdrelel(vnode_t *vp)
    874 {
    875 
    876 	KASSERT(mutex_owned(vp->v_interlock));
    877 
    878 	if (vp->v_holdcnt <= 0) {
    879 		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
    880 	}
    881 
    882 	vp->v_holdcnt--;
    883 	if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
    884 		lru_requeue(vp, lru_which(vp));
    885 }
    886 
    977 /*
     978  * Recycle an unused vnode if the caller holds the last reference.
    979  */
    980 bool
    981 vrecycle(vnode_t *vp)
    982 {
    983 	int error __diagused;
    984 
    985 	mutex_enter(vp->v_interlock);
    986 
    987 	/* Make sure we hold the last reference. */
    988 	VSTATE_WAIT_STABLE(vp);
    989 	if (vp->v_usecount != 1) {
    990 		mutex_exit(vp->v_interlock);
    991 		return false;
    992 	}
    993 
    994 	/* If the vnode is already clean we're done. */
    995 	if (VSTATE_GET(vp) != VS_ACTIVE) {
    996 		VSTATE_ASSERT(vp, VS_RECLAIMED);
    997 		vrelel(vp, 0);
    998 		return true;
    999 	}
   1000 
   1001 	/* Prevent further references until the vnode is locked. */
   1002 	VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
   1003 	mutex_exit(vp->v_interlock);
   1004 
   1005 	/*
   1006 	 * On a leaf file system this lock will always succeed as we hold
   1007 	 * the last reference and prevent further references.
   1008 	 * On layered file systems waiting for the lock would open a can of
   1009 	 * deadlocks as the lower vnodes may have other active references.
   1010 	 */
   1011 	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
   1012 
   1013 	mutex_enter(vp->v_interlock);
   1014 	VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
   1015 
   1016 	if (error) {
   1017 		mutex_exit(vp->v_interlock);
   1018 		return false;
   1019 	}
   1020 
   1021 	KASSERT(vp->v_usecount == 1);
   1022 	vcache_reclaim(vp);
   1023 	vrelel(vp, 0);
   1024 
   1025 	return true;
   1026 }
   1027 
   1028 /*
   1029  * Eliminate all activity associated with the requested vnode
   1030  * and with all vnodes aliased to the requested vnode.
   1031  */
   1032 void
   1033 vrevoke(vnode_t *vp)
   1034 {
   1035 	vnode_t *vq;
   1036 	enum vtype type;
   1037 	dev_t dev;
   1038 
   1039 	KASSERT(vp->v_usecount > 0);
   1040 
   1041 	mutex_enter(vp->v_interlock);
   1042 	VSTATE_WAIT_STABLE(vp);
   1043 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
   1044 		mutex_exit(vp->v_interlock);
   1045 		return;
   1046 	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
   1047 		atomic_inc_uint(&vp->v_usecount);
   1048 		mutex_exit(vp->v_interlock);
   1049 		vgone(vp);
   1050 		return;
   1051 	} else {
   1052 		dev = vp->v_rdev;
   1053 		type = vp->v_type;
   1054 		mutex_exit(vp->v_interlock);
   1055 	}
   1056 
   1057 	while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
   1058 		vgone(vq);
   1059 	}
   1060 }
   1061 
   1062 /*
   1063  * Eliminate all activity associated with a vnode in preparation for
   1064  * reuse.  Drops a reference from the vnode.
   1065  */
   1066 void
   1067 vgone(vnode_t *vp)
   1068 {
   1069 
   1070 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   1071 	mutex_enter(vp->v_interlock);
   1072 	VSTATE_WAIT_STABLE(vp);
   1073 	if (VSTATE_GET(vp) == VS_ACTIVE)
   1074 		vcache_reclaim(vp);
   1075 	VSTATE_ASSERT(vp, VS_RECLAIMED);
   1076 	vrelel(vp, 0);
   1077 }
   1078 
   1079 static inline uint32_t
   1080 vcache_hash(const struct vcache_key *key)
   1081 {
   1082 	uint32_t hash = HASH32_BUF_INIT;
   1083 
   1084 	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
   1085 	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
   1086 	return hash;
   1087 }
   1088 
   1089 static void
   1090 vcache_init(void)
   1091 {
   1092 
   1093 	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), 0, 0, 0,
   1094 	    "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
   1095 	KASSERT(vcache_pool != NULL);
   1096 	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
   1097 	cv_init(&vcache_cv, "vcache");
   1098 	vcache_hashsize = desiredvnodes;
   1099 	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
   1100 	    &vcache_hashmask);
   1101 }
   1102 
   1103 static void
   1104 vcache_reinit(void)
   1105 {
   1106 	int i;
   1107 	uint32_t hash;
   1108 	u_long oldmask, newmask;
   1109 	struct hashhead *oldtab, *newtab;
   1110 	vnode_impl_t *vip;
   1111 
   1112 	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
   1113 	mutex_enter(&vcache_lock);
   1114 	oldtab = vcache_hashtab;
   1115 	oldmask = vcache_hashmask;
   1116 	vcache_hashsize = desiredvnodes;
   1117 	vcache_hashtab = newtab;
   1118 	vcache_hashmask = newmask;
   1119 	for (i = 0; i <= oldmask; i++) {
   1120 		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
   1121 			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
   1122 			hash = vcache_hash(&vip->vi_key);
   1123 			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
   1124 			    vip, vi_hash);
   1125 		}
   1126 	}
   1127 	mutex_exit(&vcache_lock);
   1128 	hashdone(oldtab, HASH_SLIST, oldmask);
   1129 }
   1130 
   1131 static inline vnode_impl_t *
   1132 vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
   1133 {
   1134 	struct hashhead *hashp;
   1135 	vnode_impl_t *vip;
   1136 
   1137 	KASSERT(mutex_owned(&vcache_lock));
   1138 
   1139 	hashp = &vcache_hashtab[hash & vcache_hashmask];
   1140 	SLIST_FOREACH(vip, hashp, vi_hash) {
   1141 		if (key->vk_mount != vip->vi_key.vk_mount)
   1142 			continue;
   1143 		if (key->vk_key_len != vip->vi_key.vk_key_len)
   1144 			continue;
   1145 		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
   1146 			continue;
   1147 		return vip;
   1148 	}
   1149 	return NULL;
   1150 }
   1151 
   1152 /*
   1153  * Allocate a new, uninitialized vcache node.
   1154  */
   1155 static vnode_impl_t *
   1156 vcache_alloc(void)
   1157 {
   1158 	vnode_impl_t *vip;
   1159 	vnode_t *vp;
   1160 
   1161 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
   1162 	memset(vip, 0, sizeof(*vip));
   1163 
   1164 	rw_init(&vip->vi_lock);
   1165 	/* SLIST_INIT(&vip->vi_hash); */
   1166 	/* LIST_INIT(&vip->vi_nclist); */
   1167 	/* LIST_INIT(&vip->vi_dnclist); */
   1168 
   1169 	vp = VIMPL_TO_VNODE(vip);
   1170 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
   1171 	cv_init(&vp->v_cv, "vnode");
   1172 
   1173 	vp->v_usecount = 1;
   1174 	vp->v_type = VNON;
   1175 	vp->v_size = vp->v_writesize = VSIZENOTSET;
   1176 
   1177 	vip->vi_state = VS_LOADING;
   1178 
   1179 	lru_requeue(vp, &lru_free_list);
   1180 
   1181 	return vip;
   1182 }
   1183 
   1184 /*
   1185  * Free an unused, unreferenced vcache node.
   1186  * v_interlock locked on entry.
   1187  */
   1188 static void
   1189 vcache_free(vnode_impl_t *vip)
   1190 {
   1191 	vnode_t *vp;
   1192 
   1193 	vp = VIMPL_TO_VNODE(vip);
   1194 	KASSERT(mutex_owned(vp->v_interlock));
   1195 
   1196 	KASSERT(vp->v_usecount == 0);
   1197 	KASSERT(vp->v_holdcnt == 0);
   1198 	KASSERT(vp->v_writecount == 0);
   1199 	lru_requeue(vp, NULL);
   1200 	mutex_exit(vp->v_interlock);
   1201 
   1202 	vfs_insmntque(vp, NULL);
   1203 	if (vp->v_type == VBLK || vp->v_type == VCHR)
   1204 		spec_node_destroy(vp);
   1205 
   1206 	rw_destroy(&vip->vi_lock);
   1207 	uvm_obj_destroy(&vp->v_uobj, true);
   1208 	cv_destroy(&vp->v_cv);
   1209 	pool_cache_put(vcache_pool, vip);
   1210 }
   1211 
   1212 /*
   1213  * Try to get an initial reference on this cached vnode.
    1214  * Returns zero on success, ENOENT if the vnode has been reclaimed and
   1215  * EBUSY if the vnode state is unstable.
   1216  *
   1217  * v_interlock locked on entry and unlocked on exit.
   1218  */
   1219 int
   1220 vcache_tryvget(vnode_t *vp)
   1221 {
   1222 	int error = 0;
   1223 
   1224 	KASSERT(mutex_owned(vp->v_interlock));
   1225 
   1226 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED))
   1227 		error = ENOENT;
   1228 	else if (__predict_false(VSTATE_GET(vp) != VS_ACTIVE))
   1229 		error = EBUSY;
   1230 	else if (vp->v_usecount == 0)
   1231 		vp->v_usecount = 1;
   1232 	else
   1233 		atomic_inc_uint(&vp->v_usecount);
   1234 
   1235 	mutex_exit(vp->v_interlock);
   1236 
   1237 	return error;
   1238 }
   1239 
   1240 /*
   1241  * Try to get an initial reference on this cached vnode.
    1242  * Returns zero on success and ENOENT if the vnode has been reclaimed.
   1243  * Will wait for the vnode state to be stable.
   1244  *
   1245  * v_interlock locked on entry and unlocked on exit.
   1246  */
   1247 int
   1248 vcache_vget(vnode_t *vp)
   1249 {
   1250 
   1251 	KASSERT(mutex_owned(vp->v_interlock));
   1252 
   1253 	/* Increment hold count to prevent vnode from disappearing. */
   1254 	vp->v_holdcnt++;
   1255 	VSTATE_WAIT_STABLE(vp);
   1256 	vp->v_holdcnt--;
   1257 
   1258 	/* If this was the last reference to a reclaimed vnode free it now. */
   1259 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
   1260 		if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
   1261 			vcache_free(VNODE_TO_VIMPL(vp));
   1262 		else
   1263 			mutex_exit(vp->v_interlock);
   1264 		return ENOENT;
   1265 	}
   1266 	VSTATE_ASSERT(vp, VS_ACTIVE);
   1267 	if (vp->v_usecount == 0)
   1268 		vp->v_usecount = 1;
   1269 	else
   1270 		atomic_inc_uint(&vp->v_usecount);
   1271 
   1272 	mutex_exit(vp->v_interlock);
   1273 
   1274 	return 0;
   1275 }
   1276 
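         /*
          * Illustrative sketch: vcache_vget() serves callers that already
          * hold v_interlock, e.g. while iterating over vnode lists.  A
          * minimal pattern (the iteration itself is left abstract):
          *
          *	mutex_enter(vp->v_interlock);
          *	error = vcache_vget(vp);	releases v_interlock
          *	if (error == 0) {
          *		... use the referenced vnode ...
          *		vrele(vp);
          *	}
          */
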
   1277 /*
   1278  * Get a vnode / fs node pair by key and return it referenced through vpp.
   1279  */
   1280 int
   1281 vcache_get(struct mount *mp, const void *key, size_t key_len,
   1282     struct vnode **vpp)
   1283 {
   1284 	int error;
   1285 	uint32_t hash;
   1286 	const void *new_key;
   1287 	struct vnode *vp;
   1288 	struct vcache_key vcache_key;
   1289 	vnode_impl_t *vip, *new_vip;
   1290 
   1291 	new_key = NULL;
   1292 	*vpp = NULL;
   1293 
   1294 	vcache_key.vk_mount = mp;
   1295 	vcache_key.vk_key = key;
   1296 	vcache_key.vk_key_len = key_len;
   1297 	hash = vcache_hash(&vcache_key);
   1298 
   1299 again:
   1300 	mutex_enter(&vcache_lock);
   1301 	vip = vcache_hash_lookup(&vcache_key, hash);
   1302 
   1303 	/* If found, take a reference or retry. */
   1304 	if (__predict_true(vip != NULL)) {
   1305 		/*
   1306 		 * If the vnode is loading we cannot take the v_interlock
   1307 		 * here as it might change during load (see uvm_obj_setlock()).
   1308 		 * As changing state from VS_LOADING requires both vcache_lock
   1309 		 * and v_interlock it is safe to test with vcache_lock held.
   1310 		 *
   1311 		 * Wait for vnodes changing state from VS_LOADING and retry.
   1312 		 */
   1313 		if (__predict_false(vip->vi_state == VS_LOADING)) {
   1314 			cv_wait(&vcache_cv, &vcache_lock);
   1315 			mutex_exit(&vcache_lock);
   1316 			goto again;
   1317 		}
   1318 		vp = VIMPL_TO_VNODE(vip);
   1319 		mutex_enter(vp->v_interlock);
   1320 		mutex_exit(&vcache_lock);
   1321 		error = vcache_vget(vp);
   1322 		if (error == ENOENT)
   1323 			goto again;
   1324 		if (error == 0)
   1325 			*vpp = vp;
   1326 		KASSERT((error != 0) == (*vpp == NULL));
   1327 		return error;
   1328 	}
   1329 	mutex_exit(&vcache_lock);
   1330 
   1331 	/* Allocate and initialize a new vcache / vnode pair. */
   1332 	error = vfs_busy(mp, NULL);
   1333 	if (error)
   1334 		return error;
   1335 	new_vip = vcache_alloc();
   1336 	new_vip->vi_key = vcache_key;
   1337 	vp = VIMPL_TO_VNODE(new_vip);
   1338 	mutex_enter(&vcache_lock);
   1339 	vip = vcache_hash_lookup(&vcache_key, hash);
   1340 	if (vip == NULL) {
   1341 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
   1342 		    new_vip, vi_hash);
   1343 		vip = new_vip;
   1344 	}
   1345 
   1346 	/* If another thread beat us inserting this node, retry. */
   1347 	if (vip != new_vip) {
   1348 		mutex_enter(vp->v_interlock);
   1349 		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
   1350 		mutex_exit(&vcache_lock);
   1351 		vrelel(vp, 0);
   1352 		vfs_unbusy(mp, false, NULL);
   1353 		goto again;
   1354 	}
   1355 	mutex_exit(&vcache_lock);
   1356 
    1357 	/* Load the fs node.  Exclusive as new_vip is VS_LOADING. */
   1358 	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
   1359 	if (error) {
   1360 		mutex_enter(&vcache_lock);
   1361 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
   1362 		    new_vip, vnode_impl, vi_hash);
   1363 		mutex_enter(vp->v_interlock);
   1364 		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
   1365 		mutex_exit(&vcache_lock);
   1366 		vrelel(vp, 0);
   1367 		vfs_unbusy(mp, false, NULL);
   1368 		KASSERT(*vpp == NULL);
   1369 		return error;
   1370 	}
   1371 	KASSERT(new_key != NULL);
   1372 	KASSERT(memcmp(key, new_key, key_len) == 0);
   1373 	KASSERT(vp->v_op != NULL);
   1374 	vfs_insmntque(vp, mp);
   1375 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
   1376 		vp->v_vflag |= VV_MPSAFE;
   1377 	vfs_unbusy(mp, true, NULL);
   1378 
   1379 	/* Finished loading, finalize node. */
   1380 	mutex_enter(&vcache_lock);
   1381 	new_vip->vi_key.vk_key = new_key;
   1382 	mutex_enter(vp->v_interlock);
   1383 	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
   1384 	mutex_exit(vp->v_interlock);
   1385 	mutex_exit(&vcache_lock);
   1386 	*vpp = vp;
   1387 	return 0;
   1388 }
   1389 
   1390 /*
   1391  * Create a new vnode / fs node pair and return it referenced through vpp.
   1392  */
   1393 int
   1394 vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
   1395     kauth_cred_t cred, struct vnode **vpp)
   1396 {
   1397 	int error;
   1398 	uint32_t hash;
   1399 	struct vnode *vp, *ovp;
   1400 	vnode_impl_t *vip, *ovip;
   1401 
   1402 	*vpp = NULL;
   1403 
   1404 	/* Allocate and initialize a new vcache / vnode pair. */
   1405 	error = vfs_busy(mp, NULL);
   1406 	if (error)
   1407 		return error;
   1408 	vip = vcache_alloc();
   1409 	vip->vi_key.vk_mount = mp;
   1410 	vp = VIMPL_TO_VNODE(vip);
   1411 
   1412 	/* Create and load the fs node. */
   1413 	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred,
   1414 	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
   1415 	if (error) {
   1416 		mutex_enter(&vcache_lock);
   1417 		mutex_enter(vp->v_interlock);
   1418 		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
   1419 		mutex_exit(&vcache_lock);
   1420 		vrelel(vp, 0);
   1421 		vfs_unbusy(mp, false, NULL);
   1422 		KASSERT(*vpp == NULL);
   1423 		return error;
   1424 	}
   1425 	KASSERT(vip->vi_key.vk_key != NULL);
   1426 	KASSERT(vp->v_op != NULL);
   1427 	hash = vcache_hash(&vip->vi_key);
   1428 
   1429 	/* Wait for previous instance to be reclaimed, then insert new node. */
   1430 	mutex_enter(&vcache_lock);
   1431 	while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
   1432 		ovp = VIMPL_TO_VNODE(ovip);
   1433 		mutex_enter(ovp->v_interlock);
   1434 		mutex_exit(&vcache_lock);
   1435 		error = vcache_vget(ovp);
   1436 		KASSERT(error == ENOENT);
   1437 		mutex_enter(&vcache_lock);
   1438 	}
   1439 	SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
   1440 	    vip, vi_hash);
   1441 	mutex_exit(&vcache_lock);
   1442 	vfs_insmntque(vp, mp);
   1443 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
   1444 		vp->v_vflag |= VV_MPSAFE;
   1445 	vfs_unbusy(mp, true, NULL);
   1446 
   1447 	/* Finished loading, finalize node. */
   1448 	mutex_enter(&vcache_lock);
   1449 	mutex_enter(vp->v_interlock);
   1450 	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
   1451 	mutex_exit(&vcache_lock);
   1452 	mutex_exit(vp->v_interlock);
   1453 	*vpp = vp;
   1454 	return 0;
   1455 }
   1456 
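         /*
          * Illustrative sketch (hypothetical file system): a VOP_CREATE(9)
          * implementation typically obtains the vnode for its new inode
          * through vcache_new(), which calls back into the file system's
          * VFS_NEWVNODE().  The argument names follow the usual VOP
          * conventions and are assumptions here:
          *
          *	struct vnode *vp;
          *	int error;
          *
          *	error = vcache_new(dvp->v_mount, dvp, vap, cnp->cn_cred, &vp);
          *	if (error != 0)
          *		return error;
          *	... enter the name into the directory ...
          *	*vpp = vp;
          *	return 0;
          */
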
   1457 /*
    1458  * Prepare key change: update the old node's key and lock the new node.
   1459  * Return an error if the new node already exists.
   1460  */
   1461 int
   1462 vcache_rekey_enter(struct mount *mp, struct vnode *vp,
   1463     const void *old_key, size_t old_key_len,
   1464     const void *new_key, size_t new_key_len)
   1465 {
   1466 	uint32_t old_hash, new_hash;
   1467 	struct vcache_key old_vcache_key, new_vcache_key;
   1468 	vnode_impl_t *vip, *new_vip;
   1469 	struct vnode *new_vp;
   1470 
   1471 	old_vcache_key.vk_mount = mp;
   1472 	old_vcache_key.vk_key = old_key;
   1473 	old_vcache_key.vk_key_len = old_key_len;
   1474 	old_hash = vcache_hash(&old_vcache_key);
   1475 
   1476 	new_vcache_key.vk_mount = mp;
   1477 	new_vcache_key.vk_key = new_key;
   1478 	new_vcache_key.vk_key_len = new_key_len;
   1479 	new_hash = vcache_hash(&new_vcache_key);
   1480 
   1481 	new_vip = vcache_alloc();
   1482 	new_vip->vi_key = new_vcache_key;
   1483 	new_vp = VIMPL_TO_VNODE(new_vip);
   1484 
   1485 	/* Insert locked new node used as placeholder. */
   1486 	mutex_enter(&vcache_lock);
   1487 	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
   1488 	if (vip != NULL) {
   1489 		mutex_enter(new_vp->v_interlock);
   1490 		VSTATE_CHANGE(new_vp, VS_LOADING, VS_RECLAIMED);
   1491 		mutex_exit(&vcache_lock);
   1492 		vrelel(new_vp, 0);
   1493 		return EEXIST;
   1494 	}
   1495 	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
   1496 	    new_vip, vi_hash);
   1497 
    1498 	/* Replace the old node's key with the temporary copy. */
   1499 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
   1500 	KASSERT(vip != NULL);
   1501 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
   1502 	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
   1503 	vip->vi_key = old_vcache_key;
   1504 	mutex_exit(&vcache_lock);
   1505 	return 0;
   1506 }
   1507 
   1508 /*
   1509  * Key change complete: update old node and remove placeholder.
   1510  */
   1511 void
   1512 vcache_rekey_exit(struct mount *mp, struct vnode *vp,
   1513     const void *old_key, size_t old_key_len,
   1514     const void *new_key, size_t new_key_len)
   1515 {
   1516 	uint32_t old_hash, new_hash;
   1517 	struct vcache_key old_vcache_key, new_vcache_key;
   1518 	vnode_impl_t *vip, *new_vip;
   1519 	struct vnode *new_vp;
   1520 
   1521 	old_vcache_key.vk_mount = mp;
   1522 	old_vcache_key.vk_key = old_key;
   1523 	old_vcache_key.vk_key_len = old_key_len;
   1524 	old_hash = vcache_hash(&old_vcache_key);
   1525 
   1526 	new_vcache_key.vk_mount = mp;
   1527 	new_vcache_key.vk_key = new_key;
   1528 	new_vcache_key.vk_key_len = new_key_len;
   1529 	new_hash = vcache_hash(&new_vcache_key);
   1530 
   1531 	mutex_enter(&vcache_lock);
   1532 
   1533 	/* Lookup old and new node. */
   1534 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
   1535 	KASSERT(vip != NULL);
   1536 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
   1537 
   1538 	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
   1539 	KASSERT(new_vip != NULL);
   1540 	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
   1541 	new_vp = VIMPL_TO_VNODE(new_vip);
   1542 	mutex_enter(new_vp->v_interlock);
   1543 	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
   1544 
   1545 	/* Rekey old node and put it onto its new hashlist. */
   1546 	vip->vi_key = new_vcache_key;
   1547 	if (old_hash != new_hash) {
   1548 		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
   1549 		    vip, vnode_impl, vi_hash);
   1550 		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
   1551 		    vip, vi_hash);
   1552 	}
   1553 
   1554 	/* Remove new node used as placeholder. */
   1555 	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
   1556 	    new_vip, vnode_impl, vi_hash);
   1557 	VSTATE_CHANGE(new_vp, VS_LOADING, VS_RECLAIMED);
   1558 	mutex_exit(&vcache_lock);
   1559 	vrelel(new_vp, 0);
   1560 }
   1561 
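         /*
          * Illustrative sketch: a file system whose vnode key can change,
          * e.g. one keyed by <directory, name> across a rename, brackets
          * the key change with the two functions above.  The key variables
          * are assumptions:
          *
          *	error = vcache_rekey_enter(mp, vp, &old_key, sizeof(old_key),
          *	    &new_key, sizeof(new_key));
          *	if (error != 0)
          *		return error;	EEXIST: new key already cached
          *	... update the fs node so it answers to new_key ...
          *	vcache_rekey_exit(mp, vp, &old_key, sizeof(old_key),
          *	    &new_key, sizeof(new_key));
          */
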
    1562 /*
    1563  * Disassociate the underlying file system from a vnode.
    1564  *
    1565  * Must be called with vnode locked and will return unlocked.
    1566  * Must be called with the interlock held, and will return with it held.
    1567  */
    1568 static void
    1569 vcache_reclaim(vnode_t *vp)
    1570 {
    1571 	lwp_t *l = curlwp;
    1572 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    1573 	struct mount *mp = vp->v_mount;
    1574 	uint32_t hash;
    1575 	uint8_t temp_buf[64], *temp_key;
    1576 	size_t temp_key_len;
    1577 	bool recycle, active;
    1578 	int error;
    1579 
   1580 	active = (vp->v_usecount > 1);
   1581 	temp_key_len = vip->vi_key.vk_key_len;
   1582 	/*
   1583 	 * Prevent the vnode from being recycled or brought into use
   1584 	 * while we clean it out.
   1585 	 */
   1586 	VSTATE_CHANGE(vp, VS_ACTIVE, VS_RECLAIMING);
   1587 	if (vp->v_iflag & VI_EXECMAP) {
   1588 		atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
   1589 		atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
   1590 	}
   1591 	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
   1592 	mutex_exit(vp->v_interlock);
   1593 
   1594 	/* Replace the vnode key with a temporary copy. */
   1595 	if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
   1596 		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
   1597 	} else {
   1598 		temp_key = temp_buf;
   1599 	}
   1600 	mutex_enter(&vcache_lock);
   1601 	memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
   1602 	vip->vi_key.vk_key = temp_key;
   1603 	mutex_exit(&vcache_lock);
   1604 
   1605 	fstrans_start(mp, FSTRANS_LAZY);
   1606 
   1607 	/*
   1608 	 * Clean out any cached data associated with the vnode.
   1609 	 * If purging an active vnode, it must be closed and
   1610 	 * deactivated before being reclaimed.
   1611 	 */
   1612 	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
   1613 	if (error != 0) {
   1614 		if (wapbl_vphaswapbl(vp))
   1615 			WAPBL_DISCARD(wapbl_vptomp(vp));
   1616 		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
   1617 	}
   1618 	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
   1619 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1620 	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
   1621 		 spec_node_revoke(vp);
   1622 	}
   1623 
   1624 	/*
   1625 	 * Disassociate the underlying file system from the vnode.
   1626 	 * Note that the VOP_INACTIVE will unlock the vnode.
   1627 	 */
   1628 	VOP_INACTIVE(vp, &recycle);
   1629 	if (VOP_RECLAIM(vp)) {
   1630 		vnpanic(vp, "%s: cannot reclaim", __func__);
   1631 	}
   1632 
   1633 	KASSERT(vp->v_data == NULL);
   1634 	KASSERT(vp->v_uobj.uo_npages == 0);
   1635 
   1636 	if (vp->v_type == VREG && vp->v_ractx != NULL) {
   1637 		uvm_ra_freectx(vp->v_ractx);
   1638 		vp->v_ractx = NULL;
   1639 	}
   1640 
   1641 	/* Purge name cache. */
   1642 	cache_purge(vp);
   1643 
   1644 	/* Move to dead mount. */
   1645 	vp->v_vflag &= ~VV_ROOT;
   1646 	atomic_inc_uint(&dead_rootmount->mnt_refcnt);
   1647 	vfs_insmntque(vp, dead_rootmount);
   1648 
   1649 	/* Remove from vnode cache. */
   1650 	hash = vcache_hash(&vip->vi_key);
   1651 	mutex_enter(&vcache_lock);
   1652 	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
   1653 	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
   1654 	    vip, vnode_impl, vi_hash);
   1655 	mutex_exit(&vcache_lock);
   1656 	if (temp_key != temp_buf)
   1657 		kmem_free(temp_key, temp_key_len);
   1658 
   1659 	/* Done with purge, notify sleepers of the grim news. */
   1660 	mutex_enter(vp->v_interlock);
   1661 	vp->v_op = dead_vnodeop_p;
   1662 	vp->v_vflag |= VV_LOCKSWORK;
   1663 	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
   1664 	vp->v_tag = VT_NON;
   1665 	KNOTE(&vp->v_klist, NOTE_REVOKE);
   1666 
   1667 	fstrans_done(mp);
   1668 
   1669 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1670 }
   1671 
   1672 /*
   1673  * Update outstanding I/O count and do wakeup if requested.
   1674  */
   1675 void
   1676 vwakeup(struct buf *bp)
   1677 {
   1678 	vnode_t *vp;
   1679 
   1680 	if ((vp = bp->b_vp) == NULL)
   1681 		return;
   1682 
   1683 	KASSERT(bp->b_objlock == vp->v_interlock);
   1684 	KASSERT(mutex_owned(bp->b_objlock));
   1685 
   1686 	if (--vp->v_numoutput < 0)
   1687 		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
   1688 	if (vp->v_numoutput == 0)
   1689 		cv_broadcast(&vp->v_cv);
   1690 }
   1691 
   1692 /*
   1693  * Test a vnode for being or becoming dead.  Returns one of:
   1694  * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
   1695  * ENOENT: vnode is dead.
   1696  * 0:      otherwise.
   1697  *
   1698  * Whenever this function returns a non-zero value all future
   1699  * calls will also return a non-zero value.
   1700  */
   1701 int
   1702 vdead_check(struct vnode *vp, int flags)
   1703 {
   1704 
   1705 	KASSERT(mutex_owned(vp->v_interlock));
   1706 
   1707 	if (! ISSET(flags, VDEAD_NOWAIT))
   1708 		VSTATE_WAIT_STABLE(vp);
   1709 
   1710 	if (VSTATE_GET(vp) == VS_RECLAIMING) {
   1711 		KASSERT(ISSET(flags, VDEAD_NOWAIT));
   1712 		return EBUSY;
   1713 	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
   1714 		return ENOENT;
   1715 	}
   1716 
   1717 	return 0;
   1718 }
   1719 
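         /*
          * Illustrative sketch: callers that hold v_interlock and may not
          * sleep probe with VDEAD_NOWAIT and treat EBUSY as "becoming dead"
          * and ENOENT as "dead".  A minimal pattern, with the guarded
          * operation left abstract:
          *
          *	mutex_enter(vp->v_interlock);
          *	error = vdead_check(vp, VDEAD_NOWAIT);
          *	if (error == 0)
          *		... operate on the still-valid vnode ...
          *	mutex_exit(vp->v_interlock);
          */
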
   1720 int
   1721 vfs_drainvnodes(void)
   1722 {
   1723 	int i, gen;
   1724 
   1725 	mutex_enter(&vdrain_lock);
   1726 	for (i = 0; i < 2; i++) {
   1727 		gen = vdrain_gen;
   1728 		while (gen == vdrain_gen) {
   1729 			cv_broadcast(&vdrain_cv);
   1730 			cv_wait(&vdrain_gen_cv, &vdrain_lock);
   1731 		}
   1732 	}
   1733 	mutex_exit(&vdrain_lock);
   1734 
   1735 	if (numvnodes >= desiredvnodes)
   1736 		return EBUSY;
   1737 
   1738 	if (vcache_hashsize != desiredvnodes)
   1739 		vcache_reinit();
   1740 
   1741 	return 0;
   1742 }
   1743 
   1744 void
   1745 vnpanic(vnode_t *vp, const char *fmt, ...)
   1746 {
   1747 	va_list ap;
   1748 
   1749 #ifdef DIAGNOSTIC
   1750 	vprint(NULL, vp);
   1751 #endif
   1752 	va_start(ap, fmt);
   1753 	vpanic(fmt, ap);
   1754 	va_end(ap);
   1755 }
   1756