/*	$NetBSD: vfs_vnode.c,v 1.62 2016/12/14 15:48:55 hannken Exp $	*/

/*-
 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was another, traditional way.  Currently, only the draining thread
 *	recycles vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
 *	the file system that the vnode is inactive.  Via this call, the file
 *	system indicates whether the vnode can be recycled (usually, it
 *	checks its own references, e.g. the link count, or whether the file
 *	was removed).
 *
 *	Depending on that indication, the vnode can be put onto a free list
 *	(cache), or cleaned via vcache_reclaim, which calls VOP_RECLAIM(9)
 *	to disassociate the underlying file system from the vnode, and is
 *	finally destroyed.
 *
 * Vnode state
 *
 *	A vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating with the underlying file system
 *			and is not yet ready to use.
 *	- ACTIVE	Vnode has an associated underlying file system and is
 *			ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from the underlying file
 *			system and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> ACTIVE
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	ACTIVE -> RECLAIMING
 *			Vnode starts disassociation from the underlying file
 *			system in vcache_reclaim().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from the underlying file
 *			system in vcache_reclaim().
 *	ACTIVE -> BLOCKED
 *			Either vcache_rekey*() is changing the vnode key or
 *			vrelel() is about to call VOP_INACTIVE().
 *	BLOCKED -> ACTIVE
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system, or
 *			vcache_rekey*() drops a vnode used as a placeholder.
 *
 *	Of these states, LOADING, BLOCKED and RECLAIMING are intermediate,
 *	and it is possible to wait for a state change.
 *
 *	State is protected with v_interlock, with one exception: changing
 *	the state away from LOADING requires both v_interlock and
 *	vcache.lock to be held, so "state == LOADING" may be tested while
 *	holding only vcache.lock.  See vcache_get() for details.
 *
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained using
 *	the vref(9), vrele(9) and vput(9) routines.  Common holders of
 *	references are, e.g., open files, current working directories and
 *	mount points.
 *
 * Note on v_usecount and its locking
 *
 *	At nearly all points where it is known that v_usecount could be
 *	zero, vnode_t::v_interlock will be held.  To change v_usecount away
 *	from zero, the interlock must be held.  To change from a non-zero
 *	value to zero, again the interlock must be held.
 *
 *	Changing the usecount from one non-zero value to another non-zero
 *	value can safely be done using atomic operations, without the
 *	interlock held.
 */
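
/*
 * Illustrative sketch of the reference counting rules above (not compiled
 * as part of this file; "myfs_getattr_and_release" is a hypothetical
 * consumer): a holder of a referenced, unlocked vnode locks it around an
 * operation and finally drops both the lock and its reference.
 *
 *	int
 *	myfs_getattr_and_release(struct vnode *vp, struct vattr *va,
 *	    kauth_cred_t cred)
 *	{
 *		int error;
 *
 *		vn_lock(vp, LK_SHARED | LK_RETRY);
 *		error = VOP_GETATTR(vp, va, cred);
 *		VOP_UNLOCK(vp);
 *		vrele(vp);	(or use vput(vp) to unlock and release)
 *		return error;
 *	}
 */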

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.62 2016/12/14 15:48:55 hannken Exp $");

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode_impl.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC_RELE	0x0001	/* Always defer to vrele thread. */

u_int			numvnodes		__cacheline_aligned;

/*
 * There are two free lists: one is for vnodes which have no buffer/page
 * references and one for those which do (i.e. v_holdcnt is non-zero).
 * The vnode recycling mechanism first tries the former list.
 */
static kmutex_t		vnode_free_list_lock	__cacheline_aligned;
static vnodelst_t	vnode_free_list		__cacheline_aligned;
static vnodelst_t	vnode_hold_list		__cacheline_aligned;
static kcondvar_t	vdrain_cv		__cacheline_aligned;

static vnodelst_t	vrele_list		__cacheline_aligned;
static kmutex_t		vrele_lock		__cacheline_aligned;
static kcondvar_t	vrele_cv		__cacheline_aligned;
static lwp_t *		vrele_lwp		__cacheline_aligned;
static int		vrele_pending		__cacheline_aligned;
static int		vrele_gen		__cacheline_aligned;

SLIST_HEAD(hashhead, vnode_impl);
static struct {
	kmutex_t	lock;
	kcondvar_t	cv;
	u_int		hashsize;
	u_long		hashmask;
	struct hashhead	*hashtab;
	pool_cache_t	pool;
}			vcache			__cacheline_aligned;

static int		cleanvnode(void);
static vnode_impl_t *vcache_alloc(void);
static void		vcache_free(vnode_impl_t *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vcache_reclaim(vnode_t *);
static void		vrelel(vnode_t *, int);
static void		vdrain_thread(void *);
static void		vrele_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount	*dead_rootmount;
extern int		(**dead_vnodeop_p)(void *);
extern struct vfsops	dead_vfsops;

/* Vnode state operations and diagnostics. */

#if defined(DIAGNOSTIC)

#define VSTATE_GET(vp) \
	vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
	vstate_assert_wait_stable((vp), __func__, __LINE__)
#define VSTATE_ASSERT(vp, state) \
	vstate_assert((vp), (state), __func__, __LINE__)

static void
vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if (__predict_true(node->vi_state == state))
		return;
	vnpanic(vp, "state is %s, expected %s at %s:%d",
	    vstate_name(node->vi_state), vstate_name(state), func, line);
}

static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);

	return node->vi_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);

	while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);
}

static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache.lock), "at %s:%d", func, line);

	if (from == VS_MARKER)
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (to == VS_MARKER)
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (node->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(node->vi_state), vstate_name(from), func, line);

	node->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache.cv);
	if (to == VS_ACTIVE || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
	(VNODE_TO_VIMPL((vp))->vi_state)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
	vstate_wait_stable((vp))
#define VSTATE_ASSERT(vp, state)

static void
vstate_wait_stable(vnode_t *vp)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	node->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache.cv);
	if (to == VS_ACTIVE || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */

void
vfs_vnode_sysinit(void)
{
	int error __diagused;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag = IMNT_MPSAFE;

	mutex_init(&vnode_free_list_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vrele_list);

	vcache_init();

	mutex_init(&vrele_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vrele_cv, "vrele");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, NULL, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vrele_thread,
	    NULL, &vrele_lwp, "vrele");
	KASSERTMSG((error == 0), "kthread_create(vrele) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	vnode_impl_t *node;
	vnode_t *vp;

	node = pool_cache_get(vcache.pool, PR_WAITOK);
	memset(node, 0, sizeof(*node));
	vp = VIMPL_TO_VNODE(node);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	node->vi_state = VS_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	vnode_impl_t *node;

	node = VNODE_TO_VIMPL(vp);
	KASSERT(node->vi_state == VS_MARKER);
	uvm_obj_destroy(&vp->v_uobj, true);
	pool_cache_put(vcache.pool, node);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
}

/*
 * cleanvnode: grab a vnode from freelist, clean and free it.
 *
 * => Releases vnode_free_list_lock.
 */
static int
cleanvnode(void)
{
	vnode_t *vp;
	vnode_impl_t *vi;
	vnodelst_t *listhd;
	struct mount *mp;

	KASSERT(mutex_owned(&vnode_free_list_lock));

	listhd = &vnode_free_list;
try_nextlist:
	TAILQ_FOREACH(vi, listhd, vi_lrulist) {
		vp = VIMPL_TO_VNODE(vi);
		/*
		 * It's safe to test v_usecount and v_iflag
		 * without holding the interlock here: vnodes
		 * with references never appear on these
		 * lists.
		 */
		KASSERT(vp->v_usecount == 0);
		KASSERT(vi->vi_lrulisthd == listhd);

		if (!mutex_tryenter(vp->v_interlock))
			continue;
		mp = vp->v_mount;
		if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
			mutex_exit(vp->v_interlock);
			continue;
		}
		break;
	}

	if (vi == NULL) {
		if (listhd == &vnode_free_list) {
			listhd = &vnode_hold_list;
			goto try_nextlist;
		}
		mutex_exit(&vnode_free_list_lock);
		return EBUSY;
	}

	mutex_exit(&vnode_free_list_lock);

	if (vget(vp, 0, true /* wait */) == 0) {
		if (!vrecycle(vp))
			vrele(vp);
	}
	fstrans_done(mp);

	return 0;
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes.
 */
static void
vdrain_thread(void *cookie)
{
	int error;

	mutex_enter(&vnode_free_list_lock);

	for (;;) {
		cv_timedwait(&vdrain_cv, &vnode_free_list_lock, hz);
		while (numvnodes > desiredvnodes) {
			error = cleanvnode();
			if (error)
				kpause("vndsbusy", false, hz, NULL);
			mutex_enter(&vnode_free_list_lock);
			if (error)
				break;
		}
	}
}

/*
 * Remove a vnode from its freelist.
 */
void
vremfree(vnode_t *vp)
{
	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount == 0);

	/*
	 * Note that the reference count must not change until
	 * the vnode is removed.
	 */
	mutex_enter(&vnode_free_list_lock);
	if (vp->v_holdcnt > 0) {
		KASSERT(vi->vi_lrulisthd == &vnode_hold_list);
	} else {
		KASSERT(vi->vi_lrulisthd == &vnode_free_list);
	}
	TAILQ_REMOVE(vi->vi_lrulisthd, vi, vi_lrulist);
	vi->vi_lrulisthd = NULL;
	mutex_exit(&vnode_free_list_lock);
}

/*
 * vget: get a particular vnode from the free list, increment its reference
 * count and return it.
 *
 * => Must be called with v_interlock held.
 *
 * If state is VS_RECLAIMING, the vnode may be eliminated in vcache_reclaim().
 * In that case, we cannot grab the vnode, so the process is awakened when
 * the transition is completed, and an error is returned to indicate that
 * the vnode is no longer usable.
 *
 * If state is VS_LOADING or VS_BLOCKED, wait until the vnode enters a
 * stable state (VS_ACTIVE or VS_RECLAIMED).
 */
int
vget(vnode_t *vp, int flags, bool waitok)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((flags & ~LK_NOWAIT) == 0);
	KASSERT(waitok == ((flags & LK_NOWAIT) == 0));

	/*
	 * Before adding a reference, we must remove the vnode
	 * from its freelist.
	 */
	if (vp->v_usecount == 0) {
		vremfree(vp);
		vp->v_usecount = 1;
	} else {
		atomic_inc_uint(&vp->v_usecount);
	}

	/*
	 * If the vnode is in the process of changing state we wait
	 * for the change to complete and take care not to return
	 * a clean vnode.
	 */
	if (! ISSET(flags, LK_NOWAIT))
		VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		vrelel(vp, 0);
		return ENOENT;
	} else if (VSTATE_GET(vp) != VS_ACTIVE) {
		KASSERT(ISSET(flags, LK_NOWAIT));
		vrelel(vp, 0);
		return EBUSY;
	}

	/*
	 * Ok, we got it in good shape.
	 */
	VSTATE_ASSERT(vp, VS_ACTIVE);
	mutex_exit(vp->v_interlock);

	return 0;
}
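
/*
 * A minimal sketch of the vget() calling convention (mirroring the
 * pattern used by cleanvnode() and vcache_get() in this file): the
 * caller enters v_interlock first, and vget() always consumes it.
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vget(vp, 0, true);
 *	if (error != 0)
 *		return error;	(ENOENT: reclaimed while we waited)
 *	(we now hold a reference; drop it with vrele() when done)
 */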

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Try to drop a reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static inline bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = vp->v_usecount;; use = next) {
		if (use == 1) {
			return false;
		}
		KASSERT(use > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * Vnode release.  If the reference count drops to zero, call the inactive
 * routine and either return the vnode to the freelist or free it to the
 * pool.
 */
static void
vrelel(vnode_t *vp, int flags)
{
	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vi->vi_lrulisthd == NULL);

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VS_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count
	 * and unlock.
	 */
	if (vtryrele(vp)) {
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
	if (VSTATE_GET(vp) != VS_RECLAIMED) {
		recycle = false;

		/*
		 * XXX This ugly block can be largely eliminated if
		 * locking is pushed down into the file systems.
		 *
		 * Defer vnode release to vrele_thread if caller
		 * requests it explicitly or is the pagedaemon.
		 */
		if ((curlwp == uvm.pagedaemon_lwp) ||
		    (flags & VRELEL_ASYNC_RELE) != 0) {
			defer = true;
		} else if (curlwp == vrele_lwp) {
			/*
			 * We have to try harder.
			 */
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			KASSERTMSG((error == 0), "vn_lock failed: %d", error);
			mutex_enter(vp->v_interlock);
			defer = false;
		} else {
			/* If we can't acquire the lock, then defer. */
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp,
			    LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
			defer = (error != 0);
			mutex_enter(vp->v_interlock);
		}

		KASSERT(mutex_owned(vp->v_interlock));
		KASSERT(! (curlwp == vrele_lwp && defer));

		if (defer) {
			/*
			 * Defer reclaim to the kthread; it's not safe to
			 * clean it here.  We donate it our last reference.
			 */
			mutex_enter(&vrele_lock);
			TAILQ_INSERT_TAIL(&vrele_list, vi, vi_lrulist);
			if (++vrele_pending > (desiredvnodes >> 8))
				cv_signal(&vrele_cv);
			mutex_exit(&vrele_lock);
			mutex_exit(vp->v_interlock);
			return;
		}

		/*
		 * If the node got another reference while we
		 * released the interlock, don't try to inactivate it yet.
		 */
		if (__predict_false(vtryrele(vp))) {
			VOP_UNLOCK(vp);
			mutex_exit(vp->v_interlock);
			return;
		}
		VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
		mutex_exit(vp->v_interlock);

		/*
		 * The vnode must not gain another reference while being
		 * deactivated.  If VOP_INACTIVE() indicates that
		 * the described file has been deleted, then recycle
		 * the vnode.
		 *
		 * Note that VOP_INACTIVE() will drop the vnode lock.
		 */
		VOP_INACTIVE(vp, &recycle);
		if (recycle) {
			/* vcache_reclaim() below will drop the lock. */
			if (vn_lock(vp, LK_EXCLUSIVE) != 0)
				recycle = false;
		}
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
		if (!recycle) {
			if (vtryrele(vp)) {
				mutex_exit(vp->v_interlock);
				return;
			}
		}

		/* Take care of space accounting. */
		if (vp->v_iflag & VI_EXECMAP) {
			atomic_add_int(&uvmexp.execpages,
			    -vp->v_uobj.uo_npages);
			atomic_add_int(&uvmexp.filepages,
			    vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			VSTATE_ASSERT(vp, VS_ACTIVE);
			vcache_reclaim(vp);
		}
		KASSERT(vp->v_usecount > 0);
	}

	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(vp->v_interlock);
		return;
	}

	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		KASSERT(vp->v_holdcnt == 0);
		KASSERT(vp->v_writecount == 0);
		mutex_exit(vp->v_interlock);
		vfs_insmntque(vp, NULL);
		if (vp->v_type == VBLK || vp->v_type == VCHR) {
			spec_node_destroy(vp);
		}
		vcache_free(VNODE_TO_VIMPL(vp));
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		mutex_enter(&vnode_free_list_lock);
		if (vp->v_holdcnt > 0) {
			vi->vi_lrulisthd = &vnode_hold_list;
		} else {
			vi->vi_lrulisthd = &vnode_free_list;
		}
		TAILQ_INSERT_TAIL(vi->vi_lrulisthd, vi, vi_lrulist);
		mutex_exit(&vnode_free_list_lock);
		mutex_exit(vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0);
}

/*
 * Asynchronous vnode release: the vnode is released in a different context.
 */
void
vrele_async(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC_RELE);
}

static void
vrele_thread(void *cookie)
{
	vnodelst_t skip_list;
	vnode_t *vp;
	vnode_impl_t *vi;
	struct mount *mp;

	TAILQ_INIT(&skip_list);

	mutex_enter(&vrele_lock);
	for (;;) {
		while (TAILQ_EMPTY(&vrele_list)) {
			vrele_gen++;
			cv_broadcast(&vrele_cv);
			cv_timedwait(&vrele_cv, &vrele_lock, hz);
			TAILQ_CONCAT(&vrele_list, &skip_list, vi_lrulist);
		}
		vi = TAILQ_FIRST(&vrele_list);
		vp = VIMPL_TO_VNODE(vi);
		mp = vp->v_mount;
		TAILQ_REMOVE(&vrele_list, vi, vi_lrulist);
		if (fstrans_start_nowait(mp, FSTRANS_LAZY) != 0) {
			TAILQ_INSERT_TAIL(&skip_list, vi, vi_lrulist);
			continue;
		}
		vrele_pending--;
		mutex_exit(&vrele_lock);

		/*
		 * If not the last reference, then ignore the vnode
		 * and look for more work.
		 */
		mutex_enter(vp->v_interlock);
		vrelel(vp, 0);
		fstrans_done(mp);
		mutex_enter(&vrele_lock);
	}
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 */
void
vref(vnode_t *vp)
{

	KASSERT(vp->v_usecount != 0);

	atomic_inc_uint(&vp->v_usecount);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{
	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0) {
		mutex_enter(&vnode_free_list_lock);
		KASSERT(vi->vi_lrulisthd == &vnode_free_list);
		TAILQ_REMOVE(vi->vi_lrulisthd, vi, vi_lrulist);
		vi->vi_lrulisthd = &vnode_hold_list;
		TAILQ_INSERT_TAIL(vi->vi_lrulisthd, vi, vi_lrulist);
		mutex_exit(&vnode_free_list_lock);
	}
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{
	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		mutex_enter(&vnode_free_list_lock);
		KASSERT(vi->vi_lrulisthd == &vnode_hold_list);
		TAILQ_REMOVE(vi->vi_lrulisthd, vi, vi_lrulist);
		vi->vi_lrulisthd = &vnode_free_list;
		TAILQ_INSERT_TAIL(vi->vi_lrulisthd, vi, vi_lrulist);
		mutex_exit(&vnode_free_list_lock);
	}
}

/*
 * Recycle an unused vnode if the caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{
	int error __diagused;

	mutex_enter(vp->v_interlock);

	/* Make sure we hold the last reference. */
	VSTATE_WAIT_STABLE(vp);
	if (vp->v_usecount != 1) {
		mutex_exit(vp->v_interlock);
		return false;
	}

	/* If the vnode is already clean we're done. */
	if (VSTATE_GET(vp) != VS_ACTIVE) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrelel(vp, 0);
		return true;
	}

	/* Prevent further references until the vnode is locked. */
	VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
	mutex_exit(vp->v_interlock);

	error = vn_lock(vp, LK_EXCLUSIVE);
	KASSERT(error == 0);

	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);

	vcache_reclaim(vp);
	vrelel(vp, 0);

	return true;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vp->v_usecount > 0);

	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		mutex_exit(vp->v_interlock);
		return;
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		mutex_exit(vp->v_interlock);
		vgone(vp);
		return;
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);
	}

	while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
		vgone(vq);
	}
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{

	if (vn_lock(vp, LK_EXCLUSIVE) != 0) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrele(vp);
		return;
	}

	mutex_enter(vp->v_interlock);
	vcache_reclaim(vp);
	vrelel(vp, 0);
}

static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
	uint32_t hash = HASH32_BUF_INIT;

	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
	return hash;
}
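
/*
 * The cache key is an opaque byte string chosen by the file system and
 * hashed together with the mount point.  As a hedged sketch (no real
 * file system implied), a file system keyed on inode numbers would
 * build and hash its key like this:
 *
 *	ino_t ino = 2;
 *	struct vcache_key key = {
 *		.vk_mount = mp,
 *		.vk_key = &ino,
 *		.vk_key_len = sizeof(ino),
 *	};
 *	uint32_t hash = vcache_hash(&key);
 *	struct hashhead *bucket = &vcache.hashtab[hash & vcache.hashmask];
 */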

static void
vcache_init(void)
{

	vcache.pool = pool_cache_init(sizeof(vnode_impl_t), 0, 0, 0,
	    "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vcache.pool != NULL);
	mutex_init(&vcache.lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vcache.cv, "vcache");
	vcache.hashsize = desiredvnodes;
	vcache.hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
	    &vcache.hashmask);
}

static void
vcache_reinit(void)
{
	int i;
	uint32_t hash;
	u_long oldmask, newmask;
	struct hashhead *oldtab, *newtab;
	vnode_impl_t *node;

	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
	mutex_enter(&vcache.lock);
	oldtab = vcache.hashtab;
	oldmask = vcache.hashmask;
	vcache.hashsize = desiredvnodes;
	vcache.hashtab = newtab;
	vcache.hashmask = newmask;
	for (i = 0; i <= oldmask; i++) {
		while ((node = SLIST_FIRST(&oldtab[i])) != NULL) {
			SLIST_REMOVE(&oldtab[i], node, vnode_impl, vi_hash);
			hash = vcache_hash(&node->vi_key);
			SLIST_INSERT_HEAD(&newtab[hash & vcache.hashmask],
			    node, vi_hash);
		}
	}
	mutex_exit(&vcache.lock);
	hashdone(oldtab, HASH_SLIST, oldmask);
}

static inline vnode_impl_t *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
	struct hashhead *hashp;
	vnode_impl_t *node;

	KASSERT(mutex_owned(&vcache.lock));

	hashp = &vcache.hashtab[hash & vcache.hashmask];
	SLIST_FOREACH(node, hashp, vi_hash) {
		if (key->vk_mount != node->vi_key.vk_mount)
			continue;
		if (key->vk_key_len != node->vi_key.vk_key_len)
			continue;
		if (memcmp(key->vk_key, node->vi_key.vk_key, key->vk_key_len))
			continue;
		return node;
	}
	return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static vnode_impl_t *
vcache_alloc(void)
{
	vnode_impl_t *node;
	vnode_t *vp;

	node = pool_cache_get(vcache.pool, PR_WAITOK);
	memset(node, 0, sizeof(*node));

	/* SLIST_INIT(&node->vi_hash); */

	vp = VIMPL_TO_VNODE(node);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	cv_init(&vp->v_cv, "vnode");
	/* LIST_INIT(&vp->v_nclist); */
	/* LIST_INIT(&vp->v_dnclist); */

	mutex_enter(&vnode_free_list_lock);
	numvnodes++;
	if (numvnodes > desiredvnodes + desiredvnodes / 10)
		cv_signal(&vdrain_cv);
	mutex_exit(&vnode_free_list_lock);

	rw_init(&vp->v_lock);
	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	node->vi_state = VS_LOADING;

	return node;
}

/*
 * Free an unused, unreferenced vcache node.
 */
static void
vcache_free(vnode_impl_t *node)
{
	vnode_t *vp;

	vp = VIMPL_TO_VNODE(node);

	KASSERT(vp->v_usecount == 0);

	rw_destroy(&vp->v_lock);
	mutex_enter(&vnode_free_list_lock);
	numvnodes--;
	mutex_exit(&vnode_free_list_lock);

	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	pool_cache_put(vcache.pool, node);
}

/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
	int error;
	uint32_t hash;
	const void *new_key;
	struct vnode *vp;
	struct vcache_key vcache_key;
	vnode_impl_t *node, *new_node;

	new_key = NULL;
	*vpp = NULL;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

again:
	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&vcache_key, hash);

	/* If found, take a reference or retry. */
	if (__predict_true(node != NULL)) {
		/*
		 * If the vnode is loading we cannot take the v_interlock
		 * here as it might change during load (see uvm_obj_setlock()).
		 * As changing state from VS_LOADING requires both vcache.lock
		 * and v_interlock it is safe to test with vcache.lock held.
		 *
		 * Wait for vnodes changing state from VS_LOADING and retry.
		 */
		if (__predict_false(node->vi_state == VS_LOADING)) {
			cv_wait(&vcache.cv, &vcache.lock);
			mutex_exit(&vcache.lock);
			goto again;
		}
		vp = VIMPL_TO_VNODE(node);
		mutex_enter(vp->v_interlock);
		mutex_exit(&vcache.lock);
		error = vget(vp, 0, true /* wait */);
		if (error == ENOENT)
			goto again;
		if (error == 0)
			*vpp = vp;
		KASSERT((error != 0) == (*vpp == NULL));
		return error;
	}
	mutex_exit(&vcache.lock);

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp, NULL);
	if (error)
		return error;
	new_node = vcache_alloc();
	new_node->vi_key = vcache_key;
	vp = VIMPL_TO_VNODE(new_node);
	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&vcache_key, hash);
	if (node == NULL) {
		SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
		    new_node, vi_hash);
		node = new_node;
	}

	/* If another thread beat us inserting this node, retry. */
	if (node != new_node) {
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		goto again;
	}
	mutex_exit(&vcache.lock);

	/* Load the fs node.  Exclusive as new_node is VS_LOADING. */
	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
	if (error) {
		mutex_enter(&vcache.lock);
		SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
		    new_node, vnode_impl, vi_hash);
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_key != NULL);
	KASSERT(memcmp(key, new_key, key_len) == 0);
	KASSERT(vp->v_op != NULL);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_unbusy(mp, true, NULL);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache.lock);
	new_node->vi_key.vk_key = new_key;
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache.lock);
	*vpp = vp;
	return 0;
}
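
/*
 * Illustrative sketch of a vcache_get() consumer, e.g. a VFS_VGET()
 * style lookup keyed on inode numbers (the key layout is the file
 * system's choice; "myfs_vget" is hypothetical):
 *
 *	int
 *	myfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
 *	{
 *
 *		return vcache_get(mp, &ino, sizeof(ino), vpp);
 *	}
 *
 * On success *vpp is referenced but unlocked; callers lock it with
 * vn_lock() as needed and eventually drop the reference with vrele().
 */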

/*
 * Create a new vnode / fs node pair and return it referenced through vpp.
 */
int
vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
    kauth_cred_t cred, struct vnode **vpp)
{
	int error;
	uint32_t hash;
	struct vnode *ovp, *vp;
	vnode_impl_t *new_node;
	vnode_impl_t *old_node __diagused;

	*vpp = NULL;

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp, NULL);
	if (error)
		return error;
	new_node = vcache_alloc();
	new_node->vi_key.vk_mount = mp;
	vp = VIMPL_TO_VNODE(new_node);

	/* Create and load the fs node. */
	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred,
	    &new_node->vi_key.vk_key_len, &new_node->vi_key.vk_key);
	if (error) {
		mutex_enter(&vcache.lock);
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_node->vi_key.vk_key != NULL);
	KASSERT(vp->v_op != NULL);
	hash = vcache_hash(&new_node->vi_key);

	/* Wait for previous instance to be reclaimed, then insert new node. */
	mutex_enter(&vcache.lock);
	while ((old_node = vcache_hash_lookup(&new_node->vi_key, hash))) {
		ovp = VIMPL_TO_VNODE(old_node);
		mutex_enter(ovp->v_interlock);
		mutex_exit(&vcache.lock);
		error = vget(ovp, 0, true /* wait */);
		KASSERT(error == ENOENT);
		mutex_enter(&vcache.lock);
	}
	SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
	    new_node, vi_hash);
	mutex_exit(&vcache.lock);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_unbusy(mp, true, NULL);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache.lock);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
	mutex_exit(&vcache.lock);
	mutex_exit(vp->v_interlock);
	*vpp = vp;
	return 0;
}
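
/*
 * A note on the VFS_NEWVNODE() contract implied by the KASSERTs above:
 * on success the file system must have set vp->v_op and vp->v_data and
 * returned a pointer to a stable key buffer that it owns.  A hedged
 * sketch of the shape of such a hook (all "myfs" names hypothetical):
 *
 *	static int
 *	myfs_newvnode(struct mount *mp, struct vnode *dvp, struct vnode *vp,
 *	    struct vattr *vap, kauth_cred_t cred,
 *	    size_t *key_len, const void **new_key)
 *	{
 *		struct myfs_node *ip;
 *
 *		ip = myfs_node_alloc(mp, vap);	(allocate, pick inode number)
 *		vp->v_data = ip;
 *		vp->v_op = myfs_vnodeop_p;
 *		vp->v_type = vap->va_type;
 *		*key_len = sizeof(ip->i_number);
 *		*new_key = &ip->i_number;
 *		return 0;
 *	}
 */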

/*
 * Prepare key change: lock old and new cache node.
 * Return an error if the new node already exists.
 */
int
vcache_rekey_enter(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *node, *new_node;
	struct vnode *tvp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	new_node = vcache_alloc();
	new_node->vi_key = new_vcache_key;
	tvp = VIMPL_TO_VNODE(new_node);

	/* Insert locked new node used as placeholder. */
	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&new_vcache_key, new_hash);
	if (node != NULL) {
		mutex_enter(tvp->v_interlock);
		VSTATE_CHANGE(tvp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(tvp, 0);
		return EEXIST;
	}
	SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
	    new_node, vi_hash);

	/* Lock old node. */
	node = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(node != NULL);
	KASSERT(VIMPL_TO_VNODE(node) == vp);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
	node->vi_key = old_vcache_key;
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache.lock);
	return 0;
}

/*
 * Key change complete: remove old node and unlock new node.
 */
void
vcache_rekey_exit(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *old_node, *new_node;
	struct vnode *tvp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	mutex_enter(&vcache.lock);

	/* Lookup old and new node. */
	old_node = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(old_node != NULL);
	KASSERT(VIMPL_TO_VNODE(old_node) == vp);
	mutex_enter(vp->v_interlock);
	VSTATE_ASSERT(vp, VS_BLOCKED);

	new_node = vcache_hash_lookup(&new_vcache_key, new_hash);
	KASSERT(new_node != NULL);
	KASSERT(new_node->vi_key.vk_key_len == new_key_len);
	tvp = VIMPL_TO_VNODE(new_node);
	mutex_enter(tvp->v_interlock);
	VSTATE_ASSERT(VIMPL_TO_VNODE(new_node), VS_LOADING);

	/* Rekey old node and put it onto its new hashlist. */
	old_node->vi_key = new_vcache_key;
	if (old_hash != new_hash) {
		SLIST_REMOVE(&vcache.hashtab[old_hash & vcache.hashmask],
		    old_node, vnode_impl, vi_hash);
		SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
		    old_node, vi_hash);
	}
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
	mutex_exit(vp->v_interlock);

	/* Remove new node used as placeholder. */
	SLIST_REMOVE(&vcache.hashtab[new_hash & vcache.hashmask],
	    new_node, vnode_impl, vi_hash);
	VSTATE_CHANGE(tvp, VS_LOADING, VS_RECLAIMED);
	mutex_exit(&vcache.lock);
	vrelel(tvp, 0);
}
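
/*
 * Together, vcache_rekey_enter() and vcache_rekey_exit() bracket a key
 * change.  A hedged sketch of a file system renumbering a node (the
 * "ino" variables are hypothetical; the vnode must be referenced and
 * kept by the caller throughout):
 *
 *	error = vcache_rekey_enter(mp, vp, &old_ino, sizeof(old_ino),
 *	    &new_ino, sizeof(new_ino));
 *	if (error != 0)
 *		return error;	(EEXIST: target key already present)
 *	(update the file system's own notion of the node's identity)
 *	vcache_rekey_exit(mp, vp, &old_ino, sizeof(old_ino),
 *	    &new_ino, sizeof(new_ino));
 */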

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with vnode locked and will return unlocked.
 * Must be called with the interlock held, and will return with it held.
 */
static void
vcache_reclaim(vnode_t *vp)
{
	lwp_t *l = curlwp;
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
	uint32_t hash;
	uint8_t temp_buf[64], *temp_key;
	size_t temp_key_len;
	bool recycle, active;
	int error;

	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount != 0);

	active = (vp->v_usecount > 1);
	temp_key_len = node->vi_key.vk_key_len;
	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	VSTATE_CHANGE(vp, VS_ACTIVE, VS_RECLAIMING);
	if (vp->v_iflag & VI_EXECMAP) {
		atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
		atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	mutex_exit(vp->v_interlock);

	/* Replace the vnode key with a temporary copy. */
	if (node->vi_key.vk_key_len > sizeof(temp_buf)) {
		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
	} else {
		temp_key = temp_buf;
	}
	mutex_enter(&vcache.lock);
	memcpy(temp_key, node->vi_key.vk_key, temp_key_len);
	node->vi_key.vk_key = temp_key;
	mutex_exit(&vcache.lock);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.
	 */
	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
	if (error != 0) {
		if (wapbl_vphaswapbl(vp))
			WAPBL_DISCARD(wapbl_vptomp(vp));
		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
	}
	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
		spec_node_revoke(vp);
	}

	/*
	 * Disassociate the underlying file system from the vnode.
	 * Note that VOP_INACTIVE() will unlock the vnode.
	 */
	VOP_INACTIVE(vp, &recycle);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	KASSERT(vp->v_data == NULL);
	KASSERT(vp->v_uobj.uo_npages == 0);

	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}

	/* Purge name cache. */
	cache_purge(vp);

	/* Move to dead mount. */
	vp->v_vflag &= ~VV_ROOT;
	atomic_inc_uint(&dead_rootmount->mnt_refcnt);
	vfs_insmntque(vp, dead_rootmount);

	/* Remove from vnode cache. */
	hash = vcache_hash(&node->vi_key);
	mutex_enter(&vcache.lock);
	KASSERT(node == vcache_hash_lookup(&node->vi_key, hash));
	SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
	    node, vnode_impl, vi_hash);
	mutex_exit(&vcache.lock);
	if (temp_key != temp_buf)
		kmem_free(temp_key, temp_key_len);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_vflag |= VV_LOCKSWORK;
	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
	vp->v_tag = VT_NON;
	KNOTE(&vp->v_klist, NOTE_REVOKE);

	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(struct buf *bp)
{
	vnode_t *vp;

	if ((vp = bp->b_vp) == NULL)
		return;

	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));

	if (--vp->v_numoutput < 0)
		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
	if (vp->v_numoutput == 0)
		cv_broadcast(&vp->v_cv);
}

/*
 * Test a vnode for being or becoming dead.  Returns one of:
 * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
 * ENOENT: vnode is dead.
 * 0:      otherwise.
 *
 * Whenever this function returns a non-zero value all future
 * calls will also return a non-zero value.
 */
int
vdead_check(struct vnode *vp, int flags)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (! ISSET(flags, VDEAD_NOWAIT))
		VSTATE_WAIT_STABLE(vp);

	if (VSTATE_GET(vp) == VS_RECLAIMING) {
		KASSERT(ISSET(flags, VDEAD_NOWAIT));
		return EBUSY;
	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
		return ENOENT;
	}

	return 0;
}
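
/*
 * A minimal usage sketch for vdead_check(), assuming the caller already
 * holds v_interlock (illustrative only):
 *
 *	KASSERT(mutex_owned(vp->v_interlock));
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	if (error != 0)
 *		return error;	(EBUSY or ENOENT: vnode dying or dead)
 */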

int
vfs_drainvnodes(void)
{
	int error, gen;

	mutex_enter(&vrele_lock);
	gen = vrele_gen;
	while (vrele_pending && gen == vrele_gen) {
		cv_broadcast(&vrele_cv);
		cv_wait(&vrele_cv, &vrele_lock);
	}
	mutex_exit(&vrele_lock);

	mutex_enter(&vnode_free_list_lock);

	while (numvnodes > desiredvnodes) {
		error = cleanvnode();
		if (error != 0)
			return error;
		mutex_enter(&vnode_free_list_lock);
	}

	mutex_exit(&vnode_free_list_lock);

	if (vcache.hashsize != desiredvnodes)
		vcache_reinit();

	return 0;
}

void
vnpanic(vnode_t *vp, const char *fmt, ...)
{
	va_list ap;

#ifdef DIAGNOSTIC
	vprint(NULL, vp);
#endif
	va_start(ap, fmt);
	vpanic(fmt, ap);
	va_end(ap);
}
   1642