      1 /*	$NetBSD: vfs_vnode.c,v 1.68 2017/01/02 10:36:58 hannken Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 /*
     34  * Copyright (c) 1989, 1993
     35  *	The Regents of the University of California.  All rights reserved.
     36  * (c) UNIX System Laboratories, Inc.
     37  * All or some portions of this file are derived from material licensed
     38  * to the University of California by American Telephone and Telegraph
     39  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     40  * the permission of UNIX System Laboratories, Inc.
     41  *
     42  * Redistribution and use in source and binary forms, with or without
     43  * modification, are permitted provided that the following conditions
     44  * are met:
     45  * 1. Redistributions of source code must retain the above copyright
     46  *    notice, this list of conditions and the following disclaimer.
     47  * 2. Redistributions in binary form must reproduce the above copyright
     48  *    notice, this list of conditions and the following disclaimer in the
     49  *    documentation and/or other materials provided with the distribution.
     50  * 3. Neither the name of the University nor the names of its contributors
     51  *    may be used to endorse or promote products derived from this software
     52  *    without specific prior written permission.
     53  *
     54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     64  * SUCH DAMAGE.
     65  *
     66  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
     67  */
     68 
     69 /*
     70  * The vnode cache subsystem.
     71  *
     72  * Life-cycle
     73  *
      74  *	Normally, there are two points where new vnodes are created:
      75  *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
      76  *	starts in one of the following ways:
      77  *
      78  *	- Allocation, via vcache_get(9) or vcache_new(9).
      79  *	- Reclamation of an inactive vnode, via vcache_vget(9).
      80  *
      81  *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
      82  *	was another, traditional way.  Currently, only the draining thread
      83  *	recycles vnodes.  This behaviour might be revisited.
      84  *
      85  *	The life-cycle ends when the last reference is dropped, usually in
      86  *	VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
      87  *	the file system that the vnode is inactive; via this call, the file
      88  *	system indicates whether the vnode can be recycled (usually by
      89  *	checking its own references, e.g. the link count, or whether the
      90  *	file was removed).
      91  *
      92  *	Depending on that indication, the vnode is either put onto a free
      93  *	list (cache), or cleaned via vcache_reclaim() (which calls
      94  *	VOP_RECLAIM(9) to disassociate the file system) and destroyed.
     95  *
     96  * Vnode state
     97  *
      98  *	A vnode is always in one of six states:
      99  *	- MARKER	This is a marker vnode to help list traversal.  It
     100  *			will never change its state.
     101  *	- LOADING	Vnode is associating with the underlying file system
     102  *			and is not yet ready to use.
     103  *	- ACTIVE	Vnode has an associated underlying file system and
     104  *			is ready to use.
     105  *	- BLOCKED	Vnode is active but cannot get new references.
     106  *	- RECLAIMING	Vnode is disassociating from the underlying file
     107  *			system.
     108  *	- RECLAIMED	Vnode has disassociated from the underlying file
     109  *			system and is dead.
    110  *
    111  *	Valid state changes are:
    112  *	LOADING -> ACTIVE
    113  *			Vnode has been initialised in vcache_get() or
    114  *			vcache_new() and is ready to use.
    115  *	ACTIVE -> RECLAIMING
    116  *			Vnode starts disassociation from underlying file
    117  *			system in vcache_reclaim().
    118  *	RECLAIMING -> RECLAIMED
    119  *			Vnode finished disassociation from underlying file
    120  *			system in vcache_reclaim().
    121  *	ACTIVE -> BLOCKED
    122  *			Either vcache_rekey*() is changing the vnode key or
    123  *			vrelel() is about to call VOP_INACTIVE().
    124  *	BLOCKED -> ACTIVE
    125  *			The block condition is over.
    126  *	LOADING -> RECLAIMED
    127  *			Either vcache_get() or vcache_new() failed to
    128  *			associate the underlying file system or vcache_rekey*()
    129  *			drops a vnode used as placeholder.
    130  *
     131  *	Of these states, LOADING, BLOCKED and RECLAIMING are intermediate;
     132  *	it is possible to wait for a state change.
     133  *
     134  *	The state is protected by v_interlock, with one exception: to change
     135  *	from LOADING both v_interlock and vcache.lock must be held, so it is
     136  *	possible to check "state == LOADING" while holding only vcache.lock.
     137  *	See vcache_get() for details.
    138  *
    139  * Reference counting
    140  *
     141  *	A vnode is considered active if its reference count
     142  *	(vnode_t::v_usecount) is non-zero.  The count is maintained by the
     143  *	vref(9), vrele(9) and vput(9) routines.  Typical reference holders
     144  *	include open files, current working directories and mount points.
    145  *
    146  * Note on v_usecount and its locking
    147  *
     148  *	At nearly all points where it is known that v_usecount could be
     149  *	zero, vnode_t::v_interlock will be held.  To change v_usecount
     150  *	away from zero, the interlock must be held.  To change from a
     151  *	non-zero value to zero, again the interlock must be held.
    152  *
    153  *	Changing the usecount from a non-zero value to a non-zero value can
    154  *	safely be done using atomic operations, without the interlock held.
    155  *
    156  */
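
/*
 * Example (illustrative sketch, not part of the original file): how a
 * file system typically drives this life-cycle, e.g. from its lookup
 * path.  The inode-number key and the "example_" name are hypothetical;
 * real callers pass whatever key layout their VFS_LOADVNODE() expects.
 */
#if 0
static int
example_use_by_ino(struct mount *mp, ino_t ino)
{
	struct vnode *vp;
	int error;

	/* Get a referenced vnode; loads the fs node on a cache miss. */
	error = vcache_get(mp, &ino, sizeof(ino), &vp);
	if (error != 0)
		return error;

	/* ... lock and use the vnode: vn_lock(9), VOP calls ... */

	/* Drop the reference; the last vrele(9) leads to VOP_INACTIVE(9). */
	vrele(vp);
	return 0;
}
#endif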
    157 
    158 #include <sys/cdefs.h>
    159 __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.68 2017/01/02 10:36:58 hannken Exp $");
    160 
    161 #include <sys/param.h>
    162 #include <sys/kernel.h>
    163 
    164 #include <sys/atomic.h>
    165 #include <sys/buf.h>
    166 #include <sys/conf.h>
    167 #include <sys/device.h>
    168 #include <sys/hash.h>
    169 #include <sys/kauth.h>
    170 #include <sys/kmem.h>
    171 #include <sys/kthread.h>
    172 #include <sys/module.h>
    173 #include <sys/mount.h>
    174 #include <sys/namei.h>
    175 #include <sys/syscallargs.h>
    176 #include <sys/sysctl.h>
    177 #include <sys/systm.h>
    178 #include <sys/vnode_impl.h>
    179 #include <sys/wapbl.h>
    180 #include <sys/fstrans.h>
    181 
    182 #include <uvm/uvm.h>
    183 #include <uvm/uvm_readahead.h>
    184 
    185 /* Flags to vrelel. */
    186 #define	VRELEL_ASYNC_RELE	0x0001	/* Always defer to vrele thread. */
    187 
    188 u_int			numvnodes		__cacheline_aligned;
    189 
    190 /*
    191  * There are three lru lists: one holds vnodes waiting for async release,
    192  * one is for vnodes which have no buffer/page references and
    193  * one for those which do (i.e. v_holdcnt is non-zero).
    194  */
    195 static vnodelst_t	lru_vrele_list		__cacheline_aligned;
    196 static vnodelst_t	lru_free_list		__cacheline_aligned;
    197 static vnodelst_t	lru_hold_list		__cacheline_aligned;
    198 static kmutex_t		vdrain_lock		__cacheline_aligned;
    199 static kcondvar_t	vdrain_cv		__cacheline_aligned;
    200 static int		vdrain_gen;
    201 static kcondvar_t	vdrain_gen_cv;
    202 static bool		vdrain_retry;
    203 static lwp_t *		vdrain_lwp;
    204 SLIST_HEAD(hashhead, vnode_impl);
    205 static struct {
    206 	kmutex_t	lock;
    207 	kcondvar_t	cv;
    208 	u_int		hashsize;
    209 	u_long		hashmask;
    210 	struct hashhead	*hashtab;
    211 	pool_cache_t	pool;
    212 }			vcache			__cacheline_aligned;
    213 
    214 static void		lru_requeue(vnode_t *, vnodelst_t *);
    215 static vnodelst_t *	lru_which(vnode_t *);
    216 static vnode_impl_t *	vcache_alloc(void);
    217 static void		vcache_free(vnode_impl_t *);
    218 static void		vcache_init(void);
    219 static void		vcache_reinit(void);
    220 static void		vcache_reclaim(vnode_t *);
    221 static void		vrelel(vnode_t *, int);
    222 static void		vdrain_thread(void *);
    223 static void		vnpanic(vnode_t *, const char *, ...)
    224     __printflike(2, 3);
    225 
    226 /* Routines having to do with the management of the vnode table. */
    227 extern struct mount	*dead_rootmount;
    228 extern int		(**dead_vnodeop_p)(void *);
    229 extern struct vfsops	dead_vfsops;
    230 
    231 /* Vnode state operations and diagnostics. */
    232 
    233 #if defined(DIAGNOSTIC)
    234 
    235 #define VSTATE_GET(vp) \
    236 	vstate_assert_get((vp), __func__, __LINE__)
    237 #define VSTATE_CHANGE(vp, from, to) \
    238 	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
    239 #define VSTATE_WAIT_STABLE(vp) \
    240 	vstate_assert_wait_stable((vp), __func__, __LINE__)
    241 #define VSTATE_ASSERT(vp, state) \
    242 	vstate_assert((vp), (state), __func__, __LINE__)
    243 
    244 static void
    245 vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line)
    246 {
    247 	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
    248 
    249 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    250 
    251 	if (__predict_true(node->vi_state == state))
    252 		return;
    253 	vnpanic(vp, "state is %s, expected %s at %s:%d",
    254 	    vstate_name(node->vi_state), vstate_name(state), func, line);
    255 }
    256 
    257 static enum vnode_state
    258 vstate_assert_get(vnode_t *vp, const char *func, int line)
    259 {
    260 	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
    261 
    262 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    263 	if (node->vi_state == VS_MARKER)
    264 		vnpanic(vp, "state is %s at %s:%d",
    265 		    vstate_name(node->vi_state), func, line);
    266 
    267 	return node->vi_state;
    268 }
    269 
    270 static void
    271 vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
    272 {
    273 	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
    274 
    275 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    276 	if (node->vi_state == VS_MARKER)
    277 		vnpanic(vp, "state is %s at %s:%d",
    278 		    vstate_name(node->vi_state), func, line);
    279 
    280 	while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
    281 		cv_wait(&vp->v_cv, vp->v_interlock);
    282 
    283 	if (node->vi_state == VS_MARKER)
    284 		vnpanic(vp, "state is %s at %s:%d",
    285 		    vstate_name(node->vi_state), func, line);
    286 }
    287 
    288 static void
    289 vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    290     const char *func, int line)
    291 {
    292 	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
    293 
    294 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    295 	if (from == VS_LOADING)
    296 		KASSERTMSG(mutex_owned(&vcache.lock), "at %s:%d", func, line);
    297 
    298 	if (from == VS_MARKER)
    299 		vnpanic(vp, "from is %s at %s:%d",
    300 		    vstate_name(from), func, line);
    301 	if (to == VS_MARKER)
    302 		vnpanic(vp, "to is %s at %s:%d",
    303 		    vstate_name(to), func, line);
    304 	if (node->vi_state != from)
    305 		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
    306 		    vstate_name(node->vi_state), vstate_name(from), func, line);
    307 	if ((from == VS_BLOCKED || to == VS_BLOCKED) && vp->v_usecount != 1)
    308 		vnpanic(vp, "%s to %s with usecount %d at %s:%d",
    309 		    vstate_name(from), vstate_name(to), vp->v_usecount,
    310 		    func, line);
    311 
    312 	node->vi_state = to;
    313 	if (from == VS_LOADING)
    314 		cv_broadcast(&vcache.cv);
    315 	if (to == VS_ACTIVE || to == VS_RECLAIMED)
    316 		cv_broadcast(&vp->v_cv);
    317 }
    318 
    319 #else /* defined(DIAGNOSTIC) */
    320 
    321 #define VSTATE_GET(vp) \
    322 	(VNODE_TO_VIMPL((vp))->vi_state)
    323 #define VSTATE_CHANGE(vp, from, to) \
    324 	vstate_change((vp), (from), (to))
    325 #define VSTATE_WAIT_STABLE(vp) \
    326 	vstate_wait_stable((vp))
    327 #define VSTATE_ASSERT(vp, state)
    328 
    329 static void
    330 vstate_wait_stable(vnode_t *vp)
    331 {
    332 	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
    333 
    334 	while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
    335 		cv_wait(&vp->v_cv, vp->v_interlock);
    336 }
    337 
    338 static void
    339 vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
    340 {
    341 	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
    342 
    343 	node->vi_state = to;
    344 	if (from == VS_LOADING)
    345 		cv_broadcast(&vcache.cv);
    346 	if (to == VS_ACTIVE || to == VS_RECLAIMED)
    347 		cv_broadcast(&vp->v_cv);
    348 }
    349 
    350 #endif /* defined(DIAGNOSTIC) */
    351 
    352 void
    353 vfs_vnode_sysinit(void)
    354 {
    355 	int error __diagused;
    356 
    357 	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
    358 	KASSERT(dead_rootmount != NULL);
    359 	dead_rootmount->mnt_iflag = IMNT_MPSAFE;
    360 
    361 	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
    362 	TAILQ_INIT(&lru_free_list);
    363 	TAILQ_INIT(&lru_hold_list);
    364 	TAILQ_INIT(&lru_vrele_list);
    365 
    366 	vcache_init();
    367 
    368 	cv_init(&vdrain_cv, "vdrain");
    369 	cv_init(&vdrain_gen_cv, "vdrainwt");
    370 	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
    371 	    NULL, &vdrain_lwp, "vdrain");
    372 	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
    373 }
    374 
    375 /*
    376  * Allocate a new marker vnode.
    377  */
    378 vnode_t *
    379 vnalloc_marker(struct mount *mp)
    380 {
    381 	vnode_impl_t *node;
    382 	vnode_t *vp;
    383 
    384 	node = pool_cache_get(vcache.pool, PR_WAITOK);
    385 	memset(node, 0, sizeof(*node));
    386 	vp = VIMPL_TO_VNODE(node);
    387 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
    388 	vp->v_mount = mp;
    389 	vp->v_type = VBAD;
    390 	node->vi_state = VS_MARKER;
    391 
    392 	return vp;
    393 }
    394 
    395 /*
    396  * Free a marker vnode.
    397  */
    398 void
    399 vnfree_marker(vnode_t *vp)
    400 {
    401 	vnode_impl_t *node;
    402 
    403 	node = VNODE_TO_VIMPL(vp);
    404 	KASSERT(node->vi_state == VS_MARKER);
    405 	uvm_obj_destroy(&vp->v_uobj, true);
    406 	pool_cache_put(vcache.pool, node);
    407 }
    408 
    409 /*
    410  * Test a vnode for being a marker vnode.
    411  */
    412 bool
    413 vnis_marker(vnode_t *vp)
    414 {
    415 
    416 	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
    417 }
    418 
    419 /*
    420  * Return the lru list this node should be on.
    421  */
    422 static vnodelst_t *
    423 lru_which(vnode_t *vp)
    424 {
    425 
    426 	KASSERT(mutex_owned(vp->v_interlock));
    427 
    428 	if (vp->v_holdcnt > 0)
    429 		return &lru_hold_list;
    430 	else
    431 		return &lru_free_list;
    432 }
    433 
    434 /*
     435  * Put the vnode at the end of the given list.
     436  * Both the current and the new list may be NULL (used on vnode alloc/free).
     437  * Adjust numvnodes and signal the vdrain thread if there is work.
    438  */
    439 static void
    440 lru_requeue(vnode_t *vp, vnodelst_t *listhd)
    441 {
    442 	vnode_impl_t *node;
    443 
    444 	mutex_enter(&vdrain_lock);
    445 	node = VNODE_TO_VIMPL(vp);
    446 	if (node->vi_lrulisthd != NULL)
    447 		TAILQ_REMOVE(node->vi_lrulisthd, node, vi_lrulist);
    448 	else
    449 		numvnodes++;
    450 	node->vi_lrulisthd = listhd;
    451 	if (node->vi_lrulisthd != NULL)
    452 		TAILQ_INSERT_TAIL(node->vi_lrulisthd, node, vi_lrulist);
    453 	else
    454 		numvnodes--;
    455 	if (numvnodes > desiredvnodes || listhd == &lru_vrele_list)
    456 		cv_broadcast(&vdrain_cv);
    457 	mutex_exit(&vdrain_lock);
    458 }
    459 
    460 /*
    461  * Reclaim a cached vnode.  Used from vdrain_thread only.
    462  */
    463 static __inline void
    464 vdrain_remove(vnode_t *vp)
    465 {
    466 	struct mount *mp;
    467 
    468 	KASSERT(mutex_owned(&vdrain_lock));
    469 
    470 	/* Probe usecount (unlocked). */
    471 	if (vp->v_usecount > 0)
    472 		return;
    473 	/* Try v_interlock -- we lock the wrong direction! */
    474 	if (!mutex_tryenter(vp->v_interlock))
    475 		return;
    476 	/* Probe usecount and state. */
    477 	if (vp->v_usecount > 0 || VSTATE_GET(vp) != VS_ACTIVE) {
    478 		mutex_exit(vp->v_interlock);
    479 		return;
    480 	}
    481 	mp = vp->v_mount;
    482 	if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
    483 		mutex_exit(vp->v_interlock);
    484 		return;
    485 	}
    486 	vdrain_retry = true;
    487 	mutex_exit(&vdrain_lock);
    488 
    489 	if (vcache_vget(vp) == 0) {
    490 		if (!vrecycle(vp))
    491 			vrele(vp);
    492 	}
    493 	fstrans_done(mp);
    494 
    495 	mutex_enter(&vdrain_lock);
    496 }
    497 
    498 /*
    499  * Release a cached vnode.  Used from vdrain_thread only.
    500  */
    501 static __inline void
    502 vdrain_vrele(vnode_t *vp)
    503 {
    504 	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
    505 	struct mount *mp;
    506 
    507 	KASSERT(mutex_owned(&vdrain_lock));
    508 
    509 	mp = vp->v_mount;
    510 	if (fstrans_start_nowait(mp, FSTRANS_LAZY) != 0)
    511 		return;
    512 
    513 	/*
    514 	 * First remove the vnode from the vrele list.
     515 	 * Put it on the hold list; the last vrele()
    516 	 * will put it back onto the right list before
    517 	 * its v_usecount reaches zero.
    518 	 */
    519 	KASSERT(node->vi_lrulisthd == &lru_vrele_list);
    520 	TAILQ_REMOVE(node->vi_lrulisthd, node, vi_lrulist);
    521 	node->vi_lrulisthd = &lru_hold_list;
    522 	TAILQ_INSERT_TAIL(node->vi_lrulisthd, node, vi_lrulist);
    523 
    524 	vdrain_retry = true;
    525 	mutex_exit(&vdrain_lock);
    526 
    527 	mutex_enter(vp->v_interlock);
    528 	vrelel(vp, 0);
    529 	fstrans_done(mp);
    530 
    531 	mutex_enter(&vdrain_lock);
    532 }
    533 
    534 /*
     535  * Helper thread to keep the number of vnodes below desiredvnodes,
     536  * and to release vnodes queued for asynchronous vrele.
    537  */
    538 static void
    539 vdrain_thread(void *cookie)
    540 {
    541 	vnodelst_t *listhd[] = {
    542 	    &lru_vrele_list, &lru_free_list, &lru_hold_list
    543 	};
    544 	int i;
    545 	u_int target;
    546 	vnode_impl_t *node, *marker;
    547 
    548 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
    549 
    550 	mutex_enter(&vdrain_lock);
    551 
    552 	for (;;) {
    553 		vdrain_retry = false;
    554 		target = desiredvnodes - desiredvnodes/10;
    555 
    556 		for (i = 0; i < __arraycount(listhd); i++) {
    557 			TAILQ_INSERT_HEAD(listhd[i], marker, vi_lrulist);
    558 			while ((node = TAILQ_NEXT(marker, vi_lrulist))) {
    559 				TAILQ_REMOVE(listhd[i], marker, vi_lrulist);
    560 				TAILQ_INSERT_AFTER(listhd[i], node, marker,
    561 				    vi_lrulist);
    562 				if (listhd[i] == &lru_vrele_list)
    563 					vdrain_vrele(VIMPL_TO_VNODE(node));
    564 				else if (numvnodes < target)
    565 					break;
    566 				else
    567 					vdrain_remove(VIMPL_TO_VNODE(node));
    568 			}
    569 			TAILQ_REMOVE(listhd[i], marker, vi_lrulist);
    570 		}
    571 
    572 		if (vdrain_retry) {
    573 			mutex_exit(&vdrain_lock);
    574 			yield();
    575 			mutex_enter(&vdrain_lock);
    576 		} else {
    577 			vdrain_gen++;
    578 			cv_broadcast(&vdrain_gen_cv);
    579 			cv_wait(&vdrain_cv, &vdrain_lock);
    580 		}
    581 	}
    582 }
    583 
    584 /*
    585  * vput: unlock and release the reference.
    586  */
    587 void
    588 vput(vnode_t *vp)
    589 {
    590 
    591 	VOP_UNLOCK(vp);
    592 	vrele(vp);
    593 }
    594 
    595 /*
     596  * Try to drop a reference on a vnode.  Abort if we would release the
     597  * last reference.  Note: this _must_ succeed if not the last reference.
    598  */
    599 static inline bool
    600 vtryrele(vnode_t *vp)
    601 {
    602 	u_int use, next;
    603 
    604 	for (use = vp->v_usecount;; use = next) {
    605 		if (use == 1) {
    606 			return false;
    607 		}
    608 		KASSERT(use > 1);
    609 		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
    610 		if (__predict_true(next == use)) {
    611 			return true;
    612 		}
    613 	}
    614 }
    615 
    616 /*
    617  * Vnode release.  If reference count drops to zero, call inactive
    618  * routine and either return to freelist or free to the pool.
    619  */
    620 static void
    621 vrelel(vnode_t *vp, int flags)
    622 {
    623 	bool recycle, defer;
    624 	int error;
    625 
    626 	KASSERT(mutex_owned(vp->v_interlock));
    627 
    628 	if (__predict_false(vp->v_op == dead_vnodeop_p &&
    629 	    VSTATE_GET(vp) != VS_RECLAIMED)) {
    630 		vnpanic(vp, "dead but not clean");
    631 	}
    632 
    633 	/*
    634 	 * If not the last reference, just drop the reference count
    635 	 * and unlock.
    636 	 */
    637 	if (vtryrele(vp)) {
    638 		mutex_exit(vp->v_interlock);
    639 		return;
    640 	}
    641 	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
    642 		vnpanic(vp, "%s: bad ref count", __func__);
    643 	}
    644 
    645 #ifdef DIAGNOSTIC
    646 	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
    647 	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
    648 		vprint("vrelel: missing VOP_CLOSE()", vp);
    649 	}
    650 #endif
    651 
    652 	/*
    653 	 * If not clean, deactivate the vnode, but preserve
    654 	 * our reference across the call to VOP_INACTIVE().
    655 	 */
    656 	if (VSTATE_GET(vp) != VS_RECLAIMED) {
    657 		recycle = false;
    658 
    659 		/*
    660 		 * XXX This ugly block can be largely eliminated if
    661 		 * locking is pushed down into the file systems.
    662 		 *
    663 		 * Defer vnode release to vdrain_thread if caller
    664 		 * requests it explicitly or is the pagedaemon.
    665 		 */
    666 		if ((curlwp == uvm.pagedaemon_lwp) ||
    667 		    (flags & VRELEL_ASYNC_RELE) != 0) {
    668 			defer = true;
    669 		} else if (curlwp == vdrain_lwp) {
    670 			/*
    671 			 * We have to try harder.
    672 			 */
    673 			mutex_exit(vp->v_interlock);
    674 			error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    675 			KASSERTMSG((error == 0), "vn_lock failed: %d", error);
    676 			mutex_enter(vp->v_interlock);
    677 			defer = false;
    678 		} else {
    679 			/* If we can't acquire the lock, then defer. */
    680 			mutex_exit(vp->v_interlock);
    681 			error = vn_lock(vp,
    682 			    LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
    683 			defer = (error != 0);
    684 			mutex_enter(vp->v_interlock);
    685 		}
    686 
    687 		KASSERT(mutex_owned(vp->v_interlock));
    688 		KASSERT(! (curlwp == vdrain_lwp && defer));
    689 
    690 		if (defer) {
    691 			/*
    692 			 * Defer reclaim to the kthread; it's not safe to
    693 			 * clean it here.  We donate it our last reference.
    694 			 */
    695 			lru_requeue(vp, &lru_vrele_list);
    696 			mutex_exit(vp->v_interlock);
    697 			return;
    698 		}
    699 
    700 		/*
    701 		 * If the node got another reference while we
    702 		 * released the interlock, don't try to inactivate it yet.
    703 		 */
    704 		if (__predict_false(vtryrele(vp))) {
    705 			VOP_UNLOCK(vp);
    706 			mutex_exit(vp->v_interlock);
    707 			return;
    708 		}
    709 		VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
    710 		mutex_exit(vp->v_interlock);
    711 
    712 		/*
    713 		 * The vnode must not gain another reference while being
    714 		 * deactivated.  If VOP_INACTIVE() indicates that
    715 		 * the described file has been deleted, then recycle
    716 		 * the vnode.
    717 		 *
    718 		 * Note that VOP_INACTIVE() will drop the vnode lock.
    719 		 */
    720 		VOP_INACTIVE(vp, &recycle);
    721 		if (recycle) {
    722 			/* vcache_reclaim() below will drop the lock. */
    723 			if (vn_lock(vp, LK_EXCLUSIVE) != 0)
    724 				recycle = false;
    725 		}
    726 		mutex_enter(vp->v_interlock);
    727 		VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
    728 		if (!recycle) {
    729 			if (vtryrele(vp)) {
    730 				mutex_exit(vp->v_interlock);
    731 				return;
    732 			}
    733 		}
    734 
    735 		/* Take care of space accounting. */
    736 		if (vp->v_iflag & VI_EXECMAP) {
    737 			atomic_add_int(&uvmexp.execpages,
    738 			    -vp->v_uobj.uo_npages);
    739 			atomic_add_int(&uvmexp.filepages,
    740 			    vp->v_uobj.uo_npages);
    741 		}
    742 		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
    743 		vp->v_vflag &= ~VV_MAPPED;
    744 
    745 		/*
     746 		 * Recycle the vnode if the file is now unused (unlinked);
     747 		 * otherwise just drop our reference.
    748 		 */
    749 		if (recycle) {
    750 			VSTATE_ASSERT(vp, VS_ACTIVE);
    751 			vcache_reclaim(vp);
    752 		}
    753 		KASSERT(vp->v_usecount > 0);
    754 	}
    755 
    756 	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
    757 		/* Gained another reference while being reclaimed. */
    758 		mutex_exit(vp->v_interlock);
    759 		return;
    760 	}
    761 
    762 	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
    763 		/*
    764 		 * It's clean so destroy it.  It isn't referenced
    765 		 * anywhere since it has been reclaimed.
    766 		 */
    767 		vcache_free(VNODE_TO_VIMPL(vp));
    768 	} else {
    769 		/*
    770 		 * Otherwise, put it back onto the freelist.  It
    771 		 * can't be destroyed while still associated with
    772 		 * a file system.
    773 		 */
    774 		lru_requeue(vp, lru_which(vp));
    775 		mutex_exit(vp->v_interlock);
    776 	}
    777 }
    778 
    779 void
    780 vrele(vnode_t *vp)
    781 {
    782 
    783 	if (vtryrele(vp)) {
    784 		return;
    785 	}
    786 	mutex_enter(vp->v_interlock);
    787 	vrelel(vp, 0);
    788 }
    789 
    790 /*
     791  * Asynchronous vnode release: the vnode is released in a different context.
    792  */
    793 void
    794 vrele_async(vnode_t *vp)
    795 {
    796 
    797 	if (vtryrele(vp)) {
    798 		return;
    799 	}
    800 	mutex_enter(vp->v_interlock);
    801 	vrelel(vp, VRELEL_ASYNC_RELE);
    802 }
    803 
    804 /*
    805  * Vnode reference, where a reference is already held by some other
    806  * object (for example, a file structure).
    807  */
    808 void
    809 vref(vnode_t *vp)
    810 {
    811 
    812 	KASSERT(vp->v_usecount != 0);
    813 
    814 	atomic_inc_uint(&vp->v_usecount);
    815 }
    816 
    817 /*
    818  * Page or buffer structure gets a reference.
    819  * Called with v_interlock held.
    820  */
    821 void
    822 vholdl(vnode_t *vp)
    823 {
    824 
    825 	KASSERT(mutex_owned(vp->v_interlock));
    826 
    827 	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0)
    828 		lru_requeue(vp, lru_which(vp));
    829 }
    830 
    831 /*
    832  * Page or buffer structure frees a reference.
    833  * Called with v_interlock held.
    834  */
    835 void
    836 holdrelel(vnode_t *vp)
    837 {
    838 
    839 	KASSERT(mutex_owned(vp->v_interlock));
    840 
    841 	if (vp->v_holdcnt <= 0) {
    842 		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
    843 	}
    844 
    845 	vp->v_holdcnt--;
    846 	if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
    847 		lru_requeue(vp, lru_which(vp));
    848 }
    849 
    850 /*
     851  * Recycle an unused vnode if the caller holds the last reference.
    852  */
    853 bool
    854 vrecycle(vnode_t *vp)
    855 {
    856 	int error __diagused;
    857 
    858 	mutex_enter(vp->v_interlock);
    859 
    860 	/* Make sure we hold the last reference. */
    861 	VSTATE_WAIT_STABLE(vp);
    862 	if (vp->v_usecount != 1) {
    863 		mutex_exit(vp->v_interlock);
    864 		return false;
    865 	}
    866 
    867 	/* If the vnode is already clean we're done. */
    868 	if (VSTATE_GET(vp) != VS_ACTIVE) {
    869 		VSTATE_ASSERT(vp, VS_RECLAIMED);
    870 		vrelel(vp, 0);
    871 		return true;
    872 	}
    873 
    874 	/* Prevent further references until the vnode is locked. */
    875 	VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
    876 	mutex_exit(vp->v_interlock);
    877 
    878 	error = vn_lock(vp, LK_EXCLUSIVE);
    879 	KASSERT(error == 0);
    880 
    881 	mutex_enter(vp->v_interlock);
    882 	VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
    883 
    884 	KASSERT(vp->v_usecount == 1);
    885 	vcache_reclaim(vp);
    886 	vrelel(vp, 0);
    887 
    888 	return true;
    889 }
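
/*
 * Illustrative sketch (not part of the original file): the calling
 * pattern for vrecycle(), as used by vdrain_remove() above.  On success
 * vrecycle() consumes the caller's reference; on failure the caller
 * still owns it and must vrele().
 */
#if 0
	if (!vrecycle(vp))
		vrele(vp);	/* not the last reference; just drop ours */
#endif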
    890 
    891 /*
    892  * Eliminate all activity associated with the requested vnode
    893  * and with all vnodes aliased to the requested vnode.
    894  */
    895 void
    896 vrevoke(vnode_t *vp)
    897 {
    898 	vnode_t *vq;
    899 	enum vtype type;
    900 	dev_t dev;
    901 
    902 	KASSERT(vp->v_usecount > 0);
    903 
    904 	mutex_enter(vp->v_interlock);
    905 	VSTATE_WAIT_STABLE(vp);
    906 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
    907 		mutex_exit(vp->v_interlock);
    908 		return;
    909 	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
    910 		atomic_inc_uint(&vp->v_usecount);
    911 		mutex_exit(vp->v_interlock);
    912 		vgone(vp);
    913 		return;
    914 	} else {
    915 		dev = vp->v_rdev;
    916 		type = vp->v_type;
    917 		mutex_exit(vp->v_interlock);
    918 	}
    919 
    920 	while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
    921 		vgone(vq);
    922 	}
    923 }
    924 
    925 /*
    926  * Eliminate all activity associated with a vnode in preparation for
    927  * reuse.  Drops a reference from the vnode.
    928  */
    929 void
    930 vgone(vnode_t *vp)
    931 {
    932 
    933 	if (vn_lock(vp, LK_EXCLUSIVE) != 0) {
    934 		VSTATE_ASSERT(vp, VS_RECLAIMED);
     935 		vrele(vp);
         		return;
     936 	}
    937 
    938 	mutex_enter(vp->v_interlock);
    939 	vcache_reclaim(vp);
    940 	vrelel(vp, 0);
    941 }
    942 
    943 static inline uint32_t
    944 vcache_hash(const struct vcache_key *key)
    945 {
    946 	uint32_t hash = HASH32_BUF_INIT;
    947 
    948 	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
    949 	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
    950 	return hash;
    951 }
    952 
    953 static void
    954 vcache_init(void)
    955 {
    956 
    957 	vcache.pool = pool_cache_init(sizeof(vnode_impl_t), 0, 0, 0,
    958 	    "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
    959 	KASSERT(vcache.pool != NULL);
    960 	mutex_init(&vcache.lock, MUTEX_DEFAULT, IPL_NONE);
    961 	cv_init(&vcache.cv, "vcache");
    962 	vcache.hashsize = desiredvnodes;
    963 	vcache.hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
    964 	    &vcache.hashmask);
    965 }
    966 
    967 static void
    968 vcache_reinit(void)
    969 {
    970 	int i;
    971 	uint32_t hash;
    972 	u_long oldmask, newmask;
    973 	struct hashhead *oldtab, *newtab;
    974 	vnode_impl_t *node;
    975 
    976 	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
    977 	mutex_enter(&vcache.lock);
    978 	oldtab = vcache.hashtab;
    979 	oldmask = vcache.hashmask;
    980 	vcache.hashsize = desiredvnodes;
    981 	vcache.hashtab = newtab;
    982 	vcache.hashmask = newmask;
    983 	for (i = 0; i <= oldmask; i++) {
    984 		while ((node = SLIST_FIRST(&oldtab[i])) != NULL) {
    985 			SLIST_REMOVE(&oldtab[i], node, vnode_impl, vi_hash);
    986 			hash = vcache_hash(&node->vi_key);
    987 			SLIST_INSERT_HEAD(&newtab[hash & vcache.hashmask],
    988 			    node, vi_hash);
    989 		}
    990 	}
    991 	mutex_exit(&vcache.lock);
    992 	hashdone(oldtab, HASH_SLIST, oldmask);
    993 }
    994 
    995 static inline vnode_impl_t *
    996 vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
    997 {
    998 	struct hashhead *hashp;
    999 	vnode_impl_t *node;
   1000 
   1001 	KASSERT(mutex_owned(&vcache.lock));
   1002 
   1003 	hashp = &vcache.hashtab[hash & vcache.hashmask];
   1004 	SLIST_FOREACH(node, hashp, vi_hash) {
   1005 		if (key->vk_mount != node->vi_key.vk_mount)
   1006 			continue;
   1007 		if (key->vk_key_len != node->vi_key.vk_key_len)
   1008 			continue;
   1009 		if (memcmp(key->vk_key, node->vi_key.vk_key, key->vk_key_len))
   1010 			continue;
   1011 		return node;
   1012 	}
   1013 	return NULL;
   1014 }
   1015 
   1016 /*
   1017  * Allocate a new, uninitialized vcache node.
   1018  */
   1019 static vnode_impl_t *
   1020 vcache_alloc(void)
   1021 {
   1022 	vnode_impl_t *node;
   1023 	vnode_t *vp;
   1024 
   1025 	node = pool_cache_get(vcache.pool, PR_WAITOK);
   1026 	memset(node, 0, sizeof(*node));
   1027 
   1028 	/* SLIST_INIT(&node->vi_hash); */
   1029 
   1030 	vp = VIMPL_TO_VNODE(node);
   1031 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
   1032 	cv_init(&vp->v_cv, "vnode");
   1033 	/* LIST_INIT(&vp->v_nclist); */
   1034 	/* LIST_INIT(&vp->v_dnclist); */
   1035 
   1036 	rw_init(&vp->v_lock);
   1037 	vp->v_usecount = 1;
   1038 	vp->v_type = VNON;
   1039 	vp->v_size = vp->v_writesize = VSIZENOTSET;
   1040 
   1041 	node->vi_state = VS_LOADING;
   1042 
   1043 	lru_requeue(vp, &lru_free_list);
   1044 
   1045 	return node;
   1046 }
   1047 
   1048 /*
   1049  * Free an unused, unreferenced vcache node.
   1050  * v_interlock locked on entry.
   1051  */
   1052 static void
   1053 vcache_free(vnode_impl_t *node)
   1054 {
   1055 	vnode_t *vp;
   1056 
   1057 	vp = VIMPL_TO_VNODE(node);
   1058 	KASSERT(mutex_owned(vp->v_interlock));
   1059 
   1060 	KASSERT(vp->v_usecount == 0);
   1061 	KASSERT(vp->v_holdcnt == 0);
   1062 	KASSERT(vp->v_writecount == 0);
   1063 	lru_requeue(vp, NULL);
   1064 	mutex_exit(vp->v_interlock);
   1065 
   1066 	vfs_insmntque(vp, NULL);
   1067 	if (vp->v_type == VBLK || vp->v_type == VCHR)
   1068 		spec_node_destroy(vp);
   1069 
   1070 	rw_destroy(&vp->v_lock);
   1071 	uvm_obj_destroy(&vp->v_uobj, true);
   1072 	cv_destroy(&vp->v_cv);
   1073 	pool_cache_put(vcache.pool, node);
   1074 }
   1075 
   1076 /*
   1077  * Try to get an initial reference on this cached vnode.
    1078  * Returns zero on success, ENOENT if the vnode has been reclaimed and
   1079  * EBUSY if the vnode state is unstable.
   1080  *
   1081  * v_interlock locked on entry and unlocked on exit.
   1082  */
   1083 int
   1084 vcache_tryvget(vnode_t *vp)
   1085 {
   1086 	int error = 0;
   1087 
   1088 	KASSERT(mutex_owned(vp->v_interlock));
   1089 
   1090 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED))
   1091 		error = ENOENT;
   1092 	else if (__predict_false(VSTATE_GET(vp) != VS_ACTIVE))
   1093 		error = EBUSY;
   1094 	else if (vp->v_usecount == 0)
   1095 		vp->v_usecount = 1;
   1096 	else
   1097 		atomic_inc_uint(&vp->v_usecount);
   1098 
   1099 	mutex_exit(vp->v_interlock);
   1100 
   1101 	return error;
   1102 }
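
/*
 * Illustrative sketch (not part of the original file): the calling
 * convention for vcache_tryvget() -- enter with v_interlock held; the
 * routine always releases it.
 */
#if 0
	mutex_enter(vp->v_interlock);
	error = vcache_tryvget(vp);	/* v_interlock released on return */
	if (error == EBUSY) {
		/* State unstable: retry later or use vcache_vget() to wait. */
	}
#endif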
   1103 
   1104 /*
   1105  * Try to get an initial reference on this cached vnode.
    1106  * Returns zero on success and ENOENT if the vnode has been reclaimed.
   1107  * Will wait for the vnode state to be stable.
   1108  *
   1109  * v_interlock locked on entry and unlocked on exit.
   1110  */
   1111 int
   1112 vcache_vget(vnode_t *vp)
   1113 {
   1114 
   1115 	KASSERT(mutex_owned(vp->v_interlock));
   1116 
   1117 	/* Increment hold count to prevent vnode from disappearing. */
   1118 	vp->v_holdcnt++;
   1119 	VSTATE_WAIT_STABLE(vp);
   1120 	vp->v_holdcnt--;
   1121 
   1122 	/* If this was the last reference to a reclaimed vnode free it now. */
   1123 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
   1124 		if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
   1125 			vcache_free(VNODE_TO_VIMPL(vp));
   1126 		else
   1127 			mutex_exit(vp->v_interlock);
   1128 		return ENOENT;
   1129 	}
   1130 	VSTATE_ASSERT(vp, VS_ACTIVE);
   1131 	if (vp->v_usecount == 0)
   1132 		vp->v_usecount = 1;
   1133 	else
   1134 		atomic_inc_uint(&vp->v_usecount);
   1135 
   1136 	mutex_exit(vp->v_interlock);
   1137 
   1138 	return 0;
   1139 }
   1140 
   1141 /*
   1142  * Get a vnode / fs node pair by key and return it referenced through vpp.
   1143  */
   1144 int
   1145 vcache_get(struct mount *mp, const void *key, size_t key_len,
   1146     struct vnode **vpp)
   1147 {
   1148 	int error;
   1149 	uint32_t hash;
   1150 	const void *new_key;
   1151 	struct vnode *vp;
   1152 	struct vcache_key vcache_key;
   1153 	vnode_impl_t *node, *new_node;
   1154 
   1155 	new_key = NULL;
   1156 	*vpp = NULL;
   1157 
   1158 	vcache_key.vk_mount = mp;
   1159 	vcache_key.vk_key = key;
   1160 	vcache_key.vk_key_len = key_len;
   1161 	hash = vcache_hash(&vcache_key);
   1162 
   1163 again:
   1164 	mutex_enter(&vcache.lock);
   1165 	node = vcache_hash_lookup(&vcache_key, hash);
   1166 
   1167 	/* If found, take a reference or retry. */
   1168 	if (__predict_true(node != NULL)) {
   1169 		/*
   1170 		 * If the vnode is loading we cannot take the v_interlock
   1171 		 * here as it might change during load (see uvm_obj_setlock()).
   1172 		 * As changing state from VS_LOADING requires both vcache.lock
   1173 		 * and v_interlock it is safe to test with vcache.lock held.
   1174 		 *
   1175 		 * Wait for vnodes changing state from VS_LOADING and retry.
   1176 		 */
   1177 		if (__predict_false(node->vi_state == VS_LOADING)) {
   1178 			cv_wait(&vcache.cv, &vcache.lock);
   1179 			mutex_exit(&vcache.lock);
   1180 			goto again;
   1181 		}
   1182 		vp = VIMPL_TO_VNODE(node);
   1183 		mutex_enter(vp->v_interlock);
   1184 		mutex_exit(&vcache.lock);
   1185 		error = vcache_vget(vp);
   1186 		if (error == ENOENT)
   1187 			goto again;
   1188 		if (error == 0)
   1189 			*vpp = vp;
   1190 		KASSERT((error != 0) == (*vpp == NULL));
   1191 		return error;
   1192 	}
   1193 	mutex_exit(&vcache.lock);
   1194 
   1195 	/* Allocate and initialize a new vcache / vnode pair. */
   1196 	error = vfs_busy(mp, NULL);
   1197 	if (error)
   1198 		return error;
   1199 	new_node = vcache_alloc();
   1200 	new_node->vi_key = vcache_key;
   1201 	vp = VIMPL_TO_VNODE(new_node);
   1202 	mutex_enter(&vcache.lock);
   1203 	node = vcache_hash_lookup(&vcache_key, hash);
   1204 	if (node == NULL) {
   1205 		SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
   1206 		    new_node, vi_hash);
   1207 		node = new_node;
   1208 	}
   1209 
   1210 	/* If another thread beat us inserting this node, retry. */
   1211 	if (node != new_node) {
   1212 		mutex_enter(vp->v_interlock);
   1213 		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
   1214 		mutex_exit(&vcache.lock);
   1215 		vrelel(vp, 0);
   1216 		vfs_unbusy(mp, false, NULL);
   1217 		goto again;
   1218 	}
   1219 	mutex_exit(&vcache.lock);
   1220 
   1221 	/* Load the fs node.  Exclusive as new_node is VS_LOADING. */
   1222 	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
   1223 	if (error) {
   1224 		mutex_enter(&vcache.lock);
   1225 		SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
   1226 		    new_node, vnode_impl, vi_hash);
   1227 		mutex_enter(vp->v_interlock);
   1228 		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
   1229 		mutex_exit(&vcache.lock);
   1230 		vrelel(vp, 0);
   1231 		vfs_unbusy(mp, false, NULL);
   1232 		KASSERT(*vpp == NULL);
   1233 		return error;
   1234 	}
   1235 	KASSERT(new_key != NULL);
   1236 	KASSERT(memcmp(key, new_key, key_len) == 0);
   1237 	KASSERT(vp->v_op != NULL);
   1238 	vfs_insmntque(vp, mp);
   1239 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
   1240 		vp->v_vflag |= VV_MPSAFE;
   1241 	vfs_unbusy(mp, true, NULL);
   1242 
   1243 	/* Finished loading, finalize node. */
   1244 	mutex_enter(&vcache.lock);
   1245 	new_node->vi_key.vk_key = new_key;
   1246 	mutex_enter(vp->v_interlock);
   1247 	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
   1248 	mutex_exit(vp->v_interlock);
   1249 	mutex_exit(&vcache.lock);
   1250 	*vpp = vp;
   1251 	return 0;
   1252 }
   1253 
   1254 /*
   1255  * Create a new vnode / fs node pair and return it referenced through vpp.
   1256  */
   1257 int
   1258 vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
   1259     kauth_cred_t cred, struct vnode **vpp)
   1260 {
   1261 	int error;
   1262 	uint32_t hash;
   1263 	struct vnode *ovp, *vp;
   1264 	vnode_impl_t *new_node;
   1265 	vnode_impl_t *old_node __diagused;
   1266 
   1267 	*vpp = NULL;
   1268 
   1269 	/* Allocate and initialize a new vcache / vnode pair. */
   1270 	error = vfs_busy(mp, NULL);
   1271 	if (error)
   1272 		return error;
   1273 	new_node = vcache_alloc();
   1274 	new_node->vi_key.vk_mount = mp;
   1275 	vp = VIMPL_TO_VNODE(new_node);
   1276 
   1277 	/* Create and load the fs node. */
   1278 	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred,
   1279 	    &new_node->vi_key.vk_key_len, &new_node->vi_key.vk_key);
   1280 	if (error) {
   1281 		mutex_enter(&vcache.lock);
   1282 		mutex_enter(vp->v_interlock);
   1283 		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
   1284 		mutex_exit(&vcache.lock);
   1285 		vrelel(vp, 0);
   1286 		vfs_unbusy(mp, false, NULL);
   1287 		KASSERT(*vpp == NULL);
   1288 		return error;
   1289 	}
   1290 	KASSERT(new_node->vi_key.vk_key != NULL);
   1291 	KASSERT(vp->v_op != NULL);
   1292 	hash = vcache_hash(&new_node->vi_key);
   1293 
   1294 	/* Wait for previous instance to be reclaimed, then insert new node. */
   1295 	mutex_enter(&vcache.lock);
   1296 	while ((old_node = vcache_hash_lookup(&new_node->vi_key, hash))) {
   1297 		ovp = VIMPL_TO_VNODE(old_node);
   1298 		mutex_enter(ovp->v_interlock);
   1299 		mutex_exit(&vcache.lock);
   1300 		error = vcache_vget(ovp);
   1301 		KASSERT(error == ENOENT);
   1302 		mutex_enter(&vcache.lock);
   1303 	}
   1304 	SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
   1305 	    new_node, vi_hash);
   1306 	mutex_exit(&vcache.lock);
   1307 	vfs_insmntque(vp, mp);
   1308 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
   1309 		vp->v_vflag |= VV_MPSAFE;
   1310 	vfs_unbusy(mp, true, NULL);
   1311 
   1312 	/* Finished loading, finalize node. */
   1313 	mutex_enter(&vcache.lock);
   1314 	mutex_enter(vp->v_interlock);
   1315 	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
   1316 	mutex_exit(&vcache.lock);
   1317 	mutex_exit(vp->v_interlock);
   1318 	*vpp = vp;
   1319 	return 0;
   1320 }
   1321 
   1322 /*
    1323  * Prepare key change: update the old cache node's key and lock the new
    1324  * cache node.  Return an error if the new node already exists.
   1325  */
   1326 int
   1327 vcache_rekey_enter(struct mount *mp, struct vnode *vp,
   1328     const void *old_key, size_t old_key_len,
   1329     const void *new_key, size_t new_key_len)
   1330 {
   1331 	uint32_t old_hash, new_hash;
   1332 	struct vcache_key old_vcache_key, new_vcache_key;
   1333 	vnode_impl_t *node, *new_node;
   1334 	struct vnode *tvp;
   1335 
   1336 	old_vcache_key.vk_mount = mp;
   1337 	old_vcache_key.vk_key = old_key;
   1338 	old_vcache_key.vk_key_len = old_key_len;
   1339 	old_hash = vcache_hash(&old_vcache_key);
   1340 
   1341 	new_vcache_key.vk_mount = mp;
   1342 	new_vcache_key.vk_key = new_key;
   1343 	new_vcache_key.vk_key_len = new_key_len;
   1344 	new_hash = vcache_hash(&new_vcache_key);
   1345 
   1346 	new_node = vcache_alloc();
   1347 	new_node->vi_key = new_vcache_key;
   1348 	tvp = VIMPL_TO_VNODE(new_node);
   1349 
   1350 	/* Insert locked new node used as placeholder. */
   1351 	mutex_enter(&vcache.lock);
   1352 	node = vcache_hash_lookup(&new_vcache_key, new_hash);
   1353 	if (node != NULL) {
   1354 		mutex_enter(tvp->v_interlock);
   1355 		VSTATE_CHANGE(tvp, VS_LOADING, VS_RECLAIMED);
   1356 		mutex_exit(&vcache.lock);
   1357 		vrelel(tvp, 0);
   1358 		return EEXIST;
   1359 	}
   1360 	SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
   1361 	    new_node, vi_hash);
   1362 
    1363 	/* Replace the old node's key with the temporary copy. */
   1364 	node = vcache_hash_lookup(&old_vcache_key, old_hash);
   1365 	KASSERT(node != NULL);
   1366 	KASSERT(VIMPL_TO_VNODE(node) == vp);
   1367 	KASSERT(node->vi_key.vk_key != old_vcache_key.vk_key);
   1368 	node->vi_key = old_vcache_key;
   1369 	mutex_exit(&vcache.lock);
   1370 	return 0;
   1371 }
   1372 
   1373 /*
   1374  * Key change complete: update old node and remove placeholder.
   1375  */
   1376 void
   1377 vcache_rekey_exit(struct mount *mp, struct vnode *vp,
   1378     const void *old_key, size_t old_key_len,
   1379     const void *new_key, size_t new_key_len)
   1380 {
   1381 	uint32_t old_hash, new_hash;
   1382 	struct vcache_key old_vcache_key, new_vcache_key;
   1383 	vnode_impl_t *old_node, *new_node;
   1384 	struct vnode *tvp;
   1385 
   1386 	old_vcache_key.vk_mount = mp;
   1387 	old_vcache_key.vk_key = old_key;
   1388 	old_vcache_key.vk_key_len = old_key_len;
   1389 	old_hash = vcache_hash(&old_vcache_key);
   1390 
   1391 	new_vcache_key.vk_mount = mp;
   1392 	new_vcache_key.vk_key = new_key;
   1393 	new_vcache_key.vk_key_len = new_key_len;
   1394 	new_hash = vcache_hash(&new_vcache_key);
   1395 
   1396 	mutex_enter(&vcache.lock);
   1397 
   1398 	/* Lookup old and new node. */
   1399 	old_node = vcache_hash_lookup(&old_vcache_key, old_hash);
   1400 	KASSERT(old_node != NULL);
   1401 	KASSERT(VIMPL_TO_VNODE(old_node) == vp);
   1402 
   1403 	new_node = vcache_hash_lookup(&new_vcache_key, new_hash);
   1404 	KASSERT(new_node != NULL);
   1405 	KASSERT(new_node->vi_key.vk_key_len == new_key_len);
   1406 	tvp = VIMPL_TO_VNODE(new_node);
   1407 	mutex_enter(tvp->v_interlock);
   1408 	VSTATE_ASSERT(VIMPL_TO_VNODE(new_node), VS_LOADING);
   1409 
    1410 	/* Rekey the old node and put it onto its new hash list. */
   1411 	old_node->vi_key = new_vcache_key;
   1412 	if (old_hash != new_hash) {
   1413 		SLIST_REMOVE(&vcache.hashtab[old_hash & vcache.hashmask],
   1414 		    old_node, vnode_impl, vi_hash);
   1415 		SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
   1416 		    old_node, vi_hash);
   1417 	}
   1418 
   1419 	/* Remove new node used as placeholder. */
   1420 	SLIST_REMOVE(&vcache.hashtab[new_hash & vcache.hashmask],
   1421 	    new_node, vnode_impl, vi_hash);
   1422 	VSTATE_CHANGE(tvp, VS_LOADING, VS_RECLAIMED);
   1423 	mutex_exit(&vcache.lock);
   1424 	vrelel(tvp, 0);
   1425 }
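
/*
 * Illustrative sketch (not part of the original file): the intended
 * two-phase rekey protocol for a file system whose node key changes;
 * old_key and new_key stand in for fs-specific key variables.
 */
#if 0
	error = vcache_rekey_enter(mp, vp, &old_key, sizeof(old_key),
	    &new_key, sizeof(new_key));
	if (error != 0)
		return error;	/* EEXIST: the new key is already cached */
	/* ... perform the fs-specific key change ... */
	vcache_rekey_exit(mp, vp, &old_key, sizeof(old_key),
	    &new_key, sizeof(new_key));
#endif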
   1426 
   1427 /*
   1428  * Disassociate the underlying file system from a vnode.
   1429  *
   1430  * Must be called with vnode locked and will return unlocked.
   1431  * Must be called with the interlock held, and will return with it held.
   1432  */
   1433 static void
   1434 vcache_reclaim(vnode_t *vp)
   1435 {
   1436 	lwp_t *l = curlwp;
   1437 	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
   1438 	uint32_t hash;
   1439 	uint8_t temp_buf[64], *temp_key;
   1440 	size_t temp_key_len;
   1441 	bool recycle, active;
   1442 	int error;
   1443 
   1444 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
   1445 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
   1446 	KASSERT(mutex_owned(vp->v_interlock));
   1447 	KASSERT(vp->v_usecount != 0);
   1448 
   1449 	active = (vp->v_usecount > 1);
   1450 	temp_key_len = node->vi_key.vk_key_len;
   1451 	/*
   1452 	 * Prevent the vnode from being recycled or brought into use
   1453 	 * while we clean it out.
   1454 	 */
   1455 	VSTATE_CHANGE(vp, VS_ACTIVE, VS_RECLAIMING);
   1456 	if (vp->v_iflag & VI_EXECMAP) {
   1457 		atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
   1458 		atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
   1459 	}
   1460 	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
   1461 	mutex_exit(vp->v_interlock);
   1462 
   1463 	/* Replace the vnode key with a temporary copy. */
   1464 	if (node->vi_key.vk_key_len > sizeof(temp_buf)) {
   1465 		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
   1466 	} else {
   1467 		temp_key = temp_buf;
   1468 	}
   1469 	mutex_enter(&vcache.lock);
   1470 	memcpy(temp_key, node->vi_key.vk_key, temp_key_len);
   1471 	node->vi_key.vk_key = temp_key;
   1472 	mutex_exit(&vcache.lock);
   1473 
   1474 	/*
   1475 	 * Clean out any cached data associated with the vnode.
   1476 	 * If purging an active vnode, it must be closed and
   1477 	 * deactivated before being reclaimed.
   1478 	 */
   1479 	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
   1480 	if (error != 0) {
   1481 		if (wapbl_vphaswapbl(vp))
   1482 			WAPBL_DISCARD(wapbl_vptomp(vp));
   1483 		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
   1484 	}
   1485 	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
   1486 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1487 	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
    1488 		spec_node_revoke(vp);
   1489 	}
   1490 
   1491 	/*
   1492 	 * Disassociate the underlying file system from the vnode.
    1493 	 * Note that VOP_INACTIVE() will unlock the vnode.
   1494 	 */
   1495 	VOP_INACTIVE(vp, &recycle);
   1496 	if (VOP_RECLAIM(vp)) {
   1497 		vnpanic(vp, "%s: cannot reclaim", __func__);
   1498 	}
   1499 
   1500 	KASSERT(vp->v_data == NULL);
   1501 	KASSERT(vp->v_uobj.uo_npages == 0);
   1502 
   1503 	if (vp->v_type == VREG && vp->v_ractx != NULL) {
   1504 		uvm_ra_freectx(vp->v_ractx);
   1505 		vp->v_ractx = NULL;
   1506 	}
   1507 
   1508 	/* Purge name cache. */
   1509 	cache_purge(vp);
   1510 
   1511 	/* Move to dead mount. */
   1512 	vp->v_vflag &= ~VV_ROOT;
   1513 	atomic_inc_uint(&dead_rootmount->mnt_refcnt);
   1514 	vfs_insmntque(vp, dead_rootmount);
   1515 
   1516 	/* Remove from vnode cache. */
   1517 	hash = vcache_hash(&node->vi_key);
   1518 	mutex_enter(&vcache.lock);
   1519 	KASSERT(node == vcache_hash_lookup(&node->vi_key, hash));
   1520 	SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
   1521 	    node, vnode_impl, vi_hash);
   1522 	mutex_exit(&vcache.lock);
   1523 	if (temp_key != temp_buf)
   1524 		kmem_free(temp_key, temp_key_len);
   1525 
   1526 	/* Done with purge, notify sleepers of the grim news. */
   1527 	mutex_enter(vp->v_interlock);
   1528 	vp->v_op = dead_vnodeop_p;
   1529 	vp->v_vflag |= VV_LOCKSWORK;
   1530 	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
   1531 	vp->v_tag = VT_NON;
   1532 	KNOTE(&vp->v_klist, NOTE_REVOKE);
   1533 
   1534 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1535 }
   1536 
   1537 /*
   1538  * Update outstanding I/O count and do wakeup if requested.
   1539  */
   1540 void
   1541 vwakeup(struct buf *bp)
   1542 {
   1543 	vnode_t *vp;
   1544 
   1545 	if ((vp = bp->b_vp) == NULL)
   1546 		return;
   1547 
   1548 	KASSERT(bp->b_objlock == vp->v_interlock);
   1549 	KASSERT(mutex_owned(bp->b_objlock));
   1550 
   1551 	if (--vp->v_numoutput < 0)
   1552 		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
   1553 	if (vp->v_numoutput == 0)
   1554 		cv_broadcast(&vp->v_cv);
   1555 }
   1556 
   1557 /*
   1558  * Test a vnode for being or becoming dead.  Returns one of:
   1559  * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
   1560  * ENOENT: vnode is dead.
   1561  * 0:      otherwise.
   1562  *
   1563  * Whenever this function returns a non-zero value all future
   1564  * calls will also return a non-zero value.
   1565  */
   1566 int
   1567 vdead_check(struct vnode *vp, int flags)
   1568 {
   1569 
   1570 	KASSERT(mutex_owned(vp->v_interlock));
   1571 
   1572 	if (! ISSET(flags, VDEAD_NOWAIT))
   1573 		VSTATE_WAIT_STABLE(vp);
   1574 
   1575 	if (VSTATE_GET(vp) == VS_RECLAIMING) {
   1576 		KASSERT(ISSET(flags, VDEAD_NOWAIT));
   1577 		return EBUSY;
   1578 	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
   1579 		return ENOENT;
   1580 	}
   1581 
   1582 	return 0;
   1583 }
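
/*
 * Illustrative sketch (not part of the original file): probing for a
 * dying vnode without sleeping.
 */
#if 0
	mutex_enter(vp->v_interlock);
	error = vdead_check(vp, VDEAD_NOWAIT);
	mutex_exit(vp->v_interlock);
	if (error == EBUSY) {
		/* Becoming dead: back off and retry later. */
	} else if (error == ENOENT) {
		/* Dead: give up on this vnode. */
	}
#endif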
   1584 
   1585 int
   1586 vfs_drainvnodes(void)
   1587 {
   1588 	int i, gen;
   1589 
   1590 	mutex_enter(&vdrain_lock);
   1591 	for (i = 0; i < 2; i++) {
   1592 		gen = vdrain_gen;
   1593 		while (gen == vdrain_gen) {
   1594 			cv_broadcast(&vdrain_cv);
   1595 			cv_wait(&vdrain_gen_cv, &vdrain_lock);
   1596 		}
   1597 	}
   1598 	mutex_exit(&vdrain_lock);
   1599 
   1600 	if (numvnodes >= desiredvnodes)
   1601 		return EBUSY;
   1602 
   1603 	if (vcache.hashsize != desiredvnodes)
   1604 		vcache_reinit();
   1605 
   1606 	return 0;
   1607 }
   1608 
   1609 void
   1610 vnpanic(vnode_t *vp, const char *fmt, ...)
   1611 {
   1612 	va_list ap;
   1613 
   1614 #ifdef DIAGNOSTIC
   1615 	vprint(NULL, vp);
   1616 #endif
   1617 	va_start(ap, fmt);
   1618 	vpanic(fmt, ap);
   1619 	va_end(ap);
   1620 }
   1621