vfs_vnode.c revision 1.68
      1  1.68   hannken /*	$NetBSD: vfs_vnode.c,v 1.68 2017/01/02 10:36:58 hannken Exp $	*/
      2   1.1     rmind 
      3   1.1     rmind /*-
      4   1.2     rmind  * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
      5   1.1     rmind  * All rights reserved.
      6   1.1     rmind  *
      7   1.1     rmind  * This code is derived from software contributed to The NetBSD Foundation
      8   1.1     rmind  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9   1.1     rmind  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
     10   1.1     rmind  *
     11   1.1     rmind  * Redistribution and use in source and binary forms, with or without
     12   1.1     rmind  * modification, are permitted provided that the following conditions
     13   1.1     rmind  * are met:
     14   1.1     rmind  * 1. Redistributions of source code must retain the above copyright
     15   1.1     rmind  *    notice, this list of conditions and the following disclaimer.
     16   1.1     rmind  * 2. Redistributions in binary form must reproduce the above copyright
     17   1.1     rmind  *    notice, this list of conditions and the following disclaimer in the
     18   1.1     rmind  *    documentation and/or other materials provided with the distribution.
     19   1.1     rmind  *
     20   1.1     rmind  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21   1.1     rmind  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22   1.1     rmind  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23   1.1     rmind  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24   1.1     rmind  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25   1.1     rmind  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26   1.1     rmind  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27   1.1     rmind  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28   1.1     rmind  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29   1.1     rmind  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30   1.1     rmind  * POSSIBILITY OF SUCH DAMAGE.
     31   1.1     rmind  */
     32   1.1     rmind 
     33   1.1     rmind /*
     34   1.1     rmind  * Copyright (c) 1989, 1993
     35   1.1     rmind  *	The Regents of the University of California.  All rights reserved.
     36   1.1     rmind  * (c) UNIX System Laboratories, Inc.
     37   1.1     rmind  * All or some portions of this file are derived from material licensed
     38   1.1     rmind  * to the University of California by American Telephone and Telegraph
     39   1.1     rmind  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     40   1.1     rmind  * the permission of UNIX System Laboratories, Inc.
     41   1.1     rmind  *
     42   1.1     rmind  * Redistribution and use in source and binary forms, with or without
     43   1.1     rmind  * modification, are permitted provided that the following conditions
     44   1.1     rmind  * are met:
     45   1.1     rmind  * 1. Redistributions of source code must retain the above copyright
     46   1.1     rmind  *    notice, this list of conditions and the following disclaimer.
     47   1.1     rmind  * 2. Redistributions in binary form must reproduce the above copyright
     48   1.1     rmind  *    notice, this list of conditions and the following disclaimer in the
     49   1.1     rmind  *    documentation and/or other materials provided with the distribution.
     50   1.1     rmind  * 3. Neither the name of the University nor the names of its contributors
     51   1.1     rmind  *    may be used to endorse or promote products derived from this software
     52   1.1     rmind  *    without specific prior written permission.
     53   1.1     rmind  *
     54   1.1     rmind  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     55   1.1     rmind  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     56   1.1     rmind  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     57   1.1     rmind  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     58   1.1     rmind  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     59   1.1     rmind  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     60   1.1     rmind  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     61   1.1     rmind  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     62   1.1     rmind  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     63   1.1     rmind  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     64   1.1     rmind  * SUCH DAMAGE.
     65   1.1     rmind  *
     66   1.1     rmind  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
     67   1.1     rmind  */
     68   1.1     rmind 
     69   1.1     rmind /*
     70   1.8     rmind  * The vnode cache subsystem.
     71   1.1     rmind  *
     72   1.8     rmind  * Life-cycle
     73   1.1     rmind  *
     74   1.8     rmind  *	Normally, there are two points where new vnodes are created:
     75   1.8     rmind  *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
     76   1.8     rmind  *	starts in one of the following ways:
     77   1.8     rmind  *
     78  1.45   hannken  *	- Allocation, via vcache_get(9) or vcache_new(9).
      79  1.66   hannken  *	- Reclamation of an inactive vnode, via vcache_vget(9).
     80   1.8     rmind  *
      81  1.16     rmind  *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
      82  1.16     rmind  *	was another, traditional way.  Currently, only the draining thread
      83  1.16     rmind  *	recycles vnodes.  This behaviour might be revisited.
     84  1.16     rmind  *
      85   1.8     rmind  *	The life-cycle ends when the last reference is dropped, usually in
      86   1.8     rmind  *	VOP_REMOVE(9).  In such a case, VOP_INACTIVE(9) is called to inform
      87   1.8     rmind  *	the file system that the vnode is inactive.  Via this call, the file
      88  1.16     rmind  *	system indicates whether the vnode can be recycled (usually, it checks
      89  1.16     rmind  *	its own references, e.g. the link count, whether the file was removed).
      90   1.8     rmind  *
      91   1.8     rmind  *	Depending on this indication, the vnode can be put onto a free list
      92  1.54   hannken  *	(cache), or cleaned via vcache_reclaim(), which calls VOP_RECLAIM(9) to
      93  1.54   hannken  *	disassociate the underlying file system from the vnode, and finally
      94  1.54   hannken  *	destroyed.
     95   1.8     rmind  *
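                          *	For illustration only, a sketch of how a file system might obtain
                          *	and release a vnode through this cycle, assuming "mp" is its mount
                          *	point and "key" is its opaque cache key (hypothetical names):
                          *
                          *		struct vnode *vp;
                          *		int error;
                          *
                          *		error = vcache_get(mp, &key, sizeof(key), &vp);
                          *		if (error)
                          *			return error;
                          *		error = vn_lock(vp, LK_EXCLUSIVE);
                          *		if (error) {
                          *			vrele(vp);
                          *			return error;
                          *		}
                          *		... use the vnode ...
                          *		vput(vp);	unlocks and drops the reference
                          *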
     96  1.52   hannken  * Vnode state
     97  1.52   hannken  *
     98  1.52   hannken  *	Vnode is always in one of six states:
     99  1.52   hannken  *	- MARKER	This is a marker vnode to help list traversal.  It
    100  1.52   hannken  *			will never change its state.
     101  1.52   hannken  *	- LOADING	Vnode is associating with the underlying file system
     102  1.52   hannken  *			and is not yet ready to use.
     103  1.52   hannken  *	- ACTIVE	Vnode has an associated underlying file system and is
     104  1.52   hannken  *			ready to use.
    105  1.52   hannken  *	- BLOCKED	Vnode is active but cannot get new references.
    106  1.52   hannken  *	- RECLAIMING	Vnode is disassociating from the underlying file
    107  1.52   hannken  *			system.
     108  1.52   hannken  *	- RECLAIMED	Vnode has been disassociated from the underlying file
     109  1.52   hannken  *			system and is dead.
    110  1.52   hannken  *
    111  1.52   hannken  *	Valid state changes are:
    112  1.52   hannken  *	LOADING -> ACTIVE
    113  1.52   hannken  *			Vnode has been initialised in vcache_get() or
    114  1.52   hannken  *			vcache_new() and is ready to use.
    115  1.52   hannken  *	ACTIVE -> RECLAIMING
     116  1.52   hannken  *			Vnode starts disassociation from the underlying file
     117  1.54   hannken  *			system in vcache_reclaim().
    118  1.52   hannken  *	RECLAIMING -> RECLAIMED
     119  1.52   hannken  *			Vnode has finished disassociation from the underlying
     120  1.54   hannken  *			file system in vcache_reclaim().
    121  1.52   hannken  *	ACTIVE -> BLOCKED
    122  1.52   hannken  *			Either vcache_rekey*() is changing the vnode key or
    123  1.52   hannken  *			vrelel() is about to call VOP_INACTIVE().
    124  1.52   hannken  *	BLOCKED -> ACTIVE
    125  1.52   hannken  *			The block condition is over.
    126  1.52   hannken  *	LOADING -> RECLAIMED
     127  1.52   hannken  *			Either vcache_get() or vcache_new() failed to
     128  1.52   hannken  *			associate the underlying file system, or vcache_rekey*()
     129  1.52   hannken  *			drops a vnode used as a placeholder.
    130  1.52   hannken  *
     131  1.52   hannken  *	Of these states, LOADING, BLOCKED and RECLAIMING are intermediate,
     132  1.52   hannken  *	and it is possible to wait for a state change.
    133  1.52   hannken  *
     134  1.52   hannken  *	State is protected with v_interlock, with one exception: to change
     135  1.52   hannken  *	from LOADING both v_interlock and vcache.lock must be held, so it
     136  1.52   hannken  *	is possible to check "state == LOADING" while holding only
     137  1.52   hannken  *	vcache.lock and not v_interlock.  See vcache_get() for details.
    138  1.52   hannken  *
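                          *	As a sketch, the wait-for-a-stable-state pattern (compare
                          *	vcache_vget() below):
                          *
                          *		mutex_enter(vp->v_interlock);
                          *		VSTATE_WAIT_STABLE(vp);
                          *		if (VSTATE_GET(vp) == VS_RECLAIMED) {
                          *			mutex_exit(vp->v_interlock);
                          *			return ENOENT;		the vnode is dead
                          *		}
                          *		VSTATE_ASSERT(vp, VS_ACTIVE);
                          *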
    139   1.8     rmind  * Reference counting
    140   1.8     rmind  *
     141   1.8     rmind  *	A vnode is considered active if its reference count (vnode_t::v_usecount)
     142   1.8     rmind  *	is non-zero.  It is maintained using the vref(9) and vrele(9) routines,
     143   1.8     rmind  *	as well as vput(9).  Common points holding references are, e.g.,
     144   1.8     rmind  *	open files, the current working directory, mount points, etc.
    145   1.8     rmind  *
    146   1.8     rmind  * Note on v_usecount and its locking
    147   1.8     rmind  *
     148   1.8     rmind  *	At nearly all points where it is known that v_usecount could be zero,
     149   1.8     rmind  *	vnode_t::v_interlock will be held.  To change v_usecount away
    150   1.8     rmind  *	from zero, the interlock must be held.  To change from a non-zero
    151   1.8     rmind  *	value to zero, again the interlock must be held.
    152   1.8     rmind  *
    153  1.24   hannken  *	Changing the usecount from a non-zero value to a non-zero value can
    154  1.24   hannken  *	safely be done using atomic operations, without the interlock held.
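                          *
                          *	A small sketch of both cases, mirroring vref() and vcache_tryvget()
                          *	below:
                          *
                          *		KASSERT(vp->v_usecount != 0);		non-zero to non-zero:
                          *		atomic_inc_uint(&vp->v_usecount);	atomic is enough
                          *
                          *		mutex_enter(vp->v_interlock);		zero to non-zero:
                          *		if (vp->v_usecount == 0)		the interlock is needed
                          *			vp->v_usecount = 1;
                          *		else
                          *			atomic_inc_uint(&vp->v_usecount);
                          *		mutex_exit(vp->v_interlock);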
    155   1.8     rmind  *
    156   1.1     rmind  */
    157   1.1     rmind 
    158   1.1     rmind #include <sys/cdefs.h>
    159  1.68   hannken __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.68 2017/01/02 10:36:58 hannken Exp $");
    160   1.1     rmind 
    161   1.1     rmind #include <sys/param.h>
    162   1.1     rmind #include <sys/kernel.h>
    163   1.1     rmind 
    164   1.1     rmind #include <sys/atomic.h>
    165   1.1     rmind #include <sys/buf.h>
    166   1.1     rmind #include <sys/conf.h>
    167   1.1     rmind #include <sys/device.h>
    168  1.36   hannken #include <sys/hash.h>
    169   1.1     rmind #include <sys/kauth.h>
    170   1.1     rmind #include <sys/kmem.h>
    171   1.1     rmind #include <sys/kthread.h>
    172   1.1     rmind #include <sys/module.h>
    173   1.1     rmind #include <sys/mount.h>
    174   1.1     rmind #include <sys/namei.h>
    175   1.1     rmind #include <sys/syscallargs.h>
    176   1.1     rmind #include <sys/sysctl.h>
    177   1.1     rmind #include <sys/systm.h>
    178  1.58   hannken #include <sys/vnode_impl.h>
    179   1.1     rmind #include <sys/wapbl.h>
    180  1.24   hannken #include <sys/fstrans.h>
    181   1.1     rmind 
    182   1.1     rmind #include <uvm/uvm.h>
    183   1.1     rmind #include <uvm/uvm_readahead.h>
    184   1.1     rmind 
    185  1.23   hannken /* Flags to vrelel. */
    186  1.23   hannken #define	VRELEL_ASYNC_RELE	0x0001	/* Always defer to vrele thread. */
    187  1.23   hannken 
    188   1.6     rmind u_int			numvnodes		__cacheline_aligned;
    189   1.1     rmind 
    190  1.16     rmind /*
    191  1.63   hannken  * There are three lru lists: one holds vnodes waiting for async release,
    192  1.63   hannken  * one is for vnodes which have no buffer/page references and
    193  1.63   hannken  * one for those which do (i.e. v_holdcnt is non-zero).
    194  1.63   hannken  */
    195  1.63   hannken static vnodelst_t	lru_vrele_list		__cacheline_aligned;
    196  1.63   hannken static vnodelst_t	lru_free_list		__cacheline_aligned;
    197  1.63   hannken static vnodelst_t	lru_hold_list		__cacheline_aligned;
    198  1.63   hannken static kmutex_t		vdrain_lock		__cacheline_aligned;
    199  1.16     rmind static kcondvar_t	vdrain_cv		__cacheline_aligned;
    200  1.63   hannken static int		vdrain_gen;
    201  1.63   hannken static kcondvar_t	vdrain_gen_cv;
    202  1.63   hannken static bool		vdrain_retry;
    203  1.63   hannken static lwp_t *		vdrain_lwp;
    204  1.57   hannken SLIST_HEAD(hashhead, vnode_impl);
    205  1.36   hannken static struct {
    206  1.36   hannken 	kmutex_t	lock;
    207  1.51   hannken 	kcondvar_t	cv;
    208  1.61   hannken 	u_int		hashsize;
    209  1.36   hannken 	u_long		hashmask;
    210  1.38      matt 	struct hashhead	*hashtab;
    211  1.36   hannken 	pool_cache_t	pool;
    212  1.36   hannken }			vcache			__cacheline_aligned;
    213  1.36   hannken 
    214  1.63   hannken static void		lru_requeue(vnode_t *, vnodelst_t *);
    215  1.63   hannken static vnodelst_t *	lru_which(vnode_t *);
    216  1.63   hannken static vnode_impl_t *	vcache_alloc(void);
    217  1.57   hannken static void		vcache_free(vnode_impl_t *);
    218  1.36   hannken static void		vcache_init(void);
    219  1.36   hannken static void		vcache_reinit(void);
    220  1.54   hannken static void		vcache_reclaim(vnode_t *);
    221  1.23   hannken static void		vrelel(vnode_t *, int);
    222  1.12   hannken static void		vdrain_thread(void *);
    223  1.11  christos static void		vnpanic(vnode_t *, const char *, ...)
    224  1.18  christos     __printflike(2, 3);
    225   1.1     rmind 
    226   1.1     rmind /* Routines having to do with the management of the vnode table. */
    227  1.44   hannken extern struct mount	*dead_rootmount;
    228   1.1     rmind extern int		(**dead_vnodeop_p)(void *);
    229  1.31   hannken extern struct vfsops	dead_vfsops;
    230   1.1     rmind 
    231  1.51   hannken /* Vnode state operations and diagnostics. */
    232  1.51   hannken 
    233  1.51   hannken #if defined(DIAGNOSTIC)
    234  1.51   hannken 
    235  1.51   hannken #define VSTATE_GET(vp) \
    236  1.51   hannken 	vstate_assert_get((vp), __func__, __LINE__)
    237  1.51   hannken #define VSTATE_CHANGE(vp, from, to) \
    238  1.51   hannken 	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
    239  1.51   hannken #define VSTATE_WAIT_STABLE(vp) \
    240  1.51   hannken 	vstate_assert_wait_stable((vp), __func__, __LINE__)
    241  1.51   hannken #define VSTATE_ASSERT(vp, state) \
    242  1.51   hannken 	vstate_assert((vp), (state), __func__, __LINE__)
    243  1.51   hannken 
    244  1.52   hannken static void
    245  1.57   hannken vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line)
    246  1.51   hannken {
    247  1.57   hannken 	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
    248  1.51   hannken 
    249  1.51   hannken 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    250  1.51   hannken 
    251  1.57   hannken 	if (__predict_true(node->vi_state == state))
    252  1.51   hannken 		return;
    253  1.51   hannken 	vnpanic(vp, "state is %s, expected %s at %s:%d",
    254  1.57   hannken 	    vstate_name(node->vi_state), vstate_name(state), func, line);
    255  1.51   hannken }
    256  1.51   hannken 
    257  1.57   hannken static enum vnode_state
    258  1.51   hannken vstate_assert_get(vnode_t *vp, const char *func, int line)
    259  1.51   hannken {
    260  1.57   hannken 	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
    261  1.51   hannken 
    262  1.51   hannken 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    263  1.57   hannken 	if (node->vi_state == VS_MARKER)
    264  1.51   hannken 		vnpanic(vp, "state is %s at %s:%d",
    265  1.57   hannken 		    vstate_name(node->vi_state), func, line);
    266  1.51   hannken 
    267  1.57   hannken 	return node->vi_state;
    268  1.51   hannken }
    269  1.51   hannken 
    270  1.52   hannken static void
    271  1.51   hannken vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
    272  1.51   hannken {
    273  1.57   hannken 	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
    274  1.51   hannken 
    275  1.51   hannken 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    276  1.57   hannken 	if (node->vi_state == VS_MARKER)
    277  1.51   hannken 		vnpanic(vp, "state is %s at %s:%d",
    278  1.57   hannken 		    vstate_name(node->vi_state), func, line);
    279  1.51   hannken 
    280  1.57   hannken 	while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
    281  1.51   hannken 		cv_wait(&vp->v_cv, vp->v_interlock);
    282  1.51   hannken 
    283  1.57   hannken 	if (node->vi_state == VS_MARKER)
    284  1.51   hannken 		vnpanic(vp, "state is %s at %s:%d",
    285  1.57   hannken 		    vstate_name(node->vi_state), func, line);
    286  1.51   hannken }
    287  1.51   hannken 
    288  1.52   hannken static void
    289  1.57   hannken vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    290  1.51   hannken     const char *func, int line)
    291  1.51   hannken {
    292  1.57   hannken 	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
    293  1.51   hannken 
    294  1.51   hannken 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    295  1.57   hannken 	if (from == VS_LOADING)
    296  1.51   hannken 		KASSERTMSG(mutex_owned(&vcache.lock), "at %s:%d", func, line);
    297  1.51   hannken 
    298  1.57   hannken 	if (from == VS_MARKER)
    299  1.51   hannken 		vnpanic(vp, "from is %s at %s:%d",
    300  1.51   hannken 		    vstate_name(from), func, line);
    301  1.57   hannken 	if (to == VS_MARKER)
    302  1.51   hannken 		vnpanic(vp, "to is %s at %s:%d",
    303  1.51   hannken 		    vstate_name(to), func, line);
    304  1.57   hannken 	if (node->vi_state != from)
    305  1.51   hannken 		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
    306  1.57   hannken 		    vstate_name(node->vi_state), vstate_name(from), func, line);
    307  1.68   hannken 	if ((from == VS_BLOCKED || to == VS_BLOCKED) && vp->v_usecount != 1)
    308  1.68   hannken 		vnpanic(vp, "%s to %s with usecount %d at %s:%d",
    309  1.68   hannken 		    vstate_name(from), vstate_name(to), vp->v_usecount,
    310  1.68   hannken 		    func, line);
    311  1.51   hannken 
    312  1.57   hannken 	node->vi_state = to;
    313  1.57   hannken 	if (from == VS_LOADING)
    314  1.51   hannken 		cv_broadcast(&vcache.cv);
    315  1.57   hannken 	if (to == VS_ACTIVE || to == VS_RECLAIMED)
    316  1.51   hannken 		cv_broadcast(&vp->v_cv);
    317  1.51   hannken }
    318  1.51   hannken 
    319  1.51   hannken #else /* defined(DIAGNOSTIC) */
    320  1.51   hannken 
    321  1.51   hannken #define VSTATE_GET(vp) \
    322  1.57   hannken 	(VNODE_TO_VIMPL((vp))->vi_state)
    323  1.51   hannken #define VSTATE_CHANGE(vp, from, to) \
    324  1.51   hannken 	vstate_change((vp), (from), (to))
    325  1.51   hannken #define VSTATE_WAIT_STABLE(vp) \
    326  1.51   hannken 	vstate_wait_stable((vp))
    327  1.51   hannken #define VSTATE_ASSERT(vp, state)
    328  1.51   hannken 
    329  1.52   hannken static void
    330  1.51   hannken vstate_wait_stable(vnode_t *vp)
    331  1.51   hannken {
    332  1.57   hannken 	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
    333  1.51   hannken 
    334  1.57   hannken 	while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
    335  1.51   hannken 		cv_wait(&vp->v_cv, vp->v_interlock);
    336  1.51   hannken }
    337  1.51   hannken 
    338  1.52   hannken static void
    339  1.57   hannken vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
    340  1.51   hannken {
    341  1.57   hannken 	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
    342  1.51   hannken 
    343  1.57   hannken 	node->vi_state = to;
    344  1.57   hannken 	if (from == VS_LOADING)
    345  1.51   hannken 		cv_broadcast(&vcache.cv);
    346  1.57   hannken 	if (to == VS_ACTIVE || to == VS_RECLAIMED)
    347  1.51   hannken 		cv_broadcast(&vp->v_cv);
    348  1.51   hannken }
    349  1.51   hannken 
    350  1.51   hannken #endif /* defined(DIAGNOSTIC) */
    351  1.51   hannken 
    352   1.1     rmind void
    353   1.1     rmind vfs_vnode_sysinit(void)
    354   1.1     rmind {
    355  1.22    martin 	int error __diagused;
    356   1.1     rmind 
    357  1.44   hannken 	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
    358  1.44   hannken 	KASSERT(dead_rootmount != NULL);
    359  1.44   hannken 	dead_rootmount->mnt_iflag = IMNT_MPSAFE;
    360  1.31   hannken 
    361  1.63   hannken 	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
    362  1.63   hannken 	TAILQ_INIT(&lru_free_list);
    363  1.63   hannken 	TAILQ_INIT(&lru_hold_list);
    364  1.63   hannken 	TAILQ_INIT(&lru_vrele_list);
    365   1.1     rmind 
    366  1.36   hannken 	vcache_init();
    367  1.36   hannken 
    368  1.12   hannken 	cv_init(&vdrain_cv, "vdrain");
    369  1.63   hannken 	cv_init(&vdrain_gen_cv, "vdrainwt");
    370  1.12   hannken 	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
    371  1.63   hannken 	    NULL, &vdrain_lwp, "vdrain");
    372  1.47  riastrad 	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
    373   1.1     rmind }
    374   1.1     rmind 
    375   1.1     rmind /*
    376  1.48   hannken  * Allocate a new marker vnode.
    377  1.48   hannken  */
    378  1.48   hannken vnode_t *
    379  1.48   hannken vnalloc_marker(struct mount *mp)
    380  1.48   hannken {
    381  1.57   hannken 	vnode_impl_t *node;
    382  1.50   hannken 	vnode_t *vp;
    383  1.50   hannken 
    384  1.50   hannken 	node = pool_cache_get(vcache.pool, PR_WAITOK);
    385  1.50   hannken 	memset(node, 0, sizeof(*node));
    386  1.57   hannken 	vp = VIMPL_TO_VNODE(node);
    387  1.50   hannken 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
    388  1.50   hannken 	vp->v_mount = mp;
    389  1.50   hannken 	vp->v_type = VBAD;
    390  1.57   hannken 	node->vi_state = VS_MARKER;
    391  1.48   hannken 
    392  1.50   hannken 	return vp;
    393  1.48   hannken }
    394  1.48   hannken 
    395  1.48   hannken /*
    396  1.48   hannken  * Free a marker vnode.
    397  1.48   hannken  */
    398  1.48   hannken void
    399  1.48   hannken vnfree_marker(vnode_t *vp)
    400  1.48   hannken {
    401  1.57   hannken 	vnode_impl_t *node;
    402  1.48   hannken 
    403  1.57   hannken 	node = VNODE_TO_VIMPL(vp);
    404  1.57   hannken 	KASSERT(node->vi_state == VS_MARKER);
    405  1.50   hannken 	uvm_obj_destroy(&vp->v_uobj, true);
    406  1.50   hannken 	pool_cache_put(vcache.pool, node);
    407  1.48   hannken }
    408  1.48   hannken 
    409  1.48   hannken /*
    410  1.48   hannken  * Test a vnode for being a marker vnode.
    411  1.48   hannken  */
    412  1.48   hannken bool
    413  1.48   hannken vnis_marker(vnode_t *vp)
    414  1.48   hannken {
    415  1.48   hannken 
    416  1.57   hannken 	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
    417  1.48   hannken }
    418  1.48   hannken 
    419  1.48   hannken /*
    420  1.63   hannken  * Return the lru list this node should be on.
    421  1.63   hannken  */
    422  1.63   hannken static vnodelst_t *
    423  1.63   hannken lru_which(vnode_t *vp)
    424  1.63   hannken {
    425  1.63   hannken 
    426  1.63   hannken 	KASSERT(mutex_owned(vp->v_interlock));
    427  1.63   hannken 
    428  1.63   hannken 	if (vp->v_holdcnt > 0)
    429  1.63   hannken 		return &lru_hold_list;
    430  1.63   hannken 	else
    431  1.63   hannken 		return &lru_free_list;
    432  1.63   hannken }
    433  1.63   hannken 
    434  1.63   hannken /*
     435  1.63   hannken  * Put the vnode at the end of the given list.
     436  1.63   hannken  * Both the current and the new list may be NULL, used on vnode alloc/free.
     437  1.63   hannken  * Adjust numvnodes and signal the vdrain thread if there is work.
    438  1.63   hannken  */
    439  1.63   hannken static void
    440  1.63   hannken lru_requeue(vnode_t *vp, vnodelst_t *listhd)
    441  1.63   hannken {
    442  1.63   hannken 	vnode_impl_t *node;
    443  1.63   hannken 
    444  1.63   hannken 	mutex_enter(&vdrain_lock);
    445  1.63   hannken 	node = VNODE_TO_VIMPL(vp);
    446  1.63   hannken 	if (node->vi_lrulisthd != NULL)
    447  1.63   hannken 		TAILQ_REMOVE(node->vi_lrulisthd, node, vi_lrulist);
    448  1.63   hannken 	else
    449  1.63   hannken 		numvnodes++;
    450  1.63   hannken 	node->vi_lrulisthd = listhd;
    451  1.63   hannken 	if (node->vi_lrulisthd != NULL)
    452  1.63   hannken 		TAILQ_INSERT_TAIL(node->vi_lrulisthd, node, vi_lrulist);
    453  1.63   hannken 	else
    454  1.63   hannken 		numvnodes--;
    455  1.63   hannken 	if (numvnodes > desiredvnodes || listhd == &lru_vrele_list)
    456  1.63   hannken 		cv_broadcast(&vdrain_cv);
    457  1.63   hannken 	mutex_exit(&vdrain_lock);
    458  1.63   hannken }
    459  1.63   hannken 
    460  1.63   hannken /*
    461  1.63   hannken  * Reclaim a cached vnode.  Used from vdrain_thread only.
    462   1.1     rmind  */
    463  1.63   hannken static __inline void
    464  1.63   hannken vdrain_remove(vnode_t *vp)
    465   1.1     rmind {
    466  1.24   hannken 	struct mount *mp;
    467   1.1     rmind 
    468  1.63   hannken 	KASSERT(mutex_owned(&vdrain_lock));
    469  1.24   hannken 
    470  1.63   hannken 	/* Probe usecount (unlocked). */
    471  1.63   hannken 	if (vp->v_usecount > 0)
    472  1.63   hannken 		return;
    473  1.63   hannken 	/* Try v_interlock -- we lock the wrong direction! */
    474  1.63   hannken 	if (!mutex_tryenter(vp->v_interlock))
    475  1.63   hannken 		return;
    476  1.63   hannken 	/* Probe usecount and state. */
    477  1.63   hannken 	if (vp->v_usecount > 0 || VSTATE_GET(vp) != VS_ACTIVE) {
    478  1.63   hannken 		mutex_exit(vp->v_interlock);
    479  1.63   hannken 		return;
    480   1.1     rmind 	}
    481  1.63   hannken 	mp = vp->v_mount;
    482  1.63   hannken 	if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
    483  1.63   hannken 		mutex_exit(vp->v_interlock);
    484  1.63   hannken 		return;
    485   1.1     rmind 	}
    486  1.63   hannken 	vdrain_retry = true;
    487  1.63   hannken 	mutex_exit(&vdrain_lock);
    488   1.1     rmind 
    489  1.66   hannken 	if (vcache_vget(vp) == 0) {
    490  1.60   hannken 		if (!vrecycle(vp))
    491  1.60   hannken 			vrele(vp);
    492  1.60   hannken 	}
    493  1.24   hannken 	fstrans_done(mp);
    494  1.12   hannken 
    495  1.63   hannken 	mutex_enter(&vdrain_lock);
    496   1.1     rmind }
    497   1.1     rmind 
    498   1.1     rmind /*
    499  1.63   hannken  * Release a cached vnode.  Used from vdrain_thread only.
    500  1.12   hannken  */
    501  1.63   hannken static __inline void
    502  1.63   hannken vdrain_vrele(vnode_t *vp)
    503  1.12   hannken {
    504  1.63   hannken 	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
    505  1.63   hannken 	struct mount *mp;
    506  1.12   hannken 
    507  1.63   hannken 	KASSERT(mutex_owned(&vdrain_lock));
    508  1.12   hannken 
    509  1.63   hannken 	mp = vp->v_mount;
    510  1.64   hannken 	if (fstrans_start_nowait(mp, FSTRANS_LAZY) != 0)
    511  1.63   hannken 		return;
    512  1.63   hannken 
    513  1.64   hannken 	/*
    514  1.64   hannken 	 * First remove the vnode from the vrele list.
     515  1.64   hannken 	 * Put it on the last lru list; the last vrele()
    516  1.64   hannken 	 * will put it back onto the right list before
    517  1.64   hannken 	 * its v_usecount reaches zero.
    518  1.64   hannken 	 */
    519  1.63   hannken 	KASSERT(node->vi_lrulisthd == &lru_vrele_list);
    520  1.63   hannken 	TAILQ_REMOVE(node->vi_lrulisthd, node, vi_lrulist);
    521  1.64   hannken 	node->vi_lrulisthd = &lru_hold_list;
    522  1.63   hannken 	TAILQ_INSERT_TAIL(node->vi_lrulisthd, node, vi_lrulist);
    523  1.63   hannken 
    524  1.63   hannken 	vdrain_retry = true;
    525  1.63   hannken 	mutex_exit(&vdrain_lock);
    526  1.63   hannken 
    527  1.64   hannken 	mutex_enter(vp->v_interlock);
    528  1.63   hannken 	vrelel(vp, 0);
    529  1.63   hannken 	fstrans_done(mp);
    530  1.63   hannken 
    531  1.63   hannken 	mutex_enter(&vdrain_lock);
    532  1.12   hannken }
    533  1.12   hannken 
    534  1.12   hannken /*
    535  1.63   hannken  * Helper thread to keep the number of vnodes below desiredvnodes
    536  1.63   hannken  * and release vnodes from asynchronous vrele.
    537   1.1     rmind  */
    538  1.63   hannken static void
    539  1.63   hannken vdrain_thread(void *cookie)
    540   1.1     rmind {
    541  1.63   hannken 	vnodelst_t *listhd[] = {
    542  1.63   hannken 	    &lru_vrele_list, &lru_free_list, &lru_hold_list
    543  1.63   hannken 	};
    544  1.63   hannken 	int i;
    545  1.63   hannken 	u_int target;
    546  1.63   hannken 	vnode_impl_t *node, *marker;
    547  1.63   hannken 
    548  1.63   hannken 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
    549  1.63   hannken 
    550  1.63   hannken 	mutex_enter(&vdrain_lock);
    551  1.63   hannken 
    552  1.63   hannken 	for (;;) {
    553  1.63   hannken 		vdrain_retry = false;
    554  1.63   hannken 		target = desiredvnodes - desiredvnodes/10;
    555   1.1     rmind 
    556  1.63   hannken 		for (i = 0; i < __arraycount(listhd); i++) {
    557  1.63   hannken 			TAILQ_INSERT_HEAD(listhd[i], marker, vi_lrulist);
    558  1.63   hannken 			while ((node = TAILQ_NEXT(marker, vi_lrulist))) {
    559  1.63   hannken 				TAILQ_REMOVE(listhd[i], marker, vi_lrulist);
    560  1.63   hannken 				TAILQ_INSERT_AFTER(listhd[i], node, marker,
    561  1.63   hannken 				    vi_lrulist);
    562  1.63   hannken 				if (listhd[i] == &lru_vrele_list)
    563  1.63   hannken 					vdrain_vrele(VIMPL_TO_VNODE(node));
    564  1.63   hannken 				else if (numvnodes < target)
    565  1.63   hannken 					break;
    566  1.63   hannken 				else
    567  1.63   hannken 					vdrain_remove(VIMPL_TO_VNODE(node));
    568  1.63   hannken 			}
    569  1.63   hannken 			TAILQ_REMOVE(listhd[i], marker, vi_lrulist);
    570  1.63   hannken 		}
    571   1.1     rmind 
    572  1.63   hannken 		if (vdrain_retry) {
    573  1.63   hannken 			mutex_exit(&vdrain_lock);
    574  1.63   hannken 			yield();
    575  1.63   hannken 			mutex_enter(&vdrain_lock);
    576  1.63   hannken 		} else {
    577  1.63   hannken 			vdrain_gen++;
    578  1.63   hannken 			cv_broadcast(&vdrain_gen_cv);
    579  1.63   hannken 			cv_wait(&vdrain_cv, &vdrain_lock);
    580  1.63   hannken 		}
    581   1.1     rmind 	}
    582   1.1     rmind }
    583   1.1     rmind 
    584   1.1     rmind /*
    585   1.4     rmind  * vput: unlock and release the reference.
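                          *
                          * Equivalent to VOP_UNLOCK() followed by vrele().  A minimal usage
                          * sketch, assuming the caller already holds a reference on vp:
                          *
                          *	error = vn_lock(vp, LK_EXCLUSIVE);
                          *	if (error == 0) {
                          *		... operate on the vnode ...
                          *		vput(vp);
                          *	} else
                          *		vrele(vp);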
    586   1.1     rmind  */
    587   1.1     rmind void
    588   1.1     rmind vput(vnode_t *vp)
    589   1.1     rmind {
    590   1.1     rmind 
    591   1.1     rmind 	VOP_UNLOCK(vp);
    592   1.1     rmind 	vrele(vp);
    593   1.1     rmind }
    594   1.1     rmind 
    595   1.1     rmind /*
     596   1.1     rmind  * Try to drop a reference on a vnode.  Abort if we are releasing the
     597   1.1     rmind  * last reference.  Note: this _must_ succeed if it is not the last reference.
    598   1.1     rmind  */
    599   1.1     rmind static inline bool
    600   1.1     rmind vtryrele(vnode_t *vp)
    601   1.1     rmind {
    602   1.1     rmind 	u_int use, next;
    603   1.1     rmind 
    604   1.1     rmind 	for (use = vp->v_usecount;; use = next) {
    605   1.1     rmind 		if (use == 1) {
    606   1.1     rmind 			return false;
    607   1.1     rmind 		}
    608  1.24   hannken 		KASSERT(use > 1);
    609   1.1     rmind 		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
    610   1.1     rmind 		if (__predict_true(next == use)) {
    611   1.1     rmind 			return true;
    612   1.1     rmind 		}
    613   1.1     rmind 	}
    614   1.1     rmind }
    615   1.1     rmind 
    616   1.1     rmind /*
     617   1.1     rmind  * Vnode release.  If the reference count drops to zero, call the inactive
     618   1.1     rmind  * routine and either return the vnode to the freelist or free it to the pool.
    619   1.1     rmind  */
    620  1.23   hannken static void
    621   1.1     rmind vrelel(vnode_t *vp, int flags)
    622   1.1     rmind {
    623   1.1     rmind 	bool recycle, defer;
    624   1.1     rmind 	int error;
    625   1.1     rmind 
    626   1.9     rmind 	KASSERT(mutex_owned(vp->v_interlock));
    627   1.1     rmind 
    628   1.1     rmind 	if (__predict_false(vp->v_op == dead_vnodeop_p &&
    629  1.57   hannken 	    VSTATE_GET(vp) != VS_RECLAIMED)) {
    630  1.11  christos 		vnpanic(vp, "dead but not clean");
    631   1.1     rmind 	}
    632   1.1     rmind 
    633   1.1     rmind 	/*
    634   1.1     rmind 	 * If not the last reference, just drop the reference count
    635   1.1     rmind 	 * and unlock.
    636   1.1     rmind 	 */
    637   1.1     rmind 	if (vtryrele(vp)) {
    638   1.9     rmind 		mutex_exit(vp->v_interlock);
    639   1.1     rmind 		return;
    640   1.1     rmind 	}
    641   1.1     rmind 	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
    642  1.11  christos 		vnpanic(vp, "%s: bad ref count", __func__);
    643   1.1     rmind 	}
    644   1.1     rmind 
    645  1.15   hannken #ifdef DIAGNOSTIC
    646  1.15   hannken 	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
    647  1.15   hannken 	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
    648  1.15   hannken 		vprint("vrelel: missing VOP_CLOSE()", vp);
    649  1.15   hannken 	}
    650  1.15   hannken #endif
    651  1.15   hannken 
    652   1.1     rmind 	/*
    653   1.1     rmind 	 * If not clean, deactivate the vnode, but preserve
    654   1.1     rmind 	 * our reference across the call to VOP_INACTIVE().
    655   1.1     rmind 	 */
    656  1.57   hannken 	if (VSTATE_GET(vp) != VS_RECLAIMED) {
    657   1.1     rmind 		recycle = false;
    658   1.1     rmind 
    659   1.1     rmind 		/*
    660   1.1     rmind 		 * XXX This ugly block can be largely eliminated if
    661   1.1     rmind 		 * locking is pushed down into the file systems.
    662   1.1     rmind 		 *
    663  1.63   hannken 		 * Defer vnode release to vdrain_thread if caller
    664  1.30   hannken 		 * requests it explicitly or is the pagedaemon.
    665   1.1     rmind 		 */
    666   1.1     rmind 		if ((curlwp == uvm.pagedaemon_lwp) ||
    667   1.1     rmind 		    (flags & VRELEL_ASYNC_RELE) != 0) {
    668   1.1     rmind 			defer = true;
    669  1.63   hannken 		} else if (curlwp == vdrain_lwp) {
    670  1.17   hannken 			/*
    671  1.29  christos 			 * We have to try harder.
    672  1.17   hannken 			 */
    673   1.9     rmind 			mutex_exit(vp->v_interlock);
    674  1.32   hannken 			error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    675  1.47  riastrad 			KASSERTMSG((error == 0), "vn_lock failed: %d", error);
    676  1.17   hannken 			mutex_enter(vp->v_interlock);
    677   1.1     rmind 			defer = false;
    678   1.4     rmind 		} else {
    679   1.1     rmind 			/* If we can't acquire the lock, then defer. */
    680  1.32   hannken 			mutex_exit(vp->v_interlock);
    681  1.32   hannken 			error = vn_lock(vp,
    682  1.32   hannken 			    LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
    683  1.30   hannken 			defer = (error != 0);
    684  1.32   hannken 			mutex_enter(vp->v_interlock);
    685   1.1     rmind 		}
    686   1.1     rmind 
    687  1.30   hannken 		KASSERT(mutex_owned(vp->v_interlock));
    688  1.63   hannken 		KASSERT(! (curlwp == vdrain_lwp && defer));
    689  1.30   hannken 
    690   1.1     rmind 		if (defer) {
    691   1.1     rmind 			/*
    692   1.1     rmind 			 * Defer reclaim to the kthread; it's not safe to
    693   1.1     rmind 			 * clean it here.  We donate it our last reference.
    694   1.1     rmind 			 */
    695  1.63   hannken 			lru_requeue(vp, &lru_vrele_list);
    696   1.9     rmind 			mutex_exit(vp->v_interlock);
    697   1.1     rmind 			return;
    698   1.1     rmind 		}
    699   1.1     rmind 
    700  1.32   hannken 		/*
    701  1.32   hannken 		 * If the node got another reference while we
    702  1.32   hannken 		 * released the interlock, don't try to inactivate it yet.
    703  1.32   hannken 		 */
    704  1.32   hannken 		if (__predict_false(vtryrele(vp))) {
    705  1.32   hannken 			VOP_UNLOCK(vp);
    706  1.32   hannken 			mutex_exit(vp->v_interlock);
    707  1.32   hannken 			return;
    708  1.32   hannken 		}
    709  1.57   hannken 		VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
    710  1.29  christos 		mutex_exit(vp->v_interlock);
    711  1.29  christos 
    712   1.1     rmind 		/*
    713  1.52   hannken 		 * The vnode must not gain another reference while being
    714   1.1     rmind 		 * deactivated.  If VOP_INACTIVE() indicates that
    715   1.1     rmind 		 * the described file has been deleted, then recycle
    716  1.52   hannken 		 * the vnode.
    717   1.1     rmind 		 *
    718   1.1     rmind 		 * Note that VOP_INACTIVE() will drop the vnode lock.
    719   1.1     rmind 		 */
    720   1.1     rmind 		VOP_INACTIVE(vp, &recycle);
    721  1.46   hannken 		if (recycle) {
    722  1.54   hannken 			/* vcache_reclaim() below will drop the lock. */
    723  1.46   hannken 			if (vn_lock(vp, LK_EXCLUSIVE) != 0)
    724  1.46   hannken 				recycle = false;
    725  1.46   hannken 		}
    726   1.9     rmind 		mutex_enter(vp->v_interlock);
    727  1.57   hannken 		VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
    728   1.1     rmind 		if (!recycle) {
    729   1.1     rmind 			if (vtryrele(vp)) {
    730   1.9     rmind 				mutex_exit(vp->v_interlock);
    731   1.1     rmind 				return;
    732   1.1     rmind 			}
    733   1.1     rmind 		}
    734   1.1     rmind 
    735   1.1     rmind 		/* Take care of space accounting. */
    736   1.1     rmind 		if (vp->v_iflag & VI_EXECMAP) {
    737   1.1     rmind 			atomic_add_int(&uvmexp.execpages,
    738   1.1     rmind 			    -vp->v_uobj.uo_npages);
    739   1.1     rmind 			atomic_add_int(&uvmexp.filepages,
    740   1.1     rmind 			    vp->v_uobj.uo_npages);
    741   1.1     rmind 		}
    742   1.1     rmind 		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
    743   1.1     rmind 		vp->v_vflag &= ~VV_MAPPED;
    744   1.1     rmind 
    745   1.1     rmind 		/*
    746   1.1     rmind 		 * Recycle the vnode if the file is now unused (unlinked),
    747   1.1     rmind 		 * otherwise just free it.
    748   1.1     rmind 		 */
    749   1.1     rmind 		if (recycle) {
    750  1.57   hannken 			VSTATE_ASSERT(vp, VS_ACTIVE);
    751  1.54   hannken 			vcache_reclaim(vp);
    752   1.1     rmind 		}
    753   1.1     rmind 		KASSERT(vp->v_usecount > 0);
    754   1.1     rmind 	}
    755   1.1     rmind 
    756   1.1     rmind 	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
    757   1.1     rmind 		/* Gained another reference while being reclaimed. */
    758   1.9     rmind 		mutex_exit(vp->v_interlock);
    759   1.1     rmind 		return;
    760   1.1     rmind 	}
    761   1.1     rmind 
    762  1.67   hannken 	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
    763   1.1     rmind 		/*
    764   1.1     rmind 		 * It's clean so destroy it.  It isn't referenced
    765   1.1     rmind 		 * anywhere since it has been reclaimed.
    766   1.1     rmind 		 */
    767  1.57   hannken 		vcache_free(VNODE_TO_VIMPL(vp));
    768   1.1     rmind 	} else {
    769   1.1     rmind 		/*
    770   1.1     rmind 		 * Otherwise, put it back onto the freelist.  It
    771   1.1     rmind 		 * can't be destroyed while still associated with
    772   1.1     rmind 		 * a file system.
    773   1.1     rmind 		 */
    774  1.63   hannken 		lru_requeue(vp, lru_which(vp));
    775   1.9     rmind 		mutex_exit(vp->v_interlock);
    776   1.1     rmind 	}
    777   1.1     rmind }
    778   1.1     rmind 
    779   1.1     rmind void
    780   1.1     rmind vrele(vnode_t *vp)
    781   1.1     rmind {
    782   1.1     rmind 
    783  1.29  christos 	if (vtryrele(vp)) {
    784   1.1     rmind 		return;
    785   1.1     rmind 	}
    786   1.9     rmind 	mutex_enter(vp->v_interlock);
    787   1.1     rmind 	vrelel(vp, 0);
    788   1.1     rmind }
    789   1.1     rmind 
    790   1.1     rmind /*
     791   1.1     rmind  * Asynchronous vnode release: the vnode is released in a different context.
    792   1.1     rmind  */
    793   1.1     rmind void
    794   1.1     rmind vrele_async(vnode_t *vp)
    795   1.1     rmind {
    796   1.1     rmind 
    797  1.29  christos 	if (vtryrele(vp)) {
    798   1.1     rmind 		return;
    799   1.1     rmind 	}
    800   1.9     rmind 	mutex_enter(vp->v_interlock);
    801   1.1     rmind 	vrelel(vp, VRELEL_ASYNC_RELE);
    802   1.1     rmind }
    803   1.1     rmind 
    804   1.1     rmind /*
    805   1.1     rmind  * Vnode reference, where a reference is already held by some other
    806   1.1     rmind  * object (for example, a file structure).
    807   1.1     rmind  */
    808   1.1     rmind void
    809   1.1     rmind vref(vnode_t *vp)
    810   1.1     rmind {
    811   1.1     rmind 
    812   1.1     rmind 	KASSERT(vp->v_usecount != 0);
    813   1.1     rmind 
    814   1.1     rmind 	atomic_inc_uint(&vp->v_usecount);
    815   1.1     rmind }
    816   1.1     rmind 
    817   1.1     rmind /*
    818   1.1     rmind  * Page or buffer structure gets a reference.
    819   1.1     rmind  * Called with v_interlock held.
    820   1.1     rmind  */
    821   1.1     rmind void
    822   1.1     rmind vholdl(vnode_t *vp)
    823   1.1     rmind {
    824   1.1     rmind 
    825   1.9     rmind 	KASSERT(mutex_owned(vp->v_interlock));
    826   1.1     rmind 
    827  1.63   hannken 	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0)
    828  1.63   hannken 		lru_requeue(vp, lru_which(vp));
    829   1.1     rmind }
    830   1.1     rmind 
    831   1.1     rmind /*
    832   1.1     rmind  * Page or buffer structure frees a reference.
    833   1.1     rmind  * Called with v_interlock held.
    834   1.1     rmind  */
    835   1.1     rmind void
    836   1.1     rmind holdrelel(vnode_t *vp)
    837   1.1     rmind {
    838   1.1     rmind 
    839   1.9     rmind 	KASSERT(mutex_owned(vp->v_interlock));
    840   1.1     rmind 
    841   1.1     rmind 	if (vp->v_holdcnt <= 0) {
    842  1.11  christos 		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
    843   1.1     rmind 	}
    844   1.1     rmind 
    845   1.1     rmind 	vp->v_holdcnt--;
    846  1.63   hannken 	if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
    847  1.63   hannken 		lru_requeue(vp, lru_which(vp));
    848   1.1     rmind }
    849   1.1     rmind 
    850   1.1     rmind /*
     851  1.33   hannken  * Recycle an unused vnode if the caller holds the last reference.
    852   1.1     rmind  */
    853  1.33   hannken bool
    854  1.33   hannken vrecycle(vnode_t *vp)
    855   1.1     rmind {
    856  1.60   hannken 	int error __diagused;
    857  1.46   hannken 
    858  1.33   hannken 	mutex_enter(vp->v_interlock);
    859  1.33   hannken 
    860  1.60   hannken 	/* Make sure we hold the last reference. */
    861  1.60   hannken 	VSTATE_WAIT_STABLE(vp);
    862  1.33   hannken 	if (vp->v_usecount != 1) {
    863  1.33   hannken 		mutex_exit(vp->v_interlock);
    864  1.33   hannken 		return false;
    865   1.1     rmind 	}
    866  1.60   hannken 
    867  1.60   hannken 	/* If the vnode is already clean we're done. */
    868  1.60   hannken 	if (VSTATE_GET(vp) != VS_ACTIVE) {
    869  1.60   hannken 		VSTATE_ASSERT(vp, VS_RECLAIMED);
    870  1.60   hannken 		vrelel(vp, 0);
    871  1.60   hannken 		return true;
    872  1.60   hannken 	}
    873  1.60   hannken 
    874  1.60   hannken 	/* Prevent further references until the vnode is locked. */
    875  1.60   hannken 	VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
    876  1.60   hannken 	mutex_exit(vp->v_interlock);
    877  1.60   hannken 
    878  1.60   hannken 	error = vn_lock(vp, LK_EXCLUSIVE);
    879  1.60   hannken 	KASSERT(error == 0);
    880  1.60   hannken 
    881  1.60   hannken 	mutex_enter(vp->v_interlock);
    882  1.60   hannken 	VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
    883  1.60   hannken 
    884  1.68   hannken 	KASSERT(vp->v_usecount == 1);
    885  1.54   hannken 	vcache_reclaim(vp);
    886  1.52   hannken 	vrelel(vp, 0);
    887  1.60   hannken 
    888  1.33   hannken 	return true;
    889   1.1     rmind }
    890   1.1     rmind 
    891   1.1     rmind /*
    892   1.1     rmind  * Eliminate all activity associated with the requested vnode
    893   1.1     rmind  * and with all vnodes aliased to the requested vnode.
    894   1.1     rmind  */
    895   1.1     rmind void
    896   1.1     rmind vrevoke(vnode_t *vp)
    897   1.1     rmind {
    898  1.19   hannken 	vnode_t *vq;
    899   1.1     rmind 	enum vtype type;
    900   1.1     rmind 	dev_t dev;
    901   1.1     rmind 
    902   1.1     rmind 	KASSERT(vp->v_usecount > 0);
    903   1.1     rmind 
    904   1.9     rmind 	mutex_enter(vp->v_interlock);
    905  1.52   hannken 	VSTATE_WAIT_STABLE(vp);
    906  1.57   hannken 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
    907   1.9     rmind 		mutex_exit(vp->v_interlock);
    908   1.1     rmind 		return;
    909   1.1     rmind 	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
    910   1.1     rmind 		atomic_inc_uint(&vp->v_usecount);
    911  1.29  christos 		mutex_exit(vp->v_interlock);
    912  1.29  christos 		vgone(vp);
    913   1.1     rmind 		return;
    914   1.1     rmind 	} else {
    915   1.1     rmind 		dev = vp->v_rdev;
    916   1.1     rmind 		type = vp->v_type;
    917   1.9     rmind 		mutex_exit(vp->v_interlock);
    918   1.1     rmind 	}
    919   1.1     rmind 
    920  1.19   hannken 	while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
    921  1.29  christos 		vgone(vq);
    922   1.1     rmind 	}
    923   1.1     rmind }
    924   1.1     rmind 
    925   1.1     rmind /*
    926   1.1     rmind  * Eliminate all activity associated with a vnode in preparation for
    927   1.1     rmind  * reuse.  Drops a reference from the vnode.
    928   1.1     rmind  */
    929   1.1     rmind void
    930   1.1     rmind vgone(vnode_t *vp)
    931   1.1     rmind {
    932   1.1     rmind 
    933  1.46   hannken 	if (vn_lock(vp, LK_EXCLUSIVE) != 0) {
    934  1.57   hannken 		VSTATE_ASSERT(vp, VS_RECLAIMED);
    935  1.46   hannken 		vrele(vp);
    936  1.46   hannken 	}
    937  1.46   hannken 
    938   1.9     rmind 	mutex_enter(vp->v_interlock);
    939  1.54   hannken 	vcache_reclaim(vp);
    940  1.52   hannken 	vrelel(vp, 0);
    941   1.1     rmind }
    942   1.1     rmind 
    943  1.36   hannken static inline uint32_t
    944  1.36   hannken vcache_hash(const struct vcache_key *key)
    945  1.36   hannken {
    946  1.36   hannken 	uint32_t hash = HASH32_BUF_INIT;
    947  1.36   hannken 
    948  1.36   hannken 	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
    949  1.36   hannken 	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
    950  1.36   hannken 	return hash;
    951  1.36   hannken }
    952  1.36   hannken 
    953  1.36   hannken static void
    954  1.36   hannken vcache_init(void)
    955  1.36   hannken {
    956  1.36   hannken 
    957  1.57   hannken 	vcache.pool = pool_cache_init(sizeof(vnode_impl_t), 0, 0, 0,
    958  1.36   hannken 	    "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
    959  1.36   hannken 	KASSERT(vcache.pool != NULL);
    960  1.36   hannken 	mutex_init(&vcache.lock, MUTEX_DEFAULT, IPL_NONE);
    961  1.51   hannken 	cv_init(&vcache.cv, "vcache");
    962  1.61   hannken 	vcache.hashsize = desiredvnodes;
    963  1.36   hannken 	vcache.hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
    964  1.36   hannken 	    &vcache.hashmask);
    965  1.36   hannken }
    966  1.36   hannken 
    967  1.36   hannken static void
    968  1.36   hannken vcache_reinit(void)
    969  1.36   hannken {
    970  1.36   hannken 	int i;
    971  1.36   hannken 	uint32_t hash;
    972  1.36   hannken 	u_long oldmask, newmask;
    973  1.36   hannken 	struct hashhead *oldtab, *newtab;
    974  1.57   hannken 	vnode_impl_t *node;
    975  1.36   hannken 
    976  1.36   hannken 	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
    977  1.36   hannken 	mutex_enter(&vcache.lock);
    978  1.36   hannken 	oldtab = vcache.hashtab;
    979  1.36   hannken 	oldmask = vcache.hashmask;
    980  1.61   hannken 	vcache.hashsize = desiredvnodes;
    981  1.36   hannken 	vcache.hashtab = newtab;
    982  1.36   hannken 	vcache.hashmask = newmask;
    983  1.36   hannken 	for (i = 0; i <= oldmask; i++) {
    984  1.36   hannken 		while ((node = SLIST_FIRST(&oldtab[i])) != NULL) {
    985  1.57   hannken 			SLIST_REMOVE(&oldtab[i], node, vnode_impl, vi_hash);
    986  1.57   hannken 			hash = vcache_hash(&node->vi_key);
    987  1.36   hannken 			SLIST_INSERT_HEAD(&newtab[hash & vcache.hashmask],
    988  1.57   hannken 			    node, vi_hash);
    989  1.36   hannken 		}
    990  1.36   hannken 	}
    991  1.36   hannken 	mutex_exit(&vcache.lock);
    992  1.36   hannken 	hashdone(oldtab, HASH_SLIST, oldmask);
    993  1.36   hannken }
    994  1.36   hannken 
    995  1.57   hannken static inline vnode_impl_t *
    996  1.36   hannken vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
    997  1.36   hannken {
    998  1.36   hannken 	struct hashhead *hashp;
    999  1.57   hannken 	vnode_impl_t *node;
   1000  1.36   hannken 
   1001  1.36   hannken 	KASSERT(mutex_owned(&vcache.lock));
   1002  1.36   hannken 
   1003  1.36   hannken 	hashp = &vcache.hashtab[hash & vcache.hashmask];
   1004  1.57   hannken 	SLIST_FOREACH(node, hashp, vi_hash) {
   1005  1.57   hannken 		if (key->vk_mount != node->vi_key.vk_mount)
   1006  1.36   hannken 			continue;
   1007  1.57   hannken 		if (key->vk_key_len != node->vi_key.vk_key_len)
   1008  1.36   hannken 			continue;
   1009  1.57   hannken 		if (memcmp(key->vk_key, node->vi_key.vk_key, key->vk_key_len))
   1010  1.36   hannken 			continue;
   1011  1.36   hannken 		return node;
   1012  1.36   hannken 	}
   1013  1.36   hannken 	return NULL;
   1014  1.36   hannken }
   1015  1.36   hannken 
   1016  1.36   hannken /*
   1017  1.50   hannken  * Allocate a new, uninitialized vcache node.
   1018  1.50   hannken  */
   1019  1.57   hannken static vnode_impl_t *
   1020  1.50   hannken vcache_alloc(void)
   1021  1.50   hannken {
   1022  1.57   hannken 	vnode_impl_t *node;
   1023  1.50   hannken 	vnode_t *vp;
   1024  1.50   hannken 
   1025  1.50   hannken 	node = pool_cache_get(vcache.pool, PR_WAITOK);
   1026  1.50   hannken 	memset(node, 0, sizeof(*node));
   1027  1.50   hannken 
   1028  1.57   hannken 	/* SLIST_INIT(&node->vi_hash); */
   1029  1.50   hannken 
   1030  1.57   hannken 	vp = VIMPL_TO_VNODE(node);
   1031  1.50   hannken 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
   1032  1.50   hannken 	cv_init(&vp->v_cv, "vnode");
   1033  1.50   hannken 	/* LIST_INIT(&vp->v_nclist); */
   1034  1.50   hannken 	/* LIST_INIT(&vp->v_dnclist); */
   1035  1.50   hannken 
   1036  1.50   hannken 	rw_init(&vp->v_lock);
   1037  1.50   hannken 	vp->v_usecount = 1;
   1038  1.50   hannken 	vp->v_type = VNON;
   1039  1.50   hannken 	vp->v_size = vp->v_writesize = VSIZENOTSET;
   1040  1.50   hannken 
   1041  1.57   hannken 	node->vi_state = VS_LOADING;
   1042  1.51   hannken 
   1043  1.63   hannken 	lru_requeue(vp, &lru_free_list);
   1044  1.63   hannken 
   1045  1.50   hannken 	return node;
   1046  1.50   hannken }
   1047  1.50   hannken 
   1048  1.50   hannken /*
   1049  1.50   hannken  * Free an unused, unreferenced vcache node.
   1050  1.67   hannken  * v_interlock locked on entry.
   1051  1.50   hannken  */
   1052  1.50   hannken static void
   1053  1.57   hannken vcache_free(vnode_impl_t *node)
   1054  1.50   hannken {
   1055  1.50   hannken 	vnode_t *vp;
   1056  1.50   hannken 
   1057  1.57   hannken 	vp = VIMPL_TO_VNODE(node);
   1058  1.67   hannken 	KASSERT(mutex_owned(vp->v_interlock));
   1059  1.50   hannken 
   1060  1.50   hannken 	KASSERT(vp->v_usecount == 0);
   1061  1.67   hannken 	KASSERT(vp->v_holdcnt == 0);
   1062  1.67   hannken 	KASSERT(vp->v_writecount == 0);
   1063  1.67   hannken 	lru_requeue(vp, NULL);
   1064  1.67   hannken 	mutex_exit(vp->v_interlock);
   1065  1.67   hannken 
   1066  1.67   hannken 	vfs_insmntque(vp, NULL);
   1067  1.67   hannken 	if (vp->v_type == VBLK || vp->v_type == VCHR)
   1068  1.67   hannken 		spec_node_destroy(vp);
   1069  1.50   hannken 
   1070  1.50   hannken 	rw_destroy(&vp->v_lock);
   1071  1.50   hannken 	uvm_obj_destroy(&vp->v_uobj, true);
   1072  1.50   hannken 	cv_destroy(&vp->v_cv);
   1073  1.50   hannken 	pool_cache_put(vcache.pool, node);
   1074  1.50   hannken }
   1075  1.50   hannken 
   1076  1.50   hannken /*
   1077  1.66   hannken  * Try to get an initial reference on this cached vnode.
    1078  1.66   hannken  * Returns zero on success, ENOENT if the vnode has been reclaimed and
   1079  1.66   hannken  * EBUSY if the vnode state is unstable.
   1080  1.66   hannken  *
   1081  1.66   hannken  * v_interlock locked on entry and unlocked on exit.
   1082  1.66   hannken  */
   1083  1.66   hannken int
   1084  1.66   hannken vcache_tryvget(vnode_t *vp)
   1085  1.66   hannken {
   1086  1.67   hannken 	int error = 0;
   1087  1.66   hannken 
   1088  1.66   hannken 	KASSERT(mutex_owned(vp->v_interlock));
   1089  1.66   hannken 
   1090  1.67   hannken 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED))
   1091  1.67   hannken 		error = ENOENT;
   1092  1.67   hannken 	else if (__predict_false(VSTATE_GET(vp) != VS_ACTIVE))
   1093  1.67   hannken 		error = EBUSY;
   1094  1.67   hannken 	else if (vp->v_usecount == 0)
   1095  1.66   hannken 		vp->v_usecount = 1;
   1096  1.67   hannken 	else
   1097  1.66   hannken 		atomic_inc_uint(&vp->v_usecount);
   1098  1.66   hannken 
   1099  1.66   hannken 	mutex_exit(vp->v_interlock);
   1100  1.66   hannken 
   1101  1.67   hannken 	return error;
   1102  1.66   hannken }
   1103  1.66   hannken 
   1104  1.66   hannken /*
   1105  1.66   hannken  * Try to get an initial reference on this cached vnode.
    1106  1.66   hannken  * Returns zero on success and ENOENT if the vnode has been reclaimed.
   1107  1.66   hannken  * Will wait for the vnode state to be stable.
   1108  1.66   hannken  *
   1109  1.66   hannken  * v_interlock locked on entry and unlocked on exit.
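                          *
                          * Minimal calling sketch, as used by vcache_get() below:
                          *
                          *	mutex_enter(vp->v_interlock);
                          *	error = vcache_vget(vp);
                          *	if (error == ENOENT)
                          *		... the vnode was reclaimed; look it up again ...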
   1110  1.66   hannken  */
   1111  1.66   hannken int
   1112  1.66   hannken vcache_vget(vnode_t *vp)
   1113  1.66   hannken {
   1114  1.66   hannken 
   1115  1.66   hannken 	KASSERT(mutex_owned(vp->v_interlock));
   1116  1.66   hannken 
   1117  1.67   hannken 	/* Increment hold count to prevent vnode from disappearing. */
   1118  1.67   hannken 	vp->v_holdcnt++;
   1119  1.67   hannken 	VSTATE_WAIT_STABLE(vp);
   1120  1.67   hannken 	vp->v_holdcnt--;
   1121  1.66   hannken 
    1122  1.67   hannken 	/* If this was the last reference to a reclaimed vnode, free it now. */
   1123  1.67   hannken 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
   1124  1.67   hannken 		if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
   1125  1.67   hannken 			vcache_free(VNODE_TO_VIMPL(vp));
   1126  1.67   hannken 		else
   1127  1.67   hannken 			mutex_exit(vp->v_interlock);
   1128  1.66   hannken 		return ENOENT;
   1129  1.66   hannken 	}
   1130  1.67   hannken 	VSTATE_ASSERT(vp, VS_ACTIVE);
   1131  1.67   hannken 	if (vp->v_usecount == 0)
   1132  1.67   hannken 		vp->v_usecount = 1;
   1133  1.67   hannken 	else
   1134  1.67   hannken 		atomic_inc_uint(&vp->v_usecount);
   1135  1.66   hannken 
   1136  1.66   hannken 	mutex_exit(vp->v_interlock);
   1137  1.66   hannken 
   1138  1.66   hannken 	return 0;
   1139  1.66   hannken }
   1140  1.66   hannken 
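/*
 * A minimal caller sketch (not from this file; the surrounding lookup
 * context is hypothetical).  A caller that would rather not sleep tries
 * vcache_tryvget() first and falls back to the blocking vcache_vget()
 * when the vnode state is unstable:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vcache_tryvget(vp);		(drops v_interlock)
 *	if (error == EBUSY) {
 *		mutex_enter(vp->v_interlock);
 *		error = vcache_vget(vp);	(may sleep, drops v_interlock)
 *	}
 *	if (error == 0)
 *		... vp is referenced; vrele(vp) when done ...
 */
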
   1141  1.66   hannken /*
   1142  1.36   hannken  * Get a vnode / fs node pair by key and return it referenced through vpp.
   1143  1.36   hannken  */
   1144  1.36   hannken int
   1145  1.36   hannken vcache_get(struct mount *mp, const void *key, size_t key_len,
   1146  1.36   hannken     struct vnode **vpp)
   1147  1.36   hannken {
   1148  1.36   hannken 	int error;
   1149  1.36   hannken 	uint32_t hash;
   1150  1.36   hannken 	const void *new_key;
   1151  1.36   hannken 	struct vnode *vp;
   1152  1.36   hannken 	struct vcache_key vcache_key;
   1153  1.57   hannken 	vnode_impl_t *node, *new_node;
   1154  1.36   hannken 
   1155  1.36   hannken 	new_key = NULL;
   1156  1.36   hannken 	*vpp = NULL;
   1157  1.36   hannken 
   1158  1.36   hannken 	vcache_key.vk_mount = mp;
   1159  1.36   hannken 	vcache_key.vk_key = key;
   1160  1.36   hannken 	vcache_key.vk_key_len = key_len;
   1161  1.36   hannken 	hash = vcache_hash(&vcache_key);
   1162  1.36   hannken 
   1163  1.36   hannken again:
   1164  1.36   hannken 	mutex_enter(&vcache.lock);
   1165  1.36   hannken 	node = vcache_hash_lookup(&vcache_key, hash);
   1166  1.36   hannken 
   1167  1.36   hannken 	/* If found, take a reference or retry. */
   1168  1.52   hannken 	if (__predict_true(node != NULL)) {
   1169  1.52   hannken 		/*
   1170  1.52   hannken 		 * If the vnode is loading, we cannot take the v_interlock
   1171  1.52   hannken 		 * here, as it might change during load (see uvm_obj_setlock()).
   1172  1.57   hannken 		 * As changing state from VS_LOADING requires both vcache.lock
   1173  1.52   hannken 		 * and v_interlock, it is safe to test with vcache.lock held.
   1174  1.52   hannken 		 *
   1175  1.57   hannken 		 * Wait for vnodes changing state from VS_LOADING and retry.
   1176  1.52   hannken 		 */
   1177  1.57   hannken 		if (__predict_false(node->vi_state == VS_LOADING)) {
   1178  1.52   hannken 			cv_wait(&vcache.cv, &vcache.lock);
   1179  1.52   hannken 			mutex_exit(&vcache.lock);
   1180  1.52   hannken 			goto again;
   1181  1.52   hannken 		}
   1182  1.57   hannken 		vp = VIMPL_TO_VNODE(node);
   1183  1.36   hannken 		mutex_enter(vp->v_interlock);
   1184  1.36   hannken 		mutex_exit(&vcache.lock);
   1185  1.66   hannken 		error = vcache_vget(vp);
   1186  1.36   hannken 		if (error == ENOENT)
   1187  1.36   hannken 			goto again;
   1188  1.36   hannken 		if (error == 0)
   1189  1.36   hannken 			*vpp = vp;
   1190  1.36   hannken 		KASSERT((error != 0) == (*vpp == NULL));
   1191  1.36   hannken 		return error;
   1192  1.36   hannken 	}
   1193  1.36   hannken 	mutex_exit(&vcache.lock);
   1194  1.36   hannken 
   1195  1.36   hannken 	/* Allocate and initialize a new vcache / vnode pair. */
   1196  1.36   hannken 	error = vfs_busy(mp, NULL);
   1197  1.36   hannken 	if (error)
   1198  1.36   hannken 		return error;
   1199  1.50   hannken 	new_node = vcache_alloc();
   1200  1.57   hannken 	new_node->vi_key = vcache_key;
   1201  1.57   hannken 	vp = VIMPL_TO_VNODE(new_node);
   1202  1.36   hannken 	mutex_enter(&vcache.lock);
   1203  1.36   hannken 	node = vcache_hash_lookup(&vcache_key, hash);
   1204  1.36   hannken 	if (node == NULL) {
   1205  1.36   hannken 		SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
   1206  1.57   hannken 		    new_node, vi_hash);
   1207  1.36   hannken 		node = new_node;
   1208  1.36   hannken 	}
   1209  1.36   hannken 
   1210  1.36   hannken 	/* If another thread beat us to inserting this node, retry. */
   1211  1.36   hannken 	if (node != new_node) {
   1212  1.52   hannken 		mutex_enter(vp->v_interlock);
   1213  1.57   hannken 		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
   1214  1.52   hannken 		mutex_exit(&vcache.lock);
   1215  1.52   hannken 		vrelel(vp, 0);
   1216  1.36   hannken 		vfs_unbusy(mp, false, NULL);
   1217  1.36   hannken 		goto again;
   1218  1.36   hannken 	}
   1219  1.52   hannken 	mutex_exit(&vcache.lock);
   1220  1.36   hannken 
   1221  1.57   hannken 	/* Load the fs node.  Exclusive as new_node is VS_LOADING. */
   1222  1.36   hannken 	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
   1223  1.36   hannken 	if (error) {
   1224  1.36   hannken 		mutex_enter(&vcache.lock);
   1225  1.36   hannken 		SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
   1226  1.57   hannken 		    new_node, vnode_impl, vi_hash);
   1227  1.52   hannken 		mutex_enter(vp->v_interlock);
   1228  1.57   hannken 		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
   1229  1.36   hannken 		mutex_exit(&vcache.lock);
   1230  1.52   hannken 		vrelel(vp, 0);
   1231  1.36   hannken 		vfs_unbusy(mp, false, NULL);
   1232  1.36   hannken 		KASSERT(*vpp == NULL);
   1233  1.36   hannken 		return error;
   1234  1.36   hannken 	}
   1235  1.36   hannken 	KASSERT(new_key != NULL);
   1236  1.36   hannken 	KASSERT(memcmp(key, new_key, key_len) == 0);
   1237  1.36   hannken 	KASSERT(vp->v_op != NULL);
   1238  1.36   hannken 	vfs_insmntque(vp, mp);
   1239  1.36   hannken 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
   1240  1.36   hannken 		vp->v_vflag |= VV_MPSAFE;
   1241  1.36   hannken 	vfs_unbusy(mp, true, NULL);
   1242  1.36   hannken 
   1243  1.36   hannken 	/* Finished loading, finalize node. */
   1244  1.36   hannken 	mutex_enter(&vcache.lock);
   1245  1.57   hannken 	new_node->vi_key.vk_key = new_key;
   1246  1.39   hannken 	mutex_enter(vp->v_interlock);
   1247  1.57   hannken 	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
   1248  1.39   hannken 	mutex_exit(vp->v_interlock);
   1249  1.52   hannken 	mutex_exit(&vcache.lock);
   1250  1.36   hannken 	*vpp = vp;
   1251  1.36   hannken 	return 0;
   1252  1.36   hannken }
   1253  1.36   hannken 
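/*
 * Typical use of vcache_get(), sketched for a file system that keys its
 * vnodes by inode number (the surrounding lookup code is hypothetical,
 * not taken from this file):
 *
 *	ino_t ino = ...;
 *	struct vnode *vp;
 *
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *	if (error)
 *		return error;
 *	error = vn_lock(vp, LK_EXCLUSIVE);
 *	if (error) {
 *		vrele(vp);
 *		return error;
 *	}
 *	... use the locked, referenced vnode, then VOP_UNLOCK() / vrele() ...
 */
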
   1254  1.36   hannken /*
   1255  1.40   hannken  * Create a new vnode / fs node pair and return it referenced through vpp.
   1256  1.40   hannken  */
   1257  1.40   hannken int
   1258  1.40   hannken vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
   1259  1.40   hannken     kauth_cred_t cred, struct vnode **vpp)
   1260  1.40   hannken {
   1261  1.40   hannken 	int error;
   1262  1.40   hannken 	uint32_t hash;
   1263  1.52   hannken 	struct vnode *ovp, *vp;
   1264  1.57   hannken 	vnode_impl_t *new_node;
   1265  1.57   hannken 	vnode_impl_t *old_node __diagused;
   1266  1.40   hannken 
   1267  1.40   hannken 	*vpp = NULL;
   1268  1.40   hannken 
   1269  1.40   hannken 	/* Allocate and initialize a new vcache / vnode pair. */
   1270  1.40   hannken 	error = vfs_busy(mp, NULL);
   1271  1.40   hannken 	if (error)
   1272  1.40   hannken 		return error;
   1273  1.50   hannken 	new_node = vcache_alloc();
   1274  1.57   hannken 	new_node->vi_key.vk_mount = mp;
   1275  1.57   hannken 	vp = VIMPL_TO_VNODE(new_node);
   1276  1.40   hannken 
   1277  1.40   hannken 	/* Create and load the fs node. */
   1278  1.40   hannken 	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred,
   1279  1.57   hannken 	    &new_node->vi_key.vk_key_len, &new_node->vi_key.vk_key);
   1280  1.40   hannken 	if (error) {
   1281  1.52   hannken 		mutex_enter(&vcache.lock);
   1282  1.52   hannken 		mutex_enter(vp->v_interlock);
   1283  1.57   hannken 		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
   1284  1.52   hannken 		mutex_exit(&vcache.lock);
   1285  1.52   hannken 		vrelel(vp, 0);
   1286  1.40   hannken 		vfs_unbusy(mp, false, NULL);
   1287  1.40   hannken 		KASSERT(*vpp == NULL);
   1288  1.40   hannken 		return error;
   1289  1.40   hannken 	}
   1290  1.57   hannken 	KASSERT(new_node->vi_key.vk_key != NULL);
   1291  1.40   hannken 	KASSERT(vp->v_op != NULL);
   1292  1.57   hannken 	hash = vcache_hash(&new_node->vi_key);
   1293  1.40   hannken 
   1294  1.40   hannken 	/* Wait for previous instance to be reclaimed, then insert new node. */
   1295  1.40   hannken 	mutex_enter(&vcache.lock);
   1296  1.57   hannken 	while ((old_node = vcache_hash_lookup(&new_node->vi_key, hash))) {
   1297  1.57   hannken 		ovp = VIMPL_TO_VNODE(old_node);
   1298  1.52   hannken 		mutex_enter(ovp->v_interlock);
   1299  1.40   hannken 		mutex_exit(&vcache.lock);
   1300  1.66   hannken 		error = vcache_vget(ovp);
   1301  1.52   hannken 		KASSERT(error == ENOENT);
   1302  1.40   hannken 		mutex_enter(&vcache.lock);
   1303  1.40   hannken 	}
   1304  1.40   hannken 	SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
   1305  1.57   hannken 	    new_node, vi_hash);
   1306  1.40   hannken 	mutex_exit(&vcache.lock);
   1307  1.40   hannken 	vfs_insmntque(vp, mp);
   1308  1.40   hannken 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
   1309  1.40   hannken 		vp->v_vflag |= VV_MPSAFE;
   1310  1.40   hannken 	vfs_unbusy(mp, true, NULL);
   1311  1.40   hannken 
   1312  1.40   hannken 	/* Finished loading, finalize node. */
   1313  1.40   hannken 	mutex_enter(&vcache.lock);
   1314  1.52   hannken 	mutex_enter(vp->v_interlock);
   1315  1.57   hannken 	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
   1316  1.40   hannken 	mutex_exit(&vcache.lock);
   1317  1.40   hannken 	mutex_exit(vp->v_interlock);
   1318  1.40   hannken 	*vpp = vp;
   1319  1.40   hannken 	return 0;
   1320  1.40   hannken }
   1321  1.40   hannken 
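/*
 * Sketch of the create path as a file system might use vcache_new()
 * (hypothetical caller; the real callers live in the individual file
 * systems, which supply vap and cred from the create request):
 *
 *	struct vnode *vp;
 *
 *	error = vcache_new(dvp->v_mount, dvp, vap, cred, &vp);
 *	if (error)
 *		return error;
 *	error = vn_lock(vp, LK_EXCLUSIVE);
 *	if (error) {
 *		vrele(vp);
 *		return error;
 *	}
 *	*vpp = vp;
 */
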
   1322  1.40   hannken /*
   1323  1.65   hannken  * Prepare key change: update the old cache node's key and lock the new node.
   1324  1.37   hannken  * Return an error if the new node already exists.
   1325  1.37   hannken  */
   1326  1.37   hannken int
   1327  1.37   hannken vcache_rekey_enter(struct mount *mp, struct vnode *vp,
   1328  1.37   hannken     const void *old_key, size_t old_key_len,
   1329  1.37   hannken     const void *new_key, size_t new_key_len)
   1330  1.37   hannken {
   1331  1.37   hannken 	uint32_t old_hash, new_hash;
   1332  1.37   hannken 	struct vcache_key old_vcache_key, new_vcache_key;
   1333  1.57   hannken 	vnode_impl_t *node, *new_node;
   1334  1.52   hannken 	struct vnode *tvp;
   1335  1.37   hannken 
   1336  1.37   hannken 	old_vcache_key.vk_mount = mp;
   1337  1.37   hannken 	old_vcache_key.vk_key = old_key;
   1338  1.37   hannken 	old_vcache_key.vk_key_len = old_key_len;
   1339  1.37   hannken 	old_hash = vcache_hash(&old_vcache_key);
   1340  1.37   hannken 
   1341  1.37   hannken 	new_vcache_key.vk_mount = mp;
   1342  1.37   hannken 	new_vcache_key.vk_key = new_key;
   1343  1.37   hannken 	new_vcache_key.vk_key_len = new_key_len;
   1344  1.37   hannken 	new_hash = vcache_hash(&new_vcache_key);
   1345  1.37   hannken 
   1346  1.50   hannken 	new_node = vcache_alloc();
   1347  1.57   hannken 	new_node->vi_key = new_vcache_key;
   1348  1.57   hannken 	tvp = VIMPL_TO_VNODE(new_node);
   1349  1.37   hannken 
   1350  1.52   hannken 	/* Insert the locked new node used as a placeholder. */
   1351  1.37   hannken 	mutex_enter(&vcache.lock);
   1352  1.37   hannken 	node = vcache_hash_lookup(&new_vcache_key, new_hash);
   1353  1.37   hannken 	if (node != NULL) {
   1354  1.52   hannken 		mutex_enter(tvp->v_interlock);
   1355  1.57   hannken 		VSTATE_CHANGE(tvp, VS_LOADING, VS_RECLAIMED);
   1356  1.37   hannken 		mutex_exit(&vcache.lock);
   1357  1.52   hannken 		vrelel(tvp, 0);
   1358  1.37   hannken 		return EEXIST;
   1359  1.37   hannken 	}
   1360  1.37   hannken 	SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
   1361  1.57   hannken 	    new_node, vi_hash);
   1362  1.49   hannken 
   1363  1.65   hannken 	/* Replace the old node's key with the temporary copy. */
   1364  1.37   hannken 	node = vcache_hash_lookup(&old_vcache_key, old_hash);
   1365  1.37   hannken 	KASSERT(node != NULL);
   1366  1.57   hannken 	KASSERT(VIMPL_TO_VNODE(node) == vp);
   1367  1.65   hannken 	KASSERT(node->vi_key.vk_key != old_vcache_key.vk_key);
   1368  1.57   hannken 	node->vi_key = old_vcache_key;
   1369  1.37   hannken 	mutex_exit(&vcache.lock);
   1370  1.37   hannken 	return 0;
   1371  1.37   hannken }
   1372  1.37   hannken 
   1373  1.37   hannken /*
   1374  1.65   hannken  * Key change complete: update old node and remove placeholder.
   1375  1.37   hannken  */
   1376  1.37   hannken void
   1377  1.37   hannken vcache_rekey_exit(struct mount *mp, struct vnode *vp,
   1378  1.37   hannken     const void *old_key, size_t old_key_len,
   1379  1.37   hannken     const void *new_key, size_t new_key_len)
   1380  1.37   hannken {
   1381  1.37   hannken 	uint32_t old_hash, new_hash;
   1382  1.37   hannken 	struct vcache_key old_vcache_key, new_vcache_key;
   1383  1.57   hannken 	vnode_impl_t *old_node, *new_node;
   1384  1.52   hannken 	struct vnode *tvp;
   1385  1.37   hannken 
   1386  1.37   hannken 	old_vcache_key.vk_mount = mp;
   1387  1.37   hannken 	old_vcache_key.vk_key = old_key;
   1388  1.37   hannken 	old_vcache_key.vk_key_len = old_key_len;
   1389  1.37   hannken 	old_hash = vcache_hash(&old_vcache_key);
   1390  1.37   hannken 
   1391  1.37   hannken 	new_vcache_key.vk_mount = mp;
   1392  1.37   hannken 	new_vcache_key.vk_key = new_key;
   1393  1.37   hannken 	new_vcache_key.vk_key_len = new_key_len;
   1394  1.37   hannken 	new_hash = vcache_hash(&new_vcache_key);
   1395  1.37   hannken 
   1396  1.37   hannken 	mutex_enter(&vcache.lock);
   1397  1.49   hannken 
   1398  1.49   hannken 	/* Look up the old and new nodes. */
   1399  1.49   hannken 	old_node = vcache_hash_lookup(&old_vcache_key, old_hash);
   1400  1.49   hannken 	KASSERT(old_node != NULL);
   1401  1.57   hannken 	KASSERT(VIMPL_TO_VNODE(old_node) == vp);
   1402  1.52   hannken 
   1403  1.49   hannken 	new_node = vcache_hash_lookup(&new_vcache_key, new_hash);
   1404  1.52   hannken 	KASSERT(new_node != NULL);
   1405  1.57   hannken 	KASSERT(new_node->vi_key.vk_key_len == new_key_len);
   1406  1.57   hannken 	tvp = VIMPL_TO_VNODE(new_node);
   1407  1.52   hannken 	mutex_enter(tvp->v_interlock);
   1408  1.57   hannken 	VSTATE_ASSERT(VIMPL_TO_VNODE(new_node), VS_LOADING);
   1409  1.49   hannken 
   1410  1.49   hannken 	/* Rekey the old node and put it onto its new hash list. */
   1411  1.57   hannken 	old_node->vi_key = new_vcache_key;
   1412  1.49   hannken 	if (old_hash != new_hash) {
   1413  1.49   hannken 		SLIST_REMOVE(&vcache.hashtab[old_hash & vcache.hashmask],
   1414  1.57   hannken 		    old_node, vnode_impl, vi_hash);
   1415  1.49   hannken 		SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
   1416  1.57   hannken 		    old_node, vi_hash);
   1417  1.49   hannken 	}
   1418  1.49   hannken 
   1419  1.49   hannken 	/* Remove the new node used as a placeholder. */
   1420  1.49   hannken 	SLIST_REMOVE(&vcache.hashtab[new_hash & vcache.hashmask],
   1421  1.57   hannken 	    new_node, vnode_impl, vi_hash);
   1422  1.57   hannken 	VSTATE_CHANGE(tvp, VS_LOADING, VS_RECLAIMED);
   1423  1.37   hannken 	mutex_exit(&vcache.lock);
   1424  1.52   hannken 	vrelel(tvp, 0);
   1425  1.37   hannken }
   1426  1.37   hannken 
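/*
 * The two rekey functions above bracket an operation that changes a
 * vnode's cache key, e.g. a rename in a file system whose key encodes
 * the directory entry.  A hedged sketch of the protocol (caller and key
 * layout are hypothetical):
 *
 *	error = vcache_rekey_enter(mp, vp, &old_key, sizeof(old_key),
 *	    &new_key, sizeof(new_key));
 *	if (error)
 *		return error;		(new key already in the cache)
 *	... update the on-disk structures so the node is found under
 *	    new_key from now on ...
 *	vcache_rekey_exit(mp, vp, &old_key, sizeof(old_key),
 *	    &new_key, sizeof(new_key));
 */
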
   1427  1.37   hannken /*
   1428  1.54   hannken  * Disassociate the underlying file system from a vnode.
   1429  1.54   hannken  *
   1430  1.54   hannken  * Must be called with the vnode locked; returns with the vnode unlocked.
   1431  1.54   hannken  * Must be called with the interlock held; returns with it still held.
   1432  1.54   hannken  */
   1433  1.54   hannken static void
   1434  1.54   hannken vcache_reclaim(vnode_t *vp)
   1435  1.54   hannken {
   1436  1.54   hannken 	lwp_t *l = curlwp;
   1437  1.57   hannken 	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
   1438  1.55   hannken 	uint32_t hash;
   1439  1.55   hannken 	uint8_t temp_buf[64], *temp_key;
   1440  1.55   hannken 	size_t temp_key_len;
   1441  1.54   hannken 	bool recycle, active;
   1442  1.54   hannken 	int error;
   1443  1.54   hannken 
   1444  1.54   hannken 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
   1445  1.54   hannken 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
   1446  1.54   hannken 	KASSERT(mutex_owned(vp->v_interlock));
   1447  1.54   hannken 	KASSERT(vp->v_usecount != 0);
   1448  1.54   hannken 
   1449  1.54   hannken 	active = (vp->v_usecount > 1);
   1450  1.57   hannken 	temp_key_len = node->vi_key.vk_key_len;
   1451  1.54   hannken 	/*
   1452  1.54   hannken 	 * Prevent the vnode from being recycled or brought into use
   1453  1.54   hannken 	 * while we clean it out.
   1454  1.54   hannken 	 */
   1455  1.57   hannken 	VSTATE_CHANGE(vp, VS_ACTIVE, VS_RECLAIMING);
   1456  1.54   hannken 	if (vp->v_iflag & VI_EXECMAP) {
   1457  1.54   hannken 		atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
   1458  1.54   hannken 		atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
   1459  1.54   hannken 	}
   1460  1.54   hannken 	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
   1461  1.54   hannken 	mutex_exit(vp->v_interlock);
   1462  1.54   hannken 
   1463  1.55   hannken 	/* Replace the vnode key with a temporary copy. */
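	/*
	 * (The key memory typically points into the file system node,
	 * which is freed by VOP_RECLAIM below, while this cache node
	 * stays on its hash list in state VS_RECLAIMING so concurrent
	 * vcache_get() callers can find it and wait; hence the copy.)
	 */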
   1464  1.57   hannken 	if (node->vi_key.vk_key_len > sizeof(temp_buf)) {
   1465  1.55   hannken 		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
   1466  1.55   hannken 	} else {
   1467  1.55   hannken 		temp_key = temp_buf;
   1468  1.55   hannken 	}
   1469  1.55   hannken 	mutex_enter(&vcache.lock);
   1470  1.57   hannken 	memcpy(temp_key, node->vi_key.vk_key, temp_key_len);
   1471  1.57   hannken 	node->vi_key.vk_key = temp_key;
   1472  1.55   hannken 	mutex_exit(&vcache.lock);
   1473  1.55   hannken 
   1474  1.54   hannken 	/*
   1475  1.54   hannken 	 * Clean out any cached data associated with the vnode.
   1476  1.54   hannken 	 * If purging an active vnode, it must be closed and
   1477  1.60   hannken 	 * deactivated before being reclaimed.
   1478  1.54   hannken 	 */
   1479  1.54   hannken 	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
   1480  1.54   hannken 	if (error != 0) {
   1481  1.54   hannken 		if (wapbl_vphaswapbl(vp))
   1482  1.54   hannken 			WAPBL_DISCARD(wapbl_vptomp(vp));
   1483  1.54   hannken 		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
   1484  1.54   hannken 	}
   1485  1.54   hannken 	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
   1486  1.54   hannken 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1487  1.54   hannken 	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
   1488  1.54   hannken 		 spec_node_revoke(vp);
   1489  1.54   hannken 	}
   1490  1.54   hannken 
   1491  1.60   hannken 	/*
   1492  1.60   hannken 	 * Disassociate the underlying file system from the vnode.
   1493  1.60   hannken 	 * Note that VOP_INACTIVE will unlock the vnode.
   1494  1.60   hannken 	 */
   1495  1.60   hannken 	VOP_INACTIVE(vp, &recycle);
   1496  1.54   hannken 	if (VOP_RECLAIM(vp)) {
   1497  1.54   hannken 		vnpanic(vp, "%s: cannot reclaim", __func__);
   1498  1.54   hannken 	}
   1499  1.54   hannken 
   1500  1.54   hannken 	KASSERT(vp->v_data == NULL);
   1501  1.54   hannken 	KASSERT(vp->v_uobj.uo_npages == 0);
   1502  1.54   hannken 
   1503  1.54   hannken 	if (vp->v_type == VREG && vp->v_ractx != NULL) {
   1504  1.54   hannken 		uvm_ra_freectx(vp->v_ractx);
   1505  1.54   hannken 		vp->v_ractx = NULL;
   1506  1.54   hannken 	}
   1507  1.54   hannken 
   1508  1.54   hannken 	/* Purge name cache. */
   1509  1.54   hannken 	cache_purge(vp);
   1510  1.54   hannken 
   1511  1.54   hannken 	/* Move to dead mount. */
   1512  1.54   hannken 	vp->v_vflag &= ~VV_ROOT;
   1513  1.54   hannken 	atomic_inc_uint(&dead_rootmount->mnt_refcnt);
   1514  1.54   hannken 	vfs_insmntque(vp, dead_rootmount);
   1515  1.54   hannken 
   1516  1.55   hannken 	/* Remove from vnode cache. */
   1517  1.57   hannken 	hash = vcache_hash(&node->vi_key);
   1518  1.55   hannken 	mutex_enter(&vcache.lock);
   1519  1.57   hannken 	KASSERT(node == vcache_hash_lookup(&node->vi_key, hash));
   1520  1.55   hannken 	SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
   1521  1.57   hannken 	    node, vnode_impl, vi_hash);
   1522  1.55   hannken 	mutex_exit(&vcache.lock);
   1523  1.55   hannken 	if (temp_key != temp_buf)
   1524  1.55   hannken 		kmem_free(temp_key, temp_key_len);
   1525  1.55   hannken 
   1526  1.54   hannken 	/* Done with the purge; notify sleepers of the grim news. */
   1527  1.54   hannken 	mutex_enter(vp->v_interlock);
   1528  1.54   hannken 	vp->v_op = dead_vnodeop_p;
   1529  1.54   hannken 	vp->v_vflag |= VV_LOCKSWORK;
   1530  1.57   hannken 	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
   1531  1.54   hannken 	vp->v_tag = VT_NON;
   1532  1.54   hannken 	KNOTE(&vp->v_klist, NOTE_REVOKE);
   1533  1.54   hannken 
   1534  1.54   hannken 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1535  1.54   hannken }
   1536  1.54   hannken 
   1537  1.54   hannken /*
   1538   1.1     rmind  * Update the outstanding I/O count and wake any waiters once it reaches zero.
   1539   1.1     rmind  */
   1540   1.1     rmind void
   1541   1.1     rmind vwakeup(struct buf *bp)
   1542   1.1     rmind {
   1543   1.1     rmind 	vnode_t *vp;
   1544   1.1     rmind 
   1545   1.1     rmind 	if ((vp = bp->b_vp) == NULL)
   1546   1.1     rmind 		return;
   1547   1.1     rmind 
   1548   1.9     rmind 	KASSERT(bp->b_objlock == vp->v_interlock);
   1549   1.1     rmind 	KASSERT(mutex_owned(bp->b_objlock));
   1550   1.1     rmind 
   1551   1.1     rmind 	if (--vp->v_numoutput < 0)
   1552  1.11  christos 		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
   1553   1.1     rmind 	if (vp->v_numoutput == 0)
   1554   1.1     rmind 		cv_broadcast(&vp->v_cv);
   1555   1.1     rmind }
   1556   1.1     rmind 
   1557   1.1     rmind /*
   1558  1.35   hannken  * Test a vnode for being or becoming dead.  Returns one of:
   1559  1.35   hannken  * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
   1560  1.35   hannken  * ENOENT: vnode is dead.
   1561  1.35   hannken  * 0:      otherwise.
   1562  1.35   hannken  *
   1563  1.35   hannken  * Whenever this function returns a non-zero value, all future
   1564  1.35   hannken  * calls will also return a non-zero value.
   1565  1.35   hannken  */
   1566  1.35   hannken int
   1567  1.35   hannken vdead_check(struct vnode *vp, int flags)
   1568  1.35   hannken {
   1569  1.35   hannken 
   1570  1.35   hannken 	KASSERT(mutex_owned(vp->v_interlock));
   1571  1.35   hannken 
   1572  1.52   hannken 	if (! ISSET(flags, VDEAD_NOWAIT))
   1573  1.52   hannken 		VSTATE_WAIT_STABLE(vp);
   1574   1.1     rmind 
   1575  1.57   hannken 	if (VSTATE_GET(vp) == VS_RECLAIMING) {
   1576  1.52   hannken 		KASSERT(ISSET(flags, VDEAD_NOWAIT));
   1577  1.52   hannken 		return EBUSY;
   1578  1.57   hannken 	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
   1579  1.52   hannken 		return ENOENT;
   1580  1.52   hannken 	}
   1581   1.1     rmind 
   1582  1.52   hannken 	return 0;
   1583   1.1     rmind }
   1584   1.1     rmind 
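/*
 * A hedged example of how vdead_check() above is meant to be called
 * (the surrounding operation is hypothetical): the caller holds
 * v_interlock, asks whether the vnode is being revoked, and backs out
 * if so.
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		return error;		(EBUSY: dying, ENOENT: dead)
 */
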
   1585   1.1     rmind int
   1586  1.61   hannken vfs_drainvnodes(void)
   1587   1.1     rmind {
   1588  1.63   hannken 	int i, gen;
   1589  1.61   hannken 
   1590  1.63   hannken 	mutex_enter(&vdrain_lock);
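	/*
	 * Wait for vdrain_gen to advance twice.  The first advance may
	 * come from a pass of the vdrain thread that was already under
	 * way; the second is from a pass that started after we began
	 * waiting, so a complete pass is known to have run.
	 */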
   1591  1.63   hannken 	for (i = 0; i < 2; i++) {
   1592  1.63   hannken 		gen = vdrain_gen;
   1593  1.63   hannken 		while (gen == vdrain_gen) {
   1594  1.63   hannken 			cv_broadcast(&vdrain_cv);
   1595  1.63   hannken 			cv_wait(&vdrain_gen_cv, &vdrain_lock);
   1596  1.63   hannken 		}
   1597  1.61   hannken 	}
   1598  1.63   hannken 	mutex_exit(&vdrain_lock);
   1599  1.12   hannken 
   1600  1.63   hannken 	if (numvnodes >= desiredvnodes)
   1601  1.63   hannken 		return EBUSY;
   1602  1.12   hannken 
   1603  1.61   hannken 	if (vcache.hashsize != desiredvnodes)
   1604  1.61   hannken 		vcache_reinit();
   1605  1.36   hannken 
   1606   1.1     rmind 	return 0;
   1607   1.1     rmind }
   1608   1.1     rmind 
   1609   1.1     rmind void
   1610  1.11  christos vnpanic(vnode_t *vp, const char *fmt, ...)
   1611   1.1     rmind {
   1612  1.11  christos 	va_list ap;
   1613  1.11  christos 
   1614   1.1     rmind #ifdef DIAGNOSTIC
   1615   1.1     rmind 	vprint(NULL, vp);
   1616   1.1     rmind #endif
   1617  1.11  christos 	va_start(ap, fmt);
   1618  1.11  christos 	vpanic(fmt, ap);
   1619  1.11  christos 	va_end(ap);
   1620   1.1     rmind }
   1621