      1  1.126.2.1   thorpej /*	$NetBSD: vfs_vnode.c,v 1.126.2.1 2021/04/03 22:29:00 thorpej Exp $	*/
      2        1.1     rmind 
      3        1.1     rmind /*-
      4      1.111        ad  * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
      5        1.1     rmind  * All rights reserved.
      6        1.1     rmind  *
      7        1.1     rmind  * This code is derived from software contributed to The NetBSD Foundation
      8        1.1     rmind  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9        1.1     rmind  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
     10        1.1     rmind  *
     11        1.1     rmind  * Redistribution and use in source and binary forms, with or without
     12        1.1     rmind  * modification, are permitted provided that the following conditions
     13        1.1     rmind  * are met:
     14        1.1     rmind  * 1. Redistributions of source code must retain the above copyright
     15        1.1     rmind  *    notice, this list of conditions and the following disclaimer.
     16        1.1     rmind  * 2. Redistributions in binary form must reproduce the above copyright
     17        1.1     rmind  *    notice, this list of conditions and the following disclaimer in the
     18        1.1     rmind  *    documentation and/or other materials provided with the distribution.
     19        1.1     rmind  *
     20        1.1     rmind  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21        1.1     rmind  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22        1.1     rmind  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23        1.1     rmind  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24        1.1     rmind  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25        1.1     rmind  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26        1.1     rmind  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27        1.1     rmind  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28        1.1     rmind  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29        1.1     rmind  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30        1.1     rmind  * POSSIBILITY OF SUCH DAMAGE.
     31        1.1     rmind  */
     32        1.1     rmind 
     33        1.1     rmind /*
     34        1.1     rmind  * Copyright (c) 1989, 1993
     35        1.1     rmind  *	The Regents of the University of California.  All rights reserved.
     36        1.1     rmind  * (c) UNIX System Laboratories, Inc.
     37        1.1     rmind  * All or some portions of this file are derived from material licensed
     38        1.1     rmind  * to the University of California by American Telephone and Telegraph
     39        1.1     rmind  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     40        1.1     rmind  * the permission of UNIX System Laboratories, Inc.
     41        1.1     rmind  *
     42        1.1     rmind  * Redistribution and use in source and binary forms, with or without
     43        1.1     rmind  * modification, are permitted provided that the following conditions
     44        1.1     rmind  * are met:
     45        1.1     rmind  * 1. Redistributions of source code must retain the above copyright
     46        1.1     rmind  *    notice, this list of conditions and the following disclaimer.
     47        1.1     rmind  * 2. Redistributions in binary form must reproduce the above copyright
     48        1.1     rmind  *    notice, this list of conditions and the following disclaimer in the
     49        1.1     rmind  *    documentation and/or other materials provided with the distribution.
     50        1.1     rmind  * 3. Neither the name of the University nor the names of its contributors
     51        1.1     rmind  *    may be used to endorse or promote products derived from this software
     52        1.1     rmind  *    without specific prior written permission.
     53        1.1     rmind  *
     54        1.1     rmind  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     55        1.1     rmind  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     56        1.1     rmind  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     57        1.1     rmind  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     58        1.1     rmind  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     59        1.1     rmind  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     60        1.1     rmind  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     61        1.1     rmind  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     62        1.1     rmind  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     63        1.1     rmind  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     64        1.1     rmind  * SUCH DAMAGE.
     65        1.1     rmind  *
     66        1.1     rmind  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
     67        1.1     rmind  */
     68        1.1     rmind 
     69        1.1     rmind /*
     70        1.8     rmind  * The vnode cache subsystem.
     71        1.1     rmind  *
     72        1.8     rmind  * Life-cycle
     73        1.1     rmind  *
     74        1.8     rmind  *	Normally, there are two points where new vnodes are created:
     75        1.8     rmind  *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
     76        1.8     rmind  *	starts in one of the following ways:
     77        1.8     rmind  *
     78       1.45   hannken  *	- Allocation, via vcache_get(9) or vcache_new(9).
     79       1.66   hannken  *	- Reclamation of inactive vnode, via vcache_vget(9).
     80        1.8     rmind  *
      81       1.16     rmind  *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
      82       1.16     rmind  *	was another, traditional way.  Currently, only the draining thread
      83       1.16     rmind  *	recycles vnodes.  This behaviour might be revisited.
     84       1.16     rmind  *
     85        1.8     rmind  *	The life-cycle ends when the last reference is dropped, usually
      86        1.8     rmind  *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
      87        1.8     rmind  *	the file system that the vnode is inactive.  Via this call, the file
      88       1.16     rmind  *	system indicates whether the vnode can be recycled (usually, it checks
      89       1.16     rmind  *	its own references, e.g. the link count, or whether the file was removed).
     90        1.8     rmind  *
      91        1.8     rmind  *	Depending on the indication, the vnode can be put onto a free list
      92       1.54   hannken  *	(cache), or cleaned via vcache_reclaim, which calls VOP_RECLAIM(9) to
      93       1.54   hannken  *	disassociate the underlying file system from the vnode, after which
      94       1.54   hannken  *	it is finally destroyed.
     95        1.8     rmind  *
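 *	As an illustrative sketch only (assuming a hypothetical file system
 *	whose cache key is a plain inode number; not taken from any real
 *	consumer), a typical user of the cache obtains a vnode, locks it,
 *	and finally drops its reference:
 *
 *		struct vnode *vp;
 *		ino_t ino = ...;
 *		int error;
 *
 *		error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *		if (error == 0) {
 *			error = vn_lock(vp, LK_EXCLUSIVE);
 *			if (error == 0) {
 *				... operate on the locked, referenced vp ...
 *				vput(vp);	unlocks and releases
 *			} else {
 *				vrele(vp);	releases the reference only
 *			}
 *		}
 *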
     96       1.52   hannken  * Vnode state
     97       1.52   hannken  *
     98       1.52   hannken  *	Vnode is always in one of six states:
     99       1.52   hannken  *	- MARKER	This is a marker vnode to help list traversal.  It
    100       1.52   hannken  *			will never change its state.
    101       1.52   hannken  *	- LOADING	Vnode is associating underlying file system and not
    102       1.52   hannken  *			yet ready to use.
    103       1.94   hannken  *	- LOADED	Vnode has associated underlying file system and is
    104       1.52   hannken  *			ready to use.
    105       1.52   hannken  *	- BLOCKED	Vnode is active but cannot get new references.
    106       1.52   hannken  *	- RECLAIMING	Vnode is disassociating from the underlying file
    107       1.52   hannken  *			system.
    108       1.52   hannken  *	- RECLAIMED	Vnode has disassociated from underlying file system
    109       1.52   hannken  *			and is dead.
    110       1.52   hannken  *
    111       1.52   hannken  *	Valid state changes are:
    112       1.94   hannken  *	LOADING -> LOADED
    113       1.52   hannken  *			Vnode has been initialised in vcache_get() or
    114       1.52   hannken  *			vcache_new() and is ready to use.
    115      1.123        ad  *	BLOCKED -> RECLAIMING
    116       1.52   hannken  *			Vnode starts disassociation from underlying file
    117       1.54   hannken  *			system in vcache_reclaim().
    118       1.52   hannken  *	RECLAIMING -> RECLAIMED
    119       1.52   hannken  *			Vnode finished disassociation from underlying file
    120       1.54   hannken  *			system in vcache_reclaim().
    121       1.94   hannken  *	LOADED -> BLOCKED
    122       1.52   hannken  *			Either vcache_rekey*() is changing the vnode key or
    123       1.52   hannken  *			vrelel() is about to call VOP_INACTIVE().
    124       1.94   hannken  *	BLOCKED -> LOADED
    125       1.52   hannken  *			The block condition is over.
    126       1.52   hannken  *	LOADING -> RECLAIMED
    127       1.52   hannken  *			Either vcache_get() or vcache_new() failed to
    128       1.52   hannken  *			associate the underlying file system or vcache_rekey*()
    129       1.52   hannken  *			drops a vnode used as placeholder.
    130       1.52   hannken  *
    131       1.52   hannken  *	Of these states LOADING, BLOCKED and RECLAIMING are intermediate
    132       1.52   hannken  *	and it is possible to wait for state change.
    133       1.52   hannken  *
    134       1.52   hannken  *	State is protected with v_interlock with one exception:
    135       1.69   hannken  *	to change from LOADING both v_interlock and vcache_lock must be held
    136       1.52   hannken  *	so it is possible to check "state == LOADING" without holding
    137       1.52   hannken  *	v_interlock.  See vcache_get() for details.
    138       1.52   hannken  *
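 *	As a minimal sketch of how these rules are used (this mirrors what
 *	vrecycle() below does), a caller that needs a stable state takes
 *	v_interlock, waits out any intermediate state and only then acts:
 *
 *		mutex_enter(vp->v_interlock);
 *		VSTATE_WAIT_STABLE(vp);
 *		if (VSTATE_GET(vp) == VS_RECLAIMED) {
 *			... the vnode is dead, undo and bail out ...
 *		}
 *		mutex_exit(vp->v_interlock);
 *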
    139        1.8     rmind  * Reference counting
    140        1.8     rmind  *
     141        1.8     rmind  *	A vnode is considered active if its reference count
     142        1.8     rmind  *	(vnode_t::v_usecount) is non-zero.  The count is maintained using
     143        1.8     rmind  *	the vref(9), vrele(9) and vput(9) routines.  Common points holding
     144        1.8     rmind  *	references are e.g. open files, the current working directory and mount points.
    145        1.8     rmind  *
    146      1.123        ad  *	v_usecount is adjusted with atomic operations, however to change
    147      1.123        ad  *	from a non-zero value to zero the interlock must also be held.
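 *
 *	For example (a sketch of the locking consequences only, no new
 *	interfaces): taking an extra reference on an already referenced
 *	vnode is a plain atomic increment, while dropping a reference that
 *	might be the last one goes through v_interlock inside
 *	vrele()/vrelel():
 *
 *		vref(vp);		lock-free, vp already referenced
 *		... use vp ...
 *		vrele(vp);		takes v_interlock if this could be
 *					the last reference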
    148        1.1     rmind  */
    149        1.1     rmind 
    150        1.1     rmind #include <sys/cdefs.h>
    151  1.126.2.1   thorpej __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.126.2.1 2021/04/03 22:29:00 thorpej Exp $");
    152      1.108        ad 
    153      1.109        ad #ifdef _KERNEL_OPT
    154      1.108        ad #include "opt_pax.h"
    155      1.109        ad #endif
    156        1.1     rmind 
    157        1.1     rmind #include <sys/param.h>
    158        1.1     rmind #include <sys/kernel.h>
    159        1.1     rmind 
    160        1.1     rmind #include <sys/atomic.h>
    161        1.1     rmind #include <sys/buf.h>
    162        1.1     rmind #include <sys/conf.h>
    163        1.1     rmind #include <sys/device.h>
    164       1.36   hannken #include <sys/hash.h>
    165        1.1     rmind #include <sys/kauth.h>
    166        1.1     rmind #include <sys/kmem.h>
    167        1.1     rmind #include <sys/kthread.h>
    168        1.1     rmind #include <sys/module.h>
    169        1.1     rmind #include <sys/mount.h>
    170        1.1     rmind #include <sys/namei.h>
    171      1.108        ad #include <sys/pax.h>
    172        1.1     rmind #include <sys/syscallargs.h>
    173        1.1     rmind #include <sys/sysctl.h>
    174        1.1     rmind #include <sys/systm.h>
    175       1.58   hannken #include <sys/vnode_impl.h>
    176        1.1     rmind #include <sys/wapbl.h>
    177       1.24   hannken #include <sys/fstrans.h>
    178        1.1     rmind 
    179        1.1     rmind #include <uvm/uvm.h>
    180        1.1     rmind #include <uvm/uvm_readahead.h>
    181      1.104        ad #include <uvm/uvm_stat.h>
    182        1.1     rmind 
    183       1.23   hannken /* Flags to vrelel. */
    184      1.104        ad #define	VRELEL_ASYNC	0x0001	/* Always defer to vrele thread. */
    185      1.104        ad 
    186      1.104        ad #define	LRU_VRELE	0
    187      1.104        ad #define	LRU_FREE	1
    188      1.104        ad #define	LRU_HOLD	2
    189      1.104        ad #define	LRU_COUNT	3
    190        1.1     rmind 
    191       1.16     rmind /*
    192       1.63   hannken  * There are three lru lists: one holds vnodes waiting for async release,
    193      1.104        ad  * one is for vnodes which have no buffer/page references and one for those
    194      1.104        ad  * which do (i.e.  v_holdcnt is non-zero).  We put the lists into a single,
    195      1.104        ad  * private cache line as vnodes migrate between them while under the same
    196      1.104        ad  * lock (vdrain_lock).
    197       1.63   hannken  */
    198      1.104        ad u_int			numvnodes		__cacheline_aligned;
    199      1.104        ad static vnodelst_t	lru_list[LRU_COUNT]	__cacheline_aligned;
    200       1.63   hannken static kmutex_t		vdrain_lock		__cacheline_aligned;
    201      1.104        ad static kcondvar_t	vdrain_cv;
    202       1.63   hannken static int		vdrain_gen;
    203       1.63   hannken static kcondvar_t	vdrain_gen_cv;
    204       1.63   hannken static bool		vdrain_retry;
    205       1.63   hannken static lwp_t *		vdrain_lwp;
    206       1.57   hannken SLIST_HEAD(hashhead, vnode_impl);
    207       1.69   hannken static kmutex_t		vcache_lock		__cacheline_aligned;
    208      1.104        ad static kcondvar_t	vcache_cv;
    209       1.69   hannken static u_int		vcache_hashsize;
    210       1.69   hannken static u_long		vcache_hashmask;
    211      1.104        ad static struct hashhead	*vcache_hashtab;
    212       1.69   hannken static pool_cache_t	vcache_pool;
    213       1.63   hannken static void		lru_requeue(vnode_t *, vnodelst_t *);
    214       1.63   hannken static vnodelst_t *	lru_which(vnode_t *);
    215       1.63   hannken static vnode_impl_t *	vcache_alloc(void);
    216       1.79   hannken static void		vcache_dealloc(vnode_impl_t *);
    217       1.57   hannken static void		vcache_free(vnode_impl_t *);
    218       1.36   hannken static void		vcache_init(void);
    219       1.36   hannken static void		vcache_reinit(void);
    220       1.54   hannken static void		vcache_reclaim(vnode_t *);
    221      1.107        ad static void		vrelel(vnode_t *, int, int);
    222       1.12   hannken static void		vdrain_thread(void *);
    223       1.11  christos static void		vnpanic(vnode_t *, const char *, ...)
    224       1.18  christos     __printflike(2, 3);
    225        1.1     rmind 
    226        1.1     rmind /* Routines having to do with the management of the vnode table. */
    227       1.44   hannken extern struct mount	*dead_rootmount;
    228        1.1     rmind extern int		(**dead_vnodeop_p)(void *);
    229       1.98   hannken extern int		(**spec_vnodeop_p)(void *);
    230       1.31   hannken extern struct vfsops	dead_vfsops;
    231        1.1     rmind 
    232      1.120        ad /*
    233      1.123        ad  * The high bit of v_usecount is a gate for vcache_tryvget().  It's set
    234      1.123        ad  * only when the vnode state is LOADED.
    235      1.123        ad  */
    236      1.123        ad #define	VUSECOUNT_MASK	0x7fffffff
    237      1.123        ad #define	VUSECOUNT_GATE	0x80000000
    238      1.123        ad 
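/*
 * Illustrative sketch of how the gate is meant to be used (an assumption
 * about the shape of a gated try-get, not a verbatim copy of
 * vcache_tryvget()): a new reference may only be taken while the gate
 * bit is set, e.g. with a compare-and-swap loop:
 *
 *	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
 *		if ((use & VUSECOUNT_GATE) == 0)
 *			return EBUSY;
 *		next = atomic_cas_uint(&vp->v_usecount, use, use + 1);
 *		if (__predict_true(next == use))
 *			return 0;
 *	}
 */
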
    239      1.123        ad /*
    240      1.120        ad  * Return the current usecount of a vnode.
    241      1.120        ad  */
    242      1.120        ad inline int
    243      1.120        ad vrefcnt(struct vnode *vp)
    244      1.120        ad {
    245      1.120        ad 
    246      1.123        ad 	return atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_MASK;
    247      1.120        ad }
    248      1.120        ad 
    249       1.51   hannken /* Vnode state operations and diagnostics. */
    250       1.51   hannken 
    251       1.51   hannken #if defined(DIAGNOSTIC)
    252       1.51   hannken 
    253       1.94   hannken #define VSTATE_VALID(state) \
    254       1.94   hannken 	((state) != VS_ACTIVE && (state) != VS_MARKER)
    255       1.51   hannken #define VSTATE_GET(vp) \
    256       1.51   hannken 	vstate_assert_get((vp), __func__, __LINE__)
    257       1.51   hannken #define VSTATE_CHANGE(vp, from, to) \
    258       1.51   hannken 	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
    259       1.51   hannken #define VSTATE_WAIT_STABLE(vp) \
    260       1.51   hannken 	vstate_assert_wait_stable((vp), __func__, __LINE__)
    261       1.51   hannken 
    262       1.94   hannken void
    263       1.99     joerg _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    264       1.99     joerg     bool has_lock)
    265       1.51   hannken {
    266       1.70   hannken 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    267      1.120        ad 	int refcnt = vrefcnt(vp);
    268       1.51   hannken 
    269       1.99     joerg 	if (!has_lock) {
    270       1.99     joerg 		/*
    271       1.99     joerg 		 * Prevent predictive loads from the CPU, but check the state
     272       1.99     joerg 		 * without locking first.
    273       1.99     joerg 		 */
    274       1.99     joerg 		membar_enter();
    275      1.120        ad 		if (state == VS_ACTIVE && refcnt > 0 &&
    276       1.99     joerg 		    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
    277       1.99     joerg 			return;
    278       1.99     joerg 		if (vip->vi_state == state)
    279       1.99     joerg 			return;
    280       1.99     joerg 		mutex_enter((vp)->v_interlock);
    281       1.99     joerg 	}
    282       1.99     joerg 
    283       1.51   hannken 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    284       1.51   hannken 
    285      1.120        ad 	if ((state == VS_ACTIVE && refcnt > 0 &&
    286       1.99     joerg 	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
    287       1.99     joerg 	    vip->vi_state == state) {
    288       1.99     joerg 		if (!has_lock)
    289       1.99     joerg 			mutex_exit((vp)->v_interlock);
    290       1.94   hannken 		return;
    291       1.99     joerg 	}
    292       1.94   hannken 	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
    293      1.120        ad 	    vstate_name(vip->vi_state), refcnt,
    294       1.94   hannken 	    vstate_name(state), func, line);
    295       1.51   hannken }
    296       1.51   hannken 
    297       1.57   hannken static enum vnode_state
    298       1.51   hannken vstate_assert_get(vnode_t *vp, const char *func, int line)
    299       1.51   hannken {
    300       1.70   hannken 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    301       1.51   hannken 
    302       1.51   hannken 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    303       1.94   hannken 	if (! VSTATE_VALID(vip->vi_state))
    304       1.51   hannken 		vnpanic(vp, "state is %s at %s:%d",
    305       1.70   hannken 		    vstate_name(vip->vi_state), func, line);
    306       1.51   hannken 
    307       1.70   hannken 	return vip->vi_state;
    308       1.51   hannken }
    309       1.51   hannken 
    310       1.52   hannken static void
    311       1.51   hannken vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
    312       1.51   hannken {
    313       1.70   hannken 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    314       1.51   hannken 
    315       1.51   hannken 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    316       1.94   hannken 	if (! VSTATE_VALID(vip->vi_state))
    317       1.51   hannken 		vnpanic(vp, "state is %s at %s:%d",
    318       1.70   hannken 		    vstate_name(vip->vi_state), func, line);
    319       1.51   hannken 
    320       1.94   hannken 	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
    321       1.51   hannken 		cv_wait(&vp->v_cv, vp->v_interlock);
    322       1.51   hannken 
    323       1.94   hannken 	if (! VSTATE_VALID(vip->vi_state))
    324       1.51   hannken 		vnpanic(vp, "state is %s at %s:%d",
    325       1.70   hannken 		    vstate_name(vip->vi_state), func, line);
    326       1.51   hannken }
    327       1.51   hannken 
    328       1.52   hannken static void
    329       1.57   hannken vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    330       1.51   hannken     const char *func, int line)
    331       1.51   hannken {
    332      1.123        ad 	bool gated = (atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_GATE);
    333       1.70   hannken 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    334       1.51   hannken 
    335       1.51   hannken 	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
    336       1.57   hannken 	if (from == VS_LOADING)
    337       1.69   hannken 		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);
    338       1.51   hannken 
    339       1.94   hannken 	if (! VSTATE_VALID(from))
    340       1.51   hannken 		vnpanic(vp, "from is %s at %s:%d",
    341       1.51   hannken 		    vstate_name(from), func, line);
    342       1.94   hannken 	if (! VSTATE_VALID(to))
    343       1.51   hannken 		vnpanic(vp, "to is %s at %s:%d",
    344       1.51   hannken 		    vstate_name(to), func, line);
    345       1.70   hannken 	if (vip->vi_state != from)
    346       1.51   hannken 		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
    347       1.70   hannken 		    vstate_name(vip->vi_state), vstate_name(from), func, line);
    348      1.123        ad 	if ((from == VS_LOADED) != gated)
    349      1.123        ad 		vnpanic(vp, "state is %s, gate %d does not match at %s:%d\n",
    350      1.123        ad 		    vstate_name(vip->vi_state), gated, func, line);
    351      1.123        ad 
    352      1.123        ad 	/* Open/close the gate for vcache_tryvget(). */
    353      1.123        ad 	if (to == VS_LOADED)
    354      1.123        ad 		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
    355      1.123        ad 	else
    356      1.123        ad 		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
    357       1.51   hannken 
    358       1.70   hannken 	vip->vi_state = to;
    359       1.57   hannken 	if (from == VS_LOADING)
    360       1.69   hannken 		cv_broadcast(&vcache_cv);
    361       1.94   hannken 	if (to == VS_LOADED || to == VS_RECLAIMED)
    362       1.51   hannken 		cv_broadcast(&vp->v_cv);
    363       1.51   hannken }
    364       1.51   hannken 
    365       1.51   hannken #else /* defined(DIAGNOSTIC) */
    366       1.51   hannken 
    367       1.51   hannken #define VSTATE_GET(vp) \
    368       1.57   hannken 	(VNODE_TO_VIMPL((vp))->vi_state)
    369       1.51   hannken #define VSTATE_CHANGE(vp, from, to) \
    370       1.51   hannken 	vstate_change((vp), (from), (to))
    371       1.51   hannken #define VSTATE_WAIT_STABLE(vp) \
    372       1.51   hannken 	vstate_wait_stable((vp))
    373       1.94   hannken void
    374      1.100     joerg _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    375      1.100     joerg     bool has_lock)
    376       1.94   hannken {
    377       1.94   hannken 
    378       1.94   hannken }
    379       1.51   hannken 
    380       1.52   hannken static void
    381       1.51   hannken vstate_wait_stable(vnode_t *vp)
    382       1.51   hannken {
    383       1.70   hannken 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    384       1.51   hannken 
    385       1.94   hannken 	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
    386       1.51   hannken 		cv_wait(&vp->v_cv, vp->v_interlock);
    387       1.51   hannken }
    388       1.51   hannken 
    389       1.52   hannken static void
    390       1.57   hannken vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
    391       1.51   hannken {
    392       1.70   hannken 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    393       1.51   hannken 
    394      1.123        ad 	/* Open/close the gate for vcache_tryvget(). */
    395      1.123        ad 	if (to == VS_LOADED)
    396      1.123        ad 		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
    397      1.123        ad 	else
    398      1.123        ad 		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
    399      1.123        ad 
    400       1.70   hannken 	vip->vi_state = to;
    401       1.57   hannken 	if (from == VS_LOADING)
    402       1.69   hannken 		cv_broadcast(&vcache_cv);
    403       1.94   hannken 	if (to == VS_LOADED || to == VS_RECLAIMED)
    404       1.51   hannken 		cv_broadcast(&vp->v_cv);
    405       1.51   hannken }
    406       1.51   hannken 
    407       1.51   hannken #endif /* defined(DIAGNOSTIC) */
    408       1.51   hannken 
    409        1.1     rmind void
    410        1.1     rmind vfs_vnode_sysinit(void)
    411        1.1     rmind {
    412      1.104        ad 	int error __diagused, i;
    413        1.1     rmind 
    414       1.44   hannken 	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
    415       1.44   hannken 	KASSERT(dead_rootmount != NULL);
    416      1.103   hannken 	dead_rootmount->mnt_iflag |= IMNT_MPSAFE;
    417       1.31   hannken 
    418       1.63   hannken 	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
    419      1.104        ad 	for (i = 0; i < LRU_COUNT; i++) {
    420      1.104        ad 		TAILQ_INIT(&lru_list[i]);
    421      1.104        ad 	}
    422       1.36   hannken 	vcache_init();
    423       1.36   hannken 
    424       1.12   hannken 	cv_init(&vdrain_cv, "vdrain");
    425       1.63   hannken 	cv_init(&vdrain_gen_cv, "vdrainwt");
    426       1.12   hannken 	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
    427       1.63   hannken 	    NULL, &vdrain_lwp, "vdrain");
    428       1.47  riastrad 	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
    429        1.1     rmind }
    430        1.1     rmind 
    431        1.1     rmind /*
    432       1.48   hannken  * Allocate a new marker vnode.
    433       1.48   hannken  */
    434       1.48   hannken vnode_t *
    435       1.48   hannken vnalloc_marker(struct mount *mp)
    436       1.48   hannken {
    437       1.70   hannken 	vnode_impl_t *vip;
    438       1.50   hannken 	vnode_t *vp;
    439       1.50   hannken 
    440       1.70   hannken 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
    441       1.70   hannken 	memset(vip, 0, sizeof(*vip));
    442       1.70   hannken 	vp = VIMPL_TO_VNODE(vip);
    443      1.111        ad 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
    444       1.50   hannken 	vp->v_mount = mp;
    445       1.50   hannken 	vp->v_type = VBAD;
    446      1.111        ad 	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
    447       1.70   hannken 	vip->vi_state = VS_MARKER;
    448       1.48   hannken 
    449       1.50   hannken 	return vp;
    450       1.48   hannken }
    451       1.48   hannken 
    452       1.48   hannken /*
    453       1.48   hannken  * Free a marker vnode.
    454       1.48   hannken  */
    455       1.48   hannken void
    456       1.48   hannken vnfree_marker(vnode_t *vp)
    457       1.48   hannken {
    458       1.70   hannken 	vnode_impl_t *vip;
    459       1.48   hannken 
    460       1.70   hannken 	vip = VNODE_TO_VIMPL(vp);
    461       1.70   hannken 	KASSERT(vip->vi_state == VS_MARKER);
    462      1.111        ad 	mutex_obj_free(vp->v_interlock);
    463       1.50   hannken 	uvm_obj_destroy(&vp->v_uobj, true);
    464       1.70   hannken 	pool_cache_put(vcache_pool, vip);
    465       1.48   hannken }
    466       1.48   hannken 
    467       1.48   hannken /*
    468       1.48   hannken  * Test a vnode for being a marker vnode.
    469       1.48   hannken  */
    470       1.48   hannken bool
    471       1.48   hannken vnis_marker(vnode_t *vp)
    472       1.48   hannken {
    473       1.48   hannken 
    474       1.57   hannken 	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
    475       1.48   hannken }
    476       1.48   hannken 
    477       1.48   hannken /*
    478       1.63   hannken  * Return the lru list this node should be on.
    479       1.63   hannken  */
    480       1.63   hannken static vnodelst_t *
    481       1.63   hannken lru_which(vnode_t *vp)
    482       1.63   hannken {
    483       1.63   hannken 
    484       1.63   hannken 	KASSERT(mutex_owned(vp->v_interlock));
    485       1.63   hannken 
    486       1.63   hannken 	if (vp->v_holdcnt > 0)
    487      1.104        ad 		return &lru_list[LRU_HOLD];
    488       1.63   hannken 	else
    489      1.104        ad 		return &lru_list[LRU_FREE];
    490       1.63   hannken }
    491       1.63   hannken 
    492       1.63   hannken /*
     493       1.63   hannken  * Put the vnode at the end of the given list.
     494       1.63   hannken  * Both the current and the new list may be NULL, as happens on vnode alloc/free.
     495       1.63   hannken  * Adjust numvnodes and signal the vdrain thread if there is work.
    496       1.63   hannken  */
    497       1.63   hannken static void
    498       1.63   hannken lru_requeue(vnode_t *vp, vnodelst_t *listhd)
    499       1.63   hannken {
    500       1.70   hannken 	vnode_impl_t *vip;
    501      1.104        ad 	int d;
    502      1.104        ad 
    503      1.104        ad 	/*
    504      1.104        ad 	 * If the vnode is on the correct list, and was put there recently,
    505      1.104        ad 	 * then leave it be, thus avoiding huge cache and lock contention.
    506      1.104        ad 	 */
    507      1.104        ad 	vip = VNODE_TO_VIMPL(vp);
    508      1.104        ad 	if (listhd == vip->vi_lrulisthd &&
    509      1.119      maxv 	    (getticks() - vip->vi_lrulisttm) < hz) {
    510      1.104        ad 	    	return;
    511      1.104        ad 	}
    512       1.63   hannken 
    513       1.63   hannken 	mutex_enter(&vdrain_lock);
    514      1.104        ad 	d = 0;
    515       1.70   hannken 	if (vip->vi_lrulisthd != NULL)
    516       1.70   hannken 		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
    517       1.63   hannken 	else
    518      1.104        ad 		d++;
    519       1.70   hannken 	vip->vi_lrulisthd = listhd;
    520      1.119      maxv 	vip->vi_lrulisttm = getticks();
    521       1.70   hannken 	if (vip->vi_lrulisthd != NULL)
    522       1.70   hannken 		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
    523       1.63   hannken 	else
    524      1.104        ad 		d--;
    525      1.104        ad 	if (d != 0) {
    526      1.104        ad 		/*
    527      1.104        ad 		 * Looks strange?  This is not a bug.  Don't store
    528      1.104        ad 		 * numvnodes unless there is a change - avoid false
    529      1.104        ad 		 * sharing on MP.
    530      1.104        ad 		 */
    531      1.104        ad 		numvnodes += d;
    532      1.104        ad 	}
    533      1.121   hannken 	if ((d > 0 && numvnodes > desiredvnodes) ||
    534      1.121   hannken 	    listhd == &lru_list[LRU_VRELE])
    535      1.121   hannken 		cv_signal(&vdrain_cv);
    536       1.63   hannken 	mutex_exit(&vdrain_lock);
    537       1.63   hannken }
    538       1.63   hannken 
    539       1.63   hannken /*
    540       1.75   hannken  * Release deferred vrele vnodes for this mount.
    541       1.75   hannken  * Called with file system suspended.
    542       1.75   hannken  */
    543       1.75   hannken void
    544       1.75   hannken vrele_flush(struct mount *mp)
    545       1.75   hannken {
    546       1.75   hannken 	vnode_impl_t *vip, *marker;
    547      1.104        ad 	vnode_t *vp;
    548      1.122   hannken 	int when = 0;
    549       1.75   hannken 
    550       1.75   hannken 	KASSERT(fstrans_is_owner(mp));
    551       1.75   hannken 
    552       1.75   hannken 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
    553       1.75   hannken 
    554       1.75   hannken 	mutex_enter(&vdrain_lock);
    555      1.104        ad 	TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);
    556       1.75   hannken 
    557       1.75   hannken 	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
    558      1.104        ad 		TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
    559      1.104        ad 		TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
    560      1.104        ad 		    vi_lrulist);
    561      1.104        ad 		vp = VIMPL_TO_VNODE(vip);
    562      1.104        ad 		if (vnis_marker(vp))
    563       1.75   hannken 			continue;
    564       1.75   hannken 
    565      1.104        ad 		KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
    566       1.75   hannken 		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
    567      1.104        ad 		vip->vi_lrulisthd = &lru_list[LRU_HOLD];
    568      1.119      maxv 		vip->vi_lrulisttm = getticks();
    569       1.75   hannken 		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
    570       1.75   hannken 		mutex_exit(&vdrain_lock);
    571       1.75   hannken 
    572      1.107        ad 		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    573      1.104        ad 		mutex_enter(vp->v_interlock);
    574      1.107        ad 		vrelel(vp, 0, LK_EXCLUSIVE);
    575       1.75   hannken 
    576      1.122   hannken 		if (getticks() > when) {
    577      1.122   hannken 			yield();
    578      1.122   hannken 			when = getticks() + hz / 10;
    579      1.122   hannken 		}
    580      1.122   hannken 
    581       1.75   hannken 		mutex_enter(&vdrain_lock);
    582       1.75   hannken 	}
    583       1.75   hannken 
    584      1.104        ad 	TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
    585       1.75   hannken 	mutex_exit(&vdrain_lock);
    586       1.75   hannken 
    587       1.75   hannken 	vnfree_marker(VIMPL_TO_VNODE(marker));
    588       1.75   hannken }
    589       1.75   hannken 
    590       1.75   hannken /*
    591       1.63   hannken  * Reclaim a cached vnode.  Used from vdrain_thread only.
    592        1.1     rmind  */
    593       1.63   hannken static __inline void
    594       1.63   hannken vdrain_remove(vnode_t *vp)
    595        1.1     rmind {
    596       1.24   hannken 	struct mount *mp;
    597        1.1     rmind 
    598       1.63   hannken 	KASSERT(mutex_owned(&vdrain_lock));
    599       1.24   hannken 
    600       1.63   hannken 	/* Probe usecount (unlocked). */
    601      1.120        ad 	if (vrefcnt(vp) > 0)
    602       1.63   hannken 		return;
    603       1.63   hannken 	/* Try v_interlock -- we lock the wrong direction! */
    604       1.63   hannken 	if (!mutex_tryenter(vp->v_interlock))
    605       1.63   hannken 		return;
    606       1.63   hannken 	/* Probe usecount and state. */
    607      1.120        ad 	if (vrefcnt(vp) > 0 || VSTATE_GET(vp) != VS_LOADED) {
    608       1.63   hannken 		mutex_exit(vp->v_interlock);
    609       1.63   hannken 		return;
    610        1.1     rmind 	}
    611       1.63   hannken 	mp = vp->v_mount;
    612       1.96   hannken 	if (fstrans_start_nowait(mp) != 0) {
    613       1.63   hannken 		mutex_exit(vp->v_interlock);
    614       1.63   hannken 		return;
    615        1.1     rmind 	}
    616       1.63   hannken 	vdrain_retry = true;
    617       1.63   hannken 	mutex_exit(&vdrain_lock);
    618        1.1     rmind 
    619       1.66   hannken 	if (vcache_vget(vp) == 0) {
    620       1.78   hannken 		if (!vrecycle(vp)) {
    621      1.107        ad 			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    622       1.78   hannken 			mutex_enter(vp->v_interlock);
    623      1.107        ad 			vrelel(vp, 0, LK_EXCLUSIVE);
    624       1.78   hannken 		}
    625       1.60   hannken 	}
    626       1.24   hannken 	fstrans_done(mp);
    627       1.12   hannken 
    628       1.63   hannken 	mutex_enter(&vdrain_lock);
    629        1.1     rmind }
    630        1.1     rmind 
    631        1.1     rmind /*
    632       1.63   hannken  * Release a cached vnode.  Used from vdrain_thread only.
    633       1.12   hannken  */
    634       1.63   hannken static __inline void
    635       1.63   hannken vdrain_vrele(vnode_t *vp)
    636       1.12   hannken {
    637       1.70   hannken 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
    638       1.63   hannken 	struct mount *mp;
    639       1.12   hannken 
    640       1.63   hannken 	KASSERT(mutex_owned(&vdrain_lock));
    641       1.12   hannken 
    642       1.63   hannken 	mp = vp->v_mount;
    643       1.96   hannken 	if (fstrans_start_nowait(mp) != 0)
    644       1.63   hannken 		return;
    645       1.63   hannken 
    646       1.64   hannken 	/*
    647       1.64   hannken 	 * First remove the vnode from the vrele list.
      648       1.64   hannken 	 * Put it on the last lru list; the last vrele()
    649       1.64   hannken 	 * will put it back onto the right list before
    650      1.120        ad 	 * its usecount reaches zero.
    651       1.64   hannken 	 */
    652      1.104        ad 	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
    653       1.70   hannken 	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
    654      1.104        ad 	vip->vi_lrulisthd = &lru_list[LRU_HOLD];
    655      1.119      maxv 	vip->vi_lrulisttm = getticks();
    656       1.70   hannken 	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
    657       1.63   hannken 
    658       1.63   hannken 	vdrain_retry = true;
    659       1.63   hannken 	mutex_exit(&vdrain_lock);
    660       1.63   hannken 
    661      1.107        ad 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
    662       1.64   hannken 	mutex_enter(vp->v_interlock);
    663      1.107        ad 	vrelel(vp, 0, LK_EXCLUSIVE);
    664       1.63   hannken 	fstrans_done(mp);
    665       1.63   hannken 
    666       1.63   hannken 	mutex_enter(&vdrain_lock);
    667       1.12   hannken }
    668       1.12   hannken 
    669       1.12   hannken /*
    670       1.63   hannken  * Helper thread to keep the number of vnodes below desiredvnodes
    671       1.63   hannken  * and release vnodes from asynchronous vrele.
    672        1.1     rmind  */
    673       1.63   hannken static void
    674       1.63   hannken vdrain_thread(void *cookie)
    675        1.1     rmind {
    676       1.63   hannken 	int i;
    677       1.63   hannken 	u_int target;
    678       1.70   hannken 	vnode_impl_t *vip, *marker;
    679       1.63   hannken 
    680       1.63   hannken 	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
    681       1.63   hannken 
    682       1.63   hannken 	mutex_enter(&vdrain_lock);
    683       1.63   hannken 
    684       1.63   hannken 	for (;;) {
    685       1.63   hannken 		vdrain_retry = false;
    686       1.63   hannken 		target = desiredvnodes - desiredvnodes/10;
    687        1.1     rmind 
    688      1.104        ad 		for (i = 0; i < LRU_COUNT; i++) {
    689      1.104        ad 			TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
    690       1.70   hannken 			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
    691      1.104        ad 				TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
    692      1.104        ad 				TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
    693       1.63   hannken 				    vi_lrulist);
    694       1.75   hannken 				if (vnis_marker(VIMPL_TO_VNODE(vip)))
    695       1.75   hannken 					continue;
    696      1.104        ad 				if (i == LRU_VRELE)
    697       1.70   hannken 					vdrain_vrele(VIMPL_TO_VNODE(vip));
    698       1.63   hannken 				else if (numvnodes < target)
    699       1.63   hannken 					break;
    700       1.63   hannken 				else
    701       1.70   hannken 					vdrain_remove(VIMPL_TO_VNODE(vip));
    702       1.63   hannken 			}
    703      1.104        ad 			TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
    704       1.63   hannken 		}
    705        1.1     rmind 
    706       1.63   hannken 		if (vdrain_retry) {
    707      1.121   hannken 			kpause("vdrainrt", false, 1, &vdrain_lock);
    708       1.63   hannken 		} else {
    709       1.63   hannken 			vdrain_gen++;
    710       1.63   hannken 			cv_broadcast(&vdrain_gen_cv);
    711       1.63   hannken 			cv_wait(&vdrain_cv, &vdrain_lock);
    712       1.63   hannken 		}
    713        1.1     rmind 	}
    714        1.1     rmind }
    715        1.1     rmind 
    716        1.1     rmind /*
     717      1.112        ad  * Try to drop a reference on a vnode.  Abort if we are releasing the
    718      1.112        ad  * last reference.  Note: this _must_ succeed if not the last reference.
    719      1.112        ad  */
    720      1.112        ad static bool
    721      1.112        ad vtryrele(vnode_t *vp)
    722      1.112        ad {
    723      1.112        ad 	u_int use, next;
    724      1.112        ad 
    725      1.112        ad 	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
    726      1.123        ad 		if (__predict_false((use & VUSECOUNT_MASK) == 1)) {
    727      1.112        ad 			return false;
    728      1.112        ad 		}
    729      1.123        ad 		KASSERT((use & VUSECOUNT_MASK) > 1);
    730      1.112        ad 		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
    731      1.112        ad 		if (__predict_true(next == use)) {
    732      1.112        ad 			return true;
    733      1.112        ad 		}
    734      1.112        ad 	}
    735      1.112        ad }
    736      1.112        ad 
    737      1.112        ad /*
    738        1.4     rmind  * vput: unlock and release the reference.
    739        1.1     rmind  */
    740        1.1     rmind void
    741        1.1     rmind vput(vnode_t *vp)
    742        1.1     rmind {
    743      1.107        ad 	int lktype;
    744        1.1     rmind 
    745      1.112        ad 	/*
    746      1.120        ad 	 * Do an unlocked check of the usecount.  If it looks like we're not
    747      1.112        ad 	 * about to drop the last reference, then unlock the vnode and try
    748      1.112        ad 	 * to drop the reference.  If it ends up being the last reference
    749      1.112        ad 	 * after all, vrelel() can fix it all up.  Most of the time this
    750      1.112        ad 	 * will all go to plan.
    751      1.112        ad 	 */
    752      1.120        ad 	if (vrefcnt(vp) > 1) {
    753      1.112        ad 		VOP_UNLOCK(vp);
    754      1.112        ad 		if (vtryrele(vp)) {
    755      1.112        ad 			return;
    756      1.112        ad 		}
    757      1.112        ad 		lktype = LK_NONE;
    758      1.112        ad 	} else if ((vp->v_vflag & VV_LOCKSWORK) == 0) {
    759      1.126  riastrad 		VOP_UNLOCK(vp);
    760      1.126  riastrad 		lktype = LK_NONE;
    761      1.107        ad 	} else {
    762      1.107        ad 		lktype = VOP_ISLOCKED(vp);
    763      1.107        ad 		KASSERT(lktype != LK_NONE);
    764      1.107        ad 	}
    765      1.107        ad 	mutex_enter(vp->v_interlock);
    766      1.107        ad 	vrelel(vp, 0, lktype);
    767        1.1     rmind }
    768        1.1     rmind 
    769        1.1     rmind /*
    770        1.1     rmind  * Vnode release.  If reference count drops to zero, call inactive
    771        1.1     rmind  * routine and either return to freelist or free to the pool.
    772        1.1     rmind  */
    773       1.23   hannken static void
    774      1.107        ad vrelel(vnode_t *vp, int flags, int lktype)
    775        1.1     rmind {
    776      1.104        ad 	const bool async = ((flags & VRELEL_ASYNC) != 0);
    777        1.1     rmind 	bool recycle, defer;
    778        1.1     rmind 	int error;
    779        1.1     rmind 
    780        1.9     rmind 	KASSERT(mutex_owned(vp->v_interlock));
    781        1.1     rmind 
    782        1.1     rmind 	if (__predict_false(vp->v_op == dead_vnodeop_p &&
    783       1.57   hannken 	    VSTATE_GET(vp) != VS_RECLAIMED)) {
    784       1.11  christos 		vnpanic(vp, "dead but not clean");
    785        1.1     rmind 	}
    786        1.1     rmind 
    787        1.1     rmind 	/*
    788      1.112        ad 	 * If not the last reference, just drop the reference count and
    789      1.112        ad 	 * unlock.  VOP_UNLOCK() is called here without a vnode reference
     790      1.112        ad 	 * held, but that is OK as the hold of v_interlock will stop the vnode
    791      1.112        ad 	 * from disappearing.
    792        1.1     rmind 	 */
    793      1.112        ad 	if (vtryrele(vp)) {
    794      1.107        ad 		if (lktype != LK_NONE) {
    795      1.107        ad 			VOP_UNLOCK(vp);
    796      1.107        ad 		}
    797        1.9     rmind 		mutex_exit(vp->v_interlock);
    798        1.1     rmind 		return;
    799        1.1     rmind 	}
    800      1.120        ad 	if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) {
    801       1.11  christos 		vnpanic(vp, "%s: bad ref count", __func__);
    802        1.1     rmind 	}
    803        1.1     rmind 
    804       1.15   hannken #ifdef DIAGNOSTIC
    805       1.15   hannken 	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
    806       1.15   hannken 	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
    807       1.15   hannken 		vprint("vrelel: missing VOP_CLOSE()", vp);
    808       1.15   hannken 	}
    809       1.15   hannken #endif
    810       1.15   hannken 
    811        1.1     rmind 	/*
    812       1.79   hannken 	 * First try to get the vnode locked for VOP_INACTIVE().
     813       1.79   hannken 	 * Defer vnode release to vdrain_thread if the caller requests
     814       1.79   hannken 	 * it explicitly, is the pagedaemon, or if the lock attempt failed.
    815        1.1     rmind 	 */
    816      1.107        ad 	defer = false;
    817       1.79   hannken 	if ((curlwp == uvm.pagedaemon_lwp) || async) {
    818       1.79   hannken 		defer = true;
    819      1.107        ad 	} else if (lktype == LK_SHARED) {
    820      1.107        ad 		/* Excellent chance of getting, if the last ref. */
    821      1.107        ad 		error = vn_lock(vp, LK_UPGRADE | LK_RETRY |
    822      1.107        ad 		    LK_NOWAIT);
    823      1.107        ad 		if (error != 0) {
    824      1.107        ad 			defer = true;
    825      1.107        ad 		} else {
    826      1.107        ad 			lktype = LK_EXCLUSIVE;
    827      1.107        ad 		}
    828      1.107        ad 	} else if (lktype == LK_NONE) {
    829      1.107        ad 		/* Excellent chance of getting, if the last ref. */
    830      1.107        ad 		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY |
    831      1.107        ad 		    LK_NOWAIT);
    832      1.107        ad 		if (error != 0) {
    833      1.107        ad 			defer = true;
    834      1.107        ad 		} else {
    835      1.107        ad 			lktype = LK_EXCLUSIVE;
    836      1.107        ad 		}
    837       1.79   hannken 	}
    838       1.79   hannken 	KASSERT(mutex_owned(vp->v_interlock));
    839       1.79   hannken 	if (defer) {
    840        1.1     rmind 		/*
    841       1.79   hannken 		 * Defer reclaim to the kthread; it's not safe to
    842       1.79   hannken 		 * clean it here.  We donate it our last reference.
    843        1.1     rmind 		 */
    844      1.107        ad 		if (lktype != LK_NONE) {
    845      1.107        ad 			VOP_UNLOCK(vp);
    846      1.107        ad 		}
    847      1.104        ad 		lru_requeue(vp, &lru_list[LRU_VRELE]);
    848       1.79   hannken 		mutex_exit(vp->v_interlock);
    849       1.79   hannken 		return;
    850       1.79   hannken 	}
    851      1.107        ad 	KASSERT(lktype == LK_EXCLUSIVE);
    852       1.30   hannken 
    853       1.79   hannken 	/*
    854       1.79   hannken 	 * If not clean, deactivate the vnode, but preserve
    855       1.79   hannken 	 * our reference across the call to VOP_INACTIVE().
    856       1.79   hannken 	 */
    857       1.79   hannken 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
    858       1.79   hannken 		VOP_UNLOCK(vp);
    859       1.79   hannken 	} else {
    860        1.1     rmind 		/*
    861      1.123        ad 		 * If VOP_INACTIVE() indicates that the file has been
    862      1.123        ad 		 * deleted, then recycle the vnode.
    863        1.1     rmind 		 *
    864       1.82  riastrad 		 * Note that VOP_INACTIVE() will not drop the vnode lock.
    865        1.1     rmind 		 */
    866      1.117        ad 		mutex_exit(vp->v_interlock);
    867       1.79   hannken 		recycle = false;
    868        1.1     rmind 		VOP_INACTIVE(vp, &recycle);
    869      1.113        ad 		rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
    870        1.9     rmind 		mutex_enter(vp->v_interlock);
    871      1.123        ad 
    872      1.123        ad 		for (;;) {
    873      1.123        ad 			/*
    874      1.123        ad 			 * If no longer the last reference, try to shed it.
    875      1.123        ad 			 * On success, drop the interlock last thereby
    876      1.123        ad 			 * preventing the vnode being freed behind us.
    877      1.123        ad 			 */
    878      1.123        ad 			if (vtryrele(vp)) {
    879      1.123        ad 				VOP_UNLOCK(vp);
    880      1.123        ad 				rw_exit(vp->v_uobj.vmobjlock);
    881      1.123        ad 				mutex_exit(vp->v_interlock);
    882      1.123        ad 				return;
    883      1.123        ad 			}
    884      1.123        ad 			/*
    885      1.123        ad 			 * Block new references then check again to see if a
    886      1.123        ad 			 * new reference was acquired in the meantime.  If
    887      1.123        ad 			 * it was, restore the vnode state and try again.
    888      1.123        ad 			 */
    889      1.123        ad 			if (recycle) {
    890      1.123        ad 				VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
    891      1.123        ad 				if (vrefcnt(vp) != 1) {
    892      1.123        ad 					VSTATE_CHANGE(vp, VS_BLOCKED,
    893      1.123        ad 					    VS_LOADED);
    894      1.123        ad 					continue;
    895      1.123        ad 				}
    896      1.123        ad 			}
    897      1.123        ad 			break;
    898      1.123        ad  		}
    899        1.1     rmind 
    900      1.113        ad 		/* Take care of space accounting. */
    901      1.125        ad 		if ((vp->v_iflag & VI_EXECMAP) != 0) {
    902      1.105        ad 			cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
    903        1.1     rmind 		}
    904        1.1     rmind 		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
    905        1.1     rmind 		vp->v_vflag &= ~VV_MAPPED;
    906      1.113        ad 		rw_exit(vp->v_uobj.vmobjlock);
    907        1.1     rmind 
    908        1.1     rmind 		/*
    909        1.1     rmind 		 * Recycle the vnode if the file is now unused (unlinked),
    910        1.1     rmind 		 * otherwise just free it.
    911        1.1     rmind 		 */
    912        1.1     rmind 		if (recycle) {
    913      1.123        ad 			VSTATE_ASSERT(vp, VS_BLOCKED);
    914       1.83  riastrad 			/* vcache_reclaim drops the lock. */
    915       1.54   hannken 			vcache_reclaim(vp);
    916      1.118        ad 		} else {
    917      1.118        ad 			VOP_UNLOCK(vp);
    918        1.1     rmind 		}
    919      1.120        ad 		KASSERT(vrefcnt(vp) > 0);
    920        1.1     rmind 	}
    921        1.1     rmind 
    922      1.123        ad 	if ((atomic_dec_uint_nv(&vp->v_usecount) & VUSECOUNT_MASK) != 0) {
    923        1.1     rmind 		/* Gained another reference while being reclaimed. */
    924        1.9     rmind 		mutex_exit(vp->v_interlock);
    925        1.1     rmind 		return;
    926        1.1     rmind 	}
    927        1.1     rmind 
    928       1.67   hannken 	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
    929        1.1     rmind 		/*
    930        1.1     rmind 		 * It's clean so destroy it.  It isn't referenced
    931        1.1     rmind 		 * anywhere since it has been reclaimed.
    932        1.1     rmind 		 */
    933       1.57   hannken 		vcache_free(VNODE_TO_VIMPL(vp));
    934        1.1     rmind 	} else {
    935        1.1     rmind 		/*
    936        1.1     rmind 		 * Otherwise, put it back onto the freelist.  It
    937        1.1     rmind 		 * can't be destroyed while still associated with
    938        1.1     rmind 		 * a file system.
    939        1.1     rmind 		 */
    940       1.63   hannken 		lru_requeue(vp, lru_which(vp));
    941        1.9     rmind 		mutex_exit(vp->v_interlock);
    942        1.1     rmind 	}
    943        1.1     rmind }
    944        1.1     rmind 
    945        1.1     rmind void
    946        1.1     rmind vrele(vnode_t *vp)
    947        1.1     rmind {
    948        1.1     rmind 
    949      1.112        ad 	if (vtryrele(vp)) {
    950      1.112        ad 		return;
    951      1.112        ad 	}
    952        1.9     rmind 	mutex_enter(vp->v_interlock);
    953      1.107        ad 	vrelel(vp, 0, LK_NONE);
    954        1.1     rmind }
    955        1.1     rmind 
    956        1.1     rmind /*
     957        1.1     rmind  * Asynchronous vnode release: the vnode is released in a different context.
    958        1.1     rmind  */
    959        1.1     rmind void
    960        1.1     rmind vrele_async(vnode_t *vp)
    961        1.1     rmind {
    962        1.1     rmind 
    963      1.112        ad 	if (vtryrele(vp)) {
    964      1.112        ad 		return;
    965      1.112        ad 	}
    966        1.9     rmind 	mutex_enter(vp->v_interlock);
    967      1.107        ad 	vrelel(vp, VRELEL_ASYNC, LK_NONE);
    968        1.1     rmind }
    969        1.1     rmind 
    970        1.1     rmind /*
    971        1.1     rmind  * Vnode reference, where a reference is already held by some other
    972        1.1     rmind  * object (for example, a file structure).
    973      1.112        ad  *
    974      1.123        ad  * NB: lockless code sequences may rely on this not blocking.
    975        1.1     rmind  */
    976        1.1     rmind void
    977        1.1     rmind vref(vnode_t *vp)
    978        1.1     rmind {
    979        1.1     rmind 
    980      1.120        ad 	KASSERT(vrefcnt(vp) > 0);
    981        1.1     rmind 
    982      1.112        ad 	atomic_inc_uint(&vp->v_usecount);
    983        1.1     rmind }
    984        1.1     rmind 
    985        1.1     rmind /*
    986        1.1     rmind  * Page or buffer structure gets a reference.
    987        1.1     rmind  * Called with v_interlock held.
    988        1.1     rmind  */
    989        1.1     rmind void
    990        1.1     rmind vholdl(vnode_t *vp)
    991        1.1     rmind {
    992        1.1     rmind 
    993        1.9     rmind 	KASSERT(mutex_owned(vp->v_interlock));
    994        1.1     rmind 
    995      1.120        ad 	if (vp->v_holdcnt++ == 0 && vrefcnt(vp) == 0)
    996       1.63   hannken 		lru_requeue(vp, lru_which(vp));
    997        1.1     rmind }
    998        1.1     rmind 
    999        1.1     rmind /*
   1000      1.112        ad  * Page or buffer structure gets a reference.
   1001      1.112        ad  */
   1002      1.112        ad void
   1003      1.112        ad vhold(vnode_t *vp)
   1004      1.112        ad {
   1005      1.112        ad 
   1006      1.112        ad 	mutex_enter(vp->v_interlock);
   1007      1.112        ad 	vholdl(vp);
   1008      1.112        ad 	mutex_exit(vp->v_interlock);
   1009      1.112        ad }
   1010      1.112        ad 
   1011      1.112        ad /*
   1012        1.1     rmind  * Page or buffer structure frees a reference.
   1013        1.1     rmind  * Called with v_interlock held.
   1014        1.1     rmind  */
   1015        1.1     rmind void
   1016        1.1     rmind holdrelel(vnode_t *vp)
   1017        1.1     rmind {
   1018        1.1     rmind 
   1019        1.9     rmind 	KASSERT(mutex_owned(vp->v_interlock));
   1020        1.1     rmind 
   1021        1.1     rmind 	if (vp->v_holdcnt <= 0) {
   1022       1.11  christos 		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
   1023        1.1     rmind 	}
   1024        1.1     rmind 
   1025        1.1     rmind 	vp->v_holdcnt--;
   1026      1.120        ad 	if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
   1027       1.63   hannken 		lru_requeue(vp, lru_which(vp));
   1028        1.1     rmind }
   1029        1.1     rmind 
   1030        1.1     rmind /*
   1031      1.112        ad  * Page or buffer structure frees a reference.
   1032      1.112        ad  */
   1033      1.112        ad void
   1034      1.112        ad holdrele(vnode_t *vp)
   1035      1.112        ad {
   1036      1.112        ad 
   1037      1.112        ad 	mutex_enter(vp->v_interlock);
   1038      1.112        ad 	holdrelel(vp);
   1039      1.112        ad 	mutex_exit(vp->v_interlock);
   1040      1.112        ad }
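
/*
 * Editorial example, not part of the original source: a hold count keeps a
 * vnode from being freed while some page or buffer still points at it, even
 * after the use count has dropped to zero.  A hypothetical cache attaching
 * and detaching a buffer might bracket the association like this:
 *
 *	vhold(vp);			while bp->b_vp references vp
 *	...
 *	holdrele(vp);			when the buffer is disassociated
 *
 * The vholdl()/holdrelel() variants do the same with v_interlock already held.
 */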
   1041      1.112        ad 
   1042      1.112        ad /*
   1043       1.33   hannken  * Recycle an unused vnode if caller holds the last reference.
   1044        1.1     rmind  */
   1045       1.33   hannken bool
   1046       1.33   hannken vrecycle(vnode_t *vp)
   1047        1.1     rmind {
   1048       1.60   hannken 	int error __diagused;
   1049       1.46   hannken 
   1050       1.33   hannken 	mutex_enter(vp->v_interlock);
   1051       1.33   hannken 
   1052      1.123        ad 	/* If the vnode is already clean we're done. */
   1053       1.60   hannken 	VSTATE_WAIT_STABLE(vp);
   1054       1.94   hannken 	if (VSTATE_GET(vp) != VS_LOADED) {
   1055       1.60   hannken 		VSTATE_ASSERT(vp, VS_RECLAIMED);
   1056      1.107        ad 		vrelel(vp, 0, LK_NONE);
   1057       1.60   hannken 		return true;
   1058       1.60   hannken 	}
   1059       1.60   hannken 
   1060       1.60   hannken 	/* Prevent further references until the vnode is locked. */
   1061       1.94   hannken 	VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
   1062      1.123        ad 
   1063      1.123        ad 	/* Make sure we hold the last reference. */
   1064      1.123        ad 	if (vrefcnt(vp) != 1) {
   1065      1.123        ad 		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
   1066      1.123        ad 		mutex_exit(vp->v_interlock);
   1067      1.123        ad 		return false;
   1068      1.123        ad 	}
   1069      1.123        ad 
   1070       1.60   hannken 	mutex_exit(vp->v_interlock);
   1071       1.60   hannken 
   1072       1.73   hannken 	/*
   1073       1.73   hannken 	 * On a leaf file system this lock will always succeed as we hold
   1074       1.73   hannken 	 * the last reference and prevent further references.
   1075       1.73   hannken 	 * On layered file systems waiting for the lock would open a can of
   1076       1.73   hannken 	 * deadlocks as the lower vnodes may have other active references.
   1077       1.73   hannken 	 */
   1078       1.76   hannken 	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
   1079       1.60   hannken 
   1080       1.60   hannken 	mutex_enter(vp->v_interlock);
   1081       1.73   hannken 	if (error) {
   1082      1.123        ad 		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
   1083       1.73   hannken 		mutex_exit(vp->v_interlock);
   1084       1.73   hannken 		return false;
   1085       1.73   hannken 	}
   1086       1.73   hannken 
   1087      1.120        ad 	KASSERT(vrefcnt(vp) == 1);
   1088       1.54   hannken 	vcache_reclaim(vp);
   1089      1.107        ad 	vrelel(vp, 0, LK_NONE);
   1090       1.60   hannken 
   1091       1.33   hannken 	return true;
   1092        1.1     rmind }
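
/*
 * Editorial example, not part of the original source: a sketch of how a
 * hypothetical file system might use vrecycle() to dispose of a vnode it
 * believes it holds the last reference to.  Per the code above, a true
 * return means the vnode was reclaimed and the caller's reference consumed;
 * a false return leaves the reference with the caller.
 *
 *	if (vrecycle(vp)) {
 *		reference consumed, nothing left to do
 *	} else {
 *		another reference exists (or the trylock failed on a
 *		layered file system), so drop ours normally:
 *		vrele(vp);
 *	}
 */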
   1093        1.1     rmind 
   1094        1.1     rmind /*
   1095       1.92   hannken  * Helper for vrevoke() to propagate suspension from lastmp
   1096       1.92   hannken  * to thismp.  Both args may be NULL.
   1097       1.92   hannken  * Returns the currently suspended file system or NULL.
   1098       1.92   hannken  */
   1099       1.92   hannken static struct mount *
   1100       1.92   hannken vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
   1101       1.92   hannken {
   1102       1.92   hannken 	int error;
   1103       1.92   hannken 
   1104       1.92   hannken 	if (lastmp == thismp)
   1105       1.92   hannken 		return thismp;
   1106       1.92   hannken 
   1107       1.92   hannken 	if (lastmp != NULL)
   1108       1.92   hannken 		vfs_resume(lastmp);
   1109       1.92   hannken 
   1110       1.92   hannken 	if (thismp == NULL)
   1111       1.92   hannken 		return NULL;
   1112       1.92   hannken 
   1113       1.92   hannken 	do {
   1114       1.92   hannken 		error = vfs_suspend(thismp, 0);
   1115       1.92   hannken 	} while (error == EINTR || error == ERESTART);
   1116       1.92   hannken 
   1117       1.92   hannken 	if (error == 0)
   1118       1.92   hannken 		return thismp;
   1119       1.92   hannken 
   1120       1.92   hannken 	KASSERT(error == EOPNOTSUPP);
   1121       1.92   hannken 	return NULL;
   1122       1.92   hannken }
   1123       1.92   hannken 
   1124       1.92   hannken /*
   1125        1.1     rmind  * Eliminate all activity associated with the requested vnode
   1126        1.1     rmind  * and with all vnodes aliased to the requested vnode.
   1127        1.1     rmind  */
   1128        1.1     rmind void
   1129        1.1     rmind vrevoke(vnode_t *vp)
   1130        1.1     rmind {
   1131       1.88   hannken 	struct mount *mp;
   1132       1.19   hannken 	vnode_t *vq;
   1133        1.1     rmind 	enum vtype type;
   1134        1.1     rmind 	dev_t dev;
   1135        1.1     rmind 
   1136      1.120        ad 	KASSERT(vrefcnt(vp) > 0);
   1137        1.1     rmind 
   1138       1.92   hannken 	mp = vrevoke_suspend_next(NULL, vp->v_mount);
   1139       1.88   hannken 
   1140        1.9     rmind 	mutex_enter(vp->v_interlock);
   1141       1.52   hannken 	VSTATE_WAIT_STABLE(vp);
   1142       1.57   hannken 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
   1143        1.9     rmind 		mutex_exit(vp->v_interlock);
   1144        1.1     rmind 	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
   1145      1.112        ad 		atomic_inc_uint(&vp->v_usecount);
   1146       1.29  christos 		mutex_exit(vp->v_interlock);
   1147       1.29  christos 		vgone(vp);
   1148        1.1     rmind 	} else {
   1149        1.1     rmind 		dev = vp->v_rdev;
   1150        1.1     rmind 		type = vp->v_type;
   1151        1.9     rmind 		mutex_exit(vp->v_interlock);
   1152        1.1     rmind 
   1153       1.88   hannken 		while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
   1154       1.92   hannken 			mp = vrevoke_suspend_next(mp, vq->v_mount);
   1155       1.88   hannken 			vgone(vq);
   1156       1.88   hannken 		}
   1157        1.1     rmind 	}
   1158       1.92   hannken 	vrevoke_suspend_next(mp, NULL);
   1159        1.1     rmind }
   1160        1.1     rmind 
   1161        1.1     rmind /*
   1162        1.1     rmind  * Eliminate all activity associated with a vnode in preparation for
   1163        1.1     rmind  * reuse.  Drops a reference from the vnode.
   1164        1.1     rmind  */
   1165        1.1     rmind void
   1166        1.1     rmind vgone(vnode_t *vp)
   1167        1.1     rmind {
   1168      1.107        ad 	int lktype;
   1169        1.1     rmind 
   1170      1.103   hannken 	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
   1171       1.93   hannken 
   1172       1.76   hannken 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   1173      1.107        ad 	lktype = LK_EXCLUSIVE;
   1174        1.9     rmind 	mutex_enter(vp->v_interlock);
   1175       1.76   hannken 	VSTATE_WAIT_STABLE(vp);
   1176      1.107        ad 	if (VSTATE_GET(vp) == VS_LOADED) {
   1177      1.123        ad 		VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
   1178       1.76   hannken 		vcache_reclaim(vp);
   1179      1.107        ad 		lktype = LK_NONE;
   1180      1.107        ad 	}
   1181       1.76   hannken 	VSTATE_ASSERT(vp, VS_RECLAIMED);
   1182      1.107        ad 	vrelel(vp, 0, lktype);
   1183        1.1     rmind }
   1184        1.1     rmind 
   1185       1.36   hannken static inline uint32_t
   1186       1.36   hannken vcache_hash(const struct vcache_key *key)
   1187       1.36   hannken {
   1188       1.36   hannken 	uint32_t hash = HASH32_BUF_INIT;
   1189       1.36   hannken 
   1190       1.97   hannken 	KASSERT(key->vk_key_len > 0);
   1191       1.97   hannken 
   1192       1.36   hannken 	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
   1193       1.36   hannken 	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
   1194       1.36   hannken 	return hash;
   1195       1.36   hannken }
   1196       1.36   hannken 
   1197  1.126.2.1   thorpej static int
   1198  1.126.2.1   thorpej vcache_stats(struct hashstat_sysctl *hs, bool fill)
   1199  1.126.2.1   thorpej {
   1200  1.126.2.1   thorpej 	vnode_impl_t *vip;
   1201  1.126.2.1   thorpej 	uint64_t chain;
   1202  1.126.2.1   thorpej 
   1203  1.126.2.1   thorpej 	strlcpy(hs->hash_name, "vcache", sizeof(hs->hash_name));
   1204  1.126.2.1   thorpej 	strlcpy(hs->hash_desc, "vnode cache hash", sizeof(hs->hash_desc));
   1205  1.126.2.1   thorpej 	if (!fill)
   1206  1.126.2.1   thorpej 		return 0;
   1207  1.126.2.1   thorpej 
   1208  1.126.2.1   thorpej 	hs->hash_size = vcache_hashmask + 1;
   1209  1.126.2.1   thorpej 
   1210  1.126.2.1   thorpej 	for (size_t i = 0; i < hs->hash_size; i++) {
   1211  1.126.2.1   thorpej 		chain = 0;
   1212  1.126.2.1   thorpej 		mutex_enter(&vcache_lock);
   1213  1.126.2.1   thorpej 		SLIST_FOREACH(vip, &vcache_hashtab[i], vi_hash) {
   1214  1.126.2.1   thorpej 			chain++;
   1215  1.126.2.1   thorpej 		}
   1216  1.126.2.1   thorpej 		mutex_exit(&vcache_lock);
   1217  1.126.2.1   thorpej 		if (chain > 0) {
   1218  1.126.2.1   thorpej 			hs->hash_used++;
   1219  1.126.2.1   thorpej 			hs->hash_items += chain;
   1220  1.126.2.1   thorpej 			if (chain > hs->hash_maxchain)
   1221  1.126.2.1   thorpej 				hs->hash_maxchain = chain;
   1222  1.126.2.1   thorpej 		}
   1223  1.126.2.1   thorpej 		preempt_point();
   1224  1.126.2.1   thorpej 	}
   1225  1.126.2.1   thorpej 
   1226  1.126.2.1   thorpej 	return 0;
   1227  1.126.2.1   thorpej }
   1228  1.126.2.1   thorpej 
   1229       1.36   hannken static void
   1230       1.36   hannken vcache_init(void)
   1231       1.36   hannken {
   1232       1.36   hannken 
   1233      1.112        ad 	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
   1234      1.112        ad 	    0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
   1235       1.69   hannken 	KASSERT(vcache_pool != NULL);
   1236       1.69   hannken 	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
   1237       1.69   hannken 	cv_init(&vcache_cv, "vcache");
   1238       1.69   hannken 	vcache_hashsize = desiredvnodes;
   1239       1.69   hannken 	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
   1240       1.69   hannken 	    &vcache_hashmask);
   1241  1.126.2.1   thorpej 	hashstat_register("vcache", vcache_stats);
   1242       1.36   hannken }
   1243       1.36   hannken 
   1244       1.36   hannken static void
   1245       1.36   hannken vcache_reinit(void)
   1246       1.36   hannken {
   1247       1.36   hannken 	int i;
   1248       1.36   hannken 	uint32_t hash;
   1249       1.36   hannken 	u_long oldmask, newmask;
   1250       1.36   hannken 	struct hashhead *oldtab, *newtab;
   1251       1.70   hannken 	vnode_impl_t *vip;
   1252       1.36   hannken 
   1253       1.36   hannken 	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
   1254       1.69   hannken 	mutex_enter(&vcache_lock);
   1255       1.69   hannken 	oldtab = vcache_hashtab;
   1256       1.69   hannken 	oldmask = vcache_hashmask;
   1257       1.69   hannken 	vcache_hashsize = desiredvnodes;
   1258       1.69   hannken 	vcache_hashtab = newtab;
   1259       1.69   hannken 	vcache_hashmask = newmask;
   1260       1.36   hannken 	for (i = 0; i <= oldmask; i++) {
   1261       1.70   hannken 		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
   1262       1.70   hannken 			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
   1263       1.70   hannken 			hash = vcache_hash(&vip->vi_key);
   1264       1.69   hannken 			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
   1265       1.70   hannken 			    vip, vi_hash);
   1266       1.36   hannken 		}
   1267       1.36   hannken 	}
   1268       1.69   hannken 	mutex_exit(&vcache_lock);
   1269       1.36   hannken 	hashdone(oldtab, HASH_SLIST, oldmask);
   1270       1.36   hannken }
   1271       1.36   hannken 
   1272       1.57   hannken static inline vnode_impl_t *
   1273       1.36   hannken vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
   1274       1.36   hannken {
   1275       1.36   hannken 	struct hashhead *hashp;
   1276       1.70   hannken 	vnode_impl_t *vip;
   1277       1.36   hannken 
   1278       1.69   hannken 	KASSERT(mutex_owned(&vcache_lock));
   1279       1.36   hannken 
   1280       1.69   hannken 	hashp = &vcache_hashtab[hash & vcache_hashmask];
   1281       1.70   hannken 	SLIST_FOREACH(vip, hashp, vi_hash) {
   1282       1.70   hannken 		if (key->vk_mount != vip->vi_key.vk_mount)
   1283       1.36   hannken 			continue;
   1284       1.70   hannken 		if (key->vk_key_len != vip->vi_key.vk_key_len)
   1285       1.36   hannken 			continue;
   1286       1.70   hannken 		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
   1287       1.36   hannken 			continue;
   1288       1.70   hannken 		return vip;
   1289       1.36   hannken 	}
   1290       1.36   hannken 	return NULL;
   1291       1.36   hannken }
   1292       1.36   hannken 
   1293       1.36   hannken /*
   1294       1.50   hannken  * Allocate a new, uninitialized vcache node.
   1295       1.50   hannken  */
   1296       1.57   hannken static vnode_impl_t *
   1297       1.50   hannken vcache_alloc(void)
   1298       1.50   hannken {
   1299       1.70   hannken 	vnode_impl_t *vip;
   1300       1.50   hannken 	vnode_t *vp;
   1301       1.50   hannken 
   1302       1.70   hannken 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
   1303      1.111        ad 	vp = VIMPL_TO_VNODE(vip);
   1304       1.70   hannken 	memset(vip, 0, sizeof(*vip));
   1305       1.50   hannken 
   1306      1.112        ad 	rw_init(&vip->vi_lock);
   1307      1.111        ad 	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
   1308      1.111        ad 
   1309      1.111        ad 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
   1310       1.50   hannken 	cv_init(&vp->v_cv, "vnode");
   1311      1.114        ad 	cache_vnode_init(vp);
   1312       1.50   hannken 
   1313       1.50   hannken 	vp->v_usecount = 1;
   1314       1.50   hannken 	vp->v_type = VNON;
   1315       1.50   hannken 	vp->v_size = vp->v_writesize = VSIZENOTSET;
   1316       1.50   hannken 
   1317       1.70   hannken 	vip->vi_state = VS_LOADING;
   1318       1.51   hannken 
   1319      1.104        ad 	lru_requeue(vp, &lru_list[LRU_FREE]);
   1320       1.63   hannken 
   1321       1.70   hannken 	return vip;
   1322       1.50   hannken }
   1323       1.50   hannken 
   1324       1.50   hannken /*
   1325       1.79   hannken  * Deallocate a vcache node in state VS_LOADING.
   1326       1.79   hannken  *
   1327       1.79   hannken  * vcache_lock held on entry and released on return.
   1328       1.79   hannken  */
   1329       1.79   hannken static void
   1330       1.79   hannken vcache_dealloc(vnode_impl_t *vip)
   1331       1.79   hannken {
   1332       1.79   hannken 	vnode_t *vp;
   1333       1.79   hannken 
   1334       1.79   hannken 	KASSERT(mutex_owned(&vcache_lock));
   1335       1.79   hannken 
   1336       1.79   hannken 	vp = VIMPL_TO_VNODE(vip);
   1337      1.102   hannken 	vfs_ref(dead_rootmount);
   1338      1.102   hannken 	vfs_insmntque(vp, dead_rootmount);
   1339       1.79   hannken 	mutex_enter(vp->v_interlock);
   1340       1.79   hannken 	vp->v_op = dead_vnodeop_p;
   1341       1.79   hannken 	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
   1342       1.79   hannken 	mutex_exit(&vcache_lock);
   1343      1.107        ad 	vrelel(vp, 0, LK_NONE);
   1344       1.79   hannken }
   1345       1.79   hannken 
   1346       1.79   hannken /*
   1347       1.50   hannken  * Free an unused, unreferenced vcache node.
   1348       1.67   hannken  * v_interlock locked on entry.
   1349       1.50   hannken  */
   1350       1.50   hannken static void
   1351       1.70   hannken vcache_free(vnode_impl_t *vip)
   1352       1.50   hannken {
   1353       1.50   hannken 	vnode_t *vp;
   1354       1.50   hannken 
   1355       1.70   hannken 	vp = VIMPL_TO_VNODE(vip);
   1356       1.67   hannken 	KASSERT(mutex_owned(vp->v_interlock));
   1357       1.50   hannken 
   1358      1.120        ad 	KASSERT(vrefcnt(vp) == 0);
   1359       1.67   hannken 	KASSERT(vp->v_holdcnt == 0);
   1360       1.67   hannken 	KASSERT(vp->v_writecount == 0);
   1361       1.67   hannken 	lru_requeue(vp, NULL);
   1362       1.67   hannken 	mutex_exit(vp->v_interlock);
   1363       1.67   hannken 
   1364       1.67   hannken 	vfs_insmntque(vp, NULL);
   1365       1.67   hannken 	if (vp->v_type == VBLK || vp->v_type == VCHR)
   1366       1.67   hannken 		spec_node_destroy(vp);
   1367       1.50   hannken 
   1368      1.111        ad 	mutex_obj_free(vp->v_interlock);
   1369      1.112        ad 	rw_destroy(&vip->vi_lock);
   1370       1.50   hannken 	uvm_obj_destroy(&vp->v_uobj, true);
   1371       1.50   hannken 	cv_destroy(&vp->v_cv);
   1372      1.114        ad 	cache_vnode_fini(vp);
   1373       1.70   hannken 	pool_cache_put(vcache_pool, vip);
   1374       1.50   hannken }
   1375       1.50   hannken 
   1376       1.50   hannken /*
   1377       1.66   hannken  * Try to get an initial reference on this cached vnode.
   1378      1.123        ad  * Returns zero on success or EBUSY if the vnode state is not LOADED.
   1379       1.66   hannken  *
   1380      1.123        ad  * NB: lockless code sequences may rely on this not blocking.
   1381       1.66   hannken  */
   1382       1.66   hannken int
   1383       1.66   hannken vcache_tryvget(vnode_t *vp)
   1384       1.66   hannken {
   1385      1.123        ad 	u_int use, next;
   1386       1.66   hannken 
   1387      1.123        ad 	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
   1388      1.123        ad 		if (__predict_false((use & VUSECOUNT_GATE) == 0)) {
   1389      1.123        ad 			return EBUSY;
   1390      1.123        ad 		}
   1391      1.123        ad 		next = atomic_cas_uint(&vp->v_usecount, use, use + 1);
   1392      1.123        ad 		if (__predict_true(next == use)) {
   1393      1.123        ad 			return 0;
   1394      1.123        ad 		}
   1395      1.123        ad 	}
   1396       1.66   hannken }
   1397       1.66   hannken 
   1398       1.66   hannken /*
   1399       1.66   hannken  * Try to get an initial reference on this cached vnode.
   1400       1.66   hannken  * Returns zero on success or ENOENT if the vnode has been reclaimed.
   1401       1.66   hannken  * Will wait for the vnode state to be stable.
   1402       1.66   hannken  *
   1403       1.66   hannken  * v_interlock locked on entry and unlocked on exit.
   1404       1.66   hannken  */
   1405       1.66   hannken int
   1406       1.66   hannken vcache_vget(vnode_t *vp)
   1407       1.66   hannken {
   1408       1.66   hannken 
   1409       1.66   hannken 	KASSERT(mutex_owned(vp->v_interlock));
   1410       1.66   hannken 
   1411       1.67   hannken 	/* Increment hold count to prevent vnode from disappearing. */
   1412       1.67   hannken 	vp->v_holdcnt++;
   1413       1.67   hannken 	VSTATE_WAIT_STABLE(vp);
   1414       1.67   hannken 	vp->v_holdcnt--;
   1415       1.66   hannken 
   1416       1.67   hannken 	/* If this was the last reference to a reclaimed vnode free it now. */
   1417       1.67   hannken 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
   1418      1.120        ad 		if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
   1419       1.67   hannken 			vcache_free(VNODE_TO_VIMPL(vp));
   1420       1.67   hannken 		else
   1421       1.67   hannken 			mutex_exit(vp->v_interlock);
   1422       1.66   hannken 		return ENOENT;
   1423       1.66   hannken 	}
   1424       1.94   hannken 	VSTATE_ASSERT(vp, VS_LOADED);
   1425      1.123        ad 	atomic_inc_uint(&vp->v_usecount);
   1426       1.66   hannken 	mutex_exit(vp->v_interlock);
   1427       1.66   hannken 
   1428       1.66   hannken 	return 0;
   1429       1.66   hannken }
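
/*
 * Editorial example, not part of the original source: vcache_tryvget() is
 * the non-blocking fast path for lockless lookups, while vcache_vget() may
 * sleep and expects v_interlock to be held.  A hypothetical caller that
 * found vp in a private table and already holds the interlock would do:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vcache_vget(vp);	releases v_interlock, may sleep
 *	if (error == ENOENT) {
 *		the vnode was reclaimed meanwhile; redo the lookup
 *	}
 */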
   1430       1.66   hannken 
   1431       1.66   hannken /*
   1432       1.36   hannken  * Get a vnode / fs node pair by key and return it referenced through vpp.
   1433       1.36   hannken  */
   1434       1.36   hannken int
   1435       1.36   hannken vcache_get(struct mount *mp, const void *key, size_t key_len,
   1436       1.36   hannken     struct vnode **vpp)
   1437       1.36   hannken {
   1438       1.36   hannken 	int error;
   1439       1.36   hannken 	uint32_t hash;
   1440       1.36   hannken 	const void *new_key;
   1441       1.36   hannken 	struct vnode *vp;
   1442       1.36   hannken 	struct vcache_key vcache_key;
   1443       1.70   hannken 	vnode_impl_t *vip, *new_vip;
   1444       1.36   hannken 
   1445       1.36   hannken 	new_key = NULL;
   1446       1.36   hannken 	*vpp = NULL;
   1447       1.36   hannken 
   1448       1.36   hannken 	vcache_key.vk_mount = mp;
   1449       1.36   hannken 	vcache_key.vk_key = key;
   1450       1.36   hannken 	vcache_key.vk_key_len = key_len;
   1451       1.36   hannken 	hash = vcache_hash(&vcache_key);
   1452       1.36   hannken 
   1453       1.36   hannken again:
   1454       1.69   hannken 	mutex_enter(&vcache_lock);
   1455       1.70   hannken 	vip = vcache_hash_lookup(&vcache_key, hash);
   1456       1.36   hannken 
   1457       1.36   hannken 	/* If found, take a reference or retry. */
   1458       1.70   hannken 	if (__predict_true(vip != NULL)) {
   1459       1.52   hannken 		/*
   1460       1.52   hannken 		 * If the vnode is loading we cannot take the v_interlock
   1461       1.52   hannken 		 * here as it might change during load (see uvm_obj_setlock()).
   1462       1.69   hannken 		 * As changing state from VS_LOADING requires both vcache_lock
   1463       1.69   hannken 		 * and v_interlock it is safe to test with vcache_lock held.
   1464       1.52   hannken 		 *
   1465       1.57   hannken 		 * Wait for vnodes changing state from VS_LOADING and retry.
   1466       1.52   hannken 		 */
   1467       1.70   hannken 		if (__predict_false(vip->vi_state == VS_LOADING)) {
   1468       1.69   hannken 			cv_wait(&vcache_cv, &vcache_lock);
   1469       1.69   hannken 			mutex_exit(&vcache_lock);
   1470       1.52   hannken 			goto again;
   1471       1.52   hannken 		}
   1472       1.70   hannken 		vp = VIMPL_TO_VNODE(vip);
   1473       1.36   hannken 		mutex_enter(vp->v_interlock);
   1474       1.69   hannken 		mutex_exit(&vcache_lock);
   1475       1.66   hannken 		error = vcache_vget(vp);
   1476       1.36   hannken 		if (error == ENOENT)
   1477       1.36   hannken 			goto again;
   1478       1.36   hannken 		if (error == 0)
   1479       1.36   hannken 			*vpp = vp;
   1480       1.36   hannken 		KASSERT((error != 0) == (*vpp == NULL));
   1481       1.36   hannken 		return error;
   1482       1.36   hannken 	}
   1483       1.69   hannken 	mutex_exit(&vcache_lock);
   1484       1.36   hannken 
   1485       1.36   hannken 	/* Allocate and initialize a new vcache / vnode pair. */
   1486       1.87   hannken 	error = vfs_busy(mp);
   1487       1.36   hannken 	if (error)
   1488       1.36   hannken 		return error;
   1489       1.70   hannken 	new_vip = vcache_alloc();
   1490       1.70   hannken 	new_vip->vi_key = vcache_key;
   1491       1.70   hannken 	vp = VIMPL_TO_VNODE(new_vip);
   1492       1.69   hannken 	mutex_enter(&vcache_lock);
   1493       1.70   hannken 	vip = vcache_hash_lookup(&vcache_key, hash);
   1494       1.70   hannken 	if (vip == NULL) {
   1495       1.69   hannken 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
   1496       1.70   hannken 		    new_vip, vi_hash);
   1497       1.70   hannken 		vip = new_vip;
   1498       1.36   hannken 	}
   1499       1.36   hannken 
   1500       1.36   hannken 	/* If another thread beat us inserting this node, retry. */
   1501       1.70   hannken 	if (vip != new_vip) {
   1502       1.79   hannken 		vcache_dealloc(new_vip);
   1503       1.87   hannken 		vfs_unbusy(mp);
   1504       1.36   hannken 		goto again;
   1505       1.36   hannken 	}
   1506       1.69   hannken 	mutex_exit(&vcache_lock);
   1507       1.36   hannken 
   1508       1.57   hannken 	/* Load the fs node.  Exclusive as new_vip is VS_LOADING. */
   1509       1.36   hannken 	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
   1510       1.36   hannken 	if (error) {
   1511       1.69   hannken 		mutex_enter(&vcache_lock);
   1512       1.69   hannken 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
   1513       1.70   hannken 		    new_vip, vnode_impl, vi_hash);
   1514       1.79   hannken 		vcache_dealloc(new_vip);
   1515       1.87   hannken 		vfs_unbusy(mp);
   1516       1.36   hannken 		KASSERT(*vpp == NULL);
   1517       1.36   hannken 		return error;
   1518       1.36   hannken 	}
   1519       1.36   hannken 	KASSERT(new_key != NULL);
   1520       1.36   hannken 	KASSERT(memcmp(key, new_key, key_len) == 0);
   1521       1.36   hannken 	KASSERT(vp->v_op != NULL);
   1522       1.36   hannken 	vfs_insmntque(vp, mp);
   1523       1.36   hannken 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
   1524       1.36   hannken 		vp->v_vflag |= VV_MPSAFE;
   1525       1.87   hannken 	vfs_ref(mp);
   1526       1.87   hannken 	vfs_unbusy(mp);
   1527       1.36   hannken 
   1528       1.36   hannken 	/* Finished loading, finalize node. */
   1529       1.69   hannken 	mutex_enter(&vcache_lock);
   1530       1.70   hannken 	new_vip->vi_key.vk_key = new_key;
   1531       1.39   hannken 	mutex_enter(vp->v_interlock);
   1532       1.94   hannken 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
   1533       1.39   hannken 	mutex_exit(vp->v_interlock);
   1534       1.69   hannken 	mutex_exit(&vcache_lock);
   1535       1.36   hannken 	*vpp = vp;
   1536       1.36   hannken 	return 0;
   1537       1.36   hannken }
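
/*
 * Editorial example, not part of the original source: roughly how a disk
 * file system's VFS_VGET might fetch a vnode by inode number through the
 * vnode cache.  The key is whatever that file system's VFS_LOADVNODE
 * expects; an ino_t is assumed here purely for illustration.  The vnode is
 * returned referenced but unlocked.
 *
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *	if (error)
 *		return error;
 *	error = vn_lock(vp, LK_EXCLUSIVE);
 *	if (error) {
 *		vrele(vp);
 *		return error;
 *	}
 *	*vpp = vp;
 */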
   1538       1.36   hannken 
   1539       1.36   hannken /*
   1540       1.40   hannken  * Create a new vnode / fs node pair and return it referenced through vpp.
   1541       1.40   hannken  */
   1542       1.40   hannken int
   1543       1.40   hannken vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
   1544      1.101   hannken     kauth_cred_t cred, void *extra, struct vnode **vpp)
   1545       1.40   hannken {
   1546       1.40   hannken 	int error;
   1547       1.40   hannken 	uint32_t hash;
   1548       1.70   hannken 	struct vnode *vp, *ovp;
   1549       1.70   hannken 	vnode_impl_t *vip, *ovip;
   1550       1.40   hannken 
   1551       1.40   hannken 	*vpp = NULL;
   1552       1.40   hannken 
   1553       1.40   hannken 	/* Allocate and initialize a new vcache / vnode pair. */
   1554       1.87   hannken 	error = vfs_busy(mp);
   1555       1.40   hannken 	if (error)
   1556       1.40   hannken 		return error;
   1557       1.70   hannken 	vip = vcache_alloc();
   1558       1.70   hannken 	vip->vi_key.vk_mount = mp;
   1559       1.70   hannken 	vp = VIMPL_TO_VNODE(vip);
   1560       1.40   hannken 
   1561       1.40   hannken 	/* Create and load the fs node. */
   1562      1.101   hannken 	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
   1563       1.70   hannken 	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
   1564       1.40   hannken 	if (error) {
   1565       1.69   hannken 		mutex_enter(&vcache_lock);
   1566       1.79   hannken 		vcache_dealloc(vip);
   1567       1.87   hannken 		vfs_unbusy(mp);
   1568       1.40   hannken 		KASSERT(*vpp == NULL);
   1569       1.40   hannken 		return error;
   1570       1.40   hannken 	}
   1571       1.40   hannken 	KASSERT(vp->v_op != NULL);
   1572       1.97   hannken 	KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
   1573       1.97   hannken 	if (vip->vi_key.vk_key_len > 0) {
   1574       1.97   hannken 		KASSERT(vip->vi_key.vk_key != NULL);
   1575       1.97   hannken 		hash = vcache_hash(&vip->vi_key);
   1576       1.40   hannken 
   1577       1.97   hannken 		/*
   1578       1.97   hannken 		 * Wait for previous instance to be reclaimed,
   1579       1.97   hannken 		 * then insert new node.
   1580       1.97   hannken 		 */
   1581       1.97   hannken 		mutex_enter(&vcache_lock);
   1582       1.97   hannken 		while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
   1583       1.97   hannken 			ovp = VIMPL_TO_VNODE(ovip);
   1584       1.97   hannken 			mutex_enter(ovp->v_interlock);
   1585       1.97   hannken 			mutex_exit(&vcache_lock);
   1586       1.97   hannken 			error = vcache_vget(ovp);
   1587       1.97   hannken 			KASSERT(error == ENOENT);
   1588       1.97   hannken 			mutex_enter(&vcache_lock);
   1589       1.97   hannken 		}
   1590       1.97   hannken 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
   1591       1.97   hannken 		    vip, vi_hash);
   1592       1.69   hannken 		mutex_exit(&vcache_lock);
   1593       1.40   hannken 	}
   1594       1.40   hannken 	vfs_insmntque(vp, mp);
   1595       1.40   hannken 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
   1596       1.40   hannken 		vp->v_vflag |= VV_MPSAFE;
   1597       1.87   hannken 	vfs_ref(mp);
   1598       1.87   hannken 	vfs_unbusy(mp);
   1599       1.40   hannken 
   1600       1.40   hannken 	/* Finished loading, finalize node. */
   1601       1.69   hannken 	mutex_enter(&vcache_lock);
   1602       1.52   hannken 	mutex_enter(vp->v_interlock);
   1603       1.94   hannken 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
   1604       1.69   hannken 	mutex_exit(&vcache_lock);
   1605       1.40   hannken 	mutex_exit(vp->v_interlock);
   1606       1.40   hannken 	*vpp = vp;
   1607       1.40   hannken 	return 0;
   1608       1.40   hannken }
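
/*
 * Editorial example, not part of the original source: a hypothetical create
 * operation allocating a fresh vnode / fs node pair.  The argument names are
 * illustrative, and "extra" is passed as NULL on the assumption that this
 * file system does not need it.  As with vcache_get(), the new vnode comes
 * back referenced but unlocked.
 *
 *	error = vcache_new(dvp->v_mount, dvp, vap, cred, NULL, &vp);
 *	if (error)
 *		return error;
 *	error = vn_lock(vp, LK_EXCLUSIVE);
 *	if (error) {
 *		vrele(vp);
 *		return error;
 *	}
 *	*vpp = vp;
 */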
   1609       1.40   hannken 
   1610       1.40   hannken /*
   1611       1.65   hannken  * Prepare key change: update the old cache node's key and lock the new cache node.
   1612       1.37   hannken  * Return an error if the new node already exists.
   1613       1.37   hannken  */
   1614       1.37   hannken int
   1615       1.37   hannken vcache_rekey_enter(struct mount *mp, struct vnode *vp,
   1616       1.37   hannken     const void *old_key, size_t old_key_len,
   1617       1.37   hannken     const void *new_key, size_t new_key_len)
   1618       1.37   hannken {
   1619       1.37   hannken 	uint32_t old_hash, new_hash;
   1620       1.37   hannken 	struct vcache_key old_vcache_key, new_vcache_key;
   1621       1.70   hannken 	vnode_impl_t *vip, *new_vip;
   1622       1.37   hannken 
   1623       1.37   hannken 	old_vcache_key.vk_mount = mp;
   1624       1.37   hannken 	old_vcache_key.vk_key = old_key;
   1625       1.37   hannken 	old_vcache_key.vk_key_len = old_key_len;
   1626       1.37   hannken 	old_hash = vcache_hash(&old_vcache_key);
   1627       1.37   hannken 
   1628       1.37   hannken 	new_vcache_key.vk_mount = mp;
   1629       1.37   hannken 	new_vcache_key.vk_key = new_key;
   1630       1.37   hannken 	new_vcache_key.vk_key_len = new_key_len;
   1631       1.37   hannken 	new_hash = vcache_hash(&new_vcache_key);
   1632       1.37   hannken 
   1633       1.70   hannken 	new_vip = vcache_alloc();
   1634       1.70   hannken 	new_vip->vi_key = new_vcache_key;
   1635       1.37   hannken 
   1636       1.52   hannken 	/* Insert locked new node used as placeholder. */
   1637       1.69   hannken 	mutex_enter(&vcache_lock);
   1638       1.70   hannken 	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
   1639       1.70   hannken 	if (vip != NULL) {
   1640       1.79   hannken 		vcache_dealloc(new_vip);
   1641       1.37   hannken 		return EEXIST;
   1642       1.37   hannken 	}
   1643       1.69   hannken 	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
   1644       1.70   hannken 	    new_vip, vi_hash);
   1645       1.49   hannken 
   1646       1.65   hannken 	/* Replace the old node's key with the temporary copy. */
   1647       1.70   hannken 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
   1648       1.70   hannken 	KASSERT(vip != NULL);
   1649       1.70   hannken 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
   1650       1.70   hannken 	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
   1651       1.70   hannken 	vip->vi_key = old_vcache_key;
   1652       1.69   hannken 	mutex_exit(&vcache_lock);
   1653       1.37   hannken 	return 0;
   1654       1.37   hannken }
   1655       1.37   hannken 
   1656       1.37   hannken /*
   1657       1.65   hannken  * Key change complete: update old node and remove placeholder.
   1658       1.37   hannken  */
   1659       1.37   hannken void
   1660       1.37   hannken vcache_rekey_exit(struct mount *mp, struct vnode *vp,
   1661       1.37   hannken     const void *old_key, size_t old_key_len,
   1662       1.37   hannken     const void *new_key, size_t new_key_len)
   1663       1.37   hannken {
   1664       1.37   hannken 	uint32_t old_hash, new_hash;
   1665       1.37   hannken 	struct vcache_key old_vcache_key, new_vcache_key;
   1666       1.70   hannken 	vnode_impl_t *vip, *new_vip;
   1667       1.70   hannken 	struct vnode *new_vp;
   1668       1.37   hannken 
   1669       1.37   hannken 	old_vcache_key.vk_mount = mp;
   1670       1.37   hannken 	old_vcache_key.vk_key = old_key;
   1671       1.37   hannken 	old_vcache_key.vk_key_len = old_key_len;
   1672       1.37   hannken 	old_hash = vcache_hash(&old_vcache_key);
   1673       1.37   hannken 
   1674       1.37   hannken 	new_vcache_key.vk_mount = mp;
   1675       1.37   hannken 	new_vcache_key.vk_key = new_key;
   1676       1.37   hannken 	new_vcache_key.vk_key_len = new_key_len;
   1677       1.37   hannken 	new_hash = vcache_hash(&new_vcache_key);
   1678       1.37   hannken 
   1679       1.69   hannken 	mutex_enter(&vcache_lock);
   1680       1.49   hannken 
   1681       1.49   hannken 	/* Lookup old and new node. */
   1682       1.70   hannken 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
   1683       1.70   hannken 	KASSERT(vip != NULL);
   1684       1.70   hannken 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
   1685       1.70   hannken 
   1686       1.70   hannken 	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
   1687       1.70   hannken 	KASSERT(new_vip != NULL);
   1688       1.70   hannken 	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
   1689       1.70   hannken 	new_vp = VIMPL_TO_VNODE(new_vip);
   1690       1.70   hannken 	mutex_enter(new_vp->v_interlock);
   1691       1.70   hannken 	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
   1692       1.79   hannken 	mutex_exit(new_vp->v_interlock);
   1693       1.49   hannken 
   1694       1.49   hannken 	/* Rekey old node and put it onto its new hashlist. */
   1695       1.70   hannken 	vip->vi_key = new_vcache_key;
   1696       1.49   hannken 	if (old_hash != new_hash) {
   1697       1.69   hannken 		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
   1698       1.70   hannken 		    vip, vnode_impl, vi_hash);
   1699       1.69   hannken 		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
   1700       1.70   hannken 		    vip, vi_hash);
   1701       1.49   hannken 	}
   1702       1.49   hannken 
   1703       1.49   hannken 	/* Remove new node used as placeholder. */
   1704       1.69   hannken 	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
   1705       1.70   hannken 	    new_vip, vnode_impl, vi_hash);
   1706       1.79   hannken 	vcache_dealloc(new_vip);
   1707       1.37   hannken }
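
/*
 * Editorial example, not part of the original source: the rekey protocol as
 * a hypothetical file system whose cache key encodes an object's on-disk
 * location might use it, for instance when a rename moves a directory
 * entry.  The key types are illustrative.
 *
 *	error = vcache_rekey_enter(mp, vp, &old_key, sizeof(old_key),
 *	    &new_key, sizeof(new_key));
 *	if (error)
 *		return error;		EEXIST: new key already cached
 *	... update the metadata so the node is henceforth identified
 *	    by new_key ...
 *	vcache_rekey_exit(mp, vp, &old_key, sizeof(old_key),
 *	    &new_key, sizeof(new_key));
 */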
   1708       1.37   hannken 
   1709       1.37   hannken /*
   1710       1.54   hannken  * Disassociate the underlying file system from a vnode.
   1711       1.54   hannken  *
   1712       1.54   hannken  * Must be called with vnode locked and will return unlocked.
   1713       1.54   hannken  * Must be called with the interlock held, and will return with it held.
   1714       1.54   hannken  */
   1715       1.54   hannken static void
   1716       1.54   hannken vcache_reclaim(vnode_t *vp)
   1717       1.54   hannken {
   1718       1.54   hannken 	lwp_t *l = curlwp;
   1719       1.70   hannken 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
   1720       1.74   hannken 	struct mount *mp = vp->v_mount;
   1721       1.55   hannken 	uint32_t hash;
   1722       1.55   hannken 	uint8_t temp_buf[64], *temp_key;
   1723       1.55   hannken 	size_t temp_key_len;
   1724       1.54   hannken 	bool recycle, active;
   1725       1.54   hannken 	int error;
   1726       1.54   hannken 
   1727       1.54   hannken 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
   1728       1.54   hannken 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
   1729       1.54   hannken 	KASSERT(mutex_owned(vp->v_interlock));
   1730      1.120        ad 	KASSERT(vrefcnt(vp) != 0);
   1731       1.54   hannken 
   1732      1.120        ad 	active = (vrefcnt(vp) > 1);
   1733       1.70   hannken 	temp_key_len = vip->vi_key.vk_key_len;
   1734       1.54   hannken 	/*
   1735       1.54   hannken 	 * Prevent the vnode from being recycled or brought into use
   1736       1.54   hannken 	 * while we clean it out.
   1737       1.54   hannken 	 */
   1738      1.123        ad 	VSTATE_CHANGE(vp, VS_BLOCKED, VS_RECLAIMING);
   1739      1.111        ad 	mutex_exit(vp->v_interlock);
   1740      1.111        ad 
   1741      1.111        ad 	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
   1742      1.111        ad 	mutex_enter(vp->v_interlock);
   1743      1.125        ad 	if ((vp->v_iflag & VI_EXECMAP) != 0) {
   1744      1.105        ad 		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
   1745       1.54   hannken 	}
   1746       1.54   hannken 	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
   1747      1.116        ad 	vp->v_iflag |= VI_DEADCHECK; /* for genfs_getpages() */
   1748       1.54   hannken 	mutex_exit(vp->v_interlock);
   1749      1.111        ad 	rw_exit(vp->v_uobj.vmobjlock);
   1750       1.54   hannken 
   1751      1.114        ad 	/*
   1752      1.114        ad 	 * With vnode state set to reclaiming, purge name cache immediately
   1753      1.114        ad 	 * to prevent new handles on vnode, and wait for existing threads
   1754      1.114        ad 	 * trying to get a handle to notice VS_RECLAIMED status and abort.
   1755      1.114        ad 	 */
   1756      1.114        ad 	cache_purge(vp);
   1757      1.114        ad 
   1758       1.55   hannken 	/* Replace the vnode key with a temporary copy. */
   1759       1.70   hannken 	if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
   1760       1.55   hannken 		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
   1761       1.55   hannken 	} else {
   1762       1.55   hannken 		temp_key = temp_buf;
   1763       1.55   hannken 	}
   1764       1.97   hannken 	if (vip->vi_key.vk_key_len > 0) {
   1765       1.97   hannken 		mutex_enter(&vcache_lock);
   1766       1.97   hannken 		memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
   1767       1.97   hannken 		vip->vi_key.vk_key = temp_key;
   1768       1.97   hannken 		mutex_exit(&vcache_lock);
   1769       1.97   hannken 	}
   1770       1.55   hannken 
   1771       1.96   hannken 	fstrans_start(mp);
   1772       1.74   hannken 
   1773       1.54   hannken 	/*
   1774       1.54   hannken 	 * Clean out any cached data associated with the vnode.
   1775       1.54   hannken 	 * If purging an active vnode, it must be closed and
   1776       1.60   hannken 	 * deactivated before being reclaimed.
   1777       1.54   hannken 	 */
   1778       1.54   hannken 	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
   1779       1.54   hannken 	if (error != 0) {
   1780       1.54   hannken 		if (wapbl_vphaswapbl(vp))
   1781       1.54   hannken 			WAPBL_DISCARD(wapbl_vptomp(vp));
   1782       1.54   hannken 		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
   1783       1.54   hannken 	}
   1784       1.54   hannken 	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
   1785       1.54   hannken 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1786       1.54   hannken 	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
   1787       1.54   hannken 		spec_node_revoke(vp);
   1788       1.54   hannken 	}
   1789       1.54   hannken 
   1790       1.60   hannken 	/*
   1791       1.60   hannken 	 * Disassociate the underlying file system from the vnode.
   1792       1.90  riastrad 	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
   1793       1.90  riastrad 	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
   1794       1.90  riastrad 	 * would no longer function.
   1795       1.60   hannken 	 */
   1796       1.60   hannken 	VOP_INACTIVE(vp, &recycle);
   1797       1.91  riastrad 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
   1798       1.91  riastrad 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
   1799       1.54   hannken 	if (VOP_RECLAIM(vp)) {
   1800       1.54   hannken 		vnpanic(vp, "%s: cannot reclaim", __func__);
   1801       1.54   hannken 	}
   1802       1.54   hannken 
   1803       1.54   hannken 	KASSERT(vp->v_data == NULL);
   1804      1.113        ad 	KASSERT((vp->v_iflag & VI_PAGES) == 0);
   1805       1.54   hannken 
   1806       1.54   hannken 	if (vp->v_type == VREG && vp->v_ractx != NULL) {
   1807       1.54   hannken 		uvm_ra_freectx(vp->v_ractx);
   1808       1.54   hannken 		vp->v_ractx = NULL;
   1809       1.54   hannken 	}
   1810       1.54   hannken 
   1811       1.97   hannken 	if (vip->vi_key.vk_key_len > 0) {
   1812       1.55   hannken 		/* Remove from vnode cache. */
   1813       1.97   hannken 		hash = vcache_hash(&vip->vi_key);
   1814       1.97   hannken 		mutex_enter(&vcache_lock);
   1815       1.97   hannken 		KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
   1816       1.97   hannken 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
   1817       1.97   hannken 		    vip, vnode_impl, vi_hash);
   1818       1.97   hannken 		mutex_exit(&vcache_lock);
   1819       1.97   hannken 	}
   1820       1.55   hannken 	if (temp_key != temp_buf)
   1821       1.55   hannken 		kmem_free(temp_key, temp_key_len);
   1822       1.55   hannken 
   1823       1.54   hannken 	/* Done with purge, notify sleepers of the grim news. */
   1824       1.54   hannken 	mutex_enter(vp->v_interlock);
   1825       1.54   hannken 	vp->v_op = dead_vnodeop_p;
   1826       1.54   hannken 	vp->v_vflag |= VV_LOCKSWORK;
   1827       1.57   hannken 	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
   1828       1.54   hannken 	vp->v_tag = VT_NON;
   1829       1.54   hannken 	KNOTE(&vp->v_klist, NOTE_REVOKE);
   1830       1.80   hannken 	mutex_exit(vp->v_interlock);
   1831       1.54   hannken 
   1832       1.80   hannken 	/*
   1833       1.80   hannken 	 * Move to dead mount.  Must be after changing the operations
   1834       1.80   hannken 	 * vector as vnode operations enter the mount before using the
   1835       1.80   hannken 	 * operations vector.  See sys/kern/vnode_if.c.
   1836       1.80   hannken 	 */
   1837       1.80   hannken 	vp->v_vflag &= ~VV_ROOT;
   1838       1.86   hannken 	vfs_ref(dead_rootmount);
   1839       1.80   hannken 	vfs_insmntque(vp, dead_rootmount);
   1840       1.80   hannken 
   1841      1.110        ad #ifdef PAX_SEGVGUARD
   1842      1.110        ad 	pax_segvguard_cleanup(vp);
   1843      1.110        ad #endif /* PAX_SEGVGUARD */
   1844      1.110        ad 
   1845       1.80   hannken 	mutex_enter(vp->v_interlock);
   1846       1.74   hannken 	fstrans_done(mp);
   1847       1.54   hannken 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1848       1.54   hannken }
   1849       1.54   hannken 
   1850       1.54   hannken /*
   1851       1.98   hannken  * Disassociate the underlying file system from an open device vnode
   1852       1.98   hannken  * and make it anonymous.
   1853       1.98   hannken  *
   1854       1.98   hannken  * Vnode unlocked on entry, drops a reference to the vnode.
   1855       1.98   hannken  */
   1856       1.98   hannken void
   1857       1.98   hannken vcache_make_anon(vnode_t *vp)
   1858       1.98   hannken {
   1859       1.98   hannken 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
   1860       1.98   hannken 	uint32_t hash;
   1861       1.98   hannken 	bool recycle;
   1862       1.98   hannken 
   1863       1.98   hannken 	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
   1864      1.103   hannken 	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
   1865       1.98   hannken 	VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);
   1866       1.98   hannken 
   1867       1.98   hannken 	/* Remove from vnode cache. */
   1868       1.98   hannken 	hash = vcache_hash(&vip->vi_key);
   1869       1.98   hannken 	mutex_enter(&vcache_lock);
   1870       1.98   hannken 	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
   1871       1.98   hannken 	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
   1872       1.98   hannken 	    vip, vnode_impl, vi_hash);
   1873       1.98   hannken 	vip->vi_key.vk_mount = dead_rootmount;
   1874       1.98   hannken 	vip->vi_key.vk_key_len = 0;
   1875       1.98   hannken 	vip->vi_key.vk_key = NULL;
   1876       1.98   hannken 	mutex_exit(&vcache_lock);
   1877       1.98   hannken 
   1878       1.98   hannken 	/*
   1879       1.98   hannken 	 * Disassociate the underlying file system from the vnode.
   1880       1.98   hannken 	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
   1881       1.98   hannken 	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
   1882       1.98   hannken 	 * would no longer function.
   1883       1.98   hannken 	 */
   1884       1.98   hannken 	if (vn_lock(vp, LK_EXCLUSIVE)) {
   1885       1.98   hannken 		vnpanic(vp, "%s: cannot lock", __func__);
   1886       1.98   hannken 	}
   1887       1.98   hannken 	VOP_INACTIVE(vp, &recycle);
   1888       1.98   hannken 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
   1889       1.98   hannken 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
   1890       1.98   hannken 	if (VOP_RECLAIM(vp)) {
   1891       1.98   hannken 		vnpanic(vp, "%s: cannot reclaim", __func__);
   1892       1.98   hannken 	}
   1893       1.98   hannken 
   1894       1.98   hannken 	/* Purge name cache. */
   1895       1.98   hannken 	cache_purge(vp);
   1896       1.98   hannken 
   1897       1.98   hannken 	/* Done with purge, change operations vector. */
   1898       1.98   hannken 	mutex_enter(vp->v_interlock);
   1899       1.98   hannken 	vp->v_op = spec_vnodeop_p;
   1900       1.98   hannken 	vp->v_vflag |= VV_MPSAFE;
   1901       1.98   hannken 	vp->v_vflag &= ~VV_LOCKSWORK;
   1902       1.98   hannken 	mutex_exit(vp->v_interlock);
   1903       1.98   hannken 
   1904       1.98   hannken 	/*
   1905       1.98   hannken 	 * Move to dead mount.  Must be after changing the operations
   1906       1.98   hannken 	 * vector as vnode operations enter the mount before using the
   1907       1.98   hannken 	 * operations vector.  See sys/kern/vnode_if.c.
   1908       1.98   hannken 	 */
   1909       1.98   hannken 	vfs_ref(dead_rootmount);
   1910       1.98   hannken 	vfs_insmntque(vp, dead_rootmount);
   1911       1.98   hannken 
   1912       1.98   hannken 	vrele(vp);
   1913       1.98   hannken }
   1914       1.98   hannken 
   1915       1.98   hannken /*
   1916        1.1     rmind  * Update outstanding I/O count and do wakeup if requested.
   1917        1.1     rmind  */
   1918        1.1     rmind void
   1919        1.1     rmind vwakeup(struct buf *bp)
   1920        1.1     rmind {
   1921        1.1     rmind 	vnode_t *vp;
   1922        1.1     rmind 
   1923        1.1     rmind 	if ((vp = bp->b_vp) == NULL)
   1924        1.1     rmind 		return;
   1925        1.1     rmind 
   1926        1.9     rmind 	KASSERT(bp->b_objlock == vp->v_interlock);
   1927        1.1     rmind 	KASSERT(mutex_owned(bp->b_objlock));
   1928        1.1     rmind 
   1929        1.1     rmind 	if (--vp->v_numoutput < 0)
   1930       1.11  christos 		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
   1931        1.1     rmind 	if (vp->v_numoutput == 0)
   1932        1.1     rmind 		cv_broadcast(&vp->v_cv);
   1933        1.1     rmind }
   1934        1.1     rmind 
   1935        1.1     rmind /*
   1936       1.35   hannken  * Test a vnode for being or becoming dead.  Returns one of:
   1937       1.35   hannken  * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
   1938       1.35   hannken  * ENOENT: vnode is dead.
   1939       1.35   hannken  * 0:      otherwise.
   1940       1.35   hannken  *
   1941       1.35   hannken  * Whenever this function returns a non-zero value all future
   1942       1.35   hannken  * calls will also return a non-zero value.
   1943       1.35   hannken  */
   1944       1.35   hannken int
   1945       1.35   hannken vdead_check(struct vnode *vp, int flags)
   1946       1.35   hannken {
   1947       1.35   hannken 
   1948       1.35   hannken 	KASSERT(mutex_owned(vp->v_interlock));
   1949       1.35   hannken 
   1950       1.52   hannken 	if (! ISSET(flags, VDEAD_NOWAIT))
   1951       1.52   hannken 		VSTATE_WAIT_STABLE(vp);
   1952        1.1     rmind 
   1953       1.57   hannken 	if (VSTATE_GET(vp) == VS_RECLAIMING) {
   1954       1.52   hannken 		KASSERT(ISSET(flags, VDEAD_NOWAIT));
   1955       1.52   hannken 		return EBUSY;
   1956       1.57   hannken 	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
   1957       1.52   hannken 		return ENOENT;
   1958       1.52   hannken 	}
   1959        1.1     rmind 
   1960       1.52   hannken 	return 0;
   1961        1.1     rmind }
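
/*
 * Editorial example, not part of the original source: a hypothetical caller
 * probing whether a vnode it is about to use has been, or is being,
 * revoked.  VDEAD_NOWAIT asks for an immediate answer instead of waiting
 * for the state to settle.
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		treat the vnode as gone (EBUSY: dying, ENOENT: dead)
 */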
   1962        1.1     rmind 
   1963        1.1     rmind int
   1964       1.61   hannken vfs_drainvnodes(void)
   1965        1.1     rmind {
   1966       1.63   hannken 	int i, gen;
   1967       1.61   hannken 
   1968       1.63   hannken 	mutex_enter(&vdrain_lock);
   1969       1.63   hannken 	for (i = 0; i < 2; i++) {
   1970       1.63   hannken 		gen = vdrain_gen;
   1971       1.63   hannken 		while (gen == vdrain_gen) {
   1972       1.63   hannken 			cv_broadcast(&vdrain_cv);
   1973       1.63   hannken 			cv_wait(&vdrain_gen_cv, &vdrain_lock);
   1974       1.63   hannken 		}
   1975       1.61   hannken 	}
   1976       1.63   hannken 	mutex_exit(&vdrain_lock);
   1977       1.12   hannken 
   1978       1.63   hannken 	if (numvnodes >= desiredvnodes)
   1979       1.63   hannken 		return EBUSY;
   1980       1.12   hannken 
   1981       1.69   hannken 	if (vcache_hashsize != desiredvnodes)
   1982       1.61   hannken 		vcache_reinit();
   1983       1.36   hannken 
   1984        1.1     rmind 	return 0;
   1985        1.1     rmind }
   1986        1.1     rmind 
   1987        1.1     rmind void
   1988       1.11  christos vnpanic(vnode_t *vp, const char *fmt, ...)
   1989        1.1     rmind {
   1990       1.11  christos 	va_list ap;
   1991       1.11  christos 
   1992        1.1     rmind #ifdef DIAGNOSTIC
   1993        1.1     rmind 	vprint(NULL, vp);
   1994        1.1     rmind #endif
   1995       1.11  christos 	va_start(ap, fmt);
   1996       1.11  christos 	vpanic(fmt, ap);
   1997       1.11  christos 	va_end(ap);
   1998        1.1     rmind }
   1999      1.111        ad 
   2000      1.111        ad void
   2001      1.111        ad vshareilock(vnode_t *tvp, vnode_t *fvp)
   2002      1.111        ad {
   2003      1.111        ad 	kmutex_t *oldlock;
   2004      1.111        ad 
   2005      1.111        ad 	oldlock = tvp->v_interlock;
   2006      1.111        ad 	mutex_obj_hold(fvp->v_interlock);
   2007      1.111        ad 	tvp->v_interlock = fvp->v_interlock;
   2008      1.111        ad 	mutex_obj_free(oldlock);
   2009      1.111        ad }
   2010