/*	$NetBSD: vfs_vnode.c,v 1.105.2.7 2020/01/25 22:38:51 ad Exp $	*/

/*-
 * Copyright (c) 1997-2011, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vcache_vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was another, traditional way.  Currently, only the draining thread
 *	recycles the vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
 *	the file system that the vnode is inactive.  Via this call, the file
 *	system indicates whether the vnode can be recycled (usually, it
 *	checks its own references, e.g. the link count, or whether the file
 *	was removed).
 *
 *	Depending on that indication, the vnode can be put onto a free list
 *	(cache), or cleaned via vcache_reclaim(), which calls VOP_RECLAIM(9)
 *	to disassociate the underlying file system from the vnode, and is
 *	finally destroyed.
 *
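 *	A hedged usage sketch, illustration only ("mp" and "key" stand in
 *	for a caller's mount point and file system key): a caller that
 *	looks a vnode up, uses it and drops its reference typically does:
 *
 *		struct vnode *vp;
 *		int error;
 *
 *		error = vcache_get(mp, &key, sizeof(key), &vp);
 *		if (error == 0) {
 *			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *			(... use the vnode ...)
 *			vput(vp);	unlocks and drops the reference
 *		}
 *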
 * Vnode state
 *
 *	A vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating with the underlying file system
 *			and is not yet ready to use.
 *	- LOADED	Vnode has an associated underlying file system and is
 *			ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from the underlying file
 *			system and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> LOADED
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	LOADED -> RECLAIMING
 *			Vnode starts disassociation from the underlying file
 *			system in vcache_reclaim().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from the underlying file
 *			system in vcache_reclaim().
 *	LOADED -> BLOCKED
 *			vcache_rekey*() is changing the vnode key.
 *	BLOCKED -> LOADED
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system, or vcache_rekey*()
 *			drops a vnode used as a placeholder.
 *
 *	Of these states LOADING, BLOCKED and RECLAIMING are intermediate
 *	and it is possible to wait for a state change.
 *
 *	State is protected by v_interlock, with one exception: to change
 *	from LOADING both v_interlock and vcache_lock must be held, so it
 *	is possible to check "state == LOADING" without holding
 *	v_interlock.  See vcache_get() for details.
 *
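 *	For illustration, a hedged sketch of how code in this file
 *	synchronises with the intermediate states (compare vrecycle()
 *	below):
 *
 *		mutex_enter(vp->v_interlock);
 *		VSTATE_WAIT_STABLE(vp);	wait for LOADED or RECLAIMED
 *		if (VSTATE_GET(vp) == VS_RECLAIMED)
 *			(... the vnode is dead ...)
 *		mutex_exit(vp->v_interlock);
 *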
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained using
 *	the vref(9), vrele(9) and vput(9) routines.  Common points holding
 *	references are, e.g., open files, current working directories,
 *	mount points, etc.
 *
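 *	An illustrative sketch only (the common caller patterns):
 *
 *		vref(vp);	gain a reference; one must already be held
 *		(...)
 *		vrele(vp);	drop a reference, vnode unlocked
 *
 *	or, for a vnode held locked:
 *
 *		vput(vp);	drop both the lock and the reference
 *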
 * Note on v_usecount & v_holdcnt and their locking
 *
 *	At nearly all points where it is known that a count could be zero,
 *	the vnode_t::v_interlock will be held.  To change a count away
 *	from zero, the interlock must be held.  To change from a non-zero
 *	value to zero, again the interlock must be held.
 *
 *	Changing the usecount from one non-zero value to another can
 *	safely be done using atomic operations, without the interlock held.
 */
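
/*
 * An illustrative sketch of the rule above; it mirrors vtryrele() below
 * and is not an additional interface.  A non-zero -> non-zero usecount
 * transition may be a bare CAS loop, while a transition that could
 * reach zero must fall back to v_interlock:
 *
 *	for (use = vp->v_usecount;; use = next) {
 *		if (use == 1)
 *			break;			would reach zero
 *		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
 *		if (next == use)
 *			return;			done, no interlock taken
 *	}
 *	mutex_enter(vp->v_interlock);
 *	(...)
 */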

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.105.2.7 2020/01/25 22:38:51 ad Exp $");

#ifdef _KERNEL_OPT
#include "opt_pax.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pax.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode_impl.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_stat.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC	0x0001	/* Always defer to vrele thread. */

#define	LRU_VRELE	0
#define	LRU_FREE	1
#define	LRU_HOLD	2
#define	LRU_COUNT	3

/*
 * There are three lru lists: one holds vnodes waiting for async release,
 * one is for vnodes which have no buffer/page references and one for those
 * which do (i.e.  v_holdcnt is non-zero).  We put the lists into a single,
 * private cache line as vnodes migrate between them while under the same
 * lock (vdrain_lock).
 */
u_int			numvnodes		__cacheline_aligned;
static vnodelst_t	lru_list[LRU_COUNT]	__cacheline_aligned;
static kmutex_t		vdrain_lock		__cacheline_aligned;
static kcondvar_t	vdrain_cv;
static int		vdrain_gen;
static kcondvar_t	vdrain_gen_cv;
static bool		vdrain_retry;
static lwp_t *		vdrain_lwp;
SLIST_HEAD(hashhead, vnode_impl);
static kmutex_t		vcache_lock		__cacheline_aligned;
static kcondvar_t	vcache_cv;
static u_int		vcache_hashsize;
static u_long		vcache_hashmask;
static struct hashhead	*vcache_hashtab;
static pool_cache_t	vcache_pool;
static void		lru_requeue(vnode_t *, vnodelst_t *);
static vnodelst_t *	lru_which(vnode_t *);
static vnode_impl_t *	vcache_alloc(void);
static void		vcache_dealloc(vnode_impl_t *);
static void		vcache_free(vnode_impl_t *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vcache_reclaim(vnode_t *);
static void		vrelel(vnode_t *, int, int);
static void		vdrain_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount	*dead_rootmount;
extern int		(**dead_vnodeop_p)(void *);
extern int		(**spec_vnodeop_p)(void *);
extern struct vfsops	dead_vfsops;

/* Vnode state operations and diagnostics. */

#if defined(DIAGNOSTIC)

#define VSTATE_VALID(state) \
	((state) != VS_ACTIVE && (state) != VS_MARKER)
#define VSTATE_GET(vp) \
	vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
	vstate_assert_wait_stable((vp), __func__, __LINE__)

void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	if (!has_lock) {
		/*
		 * Prevent predictive loads from the CPU, but check the state
		 * without locking first.
		 */
		membar_enter();
		if (state == VS_ACTIVE && vp->v_usecount > 0 &&
		    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
			return;
		if (vip->vi_state == state)
			return;
		mutex_enter((vp)->v_interlock);
	}

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if ((state == VS_ACTIVE && vp->v_usecount > 0 &&
	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
	    vip->vi_state == state) {
		if (!has_lock)
			mutex_exit((vp)->v_interlock);
		return;
	}
	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
	    vstate_name(vip->vi_state), vp->v_usecount,
	    vstate_name(state), func, line);
}

static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	return vip->vi_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);
}

static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);

	if (! VSTATE_VALID(from))
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (! VSTATE_VALID(to))
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (vip->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(vip->vi_state), vstate_name(from), func, line);
	if ((from == VS_BLOCKED || to == VS_BLOCKED) && vp->v_usecount != 1)
		vnpanic(vp, "%s to %s with usecount %d at %s:%d",
		    vstate_name(from), vstate_name(to), vp->v_usecount,
		    func, line);

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
	(VNODE_TO_VIMPL((vp))->vi_state)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
	vstate_wait_stable((vp))
void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{

}

static void
vstate_wait_stable(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */

void
vfs_vnode_sysinit(void)
{
	int error __diagused, i;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag |= IMNT_MPSAFE;

	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
	for (i = 0; i < LRU_COUNT; i++) {
		TAILQ_INIT(&lru_list[i]);
	}
	vcache_init();

	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vdrain_gen_cv, "vdrainwt");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, &vdrain_lwp, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	memset(vip, 0, sizeof(*vip));
	vp = VIMPL_TO_VNODE(vip);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	vip->vi_state = VS_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	vnode_impl_t *vip;

	vip = VNODE_TO_VIMPL(vp);
	KASSERT(vip->vi_state == VS_MARKER);
	uvm_obj_destroy(&vp->v_uobj, true);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
}

/*
 * Return the lru list this node should be on.
 */
static vnodelst_t *
lru_which(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt > 0)
		return &lru_list[LRU_HOLD];
	else
		return &lru_list[LRU_FREE];
}

/*
 * Put vnode to end of given list.
 * Both the current and the new list may be NULL, used on vnode alloc/free.
 * Adjust numvnodes and signal vdrain thread if there is work.
 */
static void
lru_requeue(vnode_t *vp, vnodelst_t *listhd)
{
	vnode_impl_t *vip;
	int d;

	/*
	 * If the vnode is on the correct list, and was put there recently,
	 * then leave it be, thus avoiding huge cache and lock contention.
	 */
	vip = VNODE_TO_VIMPL(vp);
	if (listhd == vip->vi_lrulisthd &&
	    (hardclock_ticks - vip->vi_lrulisttm) < hz) {
		return;
	}

	mutex_enter(&vdrain_lock);
	d = 0;
	if (vip->vi_lrulisthd != NULL)
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d++;
	vip->vi_lrulisthd = listhd;
	vip->vi_lrulisttm = hardclock_ticks;
	if (vip->vi_lrulisthd != NULL)
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d--;
	if (d != 0) {
		/*
		 * Looks strange?  This is not a bug.  Don't store
		 * numvnodes unless there is a change - avoid false
		 * sharing on MP.
		 */
		numvnodes += d;
	}
	if (numvnodes > desiredvnodes || listhd == &lru_list[LRU_VRELE])
		cv_broadcast(&vdrain_cv);
	mutex_exit(&vdrain_lock);
}

/*
 * Release deferred vrele vnodes for this mount.
 * Called with file system suspended.
 */
void
vrele_flush(struct mount *mp)
{
	vnode_impl_t *vip, *marker;
	vnode_t *vp;

	KASSERT(fstrans_is_owner(mp));

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);
	TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);

	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
		TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
		TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
		    vi_lrulist);
		vp = VIMPL_TO_VNODE(vip);
		if (vnis_marker(vp))
			continue;

		KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
		vip->vi_lrulisthd = &lru_list[LRU_HOLD];
		vip->vi_lrulisttm = hardclock_ticks;
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
		mutex_exit(&vdrain_lock);

		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		mutex_enter(vp->v_interlock);
		vrelel(vp, 0, LK_EXCLUSIVE);

		mutex_enter(&vdrain_lock);
	}

	TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
	mutex_exit(&vdrain_lock);

	vnfree_marker(VIMPL_TO_VNODE(marker));
}

/*
 * Reclaim a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_remove(vnode_t *vp)
{
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	/* Probe usecount (unlocked). */
	if (vp->v_usecount > 0)
		return;
	/* Try v_interlock -- we lock the wrong direction! */
	if (!mutex_tryenter(vp->v_interlock))
		return;
	/* Probe usecount and state. */
	if (vp->v_usecount > 0 || VSTATE_GET(vp) != VS_LOADED) {
		mutex_exit(vp->v_interlock);
		return;
	}
	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0) {
		mutex_exit(vp->v_interlock);
		return;
	}
	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	if (vcache_vget(vp) == 0) {
		if (!vrecycle(vp)) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			mutex_enter(vp->v_interlock);
			vrelel(vp, 0, LK_EXCLUSIVE);
		}
	}
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Release a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_vrele(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0)
		return;

	/*
	 * First remove the vnode from the vrele list.
	 * Put it on the last lru list; the last vrele()
	 * will put it back onto the right list before
	 * its v_usecount reaches zero.
	 */
	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	vip->vi_lrulisthd = &lru_list[LRU_HOLD];
	vip->vi_lrulisttm = hardclock_ticks;
	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);

	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, LK_EXCLUSIVE);
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes
 * and release vnodes from asynchronous vrele.
 */
static void
vdrain_thread(void *cookie)
{
	int i;
	u_int target;
	vnode_impl_t *vip, *marker;

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);

	for (;;) {
		vdrain_retry = false;
		target = desiredvnodes - desiredvnodes/10;

		for (i = 0; i < LRU_COUNT; i++) {
			TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
				TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
				TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
				    vi_lrulist);
				if (vnis_marker(VIMPL_TO_VNODE(vip)))
					continue;
				if (i == LRU_VRELE)
					vdrain_vrele(VIMPL_TO_VNODE(vip));
				else if (numvnodes < target)
					break;
				else
					vdrain_remove(VIMPL_TO_VNODE(vip));
			}
			TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
		}

		if (vdrain_retry) {
			mutex_exit(&vdrain_lock);
			yield();
			mutex_enter(&vdrain_lock);
		} else {
			vdrain_gen++;
			cv_broadcast(&vdrain_gen_cv);
			cv_wait(&vdrain_cv, &vdrain_lock);
		}
	}
}

/*
 * Try to drop a reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if it is not the last
 * reference.
 */
static bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = vp->v_usecount;; use = next) {
		if (__predict_false(use == 1)) {
			return false;
		}
		KASSERT(use > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{
	int lktype;

	/*
	 * Do an unlocked check of v_usecount.  If it looks like we're not
	 * about to drop the last reference, then unlock the vnode and try
	 * to drop the reference.  If it ends up being the last reference
	 * after all, we dropped the lock when we shouldn't have.  vrelel()
	 * can fix it all up.  Most of the time this will all go to plan.
	 */
	if (vp->v_usecount > 1) {
		VOP_UNLOCK(vp);
		if (vtryrele(vp)) {
			return;
		}
		lktype = LK_NONE;
	} else if ((vp->v_vflag & VV_LOCKSWORK) == 0) {
		lktype = LK_EXCLUSIVE;
	} else {
		lktype = VOP_ISLOCKED(vp);
		KASSERT(lktype != LK_NONE);
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, lktype);
}

/*
 * Vnode release.  If the reference count drops to zero, call the
 * inactive routine and either return the vnode to the freelist or
 * free it to the pool.
 */
static void
vrelel(vnode_t *vp, int flags, int lktype)
{
	const bool async = ((flags & VRELEL_ASYNC) != 0);
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VS_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count
	 * and unlock.
	 */
	if (vtryrele(vp)) {
		if (lktype != LK_NONE) {
			VOP_UNLOCK(vp);
		}
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * First try to get the vnode locked for VOP_INACTIVE().
	 * Defer vnode release to vdrain_thread if the caller requests
	 * it explicitly, is the pagedaemon, or if the lock failed.
	 */
	defer = false;
	if ((curlwp == uvm.pagedaemon_lwp) || async) {
		defer = true;
	} else if (lktype == LK_SHARED) {
		/* Excellent chance of getting the lock, if the last ref. */
		error = vn_lock(vp, LK_UPGRADE | LK_RETRY |
		    LK_NOWAIT);
		if (error != 0) {
			defer = true;
		} else {
			lktype = LK_EXCLUSIVE;
		}
	} else if (lktype == LK_NONE) {
		/* Excellent chance of getting the lock, if the last ref. */
		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY |
		    LK_NOWAIT);
		if (error != 0) {
			defer = true;
		} else {
			lktype = LK_EXCLUSIVE;
		}
	}
	KASSERT(mutex_owned(vp->v_interlock));
	if (defer) {
		/*
		 * Defer reclaim to the kthread; it's not safe to
		 * clean it here.  We donate it our last reference.
		 */
		if (lktype != LK_NONE) {
			VOP_UNLOCK(vp);
		}
		lru_requeue(vp, &lru_list[LRU_VRELE]);
		mutex_exit(vp->v_interlock);
		return;
	}
	KASSERT(lktype == LK_EXCLUSIVE);

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		VOP_UNLOCK(vp);
	} else {
		/*
		 * If VOP_INACTIVE() indicates that the described file has
		 * been deleted, then recycle the vnode.  Note that
		 * VOP_INACTIVE() will not drop the vnode lock.
		 *
		 * If the file has been deleted, this is a lingering
		 * reference and there is no need to worry about new
		 * references looking to do real work with the vnode (as it
		 * will have been purged from directories, caches, etc).
		 */
		recycle = false;
		mutex_exit(vp->v_interlock);
		VOP_INACTIVE(vp, &recycle);
		mutex_enter(vp->v_interlock);
		if (!recycle) {
			VOP_UNLOCK(vp);
			if (vtryrele(vp)) {
				mutex_exit(vp->v_interlock);
				return;
			}
		}

		/* Take care of space accounting. */
		if ((vp->v_iflag & VI_EXECMAP) != 0 &&
		    vp->v_uobj.uo_npages != 0) {
			cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
			cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			VSTATE_ASSERT(vp, VS_LOADED);
			/* vcache_reclaim drops the lock. */
			vcache_reclaim(vp);
		}
		KASSERT(vp->v_usecount > 0);
	}

	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(vp->v_interlock);
		return;
	}

	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		vcache_free(VNODE_TO_VIMPL(vp));
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		lru_requeue(vp, lru_which(vp));
		mutex_exit(vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, LK_NONE);
}

/*
 * Asynchronous vnode release: the vnode is released in a different
 * context.
 */
void
vrele_async(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC, LK_NONE);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 *
 * NB: we have lockless code sequences that rely on this not blocking.
 */
void
vref(vnode_t *vp)
{

	KASSERT(vp->v_usecount != 0);

	atomic_inc_uint(&vp->v_usecount);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (atomic_inc_uint_nv(&vp->v_holdcnt) == 1 && vp->v_usecount == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vnode_t *vp)
{
	int hold, next;

	for (hold = vp->v_holdcnt;; hold = next) {
		if (__predict_false(hold == 0)) {
			break;
		}
		next = atomic_cas_uint(&vp->v_holdcnt, hold, hold + 1);
		if (__predict_true(next == hold)) {
			return;
		}
	}

	mutex_enter(vp->v_interlock);
	vholdl(vp);
	mutex_exit(vp->v_interlock);
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	if (atomic_dec_uint_nv(&vp->v_holdcnt) == 0 && vp->v_usecount == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vnode_t *vp)
{
	int hold, next;

	for (hold = vp->v_holdcnt;; hold = next) {
		if (__predict_false(hold == 1)) {
			break;
		}
		KASSERT(hold > 1);
		next = atomic_cas_uint(&vp->v_holdcnt, hold, hold - 1);
		if (__predict_true(next == hold)) {
			return;
		}
	}

	mutex_enter(vp->v_interlock);
	holdrelel(vp);
	mutex_exit(vp->v_interlock);
}

/*
 * Recycle an unused vnode if the caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{
	int error __diagused;

	mutex_enter(vp->v_interlock);

	/* Make sure we hold the last reference. */
	VSTATE_WAIT_STABLE(vp);
	if (vp->v_usecount != 1) {
		mutex_exit(vp->v_interlock);
		return false;
	}

	/* If the vnode is already clean we're done. */
	if (VSTATE_GET(vp) != VS_LOADED) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrelel(vp, 0, LK_NONE);
		return true;
	}

	/* Prevent further references until the vnode is locked. */
	VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
	mutex_exit(vp->v_interlock);

	/*
	 * On a leaf file system this lock will always succeed as we hold
	 * the last reference and prevent further references.
	 * On layered file systems waiting for the lock would open a can of
	 * deadlocks as the lower vnodes may have other active references.
	 */
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);

	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);

	if (error) {
		mutex_exit(vp->v_interlock);
		return false;
	}

	KASSERT(vp->v_usecount == 1);
	vcache_reclaim(vp);
	vrelel(vp, 0, LK_NONE);

	return true;
}
   1061        1.1     rmind 
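/*
 * Usage sketch (hypothetical caller): a file system holding what it
 * believes to be the last reference may try to reclaim the vnode
 * eagerly instead of leaving it to the drain thread.  On success
 * vrecycle() has consumed the reference; on failure the caller still
 * owns it and releases it as usual:
 *
 *	if (!vrecycle(vp))
 *		vrele(vp);
 */
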
   1062        1.1     rmind /*
   1063       1.92   hannken  * Helper for vrevoke() to propagate suspension from lastmp
   1064       1.92   hannken  * to thismp.  Both args may be NULL.
   1065       1.92   hannken  * Returns the currently suspended file system or NULL.
   1066       1.92   hannken  */
   1067       1.92   hannken static struct mount *
   1068       1.92   hannken vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
   1069       1.92   hannken {
   1070       1.92   hannken 	int error;
   1071       1.92   hannken 
   1072       1.92   hannken 	if (lastmp == thismp)
   1073       1.92   hannken 		return thismp;
   1074       1.92   hannken 
   1075       1.92   hannken 	if (lastmp != NULL)
   1076       1.92   hannken 		vfs_resume(lastmp);
   1077       1.92   hannken 
   1078       1.92   hannken 	if (thismp == NULL)
   1079       1.92   hannken 		return NULL;
   1080       1.92   hannken 
   1081       1.92   hannken 	do {
   1082       1.92   hannken 		error = vfs_suspend(thismp, 0);
   1083       1.92   hannken 	} while (error == EINTR || error == ERESTART);
   1084       1.92   hannken 
   1085       1.92   hannken 	if (error == 0)
   1086       1.92   hannken 		return thismp;
   1087       1.92   hannken 
   1088       1.92   hannken 	KASSERT(error == EOPNOTSUPP);
   1089       1.92   hannken 	return NULL;
   1090       1.92   hannken }
   1091       1.92   hannken 
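/*
 * The helper above implements hand-over-hand suspension: at most one
 * file system is suspended at any time, and passing NULL as thismp
 * resumes the last one.  The calling pattern, as used by vrevoke()
 * below:
 *
 *	mp = vrevoke_suspend_next(NULL, vp->v_mount);
 *	...revoke vnodes belonging to mp...
 *	mp = vrevoke_suspend_next(mp, vq->v_mount);
 *	...revoke vnodes belonging to the new mp...
 *	vrevoke_suspend_next(mp, NULL);
 */
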
   1092       1.92   hannken /*
   1093        1.1     rmind  * Eliminate all activity associated with the requested vnode
   1094        1.1     rmind  * and with all vnodes aliased to the requested vnode.
   1095        1.1     rmind  */
   1096        1.1     rmind void
   1097        1.1     rmind vrevoke(vnode_t *vp)
   1098        1.1     rmind {
   1099       1.88   hannken 	struct mount *mp;
   1100       1.19   hannken 	vnode_t *vq;
   1101        1.1     rmind 	enum vtype type;
   1102        1.1     rmind 	dev_t dev;
   1103        1.1     rmind 
   1104        1.1     rmind 	KASSERT(vp->v_usecount > 0);
   1105        1.1     rmind 
   1106       1.92   hannken 	mp = vrevoke_suspend_next(NULL, vp->v_mount);
   1107       1.88   hannken 
   1108        1.9     rmind 	mutex_enter(vp->v_interlock);
   1109       1.52   hannken 	VSTATE_WAIT_STABLE(vp);
   1110       1.57   hannken 	if (VSTATE_GET(vp) == VS_RECLAIMED) {
   1111        1.9     rmind 		mutex_exit(vp->v_interlock);
   1112        1.1     rmind 	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
   1113  1.105.2.5        ad 		atomic_inc_uint(&vp->v_usecount);
   1114       1.29  christos 		mutex_exit(vp->v_interlock);
   1115       1.29  christos 		vgone(vp);
   1116        1.1     rmind 	} else {
   1117        1.1     rmind 		dev = vp->v_rdev;
   1118        1.1     rmind 		type = vp->v_type;
   1119        1.9     rmind 		mutex_exit(vp->v_interlock);
   1120        1.1     rmind 
   1121       1.88   hannken 		while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
   1122       1.92   hannken 			mp = vrevoke_suspend_next(mp, vq->v_mount);
   1123       1.88   hannken 			vgone(vq);
   1124       1.88   hannken 		}
   1125        1.1     rmind 	}
   1126       1.92   hannken 	vrevoke_suspend_next(mp, NULL);
   1127        1.1     rmind }
   1128        1.1     rmind 
   1129        1.1     rmind /*
   1130        1.1     rmind  * Eliminate all activity associated with a vnode in preparation for
   1131        1.1     rmind  * reuse.  Drops a reference from the vnode.
   1132        1.1     rmind  */
   1133        1.1     rmind void
   1134        1.1     rmind vgone(vnode_t *vp)
   1135        1.1     rmind {
   1136  1.105.2.2        ad 	int lktype;
   1137        1.1     rmind 
   1138      1.103   hannken 	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
   1139       1.93   hannken 
   1140       1.76   hannken 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
   1141  1.105.2.2        ad 	lktype = LK_EXCLUSIVE;
   1142        1.9     rmind 	mutex_enter(vp->v_interlock);
   1143       1.76   hannken 	VSTATE_WAIT_STABLE(vp);
   1144  1.105.2.2        ad 	if (VSTATE_GET(vp) == VS_LOADED) {
   1145       1.76   hannken 		vcache_reclaim(vp);
   1146  1.105.2.2        ad 		lktype = LK_NONE;
   1147  1.105.2.2        ad 	}
   1148       1.76   hannken 	VSTATE_ASSERT(vp, VS_RECLAIMED);
   1149  1.105.2.2        ad 	vrelel(vp, 0, lktype);
   1150        1.1     rmind }
   1151        1.1     rmind 
   1152       1.36   hannken static inline uint32_t
   1153       1.36   hannken vcache_hash(const struct vcache_key *key)
   1154       1.36   hannken {
   1155       1.36   hannken 	uint32_t hash = HASH32_BUF_INIT;
   1156       1.36   hannken 
   1157       1.97   hannken 	KASSERT(key->vk_key_len > 0);
   1158       1.97   hannken 
   1159       1.36   hannken 	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
   1160       1.36   hannken 	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
   1161       1.36   hannken 	return hash;
   1162       1.36   hannken }
   1163       1.36   hannken 
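/*
 * Example key construction (a sketch; the key layout is private to
 * each file system): for a file system indexed by inode number the
 * key would typically be the ino_t itself.
 *
 *	ino_t ino = ...;
 *	struct vcache_key key = {
 *		.vk_mount = mp,
 *		.vk_key = &ino,
 *		.vk_key_len = sizeof(ino),
 *	};
 *	uint32_t hash = vcache_hash(&key);
 */
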
   1164       1.36   hannken static void
   1165       1.36   hannken vcache_init(void)
   1166       1.36   hannken {
   1167       1.36   hannken 
   1168  1.105.2.5        ad 	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
   1169  1.105.2.5        ad 	    0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
   1170       1.69   hannken 	KASSERT(vcache_pool != NULL);
   1171       1.69   hannken 	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
   1172       1.69   hannken 	cv_init(&vcache_cv, "vcache");
   1173       1.69   hannken 	vcache_hashsize = desiredvnodes;
   1174       1.69   hannken 	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
   1175       1.69   hannken 	    &vcache_hashmask);
   1176       1.36   hannken }
   1177       1.36   hannken 
   1178       1.36   hannken static void
   1179       1.36   hannken vcache_reinit(void)
   1180       1.36   hannken {
   1181       1.36   hannken 	int i;
   1182       1.36   hannken 	uint32_t hash;
   1183       1.36   hannken 	u_long oldmask, newmask;
   1184       1.36   hannken 	struct hashhead *oldtab, *newtab;
   1185       1.70   hannken 	vnode_impl_t *vip;
   1186       1.36   hannken 
   1187       1.36   hannken 	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
   1188       1.69   hannken 	mutex_enter(&vcache_lock);
   1189       1.69   hannken 	oldtab = vcache_hashtab;
   1190       1.69   hannken 	oldmask = vcache_hashmask;
   1191       1.69   hannken 	vcache_hashsize = desiredvnodes;
   1192       1.69   hannken 	vcache_hashtab = newtab;
   1193       1.69   hannken 	vcache_hashmask = newmask;
   1194       1.36   hannken 	for (i = 0; i <= oldmask; i++) {
   1195       1.70   hannken 		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
   1196       1.70   hannken 			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
   1197       1.70   hannken 			hash = vcache_hash(&vip->vi_key);
   1198       1.69   hannken 			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
   1199       1.70   hannken 			    vip, vi_hash);
   1200       1.36   hannken 		}
   1201       1.36   hannken 	}
   1202       1.69   hannken 	mutex_exit(&vcache_lock);
   1203       1.36   hannken 	hashdone(oldtab, HASH_SLIST, oldmask);
   1204       1.36   hannken }
   1205       1.36   hannken 
   1206       1.57   hannken static inline vnode_impl_t *
   1207       1.36   hannken vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
   1208       1.36   hannken {
   1209       1.36   hannken 	struct hashhead *hashp;
   1210       1.70   hannken 	vnode_impl_t *vip;
   1211       1.36   hannken 
   1212       1.69   hannken 	KASSERT(mutex_owned(&vcache_lock));
   1213       1.36   hannken 
   1214       1.69   hannken 	hashp = &vcache_hashtab[hash & vcache_hashmask];
   1215       1.70   hannken 	SLIST_FOREACH(vip, hashp, vi_hash) {
   1216       1.70   hannken 		if (key->vk_mount != vip->vi_key.vk_mount)
   1217       1.36   hannken 			continue;
   1218       1.70   hannken 		if (key->vk_key_len != vip->vi_key.vk_key_len)
   1219       1.36   hannken 			continue;
   1220       1.70   hannken 		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
   1221       1.36   hannken 			continue;
   1222       1.70   hannken 		return vip;
   1223       1.36   hannken 	}
   1224       1.36   hannken 	return NULL;
   1225       1.36   hannken }
   1226       1.36   hannken 
   1227       1.36   hannken /*
   1228       1.50   hannken  * Allocate a new, uninitialized vcache node.
   1229       1.50   hannken  */
   1230       1.57   hannken static vnode_impl_t *
   1231       1.50   hannken vcache_alloc(void)
   1232       1.50   hannken {
   1233       1.70   hannken 	vnode_impl_t *vip;
   1234       1.50   hannken 	vnode_t *vp;
   1235       1.50   hannken 
   1236       1.70   hannken 	vip = pool_cache_get(vcache_pool, PR_WAITOK);
   1237       1.70   hannken 	memset(vip, 0, sizeof(*vip));
   1238       1.50   hannken 
   1239  1.105.2.5        ad 	rw_init(&vip->vi_lock);
   1240       1.50   hannken 
   1241       1.70   hannken 	vp = VIMPL_TO_VNODE(vip);
   1242       1.50   hannken 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
   1243       1.50   hannken 	cv_init(&vp->v_cv, "vnode");
   1244  1.105.2.1        ad 	cache_vnode_init(vp);
   1245       1.50   hannken 
   1246       1.50   hannken 	vp->v_usecount = 1;
   1247       1.50   hannken 	vp->v_type = VNON;
   1248       1.50   hannken 	vp->v_size = vp->v_writesize = VSIZENOTSET;
   1249       1.50   hannken 
   1250       1.70   hannken 	vip->vi_state = VS_LOADING;
   1251       1.51   hannken 
   1252      1.104        ad 	lru_requeue(vp, &lru_list[LRU_FREE]);
   1253       1.63   hannken 
   1254       1.70   hannken 	return vip;
   1255       1.50   hannken }
   1256       1.50   hannken 
   1257       1.50   hannken /*
   1258       1.79   hannken  * Deallocate a vcache node in state VS_LOADING.
   1259       1.79   hannken  *
   1260       1.79   hannken  * vcache_lock held on entry and released on return.
   1261       1.79   hannken  */
   1262       1.79   hannken static void
   1263       1.79   hannken vcache_dealloc(vnode_impl_t *vip)
   1264       1.79   hannken {
   1265       1.79   hannken 	vnode_t *vp;
   1266       1.79   hannken 
   1267       1.79   hannken 	KASSERT(mutex_owned(&vcache_lock));
   1268       1.79   hannken 
   1269       1.79   hannken 	vp = VIMPL_TO_VNODE(vip);
   1270      1.102   hannken 	vfs_ref(dead_rootmount);
   1271      1.102   hannken 	vfs_insmntque(vp, dead_rootmount);
   1272       1.79   hannken 	mutex_enter(vp->v_interlock);
   1273       1.79   hannken 	vp->v_op = dead_vnodeop_p;
   1274       1.79   hannken 	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
   1275       1.79   hannken 	mutex_exit(&vcache_lock);
   1276  1.105.2.2        ad 	vrelel(vp, 0, LK_NONE);
   1277       1.79   hannken }
   1278       1.79   hannken 
   1279       1.79   hannken /*
   1280       1.50   hannken  * Free an unused, unreferenced vcache node.
   1281       1.67   hannken  * v_interlock locked on entry.
   1282       1.50   hannken  */
   1283       1.50   hannken static void
   1284       1.70   hannken vcache_free(vnode_impl_t *vip)
   1285       1.50   hannken {
   1286       1.50   hannken 	vnode_t *vp;
   1287       1.50   hannken 
   1288       1.70   hannken 	vp = VIMPL_TO_VNODE(vip);
   1289       1.67   hannken 	KASSERT(mutex_owned(vp->v_interlock));
   1290       1.50   hannken 
   1291       1.50   hannken 	KASSERT(vp->v_usecount == 0);
   1292       1.67   hannken 	KASSERT(vp->v_holdcnt == 0);
   1293       1.67   hannken 	KASSERT(vp->v_writecount == 0);
   1294       1.67   hannken 	lru_requeue(vp, NULL);
   1295       1.67   hannken 	mutex_exit(vp->v_interlock);
   1296       1.67   hannken 
   1297       1.67   hannken 	vfs_insmntque(vp, NULL);
   1298       1.67   hannken 	if (vp->v_type == VBLK || vp->v_type == VCHR)
   1299       1.67   hannken 		spec_node_destroy(vp);
   1300       1.50   hannken 
   1301  1.105.2.5        ad 	rw_destroy(&vip->vi_lock);
   1302       1.50   hannken 	uvm_obj_destroy(&vp->v_uobj, true);
   1303       1.50   hannken 	cv_destroy(&vp->v_cv);
   1304  1.105.2.1        ad 	cache_vnode_fini(vp);
   1305       1.70   hannken 	pool_cache_put(vcache_pool, vip);
   1306       1.50   hannken }
   1307       1.50   hannken 
   1308       1.50   hannken /*
   1309       1.66   hannken  * Try to get an initial reference on this cached vnode.
    1310       1.66   hannken  * Returns zero on success, ENOENT if the vnode has been reclaimed, and
   1311       1.66   hannken  * EBUSY if the vnode state is unstable.
   1312       1.66   hannken  *
   1313       1.66   hannken  * v_interlock locked on entry and unlocked on exit.
   1314       1.66   hannken  */
   1315       1.66   hannken int
   1316       1.66   hannken vcache_tryvget(vnode_t *vp)
   1317       1.66   hannken {
   1318       1.67   hannken 	int error = 0;
   1319       1.66   hannken 
   1320       1.66   hannken 	KASSERT(mutex_owned(vp->v_interlock));
   1321       1.66   hannken 
   1322       1.67   hannken 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED))
   1323       1.67   hannken 		error = ENOENT;
   1324       1.94   hannken 	else if (__predict_false(VSTATE_GET(vp) != VS_LOADED))
   1325       1.67   hannken 		error = EBUSY;
   1326  1.105.2.5        ad 	else if (vp->v_usecount == 0)
   1327  1.105.2.5        ad 		vp->v_usecount = 1;
   1328       1.67   hannken 	else
   1329  1.105.2.5        ad 		atomic_inc_uint(&vp->v_usecount);
   1330       1.66   hannken 
   1331       1.66   hannken 	mutex_exit(vp->v_interlock);
   1332       1.66   hannken 
   1333       1.67   hannken 	return error;
   1334       1.66   hannken }
   1335       1.66   hannken 
   1336       1.66   hannken /*
   1337       1.66   hannken  * Try to get an initial reference on this cached vnode.
    1338       1.66   hannken  * Returns zero on success and ENOENT if the vnode has been reclaimed.
   1339       1.66   hannken  * Will wait for the vnode state to be stable.
   1340       1.66   hannken  *
   1341       1.66   hannken  * v_interlock locked on entry and unlocked on exit.
   1342       1.66   hannken  */
   1343       1.66   hannken int
   1344       1.66   hannken vcache_vget(vnode_t *vp)
   1345       1.66   hannken {
   1346       1.66   hannken 
   1347       1.66   hannken 	KASSERT(mutex_owned(vp->v_interlock));
   1348       1.66   hannken 
   1349       1.67   hannken 	/* Increment hold count to prevent vnode from disappearing. */
   1350       1.67   hannken 	vp->v_holdcnt++;
   1351       1.67   hannken 	VSTATE_WAIT_STABLE(vp);
   1352       1.67   hannken 	vp->v_holdcnt--;
   1353       1.66   hannken 
   1354       1.67   hannken 	/* If this was the last reference to a reclaimed vnode free it now. */
   1355       1.67   hannken 	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
   1356       1.67   hannken 		if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
   1357       1.67   hannken 			vcache_free(VNODE_TO_VIMPL(vp));
   1358       1.67   hannken 		else
   1359       1.67   hannken 			mutex_exit(vp->v_interlock);
   1360       1.66   hannken 		return ENOENT;
   1361       1.66   hannken 	}
   1362       1.94   hannken 	VSTATE_ASSERT(vp, VS_LOADED);
   1363  1.105.2.5        ad 	if (vp->v_usecount == 0)
   1364  1.105.2.5        ad 		vp->v_usecount = 1;
   1365  1.105.2.5        ad 	else
   1366  1.105.2.5        ad 		atomic_inc_uint(&vp->v_usecount);
   1367       1.66   hannken 	mutex_exit(vp->v_interlock);
   1368       1.66   hannken 
   1369       1.66   hannken 	return 0;
   1370       1.66   hannken }
   1371       1.66   hannken 
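/*
 * Sketch of the intended split between the two getters (hypothetical
 * caller): lock-order-sensitive fast paths use vcache_tryvget() and
 * treat EBUSY as "fall back to a context where sleeping is safe",
 * then retry with vcache_vget():
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vcache_tryvget(vp);	// never sleeps
 *	if (error == EBUSY) {
 *		...drop conflicting locks...
 *		mutex_enter(vp->v_interlock);
 *		error = vcache_vget(vp);	// may sleep
 *	}
 */
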
   1372       1.66   hannken /*
   1373       1.36   hannken  * Get a vnode / fs node pair by key and return it referenced through vpp.
   1374       1.36   hannken  */
   1375       1.36   hannken int
   1376       1.36   hannken vcache_get(struct mount *mp, const void *key, size_t key_len,
   1377       1.36   hannken     struct vnode **vpp)
   1378       1.36   hannken {
   1379       1.36   hannken 	int error;
   1380       1.36   hannken 	uint32_t hash;
   1381       1.36   hannken 	const void *new_key;
   1382       1.36   hannken 	struct vnode *vp;
   1383       1.36   hannken 	struct vcache_key vcache_key;
   1384       1.70   hannken 	vnode_impl_t *vip, *new_vip;
   1385       1.36   hannken 
   1386       1.36   hannken 	new_key = NULL;
   1387       1.36   hannken 	*vpp = NULL;
   1388       1.36   hannken 
   1389       1.36   hannken 	vcache_key.vk_mount = mp;
   1390       1.36   hannken 	vcache_key.vk_key = key;
   1391       1.36   hannken 	vcache_key.vk_key_len = key_len;
   1392       1.36   hannken 	hash = vcache_hash(&vcache_key);
   1393       1.36   hannken 
   1394       1.36   hannken again:
   1395       1.69   hannken 	mutex_enter(&vcache_lock);
   1396       1.70   hannken 	vip = vcache_hash_lookup(&vcache_key, hash);
   1397       1.36   hannken 
   1398       1.36   hannken 	/* If found, take a reference or retry. */
   1399       1.70   hannken 	if (__predict_true(vip != NULL)) {
   1400       1.52   hannken 		/*
   1401       1.52   hannken 		 * If the vnode is loading we cannot take the v_interlock
   1402       1.52   hannken 		 * here as it might change during load (see uvm_obj_setlock()).
   1403       1.69   hannken 		 * As changing state from VS_LOADING requires both vcache_lock
   1404       1.69   hannken 		 * and v_interlock it is safe to test with vcache_lock held.
   1405       1.52   hannken 		 *
   1406       1.57   hannken 		 * Wait for vnodes changing state from VS_LOADING and retry.
   1407       1.52   hannken 		 */
   1408       1.70   hannken 		if (__predict_false(vip->vi_state == VS_LOADING)) {
   1409       1.69   hannken 			cv_wait(&vcache_cv, &vcache_lock);
   1410       1.69   hannken 			mutex_exit(&vcache_lock);
   1411       1.52   hannken 			goto again;
   1412       1.52   hannken 		}
   1413       1.70   hannken 		vp = VIMPL_TO_VNODE(vip);
   1414       1.36   hannken 		mutex_enter(vp->v_interlock);
   1415       1.69   hannken 		mutex_exit(&vcache_lock);
   1416       1.66   hannken 		error = vcache_vget(vp);
   1417       1.36   hannken 		if (error == ENOENT)
   1418       1.36   hannken 			goto again;
   1419       1.36   hannken 		if (error == 0)
   1420       1.36   hannken 			*vpp = vp;
   1421       1.36   hannken 		KASSERT((error != 0) == (*vpp == NULL));
   1422       1.36   hannken 		return error;
   1423       1.36   hannken 	}
   1424       1.69   hannken 	mutex_exit(&vcache_lock);
   1425       1.36   hannken 
   1426       1.36   hannken 	/* Allocate and initialize a new vcache / vnode pair. */
   1427       1.87   hannken 	error = vfs_busy(mp);
   1428       1.36   hannken 	if (error)
   1429       1.36   hannken 		return error;
   1430       1.70   hannken 	new_vip = vcache_alloc();
   1431       1.70   hannken 	new_vip->vi_key = vcache_key;
   1432       1.70   hannken 	vp = VIMPL_TO_VNODE(new_vip);
   1433       1.69   hannken 	mutex_enter(&vcache_lock);
   1434       1.70   hannken 	vip = vcache_hash_lookup(&vcache_key, hash);
   1435       1.70   hannken 	if (vip == NULL) {
   1436       1.69   hannken 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
   1437       1.70   hannken 		    new_vip, vi_hash);
   1438       1.70   hannken 		vip = new_vip;
   1439       1.36   hannken 	}
   1440       1.36   hannken 
   1441       1.36   hannken 	/* If another thread beat us inserting this node, retry. */
   1442       1.70   hannken 	if (vip != new_vip) {
   1443       1.79   hannken 		vcache_dealloc(new_vip);
   1444       1.87   hannken 		vfs_unbusy(mp);
   1445       1.36   hannken 		goto again;
   1446       1.36   hannken 	}
   1447       1.69   hannken 	mutex_exit(&vcache_lock);
   1448       1.36   hannken 
    1449       1.57   hannken 	/* Load the fs node.  Exclusive as new_vip is VS_LOADING. */
   1450       1.36   hannken 	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
   1451       1.36   hannken 	if (error) {
   1452       1.69   hannken 		mutex_enter(&vcache_lock);
   1453       1.69   hannken 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
   1454       1.70   hannken 		    new_vip, vnode_impl, vi_hash);
   1455       1.79   hannken 		vcache_dealloc(new_vip);
   1456       1.87   hannken 		vfs_unbusy(mp);
   1457       1.36   hannken 		KASSERT(*vpp == NULL);
   1458       1.36   hannken 		return error;
   1459       1.36   hannken 	}
   1460       1.36   hannken 	KASSERT(new_key != NULL);
   1461       1.36   hannken 	KASSERT(memcmp(key, new_key, key_len) == 0);
   1462       1.36   hannken 	KASSERT(vp->v_op != NULL);
   1463       1.36   hannken 	vfs_insmntque(vp, mp);
   1464       1.36   hannken 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
   1465       1.36   hannken 		vp->v_vflag |= VV_MPSAFE;
   1466       1.87   hannken 	vfs_ref(mp);
   1467       1.87   hannken 	vfs_unbusy(mp);
   1468       1.36   hannken 
   1469       1.36   hannken 	/* Finished loading, finalize node. */
   1470       1.69   hannken 	mutex_enter(&vcache_lock);
   1471       1.70   hannken 	new_vip->vi_key.vk_key = new_key;
   1472       1.39   hannken 	mutex_enter(vp->v_interlock);
   1473       1.94   hannken 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
   1474       1.39   hannken 	mutex_exit(vp->v_interlock);
   1475       1.69   hannken 	mutex_exit(&vcache_lock);
   1476       1.36   hannken 	*vpp = vp;
   1477       1.36   hannken 	return 0;
   1478       1.36   hannken }
   1479       1.36   hannken 
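/*
 * Usage sketch: a file system's VFS_VGET() style lookup is typically
 * a thin wrapper around vcache_get(), keyed on the inode number.
 * Function name and locking policy below are illustrative
 * assumptions, not a prescription:
 *
 *	int
 *	xxx_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
 *	{
 *		int error;
 *
 *		error = vcache_get(mp, &ino, sizeof(ino), vpp);
 *		if (error)
 *			return error;
 *		// vcache_get() returns the vnode referenced but
 *		// unlocked; lock it if the caller expects that.
 *		error = vn_lock(*vpp, LK_EXCLUSIVE);
 *		if (error) {
 *			vrele(*vpp);
 *			*vpp = NULL;
 *		}
 *		return error;
 *	}
 */
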
   1480       1.36   hannken /*
   1481       1.40   hannken  * Create a new vnode / fs node pair and return it referenced through vpp.
   1482       1.40   hannken  */
   1483       1.40   hannken int
   1484       1.40   hannken vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
   1485      1.101   hannken     kauth_cred_t cred, void *extra, struct vnode **vpp)
   1486       1.40   hannken {
   1487       1.40   hannken 	int error;
   1488       1.40   hannken 	uint32_t hash;
   1489       1.70   hannken 	struct vnode *vp, *ovp;
   1490       1.70   hannken 	vnode_impl_t *vip, *ovip;
   1491       1.40   hannken 
   1492       1.40   hannken 	*vpp = NULL;
   1493       1.40   hannken 
   1494       1.40   hannken 	/* Allocate and initialize a new vcache / vnode pair. */
   1495       1.87   hannken 	error = vfs_busy(mp);
   1496       1.40   hannken 	if (error)
   1497       1.40   hannken 		return error;
   1498       1.70   hannken 	vip = vcache_alloc();
   1499       1.70   hannken 	vip->vi_key.vk_mount = mp;
   1500       1.70   hannken 	vp = VIMPL_TO_VNODE(vip);
   1501       1.40   hannken 
   1502       1.40   hannken 	/* Create and load the fs node. */
   1503      1.101   hannken 	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
   1504       1.70   hannken 	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
   1505       1.40   hannken 	if (error) {
   1506       1.69   hannken 		mutex_enter(&vcache_lock);
   1507       1.79   hannken 		vcache_dealloc(vip);
   1508       1.87   hannken 		vfs_unbusy(mp);
   1509       1.40   hannken 		KASSERT(*vpp == NULL);
   1510       1.40   hannken 		return error;
   1511       1.40   hannken 	}
   1512       1.40   hannken 	KASSERT(vp->v_op != NULL);
   1513       1.97   hannken 	KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
   1514       1.97   hannken 	if (vip->vi_key.vk_key_len > 0) {
   1515       1.97   hannken 		KASSERT(vip->vi_key.vk_key != NULL);
   1516       1.97   hannken 		hash = vcache_hash(&vip->vi_key);
   1517       1.40   hannken 
   1518       1.97   hannken 		/*
   1519       1.97   hannken 		 * Wait for previous instance to be reclaimed,
   1520       1.97   hannken 		 * then insert new node.
   1521       1.97   hannken 		 */
   1522       1.97   hannken 		mutex_enter(&vcache_lock);
   1523       1.97   hannken 		while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
   1524       1.97   hannken 			ovp = VIMPL_TO_VNODE(ovip);
   1525       1.97   hannken 			mutex_enter(ovp->v_interlock);
   1526       1.97   hannken 			mutex_exit(&vcache_lock);
   1527       1.97   hannken 			error = vcache_vget(ovp);
   1528       1.97   hannken 			KASSERT(error == ENOENT);
   1529       1.97   hannken 			mutex_enter(&vcache_lock);
   1530       1.97   hannken 		}
   1531       1.97   hannken 		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
   1532       1.97   hannken 		    vip, vi_hash);
   1533       1.69   hannken 		mutex_exit(&vcache_lock);
   1534       1.40   hannken 	}
   1535       1.40   hannken 	vfs_insmntque(vp, mp);
   1536       1.40   hannken 	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
   1537       1.40   hannken 		vp->v_vflag |= VV_MPSAFE;
   1538       1.87   hannken 	vfs_ref(mp);
   1539       1.87   hannken 	vfs_unbusy(mp);
   1540       1.40   hannken 
   1541       1.40   hannken 	/* Finished loading, finalize node. */
   1542       1.69   hannken 	mutex_enter(&vcache_lock);
   1543       1.52   hannken 	mutex_enter(vp->v_interlock);
   1544       1.94   hannken 	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
   1545       1.69   hannken 	mutex_exit(&vcache_lock);
   1546       1.40   hannken 	mutex_exit(vp->v_interlock);
   1547       1.40   hannken 	*vpp = vp;
   1548       1.40   hannken 	return 0;
   1549       1.40   hannken }
   1550       1.40   hannken 
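/*
 * Usage sketch: create-style operations (VOP_CREATE, VOP_MKDIR and
 * friends) typically allocate the new inode through vcache_new() so
 * the vnode is born cached and referenced.  Hypothetical fragment:
 *
 *	error = vcache_new(dvp->v_mount, dvp, vap, cnp->cn_cred, NULL,
 *	    &vp);
 *	if (error)
 *		return error;
 *	// vp is referenced but unlocked at this point
 */
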
   1551       1.40   hannken /*
    1552       1.65   hannken  * Prepare key change: update old cache node's key and lock new cache node.
   1553       1.37   hannken  * Return an error if the new node already exists.
   1554       1.37   hannken  */
   1555       1.37   hannken int
   1556       1.37   hannken vcache_rekey_enter(struct mount *mp, struct vnode *vp,
   1557       1.37   hannken     const void *old_key, size_t old_key_len,
   1558       1.37   hannken     const void *new_key, size_t new_key_len)
   1559       1.37   hannken {
   1560       1.37   hannken 	uint32_t old_hash, new_hash;
   1561       1.37   hannken 	struct vcache_key old_vcache_key, new_vcache_key;
   1562       1.70   hannken 	vnode_impl_t *vip, *new_vip;
   1563       1.37   hannken 
   1564       1.37   hannken 	old_vcache_key.vk_mount = mp;
   1565       1.37   hannken 	old_vcache_key.vk_key = old_key;
   1566       1.37   hannken 	old_vcache_key.vk_key_len = old_key_len;
   1567       1.37   hannken 	old_hash = vcache_hash(&old_vcache_key);
   1568       1.37   hannken 
   1569       1.37   hannken 	new_vcache_key.vk_mount = mp;
   1570       1.37   hannken 	new_vcache_key.vk_key = new_key;
   1571       1.37   hannken 	new_vcache_key.vk_key_len = new_key_len;
   1572       1.37   hannken 	new_hash = vcache_hash(&new_vcache_key);
   1573       1.37   hannken 
   1574       1.70   hannken 	new_vip = vcache_alloc();
   1575       1.70   hannken 	new_vip->vi_key = new_vcache_key;
   1576       1.37   hannken 
   1577       1.52   hannken 	/* Insert locked new node used as placeholder. */
   1578       1.69   hannken 	mutex_enter(&vcache_lock);
   1579       1.70   hannken 	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
   1580       1.70   hannken 	if (vip != NULL) {
   1581       1.79   hannken 		vcache_dealloc(new_vip);
   1582       1.37   hannken 		return EEXIST;
   1583       1.37   hannken 	}
   1584       1.69   hannken 	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
   1585       1.70   hannken 	    new_vip, vi_hash);
   1586       1.49   hannken 
    1587       1.65   hannken 	/* Replace old node's key with the temporary copy. */
   1588       1.70   hannken 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
   1589       1.70   hannken 	KASSERT(vip != NULL);
   1590       1.70   hannken 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
   1591       1.70   hannken 	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
   1592       1.70   hannken 	vip->vi_key = old_vcache_key;
   1593       1.69   hannken 	mutex_exit(&vcache_lock);
   1594       1.37   hannken 	return 0;
   1595       1.37   hannken }
   1596       1.37   hannken 
   1597       1.37   hannken /*
   1598       1.65   hannken  * Key change complete: update old node and remove placeholder.
   1599       1.37   hannken  */
   1600       1.37   hannken void
   1601       1.37   hannken vcache_rekey_exit(struct mount *mp, struct vnode *vp,
   1602       1.37   hannken     const void *old_key, size_t old_key_len,
   1603       1.37   hannken     const void *new_key, size_t new_key_len)
   1604       1.37   hannken {
   1605       1.37   hannken 	uint32_t old_hash, new_hash;
   1606       1.37   hannken 	struct vcache_key old_vcache_key, new_vcache_key;
   1607       1.70   hannken 	vnode_impl_t *vip, *new_vip;
   1608       1.70   hannken 	struct vnode *new_vp;
   1609       1.37   hannken 
   1610       1.37   hannken 	old_vcache_key.vk_mount = mp;
   1611       1.37   hannken 	old_vcache_key.vk_key = old_key;
   1612       1.37   hannken 	old_vcache_key.vk_key_len = old_key_len;
   1613       1.37   hannken 	old_hash = vcache_hash(&old_vcache_key);
   1614       1.37   hannken 
   1615       1.37   hannken 	new_vcache_key.vk_mount = mp;
   1616       1.37   hannken 	new_vcache_key.vk_key = new_key;
   1617       1.37   hannken 	new_vcache_key.vk_key_len = new_key_len;
   1618       1.37   hannken 	new_hash = vcache_hash(&new_vcache_key);
   1619       1.37   hannken 
   1620       1.69   hannken 	mutex_enter(&vcache_lock);
   1621       1.49   hannken 
   1622       1.49   hannken 	/* Lookup old and new node. */
   1623       1.70   hannken 	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
   1624       1.70   hannken 	KASSERT(vip != NULL);
   1625       1.70   hannken 	KASSERT(VIMPL_TO_VNODE(vip) == vp);
   1626       1.70   hannken 
   1627       1.70   hannken 	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
   1628       1.70   hannken 	KASSERT(new_vip != NULL);
   1629       1.70   hannken 	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
   1630       1.70   hannken 	new_vp = VIMPL_TO_VNODE(new_vip);
   1631       1.70   hannken 	mutex_enter(new_vp->v_interlock);
   1632       1.70   hannken 	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
   1633       1.79   hannken 	mutex_exit(new_vp->v_interlock);
   1634       1.49   hannken 
   1635       1.49   hannken 	/* Rekey old node and put it onto its new hashlist. */
   1636       1.70   hannken 	vip->vi_key = new_vcache_key;
   1637       1.49   hannken 	if (old_hash != new_hash) {
   1638       1.69   hannken 		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
   1639       1.70   hannken 		    vip, vnode_impl, vi_hash);
   1640       1.69   hannken 		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
   1641       1.70   hannken 		    vip, vi_hash);
   1642       1.49   hannken 	}
   1643       1.49   hannken 
   1644       1.49   hannken 	/* Remove new node used as placeholder. */
   1645       1.69   hannken 	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
   1646       1.70   hannken 	    new_vip, vnode_impl, vi_hash);
   1647       1.79   hannken 	vcache_dealloc(new_vip);
   1648       1.37   hannken }
   1649       1.37   hannken 
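/*
 * Rekey protocol sketch: a file system whose key may change while the
 * vnode stays alive (a directory-offset based key, for example)
 * brackets the metadata update with the two functions above.  The
 * locked placeholder inserted by vcache_rekey_enter() keeps
 * concurrent vcache_get() callers off the new key until
 * vcache_rekey_exit() commits it.  Hypothetical fragment:
 *
 *	error = vcache_rekey_enter(mp, vp, &okey, sizeof(okey),
 *	    &nkey, sizeof(nkey));
 *	if (error)
 *		return error;	// EEXIST: new key already cached
 *	...perform the rename / metadata update...
 *	vcache_rekey_exit(mp, vp, &okey, sizeof(okey),
 *	    &nkey, sizeof(nkey));
 */
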
   1650       1.37   hannken /*
   1651       1.54   hannken  * Disassociate the underlying file system from a vnode.
   1652       1.54   hannken  *
   1653       1.54   hannken  * Must be called with vnode locked and will return unlocked.
   1654       1.54   hannken  * Must be called with the interlock held, and will return with it held.
   1655       1.54   hannken  */
   1656       1.54   hannken static void
   1657       1.54   hannken vcache_reclaim(vnode_t *vp)
   1658       1.54   hannken {
   1659       1.54   hannken 	lwp_t *l = curlwp;
   1660       1.70   hannken 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
   1661       1.74   hannken 	struct mount *mp = vp->v_mount;
   1662       1.55   hannken 	uint32_t hash;
   1663       1.55   hannken 	uint8_t temp_buf[64], *temp_key;
   1664       1.55   hannken 	size_t temp_key_len;
   1665       1.54   hannken 	bool recycle, active;
   1666       1.54   hannken 	int error;
   1667       1.54   hannken 
   1668       1.54   hannken 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
   1669       1.54   hannken 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
   1670       1.54   hannken 	KASSERT(mutex_owned(vp->v_interlock));
   1671       1.54   hannken 	KASSERT(vp->v_usecount != 0);
   1672       1.54   hannken 
   1673       1.54   hannken 	active = (vp->v_usecount > 1);
   1674       1.70   hannken 	temp_key_len = vip->vi_key.vk_key_len;
   1675       1.54   hannken 	/*
   1676       1.54   hannken 	 * Prevent the vnode from being recycled or brought into use
   1677       1.54   hannken 	 * while we clean it out.
   1678       1.54   hannken 	 */
   1679       1.94   hannken 	VSTATE_CHANGE(vp, VS_LOADED, VS_RECLAIMING);
   1680      1.104        ad 	if ((vp->v_iflag & VI_EXECMAP) != 0 && vp->v_uobj.uo_npages != 0) {
   1681      1.105        ad 		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
   1682      1.105        ad 		cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
   1683       1.54   hannken 	}
   1684       1.54   hannken 	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
   1685       1.54   hannken 	mutex_exit(vp->v_interlock);
   1686       1.54   hannken 
   1687  1.105.2.4        ad 	/*
   1688  1.105.2.4        ad 	 * With vnode state set to reclaiming, purge name cache immediately
   1689  1.105.2.4        ad 	 * to prevent new handles on vnode, and wait for existing threads
   1690  1.105.2.4        ad 	 * trying to get a handle to notice VS_RECLAIMED status and abort.
   1691  1.105.2.4        ad 	 */
   1692  1.105.2.4        ad 	cache_purge(vp);
   1693  1.105.2.4        ad 
   1694       1.55   hannken 	/* Replace the vnode key with a temporary copy. */
   1695       1.70   hannken 	if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
   1696       1.55   hannken 		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
   1697       1.55   hannken 	} else {
   1698       1.55   hannken 		temp_key = temp_buf;
   1699       1.55   hannken 	}
   1700       1.97   hannken 	if (vip->vi_key.vk_key_len > 0) {
   1701       1.97   hannken 		mutex_enter(&vcache_lock);
   1702       1.97   hannken 		memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
   1703       1.97   hannken 		vip->vi_key.vk_key = temp_key;
   1704       1.97   hannken 		mutex_exit(&vcache_lock);
   1705       1.97   hannken 	}
   1706       1.55   hannken 
   1707       1.96   hannken 	fstrans_start(mp);
   1708       1.74   hannken 
   1709       1.54   hannken 	/*
   1710       1.54   hannken 	 * Clean out any cached data associated with the vnode.
   1711       1.54   hannken 	 * If purging an active vnode, it must be closed and
   1712       1.60   hannken 	 * deactivated before being reclaimed.
   1713       1.54   hannken 	 */
   1714       1.54   hannken 	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
   1715       1.54   hannken 	if (error != 0) {
   1716       1.54   hannken 		if (wapbl_vphaswapbl(vp))
   1717       1.54   hannken 			WAPBL_DISCARD(wapbl_vptomp(vp));
   1718       1.54   hannken 		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
   1719       1.54   hannken 	}
   1720       1.54   hannken 	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
   1721       1.54   hannken 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1722       1.54   hannken 	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
    1723       1.54   hannken 		spec_node_revoke(vp);
   1724       1.54   hannken 	}
   1725       1.54   hannken 
   1726       1.60   hannken 	/*
   1727       1.60   hannken 	 * Disassociate the underlying file system from the vnode.
   1728       1.90  riastrad 	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
   1729       1.90  riastrad 	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
   1730       1.90  riastrad 	 * would no longer function.
   1731       1.60   hannken 	 */
   1732       1.60   hannken 	VOP_INACTIVE(vp, &recycle);
   1733       1.91  riastrad 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
   1734       1.91  riastrad 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
   1735       1.54   hannken 	if (VOP_RECLAIM(vp)) {
   1736       1.54   hannken 		vnpanic(vp, "%s: cannot reclaim", __func__);
   1737       1.54   hannken 	}
   1738       1.54   hannken 
   1739       1.54   hannken 	KASSERT(vp->v_data == NULL);
   1740       1.54   hannken 	KASSERT(vp->v_uobj.uo_npages == 0);
   1741       1.54   hannken 
   1742       1.54   hannken 	if (vp->v_type == VREG && vp->v_ractx != NULL) {
   1743       1.54   hannken 		uvm_ra_freectx(vp->v_ractx);
   1744       1.54   hannken 		vp->v_ractx = NULL;
   1745       1.54   hannken 	}
   1746       1.54   hannken 
   1747       1.97   hannken 	if (vip->vi_key.vk_key_len > 0) {
   1748       1.55   hannken 	/* Remove from vnode cache. */
   1749       1.97   hannken 		hash = vcache_hash(&vip->vi_key);
   1750       1.97   hannken 		mutex_enter(&vcache_lock);
   1751       1.97   hannken 		KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
   1752       1.97   hannken 		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
   1753       1.97   hannken 		    vip, vnode_impl, vi_hash);
   1754       1.97   hannken 		mutex_exit(&vcache_lock);
   1755       1.97   hannken 	}
   1756       1.55   hannken 	if (temp_key != temp_buf)
   1757       1.55   hannken 		kmem_free(temp_key, temp_key_len);
   1758       1.55   hannken 
   1759       1.54   hannken 	/* Done with purge, notify sleepers of the grim news. */
   1760       1.54   hannken 	mutex_enter(vp->v_interlock);
   1761       1.54   hannken 	vp->v_op = dead_vnodeop_p;
   1762       1.54   hannken 	vp->v_vflag |= VV_LOCKSWORK;
   1763       1.57   hannken 	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
   1764       1.54   hannken 	vp->v_tag = VT_NON;
   1765       1.54   hannken 	KNOTE(&vp->v_klist, NOTE_REVOKE);
   1766       1.80   hannken 	mutex_exit(vp->v_interlock);
   1767       1.54   hannken 
   1768       1.80   hannken 	/*
   1769       1.80   hannken 	 * Move to dead mount.  Must be after changing the operations
   1770       1.80   hannken 	 * vector as vnode operations enter the mount before using the
   1771       1.80   hannken 	 * operations vector.  See sys/kern/vnode_if.c.
   1772       1.80   hannken 	 */
   1773       1.80   hannken 	vp->v_vflag &= ~VV_ROOT;
   1774       1.86   hannken 	vfs_ref(dead_rootmount);
   1775       1.80   hannken 	vfs_insmntque(vp, dead_rootmount);
   1776       1.80   hannken 
   1777  1.105.2.7        ad #ifdef PAX_SEGVGUARD
   1778  1.105.2.7        ad 	pax_segvguard_cleanup(vp);
   1779  1.105.2.7        ad #endif /* PAX_SEGVGUARD */
   1780  1.105.2.7        ad 
   1781       1.80   hannken 	mutex_enter(vp->v_interlock);
   1782       1.74   hannken 	fstrans_done(mp);
   1783       1.54   hannken 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1784       1.54   hannken }
   1785       1.54   hannken 
   1786       1.54   hannken /*
   1787       1.98   hannken  * Disassociate the underlying file system from an open device vnode
   1788       1.98   hannken  * and make it anonymous.
   1789       1.98   hannken  *
   1790       1.98   hannken  * Vnode unlocked on entry, drops a reference to the vnode.
   1791       1.98   hannken  */
   1792       1.98   hannken void
   1793       1.98   hannken vcache_make_anon(vnode_t *vp)
   1794       1.98   hannken {
   1795       1.98   hannken 	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
   1796       1.98   hannken 	uint32_t hash;
   1797       1.98   hannken 	bool recycle;
   1798       1.98   hannken 
   1799       1.98   hannken 	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
   1800      1.103   hannken 	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
   1801       1.98   hannken 	VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);
   1802       1.98   hannken 
   1803       1.98   hannken 	/* Remove from vnode cache. */
   1804       1.98   hannken 	hash = vcache_hash(&vip->vi_key);
   1805       1.98   hannken 	mutex_enter(&vcache_lock);
   1806       1.98   hannken 	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
   1807       1.98   hannken 	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
   1808       1.98   hannken 	    vip, vnode_impl, vi_hash);
   1809       1.98   hannken 	vip->vi_key.vk_mount = dead_rootmount;
   1810       1.98   hannken 	vip->vi_key.vk_key_len = 0;
   1811       1.98   hannken 	vip->vi_key.vk_key = NULL;
   1812       1.98   hannken 	mutex_exit(&vcache_lock);
   1813       1.98   hannken 
   1814       1.98   hannken 	/*
   1815       1.98   hannken 	 * Disassociate the underlying file system from the vnode.
   1816       1.98   hannken 	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
   1817       1.98   hannken 	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
   1818       1.98   hannken 	 * would no longer function.
   1819       1.98   hannken 	 */
   1820       1.98   hannken 	if (vn_lock(vp, LK_EXCLUSIVE)) {
   1821       1.98   hannken 		vnpanic(vp, "%s: cannot lock", __func__);
   1822       1.98   hannken 	}
   1823       1.98   hannken 	VOP_INACTIVE(vp, &recycle);
   1824       1.98   hannken 	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
   1825       1.98   hannken 	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
   1826       1.98   hannken 	if (VOP_RECLAIM(vp)) {
   1827       1.98   hannken 		vnpanic(vp, "%s: cannot reclaim", __func__);
   1828       1.98   hannken 	}
   1829       1.98   hannken 
   1830       1.98   hannken 	/* Purge name cache. */
   1831       1.98   hannken 	cache_purge(vp);
   1832       1.98   hannken 
   1833       1.98   hannken 	/* Done with purge, change operations vector. */
   1834       1.98   hannken 	mutex_enter(vp->v_interlock);
   1835       1.98   hannken 	vp->v_op = spec_vnodeop_p;
   1836       1.98   hannken 	vp->v_vflag |= VV_MPSAFE;
   1837       1.98   hannken 	vp->v_vflag &= ~VV_LOCKSWORK;
   1838       1.98   hannken 	mutex_exit(vp->v_interlock);
   1839       1.98   hannken 
   1840       1.98   hannken 	/*
   1841       1.98   hannken 	 * Move to dead mount.  Must be after changing the operations
   1842       1.98   hannken 	 * vector as vnode operations enter the mount before using the
   1843       1.98   hannken 	 * operations vector.  See sys/kern/vnode_if.c.
   1844       1.98   hannken 	 */
   1845       1.98   hannken 	vfs_ref(dead_rootmount);
   1846       1.98   hannken 	vfs_insmntque(vp, dead_rootmount);
   1847       1.98   hannken 
   1848       1.98   hannken 	vrele(vp);
   1849       1.98   hannken }
   1850       1.98   hannken 
   1851       1.98   hannken /*
   1852        1.1     rmind  * Update outstanding I/O count and do wakeup if requested.
   1853        1.1     rmind  */
   1854        1.1     rmind void
   1855        1.1     rmind vwakeup(struct buf *bp)
   1856        1.1     rmind {
   1857        1.1     rmind 	vnode_t *vp;
   1858        1.1     rmind 
   1859        1.1     rmind 	if ((vp = bp->b_vp) == NULL)
   1860        1.1     rmind 		return;
   1861        1.1     rmind 
   1862        1.9     rmind 	KASSERT(bp->b_objlock == vp->v_interlock);
   1863        1.1     rmind 	KASSERT(mutex_owned(bp->b_objlock));
   1864        1.1     rmind 
   1865        1.1     rmind 	if (--vp->v_numoutput < 0)
   1866       1.11  christos 		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
   1867        1.1     rmind 	if (vp->v_numoutput == 0)
   1868        1.1     rmind 		cv_broadcast(&vp->v_cv);
   1869        1.1     rmind }
   1870        1.1     rmind 
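/*
 * Consumer-side sketch: code that must wait for all pending writes on
 * a vnode drains v_numoutput under v_interlock; vwakeup() above is
 * the matching broadcast:
 *
 *	mutex_enter(vp->v_interlock);
 *	while (vp->v_numoutput > 0)
 *		cv_wait(&vp->v_cv, vp->v_interlock);
 *	mutex_exit(vp->v_interlock);
 */
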
   1871        1.1     rmind /*
   1872       1.35   hannken  * Test a vnode for being or becoming dead.  Returns one of:
   1873       1.35   hannken  * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
   1874       1.35   hannken  * ENOENT: vnode is dead.
   1875       1.35   hannken  * 0:      otherwise.
   1876       1.35   hannken  *
   1877       1.35   hannken  * Whenever this function returns a non-zero value all future
   1878       1.35   hannken  * calls will also return a non-zero value.
   1879       1.35   hannken  */
   1880       1.35   hannken int
   1881       1.35   hannken vdead_check(struct vnode *vp, int flags)
   1882       1.35   hannken {
   1883       1.35   hannken 
   1884       1.35   hannken 	KASSERT(mutex_owned(vp->v_interlock));
   1885       1.35   hannken 
   1886       1.52   hannken 	if (! ISSET(flags, VDEAD_NOWAIT))
   1887       1.52   hannken 		VSTATE_WAIT_STABLE(vp);
   1888        1.1     rmind 
   1889       1.57   hannken 	if (VSTATE_GET(vp) == VS_RECLAIMING) {
   1890       1.52   hannken 		KASSERT(ISSET(flags, VDEAD_NOWAIT));
   1891       1.52   hannken 		return EBUSY;
   1892       1.57   hannken 	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
   1893       1.52   hannken 		return ENOENT;
   1894       1.52   hannken 	}
   1895        1.1     rmind 
   1896       1.52   hannken 	return 0;
   1897        1.1     rmind }
   1898        1.1     rmind 
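/*
 * Usage sketch (hypothetical caller): code holding v_interlock that
 * must not sleep probes with VDEAD_NOWAIT and backs out on EBUSY;
 * callers that may sleep pass 0 and only ever see 0 or ENOENT.
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		return error;	// dead or dying
 */
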
   1899        1.1     rmind int
   1900       1.61   hannken vfs_drainvnodes(void)
   1901        1.1     rmind {
   1902       1.63   hannken 	int i, gen;
   1903       1.61   hannken 
   1904       1.63   hannken 	mutex_enter(&vdrain_lock);
   1905       1.63   hannken 	for (i = 0; i < 2; i++) {
   1906       1.63   hannken 		gen = vdrain_gen;
   1907       1.63   hannken 		while (gen == vdrain_gen) {
   1908       1.63   hannken 			cv_broadcast(&vdrain_cv);
   1909       1.63   hannken 			cv_wait(&vdrain_gen_cv, &vdrain_lock);
   1910       1.63   hannken 		}
   1911       1.61   hannken 	}
   1912       1.63   hannken 	mutex_exit(&vdrain_lock);
   1913       1.12   hannken 
   1914       1.63   hannken 	if (numvnodes >= desiredvnodes)
   1915       1.63   hannken 		return EBUSY;
   1916       1.12   hannken 
   1917       1.69   hannken 	if (vcache_hashsize != desiredvnodes)
   1918       1.61   hannken 		vcache_reinit();
   1919       1.36   hannken 
   1920        1.1     rmind 	return 0;
   1921        1.1     rmind }
   1922        1.1     rmind 
   1923        1.1     rmind void
   1924       1.11  christos vnpanic(vnode_t *vp, const char *fmt, ...)
   1925        1.1     rmind {
   1926       1.11  christos 	va_list ap;
   1927       1.11  christos 
   1928        1.1     rmind #ifdef DIAGNOSTIC
   1929        1.1     rmind 	vprint(NULL, vp);
   1930        1.1     rmind #endif
   1931       1.11  christos 	va_start(ap, fmt);
   1932       1.11  christos 	vpanic(fmt, ap);
   1933       1.11  christos 	va_end(ap);
   1934        1.1     rmind }
   1935