vfs_vnode.c revision 1.5.2.3
      1  1.5.2.3  rmind /*	$NetBSD: vfs_vnode.c,v 1.5.2.3 2011/05/19 03:43:03 rmind Exp $	*/
      2  1.5.2.2  rmind 
      3  1.5.2.2  rmind /*-
      4  1.5.2.2  rmind  * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
      5  1.5.2.2  rmind  * All rights reserved.
      6  1.5.2.2  rmind  *
      7  1.5.2.2  rmind  * This code is derived from software contributed to The NetBSD Foundation
      8  1.5.2.2  rmind  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  1.5.2.2  rmind  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
     10  1.5.2.2  rmind  *
     11  1.5.2.2  rmind  * Redistribution and use in source and binary forms, with or without
     12  1.5.2.2  rmind  * modification, are permitted provided that the following conditions
     13  1.5.2.2  rmind  * are met:
     14  1.5.2.2  rmind  * 1. Redistributions of source code must retain the above copyright
     15  1.5.2.2  rmind  *    notice, this list of conditions and the following disclaimer.
     16  1.5.2.2  rmind  * 2. Redistributions in binary form must reproduce the above copyright
     17  1.5.2.2  rmind  *    notice, this list of conditions and the following disclaimer in the
     18  1.5.2.2  rmind  *    documentation and/or other materials provided with the distribution.
     19  1.5.2.2  rmind  *
     20  1.5.2.2  rmind  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  1.5.2.2  rmind  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  1.5.2.2  rmind  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  1.5.2.2  rmind  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  1.5.2.2  rmind  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  1.5.2.2  rmind  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  1.5.2.2  rmind  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  1.5.2.2  rmind  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  1.5.2.2  rmind  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  1.5.2.2  rmind  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  1.5.2.2  rmind  * POSSIBILITY OF SUCH DAMAGE.
     31  1.5.2.2  rmind  */
     32  1.5.2.2  rmind 
     33  1.5.2.2  rmind /*
     34  1.5.2.2  rmind  * Copyright (c) 1989, 1993
     35  1.5.2.2  rmind  *	The Regents of the University of California.  All rights reserved.
     36  1.5.2.2  rmind  * (c) UNIX System Laboratories, Inc.
     37  1.5.2.2  rmind  * All or some portions of this file are derived from material licensed
     38  1.5.2.2  rmind  * to the University of California by American Telephone and Telegraph
     39  1.5.2.2  rmind  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     40  1.5.2.2  rmind  * the permission of UNIX System Laboratories, Inc.
     41  1.5.2.2  rmind  *
     42  1.5.2.2  rmind  * Redistribution and use in source and binary forms, with or without
     43  1.5.2.2  rmind  * modification, are permitted provided that the following conditions
     44  1.5.2.2  rmind  * are met:
     45  1.5.2.2  rmind  * 1. Redistributions of source code must retain the above copyright
     46  1.5.2.2  rmind  *    notice, this list of conditions and the following disclaimer.
     47  1.5.2.2  rmind  * 2. Redistributions in binary form must reproduce the above copyright
     48  1.5.2.2  rmind  *    notice, this list of conditions and the following disclaimer in the
     49  1.5.2.2  rmind  *    documentation and/or other materials provided with the distribution.
     50  1.5.2.2  rmind  * 3. Neither the name of the University nor the names of its contributors
     51  1.5.2.2  rmind  *    may be used to endorse or promote products derived from this software
     52  1.5.2.2  rmind  *    without specific prior written permission.
     53  1.5.2.2  rmind  *
     54  1.5.2.2  rmind  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     55  1.5.2.2  rmind  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     56  1.5.2.2  rmind  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     57  1.5.2.2  rmind  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     58  1.5.2.2  rmind  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     59  1.5.2.2  rmind  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     60  1.5.2.2  rmind  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     61  1.5.2.2  rmind  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     62  1.5.2.2  rmind  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     63  1.5.2.2  rmind  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     64  1.5.2.2  rmind  * SUCH DAMAGE.
     65  1.5.2.2  rmind  *
     66  1.5.2.2  rmind  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
     67  1.5.2.2  rmind  */
     68  1.5.2.2  rmind 
     69  1.5.2.2  rmind /*
     70  1.5.2.2  rmind  * Note on v_usecount and locking:
     71  1.5.2.2  rmind  *
      72  1.5.2.2  rmind  * At nearly all points where it is known that v_usecount could be zero,
      73  1.5.2.2  rmind  * the vnode interlock will be held.
     74  1.5.2.2  rmind  *
     75  1.5.2.2  rmind  * To change v_usecount away from zero, the interlock must be held.  To
     76  1.5.2.2  rmind  * change from a non-zero value to zero, again the interlock must be
     77  1.5.2.2  rmind  * held.
     78  1.5.2.2  rmind  *
     79  1.5.2.2  rmind  * There's a flag bit, VC_XLOCK, embedded in v_usecount.
     80  1.5.2.2  rmind  * To raise v_usecount, if the VC_XLOCK bit is set in it, the interlock
     81  1.5.2.2  rmind  * must be held.
     82  1.5.2.2  rmind  * To modify the VC_XLOCK bit, the interlock must be held.
     83  1.5.2.2  rmind  * We always keep the usecount (v_usecount & VC_MASK) non-zero while the
     84  1.5.2.2  rmind  * VC_XLOCK bit is set.
     85  1.5.2.2  rmind  *
     86  1.5.2.2  rmind  * Unless the VC_XLOCK bit is set, changing the usecount from a non-zero
     87  1.5.2.2  rmind  * value to a non-zero value can safely be done using atomic operations,
     88  1.5.2.2  rmind  * without the interlock held.
     89  1.5.2.2  rmind  * Even if the VC_XLOCK bit is set, decreasing the usecount to a non-zero
     90  1.5.2.2  rmind  * value can be done using atomic operations, without the interlock held.
     91  1.5.2.2  rmind  */
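
/*
 * Illustrative sketch of the rules above (compare with vtryget() and
 * vtryrele() below, which implement the real loops): a lock-free
 * reference grab may only proceed when the current count is non-zero
 * and the VC_XLOCK bit is clear; anything else must take v_interlock.
 *
 *	u_int use = vp->v_usecount;
 *	if (use != 0 && (use & VC_XLOCK) == 0 &&
 *	    atomic_cas_uint(&vp->v_usecount, use, use + 1) == use) {
 *		reference gained without the interlock
 *	} else {
 *		mutex_enter(vp->v_interlock);
 *		slow path: vget(), or retry under the interlock
 *	}
 */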
     92  1.5.2.2  rmind 
     93  1.5.2.2  rmind #include <sys/cdefs.h>
     94  1.5.2.3  rmind __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.5.2.3 2011/05/19 03:43:03 rmind Exp $");
     95  1.5.2.2  rmind 
     96  1.5.2.2  rmind #include <sys/param.h>
     97  1.5.2.2  rmind #include <sys/kernel.h>
     98  1.5.2.2  rmind 
     99  1.5.2.2  rmind #include <sys/atomic.h>
    100  1.5.2.2  rmind #include <sys/buf.h>
    101  1.5.2.2  rmind #include <sys/conf.h>
    102  1.5.2.2  rmind #include <sys/device.h>
    103  1.5.2.2  rmind #include <sys/kauth.h>
    104  1.5.2.2  rmind #include <sys/kmem.h>
    105  1.5.2.2  rmind #include <sys/kthread.h>
    106  1.5.2.2  rmind #include <sys/module.h>
    107  1.5.2.2  rmind #include <sys/mount.h>
    108  1.5.2.2  rmind #include <sys/namei.h>
    109  1.5.2.2  rmind #include <sys/syscallargs.h>
    110  1.5.2.2  rmind #include <sys/sysctl.h>
    111  1.5.2.2  rmind #include <sys/systm.h>
    112  1.5.2.2  rmind #include <sys/vnode.h>
    113  1.5.2.2  rmind #include <sys/wapbl.h>
    114  1.5.2.2  rmind 
    115  1.5.2.2  rmind #include <uvm/uvm.h>
    116  1.5.2.2  rmind #include <uvm/uvm_readahead.h>
    117  1.5.2.2  rmind 
    118  1.5.2.2  rmind u_int			numvnodes;
    119  1.5.2.2  rmind 
    120  1.5.2.2  rmind static pool_cache_t	vnode_cache;
    121  1.5.2.2  rmind static kmutex_t		vnode_free_list_lock;
    122  1.5.2.2  rmind 
    123  1.5.2.2  rmind static vnodelst_t	vnode_free_list;
    124  1.5.2.2  rmind static vnodelst_t	vnode_hold_list;
    125  1.5.2.2  rmind static vnodelst_t	vrele_list;
    126  1.5.2.2  rmind 
    127  1.5.2.2  rmind static kmutex_t		vrele_lock;
    128  1.5.2.2  rmind static kcondvar_t	vrele_cv;
    129  1.5.2.2  rmind static lwp_t *		vrele_lwp;
    130  1.5.2.2  rmind static int		vrele_pending;
    131  1.5.2.2  rmind static int		vrele_gen;
    132  1.5.2.2  rmind 
    133  1.5.2.2  rmind static vnode_t *	getcleanvnode(void);
    134  1.5.2.2  rmind static void		vrele_thread(void *);
    135  1.5.2.2  rmind static void		vpanic(vnode_t *, const char *);
    136  1.5.2.2  rmind 
    137  1.5.2.2  rmind /* Routines having to do with the management of the vnode table. */
    138  1.5.2.2  rmind extern int		(**dead_vnodeop_p)(void *);
    139  1.5.2.2  rmind 
    140  1.5.2.2  rmind void
    141  1.5.2.2  rmind vfs_vnode_sysinit(void)
    142  1.5.2.2  rmind {
    143  1.5.2.2  rmind 	int error;
    144  1.5.2.2  rmind 
    145  1.5.2.2  rmind 	vnode_cache = pool_cache_init(sizeof(vnode_t), 0, 0, 0, "vnodepl",
    146  1.5.2.2  rmind 	    NULL, IPL_NONE, NULL, NULL, NULL);
    147  1.5.2.2  rmind 	KASSERT(vnode_cache != NULL);
    148  1.5.2.2  rmind 
    149  1.5.2.2  rmind 	mutex_init(&vnode_free_list_lock, MUTEX_DEFAULT, IPL_NONE);
    150  1.5.2.2  rmind 	TAILQ_INIT(&vnode_free_list);
    151  1.5.2.2  rmind 	TAILQ_INIT(&vnode_hold_list);
    152  1.5.2.2  rmind 	TAILQ_INIT(&vrele_list);
    153  1.5.2.2  rmind 
    154  1.5.2.2  rmind 	mutex_init(&vrele_lock, MUTEX_DEFAULT, IPL_NONE);
    155  1.5.2.2  rmind 	cv_init(&vrele_cv, "vrele");
    156  1.5.2.2  rmind 	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vrele_thread,
    157  1.5.2.2  rmind 	    NULL, &vrele_lwp, "vrele");
    158  1.5.2.2  rmind 	KASSERT(error == 0);
    159  1.5.2.2  rmind }
    160  1.5.2.2  rmind 
    161  1.5.2.2  rmind /*
    162  1.5.2.2  rmind  * Allocate a new, uninitialized vnode.  If 'mp' is non-NULL, this is a
    163  1.5.2.2  rmind  * marker vnode and we are prepared to wait for the allocation.
    164  1.5.2.2  rmind  */
    165  1.5.2.2  rmind vnode_t *
    166  1.5.2.2  rmind vnalloc(struct mount *mp)
    167  1.5.2.2  rmind {
    168  1.5.2.2  rmind 	vnode_t *vp;
    169  1.5.2.2  rmind 
    170  1.5.2.2  rmind 	vp = pool_cache_get(vnode_cache, (mp != NULL ? PR_WAITOK : PR_NOWAIT));
    171  1.5.2.2  rmind 	if (vp == NULL) {
    172  1.5.2.2  rmind 		return NULL;
    173  1.5.2.2  rmind 	}
    174  1.5.2.2  rmind 
    175  1.5.2.2  rmind 	memset(vp, 0, sizeof(*vp));
    176  1.5.2.3  rmind 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
    177  1.5.2.2  rmind 	cv_init(&vp->v_cv, "vnode");
    178  1.5.2.2  rmind 	/*
    179  1.5.2.2  rmind 	 * Done by memset() above.
    180  1.5.2.2  rmind 	 *	LIST_INIT(&vp->v_nclist);
    181  1.5.2.2  rmind 	 *	LIST_INIT(&vp->v_dnclist);
    182  1.5.2.2  rmind 	 */
    183  1.5.2.2  rmind 
    184  1.5.2.2  rmind 	if (mp != NULL) {
    185  1.5.2.2  rmind 		vp->v_mount = mp;
    186  1.5.2.2  rmind 		vp->v_type = VBAD;
    187  1.5.2.2  rmind 		vp->v_iflag = VI_MARKER;
    188  1.5.2.2  rmind 	} else {
    189  1.5.2.2  rmind 		rw_init(&vp->v_lock);
    190  1.5.2.2  rmind 	}
    191  1.5.2.2  rmind 
    192  1.5.2.2  rmind 	return vp;
    193  1.5.2.2  rmind }
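
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * a marker vnode allocated with a non-NULL 'mp' is typically used to
 * hold a stable position while walking that mount's vnode list:
 *
 *	vnode_t *mvp = vnalloc(mp);	// PR_WAITOK: never returns NULL
 *	... insert mvp after the current entry, process vnodes,
 *	    advancing mvp as the scan proceeds ...
 *	vnfree(mvp);
 */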
    194  1.5.2.2  rmind 
    195  1.5.2.2  rmind /*
    196  1.5.2.2  rmind  * Free an unused, unreferenced vnode.
    197  1.5.2.2  rmind  */
    198  1.5.2.2  rmind void
    199  1.5.2.2  rmind vnfree(vnode_t *vp)
    200  1.5.2.2  rmind {
    201  1.5.2.2  rmind 
    202  1.5.2.2  rmind 	KASSERT(vp->v_usecount == 0);
    203  1.5.2.2  rmind 
    204  1.5.2.2  rmind 	if ((vp->v_iflag & VI_MARKER) == 0) {
    205  1.5.2.2  rmind 		rw_destroy(&vp->v_lock);
    206  1.5.2.2  rmind 		mutex_enter(&vnode_free_list_lock);
    207  1.5.2.2  rmind 		numvnodes--;
    208  1.5.2.2  rmind 		mutex_exit(&vnode_free_list_lock);
    209  1.5.2.2  rmind 	}
    210  1.5.2.2  rmind 
    211  1.5.2.3  rmind 	/*
     212  1.5.2.3  rmind 	 * Note: the vnode interlock will either be freed, or its reference
     213  1.5.2.3  rmind 	 * dropped (if VI_LOCKSHARE was in use).
    214  1.5.2.3  rmind 	 */
    215  1.5.2.3  rmind 	uvm_obj_destroy(&vp->v_uobj, true);
    216  1.5.2.2  rmind 	cv_destroy(&vp->v_cv);
    217  1.5.2.2  rmind 	pool_cache_put(vnode_cache, vp);
    218  1.5.2.2  rmind }
    219  1.5.2.2  rmind 
    220  1.5.2.2  rmind /*
    221  1.5.2.2  rmind  * getcleanvnode: grab a vnode from freelist and clean it.
    222  1.5.2.2  rmind  *
    223  1.5.2.2  rmind  * => Releases vnode_free_list_lock.
    224  1.5.2.2  rmind  * => Returns referenced vnode on success.
    225  1.5.2.2  rmind  */
    226  1.5.2.2  rmind static vnode_t *
    227  1.5.2.2  rmind getcleanvnode(void)
    228  1.5.2.2  rmind {
    229  1.5.2.2  rmind 	vnode_t *vp;
    230  1.5.2.2  rmind 	vnodelst_t *listhd;
    231  1.5.2.2  rmind 
    232  1.5.2.2  rmind 	KASSERT(mutex_owned(&vnode_free_list_lock));
    233  1.5.2.2  rmind retry:
    234  1.5.2.2  rmind 	listhd = &vnode_free_list;
    235  1.5.2.2  rmind try_nextlist:
    236  1.5.2.2  rmind 	TAILQ_FOREACH(vp, listhd, v_freelist) {
    237  1.5.2.2  rmind 		/*
    238  1.5.2.2  rmind 		 * It's safe to test v_usecount and v_iflag
    239  1.5.2.2  rmind 		 * without holding the interlock here, since
    240  1.5.2.2  rmind 		 * these vnodes should never appear on the
    241  1.5.2.2  rmind 		 * lists.
    242  1.5.2.2  rmind 		 */
    243  1.5.2.2  rmind 		KASSERT(vp->v_usecount == 0);
    244  1.5.2.2  rmind 		KASSERT((vp->v_iflag & VI_CLEAN) == 0);
    245  1.5.2.2  rmind 		KASSERT(vp->v_freelisthd == listhd);
    246  1.5.2.2  rmind 
    247  1.5.2.2  rmind 		if (!mutex_tryenter(vp->v_interlock))
    248  1.5.2.2  rmind 			continue;
    249  1.5.2.2  rmind 		if ((vp->v_iflag & VI_XLOCK) == 0)
    250  1.5.2.2  rmind 			break;
    251  1.5.2.2  rmind 		mutex_exit(vp->v_interlock);
    252  1.5.2.2  rmind 	}
    253  1.5.2.2  rmind 
    254  1.5.2.2  rmind 	if (vp == NULL) {
    255  1.5.2.2  rmind 		if (listhd == &vnode_free_list) {
    256  1.5.2.2  rmind 			listhd = &vnode_hold_list;
    257  1.5.2.2  rmind 			goto try_nextlist;
    258  1.5.2.2  rmind 		}
    259  1.5.2.2  rmind 		mutex_exit(&vnode_free_list_lock);
    260  1.5.2.2  rmind 		return NULL;
    261  1.5.2.2  rmind 	}
    262  1.5.2.2  rmind 
    263  1.5.2.2  rmind 	/* Remove it from the freelist. */
    264  1.5.2.2  rmind 	TAILQ_REMOVE(listhd, vp, v_freelist);
    265  1.5.2.2  rmind 	vp->v_freelisthd = NULL;
    266  1.5.2.2  rmind 	mutex_exit(&vnode_free_list_lock);
    267  1.5.2.2  rmind 
    268  1.5.2.2  rmind 	KASSERT(vp->v_usecount == 0);
    269  1.5.2.2  rmind 
    270  1.5.2.2  rmind 	/*
    271  1.5.2.2  rmind 	 * The vnode is still associated with a file system, so we must
    272  1.5.2.2  rmind 	 * clean it out before reusing it.  We need to add a reference
    273  1.5.2.2  rmind 	 * before doing this.  If the vnode gains another reference while
    274  1.5.2.2  rmind 	 * being cleaned out then we lose - retry.
    275  1.5.2.2  rmind 	 */
    276  1.5.2.2  rmind 	atomic_add_int(&vp->v_usecount, 1 + VC_XLOCK);
    277  1.5.2.2  rmind 	vclean(vp, DOCLOSE);
    278  1.5.2.2  rmind 	KASSERT(vp->v_usecount >= 1 + VC_XLOCK);
    279  1.5.2.2  rmind 	atomic_add_int(&vp->v_usecount, -VC_XLOCK);
    280  1.5.2.2  rmind 	if (vp->v_usecount == 1) {
    281  1.5.2.2  rmind 		/* We're about to dirty it. */
    282  1.5.2.2  rmind 		vp->v_iflag &= ~VI_CLEAN;
    283  1.5.2.2  rmind 		mutex_exit(vp->v_interlock);
    284  1.5.2.2  rmind 		if (vp->v_type == VBLK || vp->v_type == VCHR) {
    285  1.5.2.2  rmind 			spec_node_destroy(vp);
    286  1.5.2.2  rmind 		}
    287  1.5.2.2  rmind 		vp->v_type = VNON;
    288  1.5.2.2  rmind 	} else {
    289  1.5.2.2  rmind 		/*
    290  1.5.2.2  rmind 		 * Don't return to freelist - the holder of the last
    291  1.5.2.2  rmind 		 * reference will destroy it.
    292  1.5.2.2  rmind 		 */
    293  1.5.2.2  rmind 		vrelel(vp, 0); /* releases vp->v_interlock */
    294  1.5.2.2  rmind 		mutex_enter(&vnode_free_list_lock);
    295  1.5.2.2  rmind 		goto retry;
    296  1.5.2.2  rmind 	}
    297  1.5.2.2  rmind 
    298  1.5.2.2  rmind 	KASSERT(vp->v_data == NULL);
    299  1.5.2.2  rmind 	KASSERT(vp->v_uobj.uo_npages == 0);
    300  1.5.2.2  rmind 	KASSERT(TAILQ_EMPTY(&vp->v_uobj.memq));
    301  1.5.2.2  rmind 	KASSERT(vp->v_numoutput == 0);
    302  1.5.2.2  rmind 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
    303  1.5.2.2  rmind 
    304  1.5.2.2  rmind 	return vp;
    305  1.5.2.2  rmind }
    306  1.5.2.2  rmind 
    307  1.5.2.2  rmind /*
    308  1.5.2.2  rmind  * getnewvnode: return the next vnode from the free list.
    309  1.5.2.2  rmind  *
    310  1.5.2.2  rmind  * => Returns referenced vnode, moved into the mount queue.
    311  1.5.2.3  rmind  * => Shares the lock with vnode specified by 'svp', if it is not NULL.
    312  1.5.2.2  rmind  */
    313  1.5.2.2  rmind int
    314  1.5.2.2  rmind getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
    315  1.5.2.3  rmind 	    const vnode_t *svp, vnode_t **vpp)
    316  1.5.2.2  rmind {
    317  1.5.2.2  rmind 	struct uvm_object *uobj;
    318  1.5.2.2  rmind 	static int toggle;
    319  1.5.2.2  rmind 	vnode_t *vp;
    320  1.5.2.2  rmind 	int error = 0, tryalloc;
    321  1.5.2.2  rmind 
    322  1.5.2.2  rmind try_again:
    323  1.5.2.2  rmind 	if (mp != NULL) {
    324  1.5.2.2  rmind 		/*
    325  1.5.2.2  rmind 		 * Mark filesystem busy while we are creating a vnode.
    326  1.5.2.2  rmind 		 * If unmount is in progress, this will fail.
    327  1.5.2.2  rmind 		 */
    328  1.5.2.2  rmind 		error = vfs_busy(mp, NULL);
    329  1.5.2.2  rmind 		if (error)
    330  1.5.2.2  rmind 			return error;
    331  1.5.2.2  rmind 	}
    332  1.5.2.2  rmind 
    333  1.5.2.2  rmind 	/*
    334  1.5.2.2  rmind 	 * We must choose whether to allocate a new vnode or recycle an
    335  1.5.2.2  rmind 	 * existing one. The criterion for allocating a new one is that
    336  1.5.2.2  rmind 	 * the total number of vnodes is less than the number desired or
    337  1.5.2.2  rmind 	 * there are no vnodes on either free list. Generally we only
    338  1.5.2.2  rmind 	 * want to recycle vnodes that have no buffers associated with
    339  1.5.2.2  rmind 	 * them, so we look first on the vnode_free_list. If it is empty,
    340  1.5.2.2  rmind 	 * we next consider vnodes with referencing buffers on the
    341  1.5.2.2  rmind 	 * vnode_hold_list. The toggle ensures that half the time we
     342  1.5.2.2  rmind 	 * will use a vnode from the vnode_hold_list, and half the time
    343  1.5.2.2  rmind 	 * we will allocate a new one unless the list has grown to twice
     344  1.5.2.2  rmind 	 * the desired size. We are reluctant to recycle vnodes from the
    345  1.5.2.2  rmind 	 * vnode_hold_list because we will lose the identity of all its
    346  1.5.2.2  rmind 	 * referencing buffers.
    347  1.5.2.2  rmind 	 */
    348  1.5.2.2  rmind 
    349  1.5.2.2  rmind 	vp = NULL;
    350  1.5.2.2  rmind 
    351  1.5.2.2  rmind 	mutex_enter(&vnode_free_list_lock);
    352  1.5.2.2  rmind 
    353  1.5.2.2  rmind 	toggle ^= 1;
    354  1.5.2.2  rmind 	if (numvnodes > 2 * desiredvnodes)
    355  1.5.2.2  rmind 		toggle = 0;
    356  1.5.2.2  rmind 
    357  1.5.2.2  rmind 	tryalloc = numvnodes < desiredvnodes ||
    358  1.5.2.2  rmind 	    (TAILQ_FIRST(&vnode_free_list) == NULL &&
    359  1.5.2.2  rmind 	    (TAILQ_FIRST(&vnode_hold_list) == NULL || toggle));
    360  1.5.2.2  rmind 
    361  1.5.2.2  rmind 	if (tryalloc) {
    362  1.5.2.2  rmind 		/* Allocate a new vnode. */
    363  1.5.2.2  rmind 		numvnodes++;
    364  1.5.2.2  rmind 		mutex_exit(&vnode_free_list_lock);
    365  1.5.2.2  rmind 		if ((vp = vnalloc(NULL)) == NULL) {
    366  1.5.2.2  rmind 			mutex_enter(&vnode_free_list_lock);
    367  1.5.2.2  rmind 			numvnodes--;
    368  1.5.2.2  rmind 		} else
    369  1.5.2.2  rmind 			vp->v_usecount = 1;
    370  1.5.2.2  rmind 	}
    371  1.5.2.2  rmind 
    372  1.5.2.2  rmind 	if (vp == NULL) {
    373  1.5.2.2  rmind 		/* Recycle and get vnode clean. */
    374  1.5.2.2  rmind 		vp = getcleanvnode();
    375  1.5.2.2  rmind 		if (vp == NULL) {
    376  1.5.2.2  rmind 			if (mp != NULL) {
    377  1.5.2.2  rmind 				vfs_unbusy(mp, false, NULL);
    378  1.5.2.2  rmind 			}
    379  1.5.2.2  rmind 			if (tryalloc) {
    380  1.5.2.2  rmind 				printf("WARNING: unable to allocate new "
    381  1.5.2.2  rmind 				    "vnode, retrying...\n");
    382  1.5.2.2  rmind 				kpause("newvn", false, hz, NULL);
    383  1.5.2.2  rmind 				goto try_again;
    384  1.5.2.2  rmind 			}
    385  1.5.2.2  rmind 			tablefull("vnode", "increase kern.maxvnodes or NVNODE");
    386  1.5.2.2  rmind 			*vpp = 0;
    387  1.5.2.2  rmind 			return ENFILE;
    388  1.5.2.2  rmind 		}
    389  1.5.2.3  rmind 		if ((vp->v_iflag & VI_LOCKSHARE) != 0 || svp) {
    390  1.5.2.3  rmind 			/* We must remove vnode from the old mount point. */
    391  1.5.2.3  rmind 			if (vp->v_mount) {
    392  1.5.2.3  rmind 				vfs_insmntque(vp, NULL);
    393  1.5.2.3  rmind 			}
    394  1.5.2.3  rmind 			/* Allocate a new interlock, if it was shared. */
    395  1.5.2.3  rmind 			if (vp->v_iflag & VI_LOCKSHARE) {
    396  1.5.2.3  rmind 				uvm_obj_setlock(&vp->v_uobj, NULL);
    397  1.5.2.3  rmind 				vp->v_iflag &= ~VI_LOCKSHARE;
    398  1.5.2.3  rmind 			}
    399  1.5.2.3  rmind 		}
    400  1.5.2.2  rmind 		vp->v_iflag = 0;
    401  1.5.2.2  rmind 		vp->v_vflag = 0;
    402  1.5.2.2  rmind 		vp->v_uflag = 0;
    403  1.5.2.2  rmind 		vp->v_socket = NULL;
    404  1.5.2.2  rmind 	}
    405  1.5.2.2  rmind 
    406  1.5.2.2  rmind 	KASSERT(vp->v_usecount == 1);
    407  1.5.2.2  rmind 	KASSERT(vp->v_freelisthd == NULL);
    408  1.5.2.2  rmind 	KASSERT(LIST_EMPTY(&vp->v_nclist));
    409  1.5.2.2  rmind 	KASSERT(LIST_EMPTY(&vp->v_dnclist));
    410  1.5.2.2  rmind 
    411  1.5.2.2  rmind 	/* Initialize vnode. */
    412  1.5.2.2  rmind 	vp->v_type = VNON;
    413  1.5.2.2  rmind 	vp->v_tag = tag;
    414  1.5.2.2  rmind 	vp->v_op = vops;
    415  1.5.2.2  rmind 	vp->v_data = NULL;
    416  1.5.2.2  rmind 
    417  1.5.2.2  rmind 	uobj = &vp->v_uobj;
    418  1.5.2.2  rmind 	KASSERT(uobj->pgops == &uvm_vnodeops);
    419  1.5.2.2  rmind 	KASSERT(uobj->uo_npages == 0);
    420  1.5.2.2  rmind 	KASSERT(TAILQ_FIRST(&uobj->memq) == NULL);
    421  1.5.2.2  rmind 	vp->v_size = vp->v_writesize = VSIZENOTSET;
    422  1.5.2.2  rmind 
    423  1.5.2.3  rmind 	/* Share the vnode_t::v_interlock, if requested. */
    424  1.5.2.3  rmind 	if (svp) {
    425  1.5.2.3  rmind 		/* Set the interlock and mark that it is shared. */
    426  1.5.2.3  rmind 		KASSERT(vp->v_mount == NULL);
    427  1.5.2.3  rmind 		uvm_obj_setlock(&vp->v_uobj, svp->v_interlock);
    428  1.5.2.3  rmind 		KASSERT(vp->v_interlock == svp->v_interlock);
    429  1.5.2.3  rmind 		vp->v_iflag |= VI_LOCKSHARE;
    430  1.5.2.3  rmind 	}
    431  1.5.2.3  rmind 
    432  1.5.2.2  rmind 	/* Finally, move vnode into the mount queue. */
    433  1.5.2.2  rmind 	vfs_insmntque(vp, mp);
    434  1.5.2.2  rmind 
    435  1.5.2.2  rmind 	if (mp != NULL) {
    436  1.5.2.2  rmind 		if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
    437  1.5.2.2  rmind 			vp->v_vflag |= VV_MPSAFE;
    438  1.5.2.2  rmind 		vfs_unbusy(mp, true, NULL);
    439  1.5.2.2  rmind 	}
    440  1.5.2.2  rmind 
    441  1.5.2.2  rmind 	*vpp = vp;
    442  1.5.2.2  rmind 	return 0;
    443  1.5.2.2  rmind }
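
/*
 * Illustrative sketch of a typical getnewvnode() caller (a hypothetical
 * file system's vget routine; VT_XFS, x_vnodeop_p, xp and x_size are
 * placeholder names, not real identifiers):
 *
 *	error = getnewvnode(VT_XFS, mp, x_vnodeop_p, NULL, &vp);
 *	if (error != 0)
 *		return error;
 *	vp->v_type = VREG;		// set the real type
 *	vp->v_data = xp;		// attach the fs-specific node
 *	uvm_vnp_setsize(vp, xp->x_size);
 *	*vpp = vp;
 */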
    444  1.5.2.2  rmind 
    445  1.5.2.2  rmind /*
    446  1.5.2.2  rmind  * This is really just the reverse of getnewvnode(). Needed for
     447  1.5.2.2  rmind  * VFS_VGET functions that may need to push back a vnode in case
    448  1.5.2.2  rmind  * of a locking race.
    449  1.5.2.2  rmind  */
    450  1.5.2.2  rmind void
    451  1.5.2.2  rmind ungetnewvnode(vnode_t *vp)
    452  1.5.2.2  rmind {
    453  1.5.2.2  rmind 
    454  1.5.2.2  rmind 	KASSERT(vp->v_usecount == 1);
    455  1.5.2.2  rmind 	KASSERT(vp->v_data == NULL);
    456  1.5.2.2  rmind 	KASSERT(vp->v_freelisthd == NULL);
    457  1.5.2.2  rmind 
    458  1.5.2.2  rmind 	mutex_enter(vp->v_interlock);
    459  1.5.2.2  rmind 	vp->v_iflag |= VI_CLEAN;
    460  1.5.2.2  rmind 	vrelel(vp, 0);
    461  1.5.2.2  rmind }
    462  1.5.2.2  rmind 
    463  1.5.2.2  rmind /*
    464  1.5.2.2  rmind  * Remove a vnode from its freelist.
    465  1.5.2.2  rmind  */
    466  1.5.2.2  rmind void
    467  1.5.2.2  rmind vremfree(vnode_t *vp)
    468  1.5.2.2  rmind {
    469  1.5.2.2  rmind 
    470  1.5.2.2  rmind 	KASSERT(mutex_owned(vp->v_interlock));
    471  1.5.2.2  rmind 	KASSERT(vp->v_usecount == 0);
    472  1.5.2.2  rmind 
    473  1.5.2.2  rmind 	/*
    474  1.5.2.2  rmind 	 * Note that the reference count must not change until
    475  1.5.2.2  rmind 	 * the vnode is removed.
    476  1.5.2.2  rmind 	 */
    477  1.5.2.2  rmind 	mutex_enter(&vnode_free_list_lock);
    478  1.5.2.2  rmind 	if (vp->v_holdcnt > 0) {
    479  1.5.2.2  rmind 		KASSERT(vp->v_freelisthd == &vnode_hold_list);
    480  1.5.2.2  rmind 	} else {
    481  1.5.2.2  rmind 		KASSERT(vp->v_freelisthd == &vnode_free_list);
    482  1.5.2.2  rmind 	}
    483  1.5.2.2  rmind 	TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
    484  1.5.2.2  rmind 	vp->v_freelisthd = NULL;
    485  1.5.2.2  rmind 	mutex_exit(&vnode_free_list_lock);
    486  1.5.2.2  rmind }
    487  1.5.2.2  rmind 
    488  1.5.2.2  rmind /*
    489  1.5.2.2  rmind  * Try to gain a reference to a vnode, without acquiring its interlock.
    490  1.5.2.2  rmind  * The caller must hold a lock that will prevent the vnode from being
    491  1.5.2.2  rmind  * recycled or freed.
    492  1.5.2.2  rmind  */
    493  1.5.2.2  rmind bool
    494  1.5.2.2  rmind vtryget(vnode_t *vp)
    495  1.5.2.2  rmind {
    496  1.5.2.2  rmind 	u_int use, next;
    497  1.5.2.2  rmind 
    498  1.5.2.2  rmind 	/*
    499  1.5.2.2  rmind 	 * If the vnode is being freed, don't make life any harder
    500  1.5.2.2  rmind 	 * for vclean() by adding another reference without waiting.
    501  1.5.2.2  rmind 	 * This is not strictly necessary, but we'll do it anyway.
    502  1.5.2.2  rmind 	 */
    503  1.5.2.2  rmind 	if (__predict_false((vp->v_iflag & VI_XLOCK) != 0)) {
    504  1.5.2.2  rmind 		return false;
    505  1.5.2.2  rmind 	}
    506  1.5.2.2  rmind 	for (use = vp->v_usecount;; use = next) {
    507  1.5.2.2  rmind 		if (use == 0 || __predict_false((use & VC_XLOCK) != 0)) {
    508  1.5.2.2  rmind 			/* Need interlock held if first reference. */
    509  1.5.2.2  rmind 			return false;
    510  1.5.2.2  rmind 		}
    511  1.5.2.2  rmind 		next = atomic_cas_uint(&vp->v_usecount, use, use + 1);
    512  1.5.2.2  rmind 		if (__predict_true(next == use)) {
    513  1.5.2.2  rmind 			return true;
    514  1.5.2.2  rmind 		}
    515  1.5.2.2  rmind 	}
    516  1.5.2.2  rmind }
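
/*
 * Illustrative sketch of the intended use (e.g. on a name cache hit,
 * where the cache's own lock keeps 'vp' from being recycled):
 *
 *	if (!vtryget(vp)) {
 *		mutex_enter(vp->v_interlock);
 *		... drop the lock protecting the lookup ...
 *		error = vget(vp, 0);	// slow path; may fail
 *	}
 */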
    517  1.5.2.2  rmind 
    518  1.5.2.2  rmind /*
    519  1.5.2.2  rmind  * vget: get a particular vnode from the free list, increment its reference
    520  1.5.2.2  rmind  * count and lock it.
    521  1.5.2.2  rmind  *
    522  1.5.2.2  rmind  * => Should be called with v_interlock held.
    523  1.5.2.2  rmind  *
    524  1.5.2.2  rmind  * If VI_XLOCK is set, the vnode is being eliminated in vgone()/vclean().
    525  1.5.2.2  rmind  * In that case, we cannot grab the vnode, so the process is awakened when
    526  1.5.2.2  rmind  * the transition is completed, and an error returned to indicate that the
    527  1.5.2.2  rmind  * vnode is no longer usable (e.g. changed to a new file system type).
    528  1.5.2.2  rmind  */
    529  1.5.2.2  rmind int
    530  1.5.2.2  rmind vget(vnode_t *vp, int flags)
    531  1.5.2.2  rmind {
    532  1.5.2.2  rmind 	int error = 0;
    533  1.5.2.2  rmind 
    534  1.5.2.2  rmind 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    535  1.5.2.2  rmind 	KASSERT(mutex_owned(vp->v_interlock));
    536  1.5.2.2  rmind 	KASSERT((flags & ~(LK_SHARED|LK_EXCLUSIVE|LK_NOWAIT)) == 0);
    537  1.5.2.2  rmind 
    538  1.5.2.2  rmind 	/*
    539  1.5.2.2  rmind 	 * Before adding a reference, we must remove the vnode
    540  1.5.2.2  rmind 	 * from its freelist.
    541  1.5.2.2  rmind 	 */
    542  1.5.2.2  rmind 	if (vp->v_usecount == 0) {
    543  1.5.2.2  rmind 		vremfree(vp);
    544  1.5.2.2  rmind 		vp->v_usecount = 1;
    545  1.5.2.2  rmind 	} else {
    546  1.5.2.2  rmind 		atomic_inc_uint(&vp->v_usecount);
    547  1.5.2.2  rmind 	}
    548  1.5.2.2  rmind 
    549  1.5.2.2  rmind 	/*
    550  1.5.2.2  rmind 	 * If the vnode is in the process of being cleaned out for
    551  1.5.2.2  rmind 	 * another use, we wait for the cleaning to finish and then
    552  1.5.2.2  rmind 	 * return failure.  Cleaning is determined by checking if
    553  1.5.2.2  rmind 	 * the VI_XLOCK flag is set.
    554  1.5.2.2  rmind 	 */
    555  1.5.2.2  rmind 	if ((vp->v_iflag & VI_XLOCK) != 0) {
    556  1.5.2.2  rmind 		if ((flags & LK_NOWAIT) != 0) {
    557  1.5.2.2  rmind 			vrelel(vp, 0);
    558  1.5.2.2  rmind 			return EBUSY;
    559  1.5.2.2  rmind 		}
    560  1.5.2.2  rmind 		vwait(vp, VI_XLOCK);
    561  1.5.2.2  rmind 		vrelel(vp, 0);
    562  1.5.2.2  rmind 		return ENOENT;
    563  1.5.2.2  rmind 	}
    564  1.5.2.2  rmind 
    565  1.5.2.2  rmind 	/*
    566  1.5.2.2  rmind 	 * Ok, we got it in good shape.  Just locking left.
    567  1.5.2.2  rmind 	 */
    568  1.5.2.2  rmind 	KASSERT((vp->v_iflag & VI_CLEAN) == 0);
    569  1.5.2.2  rmind 	mutex_exit(vp->v_interlock);
    570  1.5.2.2  rmind 	if (flags & (LK_EXCLUSIVE | LK_SHARED)) {
    571  1.5.2.2  rmind 		error = vn_lock(vp, flags);
    572  1.5.2.2  rmind 		if (error != 0) {
    573  1.5.2.2  rmind 			vrele(vp);
    574  1.5.2.2  rmind 		}
    575  1.5.2.2  rmind 	}
    576  1.5.2.2  rmind 	return error;
    577  1.5.2.2  rmind }
    578  1.5.2.2  rmind 
    579  1.5.2.2  rmind /*
    580  1.5.2.2  rmind  * vput: unlock and release the reference.
    581  1.5.2.2  rmind  */
    582  1.5.2.2  rmind void
    583  1.5.2.2  rmind vput(vnode_t *vp)
    584  1.5.2.2  rmind {
    585  1.5.2.2  rmind 
    586  1.5.2.2  rmind 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    587  1.5.2.2  rmind 
    588  1.5.2.2  rmind 	VOP_UNLOCK(vp);
    589  1.5.2.2  rmind 	vrele(vp);
    590  1.5.2.2  rmind }
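
/*
 * Illustrative sketch of the usual reference/lock life cycle
 * (VOP_GETATTR() is just an example operation on the vnode):
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vget(vp, LK_EXCLUSIVE);	// reference + lock, drops interlock
 *	if (error == 0) {
 *		error = VOP_GETATTR(vp, &va, cred);
 *		vput(vp);		// unlock and drop the reference
 *	}
 */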
    591  1.5.2.2  rmind 
    592  1.5.2.2  rmind /*
     593  1.5.2.2  rmind  * Try to drop a reference on a vnode.  Abort if we are releasing the
    594  1.5.2.2  rmind  * last reference.  Note: this _must_ succeed if not the last reference.
    595  1.5.2.2  rmind  */
    596  1.5.2.2  rmind static inline bool
    597  1.5.2.2  rmind vtryrele(vnode_t *vp)
    598  1.5.2.2  rmind {
    599  1.5.2.2  rmind 	u_int use, next;
    600  1.5.2.2  rmind 
    601  1.5.2.2  rmind 	for (use = vp->v_usecount;; use = next) {
    602  1.5.2.2  rmind 		if (use == 1) {
    603  1.5.2.2  rmind 			return false;
    604  1.5.2.2  rmind 		}
    605  1.5.2.2  rmind 		KASSERT((use & VC_MASK) > 1);
    606  1.5.2.2  rmind 		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
    607  1.5.2.2  rmind 		if (__predict_true(next == use)) {
    608  1.5.2.2  rmind 			return true;
    609  1.5.2.2  rmind 		}
    610  1.5.2.2  rmind 	}
    611  1.5.2.2  rmind }
    612  1.5.2.2  rmind 
    613  1.5.2.2  rmind /*
     614  1.5.2.2  rmind  * Vnode release.  If the reference count drops to zero, call the inactive
     615  1.5.2.2  rmind  * routine and either return the vnode to the freelist or free it to the pool.
    616  1.5.2.2  rmind  */
    617  1.5.2.2  rmind void
    618  1.5.2.2  rmind vrelel(vnode_t *vp, int flags)
    619  1.5.2.2  rmind {
    620  1.5.2.2  rmind 	bool recycle, defer;
    621  1.5.2.2  rmind 	int error;
    622  1.5.2.2  rmind 
    623  1.5.2.2  rmind 	KASSERT(mutex_owned(vp->v_interlock));
    624  1.5.2.2  rmind 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    625  1.5.2.2  rmind 	KASSERT(vp->v_freelisthd == NULL);
    626  1.5.2.2  rmind 
    627  1.5.2.2  rmind 	if (__predict_false(vp->v_op == dead_vnodeop_p &&
    628  1.5.2.2  rmind 	    (vp->v_iflag & (VI_CLEAN|VI_XLOCK)) == 0)) {
    629  1.5.2.2  rmind 		vpanic(vp, "dead but not clean");
    630  1.5.2.2  rmind 	}
    631  1.5.2.2  rmind 
    632  1.5.2.2  rmind 	/*
    633  1.5.2.2  rmind 	 * If not the last reference, just drop the reference count
    634  1.5.2.2  rmind 	 * and unlock.
    635  1.5.2.2  rmind 	 */
    636  1.5.2.2  rmind 	if (vtryrele(vp)) {
    637  1.5.2.2  rmind 		vp->v_iflag |= VI_INACTREDO;
    638  1.5.2.2  rmind 		mutex_exit(vp->v_interlock);
    639  1.5.2.2  rmind 		return;
    640  1.5.2.2  rmind 	}
    641  1.5.2.2  rmind 	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
    642  1.5.2.2  rmind 		vpanic(vp, "vrelel: bad ref count");
    643  1.5.2.2  rmind 	}
    644  1.5.2.2  rmind 
    645  1.5.2.2  rmind 	KASSERT((vp->v_iflag & VI_XLOCK) == 0);
    646  1.5.2.2  rmind 
    647  1.5.2.2  rmind 	/*
    648  1.5.2.2  rmind 	 * If not clean, deactivate the vnode, but preserve
    649  1.5.2.2  rmind 	 * our reference across the call to VOP_INACTIVE().
    650  1.5.2.2  rmind 	 */
    651  1.5.2.2  rmind retry:
    652  1.5.2.2  rmind 	if ((vp->v_iflag & VI_CLEAN) == 0) {
    653  1.5.2.2  rmind 		recycle = false;
    654  1.5.2.2  rmind 		vp->v_iflag |= VI_INACTNOW;
    655  1.5.2.2  rmind 
    656  1.5.2.2  rmind 		/*
    657  1.5.2.2  rmind 		 * XXX This ugly block can be largely eliminated if
    658  1.5.2.2  rmind 		 * locking is pushed down into the file systems.
    659  1.5.2.2  rmind 		 *
    660  1.5.2.2  rmind 		 * Defer vnode release to vrele_thread if caller
    661  1.5.2.2  rmind 		 * requests it explicitly.
    662  1.5.2.2  rmind 		 */
    663  1.5.2.2  rmind 		if ((curlwp == uvm.pagedaemon_lwp) ||
    664  1.5.2.2  rmind 		    (flags & VRELEL_ASYNC_RELE) != 0) {
    665  1.5.2.2  rmind 			/* The pagedaemon can't wait around; defer. */
    666  1.5.2.2  rmind 			defer = true;
    667  1.5.2.2  rmind 		} else if (curlwp == vrele_lwp) {
    668  1.5.2.2  rmind 			/* We have to try harder. */
    669  1.5.2.2  rmind 			vp->v_iflag &= ~VI_INACTREDO;
    670  1.5.2.2  rmind 			mutex_exit(vp->v_interlock);
    671  1.5.2.2  rmind 			error = vn_lock(vp, LK_EXCLUSIVE);
    672  1.5.2.2  rmind 			if (error != 0) {
    673  1.5.2.2  rmind 				/* XXX */
     674  1.5.2.2  rmind 				vpanic(vp, "vrelel: unable to lock vnode");
    675  1.5.2.2  rmind 			}
    676  1.5.2.2  rmind 			defer = false;
    677  1.5.2.2  rmind 		} else if ((vp->v_iflag & VI_LAYER) != 0) {
    678  1.5.2.2  rmind 			/*
    679  1.5.2.2  rmind 			 * Acquiring the stack's lock in vclean() even
    680  1.5.2.2  rmind 			 * for an honest vput/vrele is dangerous because
    681  1.5.2.2  rmind 			 * our caller may hold other vnode locks; defer.
    682  1.5.2.2  rmind 			 */
    683  1.5.2.2  rmind 			defer = true;
    684  1.5.2.2  rmind 		} else {
    685  1.5.2.2  rmind 			/* If we can't acquire the lock, then defer. */
    686  1.5.2.2  rmind 			vp->v_iflag &= ~VI_INACTREDO;
    687  1.5.2.2  rmind 			mutex_exit(vp->v_interlock);
    688  1.5.2.2  rmind 			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
    689  1.5.2.2  rmind 			if (error != 0) {
    690  1.5.2.2  rmind 				defer = true;
    691  1.5.2.2  rmind 				mutex_enter(vp->v_interlock);
    692  1.5.2.2  rmind 			} else {
    693  1.5.2.2  rmind 				defer = false;
    694  1.5.2.2  rmind 			}
    695  1.5.2.2  rmind 		}
    696  1.5.2.2  rmind 
    697  1.5.2.2  rmind 		if (defer) {
    698  1.5.2.2  rmind 			/*
    699  1.5.2.2  rmind 			 * Defer reclaim to the kthread; it's not safe to
    700  1.5.2.2  rmind 			 * clean it here.  We donate it our last reference.
    701  1.5.2.2  rmind 			 */
    702  1.5.2.2  rmind 			KASSERT(mutex_owned(vp->v_interlock));
    703  1.5.2.2  rmind 			KASSERT((vp->v_iflag & VI_INACTPEND) == 0);
    704  1.5.2.2  rmind 			vp->v_iflag &= ~VI_INACTNOW;
    705  1.5.2.2  rmind 			vp->v_iflag |= VI_INACTPEND;
    706  1.5.2.2  rmind 			mutex_enter(&vrele_lock);
    707  1.5.2.2  rmind 			TAILQ_INSERT_TAIL(&vrele_list, vp, v_freelist);
    708  1.5.2.2  rmind 			if (++vrele_pending > (desiredvnodes >> 8))
    709  1.5.2.2  rmind 				cv_signal(&vrele_cv);
    710  1.5.2.2  rmind 			mutex_exit(&vrele_lock);
    711  1.5.2.2  rmind 			mutex_exit(vp->v_interlock);
    712  1.5.2.2  rmind 			return;
    713  1.5.2.2  rmind 		}
    714  1.5.2.2  rmind 
    715  1.5.2.2  rmind #ifdef DIAGNOSTIC
    716  1.5.2.2  rmind 		if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
    717  1.5.2.2  rmind 		    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
    718  1.5.2.2  rmind 			vprint("vrelel: missing VOP_CLOSE()", vp);
    719  1.5.2.2  rmind 		}
    720  1.5.2.2  rmind #endif
    721  1.5.2.2  rmind 
    722  1.5.2.2  rmind 		/*
    723  1.5.2.2  rmind 		 * The vnode can gain another reference while being
    724  1.5.2.2  rmind 		 * deactivated.  If VOP_INACTIVE() indicates that
    725  1.5.2.2  rmind 		 * the described file has been deleted, then recycle
    726  1.5.2.2  rmind 		 * the vnode irrespective of additional references.
    727  1.5.2.2  rmind 		 * Another thread may be waiting to re-use the on-disk
    728  1.5.2.2  rmind 		 * inode.
    729  1.5.2.2  rmind 		 *
    730  1.5.2.2  rmind 		 * Note that VOP_INACTIVE() will drop the vnode lock.
    731  1.5.2.2  rmind 		 */
    732  1.5.2.2  rmind 		VOP_INACTIVE(vp, &recycle);
    733  1.5.2.2  rmind 		mutex_enter(vp->v_interlock);
    734  1.5.2.2  rmind 		vp->v_iflag &= ~VI_INACTNOW;
    735  1.5.2.2  rmind 		if (!recycle) {
    736  1.5.2.2  rmind 			if (vtryrele(vp)) {
    737  1.5.2.2  rmind 				mutex_exit(vp->v_interlock);
    738  1.5.2.2  rmind 				return;
    739  1.5.2.2  rmind 			}
    740  1.5.2.2  rmind 
    741  1.5.2.2  rmind 			/*
    742  1.5.2.2  rmind 			 * If we grew another reference while
    743  1.5.2.2  rmind 			 * VOP_INACTIVE() was underway, retry.
    744  1.5.2.2  rmind 			 */
    745  1.5.2.2  rmind 			if ((vp->v_iflag & VI_INACTREDO) != 0) {
    746  1.5.2.2  rmind 				goto retry;
    747  1.5.2.2  rmind 			}
    748  1.5.2.2  rmind 		}
    749  1.5.2.2  rmind 
    750  1.5.2.2  rmind 		/* Take care of space accounting. */
    751  1.5.2.2  rmind 		if (vp->v_iflag & VI_EXECMAP) {
    752  1.5.2.2  rmind 			atomic_add_int(&uvmexp.execpages,
    753  1.5.2.2  rmind 			    -vp->v_uobj.uo_npages);
    754  1.5.2.2  rmind 			atomic_add_int(&uvmexp.filepages,
    755  1.5.2.2  rmind 			    vp->v_uobj.uo_npages);
    756  1.5.2.2  rmind 		}
    757  1.5.2.2  rmind 		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
    758  1.5.2.2  rmind 		vp->v_vflag &= ~VV_MAPPED;
    759  1.5.2.2  rmind 
    760  1.5.2.2  rmind 		/*
    761  1.5.2.2  rmind 		 * Recycle the vnode if the file is now unused (unlinked),
    762  1.5.2.2  rmind 		 * otherwise just free it.
    763  1.5.2.2  rmind 		 */
    764  1.5.2.2  rmind 		if (recycle) {
    765  1.5.2.2  rmind 			vclean(vp, DOCLOSE);
    766  1.5.2.2  rmind 		}
    767  1.5.2.2  rmind 		KASSERT(vp->v_usecount > 0);
    768  1.5.2.2  rmind 	}
    769  1.5.2.2  rmind 
    770  1.5.2.2  rmind 	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
    771  1.5.2.2  rmind 		/* Gained another reference while being reclaimed. */
    772  1.5.2.2  rmind 		mutex_exit(vp->v_interlock);
    773  1.5.2.2  rmind 		return;
    774  1.5.2.2  rmind 	}
    775  1.5.2.2  rmind 
    776  1.5.2.2  rmind 	if ((vp->v_iflag & VI_CLEAN) != 0) {
    777  1.5.2.2  rmind 		/*
    778  1.5.2.2  rmind 		 * It's clean so destroy it.  It isn't referenced
    779  1.5.2.2  rmind 		 * anywhere since it has been reclaimed.
    780  1.5.2.2  rmind 		 */
    781  1.5.2.2  rmind 		KASSERT(vp->v_holdcnt == 0);
    782  1.5.2.2  rmind 		KASSERT(vp->v_writecount == 0);
    783  1.5.2.2  rmind 		mutex_exit(vp->v_interlock);
    784  1.5.2.2  rmind 		vfs_insmntque(vp, NULL);
    785  1.5.2.2  rmind 		if (vp->v_type == VBLK || vp->v_type == VCHR) {
    786  1.5.2.2  rmind 			spec_node_destroy(vp);
    787  1.5.2.2  rmind 		}
    788  1.5.2.2  rmind 		vnfree(vp);
    789  1.5.2.2  rmind 	} else {
    790  1.5.2.2  rmind 		/*
    791  1.5.2.2  rmind 		 * Otherwise, put it back onto the freelist.  It
    792  1.5.2.2  rmind 		 * can't be destroyed while still associated with
    793  1.5.2.2  rmind 		 * a file system.
    794  1.5.2.2  rmind 		 */
    795  1.5.2.2  rmind 		mutex_enter(&vnode_free_list_lock);
    796  1.5.2.2  rmind 		if (vp->v_holdcnt > 0) {
    797  1.5.2.2  rmind 			vp->v_freelisthd = &vnode_hold_list;
    798  1.5.2.2  rmind 		} else {
    799  1.5.2.2  rmind 			vp->v_freelisthd = &vnode_free_list;
    800  1.5.2.2  rmind 		}
    801  1.5.2.2  rmind 		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
    802  1.5.2.2  rmind 		mutex_exit(&vnode_free_list_lock);
    803  1.5.2.2  rmind 		mutex_exit(vp->v_interlock);
    804  1.5.2.2  rmind 	}
    805  1.5.2.2  rmind }
    806  1.5.2.2  rmind 
    807  1.5.2.2  rmind void
    808  1.5.2.2  rmind vrele(vnode_t *vp)
    809  1.5.2.2  rmind {
    810  1.5.2.2  rmind 
    811  1.5.2.2  rmind 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    812  1.5.2.2  rmind 
    813  1.5.2.2  rmind 	if ((vp->v_iflag & VI_INACTNOW) == 0 && vtryrele(vp)) {
    814  1.5.2.2  rmind 		return;
    815  1.5.2.2  rmind 	}
    816  1.5.2.2  rmind 	mutex_enter(vp->v_interlock);
    817  1.5.2.2  rmind 	vrelel(vp, 0);
    818  1.5.2.2  rmind }
    819  1.5.2.2  rmind 
    820  1.5.2.2  rmind /*
    821  1.5.2.2  rmind  * Asynchronous vnode release, vnode is released in different context.
    822  1.5.2.2  rmind  */
    823  1.5.2.2  rmind void
    824  1.5.2.2  rmind vrele_async(vnode_t *vp)
    825  1.5.2.2  rmind {
    826  1.5.2.2  rmind 
    827  1.5.2.2  rmind 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    828  1.5.2.2  rmind 
    829  1.5.2.2  rmind 	if ((vp->v_iflag & VI_INACTNOW) == 0 && vtryrele(vp)) {
    830  1.5.2.2  rmind 		return;
    831  1.5.2.2  rmind 	}
    832  1.5.2.2  rmind 	mutex_enter(vp->v_interlock);
    833  1.5.2.2  rmind 	vrelel(vp, VRELEL_ASYNC_RELE);
    834  1.5.2.2  rmind }
    835  1.5.2.2  rmind 
    836  1.5.2.2  rmind static void
    837  1.5.2.2  rmind vrele_thread(void *cookie)
    838  1.5.2.2  rmind {
    839  1.5.2.2  rmind 	vnode_t *vp;
    840  1.5.2.2  rmind 
    841  1.5.2.2  rmind 	for (;;) {
    842  1.5.2.2  rmind 		mutex_enter(&vrele_lock);
    843  1.5.2.2  rmind 		while (TAILQ_EMPTY(&vrele_list)) {
    844  1.5.2.2  rmind 			vrele_gen++;
    845  1.5.2.2  rmind 			cv_broadcast(&vrele_cv);
    846  1.5.2.2  rmind 			cv_timedwait(&vrele_cv, &vrele_lock, hz);
    847  1.5.2.2  rmind 		}
    848  1.5.2.2  rmind 		vp = TAILQ_FIRST(&vrele_list);
    849  1.5.2.2  rmind 		TAILQ_REMOVE(&vrele_list, vp, v_freelist);
    850  1.5.2.2  rmind 		vrele_pending--;
    851  1.5.2.2  rmind 		mutex_exit(&vrele_lock);
    852  1.5.2.2  rmind 
    853  1.5.2.2  rmind 		/*
    854  1.5.2.2  rmind 		 * If not the last reference, then ignore the vnode
    855  1.5.2.2  rmind 		 * and look for more work.
    856  1.5.2.2  rmind 		 */
    857  1.5.2.2  rmind 		mutex_enter(vp->v_interlock);
    858  1.5.2.2  rmind 		KASSERT((vp->v_iflag & VI_INACTPEND) != 0);
    859  1.5.2.2  rmind 		vp->v_iflag &= ~VI_INACTPEND;
    860  1.5.2.2  rmind 		vrelel(vp, 0);
    861  1.5.2.2  rmind 	}
    862  1.5.2.2  rmind }
    863  1.5.2.2  rmind 
    864  1.5.2.2  rmind void
    865  1.5.2.2  rmind vrele_flush(void)
    866  1.5.2.2  rmind {
    867  1.5.2.2  rmind 	int gen;
    868  1.5.2.2  rmind 
    869  1.5.2.2  rmind 	mutex_enter(&vrele_lock);
    870  1.5.2.2  rmind 	gen = vrele_gen;
    871  1.5.2.2  rmind 	while (vrele_pending && gen == vrele_gen) {
    872  1.5.2.2  rmind 		cv_broadcast(&vrele_cv);
    873  1.5.2.2  rmind 		cv_wait(&vrele_cv, &vrele_lock);
    874  1.5.2.2  rmind 	}
    875  1.5.2.2  rmind 	mutex_exit(&vrele_lock);
    876  1.5.2.2  rmind }
    877  1.5.2.2  rmind 
    878  1.5.2.2  rmind /*
    879  1.5.2.2  rmind  * Vnode reference, where a reference is already held by some other
    880  1.5.2.2  rmind  * object (for example, a file structure).
    881  1.5.2.2  rmind  */
    882  1.5.2.2  rmind void
    883  1.5.2.2  rmind vref(vnode_t *vp)
    884  1.5.2.2  rmind {
    885  1.5.2.2  rmind 
    886  1.5.2.2  rmind 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    887  1.5.2.2  rmind 	KASSERT(vp->v_usecount != 0);
    888  1.5.2.2  rmind 
    889  1.5.2.2  rmind 	atomic_inc_uint(&vp->v_usecount);
    890  1.5.2.2  rmind }
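
/*
 * Illustrative sketch: vref() is for callers that can already rely on
 * a reference held through some other object, e.g. duplicating the
 * reference held by an open file ('fp' is a hypothetical struct file):
 *
 *	vp = fp->f_data;	// reference held by the open file
 *	vref(vp);		// take an extra one for ourselves
 */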
    891  1.5.2.2  rmind 
    892  1.5.2.2  rmind /*
    893  1.5.2.2  rmind  * Page or buffer structure gets a reference.
    894  1.5.2.2  rmind  * Called with v_interlock held.
    895  1.5.2.2  rmind  */
    896  1.5.2.2  rmind void
    897  1.5.2.2  rmind vholdl(vnode_t *vp)
    898  1.5.2.2  rmind {
    899  1.5.2.2  rmind 
    900  1.5.2.2  rmind 	KASSERT(mutex_owned(vp->v_interlock));
    901  1.5.2.2  rmind 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    902  1.5.2.2  rmind 
    903  1.5.2.2  rmind 	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0) {
    904  1.5.2.2  rmind 		mutex_enter(&vnode_free_list_lock);
    905  1.5.2.2  rmind 		KASSERT(vp->v_freelisthd == &vnode_free_list);
    906  1.5.2.2  rmind 		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
    907  1.5.2.2  rmind 		vp->v_freelisthd = &vnode_hold_list;
    908  1.5.2.2  rmind 		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
    909  1.5.2.2  rmind 		mutex_exit(&vnode_free_list_lock);
    910  1.5.2.2  rmind 	}
    911  1.5.2.2  rmind }
    912  1.5.2.2  rmind 
    913  1.5.2.2  rmind /*
    914  1.5.2.2  rmind  * Page or buffer structure frees a reference.
    915  1.5.2.2  rmind  * Called with v_interlock held.
    916  1.5.2.2  rmind  */
    917  1.5.2.2  rmind void
    918  1.5.2.2  rmind holdrelel(vnode_t *vp)
    919  1.5.2.2  rmind {
    920  1.5.2.2  rmind 
    921  1.5.2.2  rmind 	KASSERT(mutex_owned(vp->v_interlock));
    922  1.5.2.2  rmind 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    923  1.5.2.2  rmind 
    924  1.5.2.2  rmind 	if (vp->v_holdcnt <= 0) {
     925  1.5.2.2  rmind 		vpanic(vp, "holdrelel: bad holdcnt");
    926  1.5.2.2  rmind 	}
    927  1.5.2.2  rmind 
    928  1.5.2.2  rmind 	vp->v_holdcnt--;
    929  1.5.2.2  rmind 	if (vp->v_holdcnt == 0 && vp->v_usecount == 0) {
    930  1.5.2.2  rmind 		mutex_enter(&vnode_free_list_lock);
    931  1.5.2.2  rmind 		KASSERT(vp->v_freelisthd == &vnode_hold_list);
    932  1.5.2.2  rmind 		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
    933  1.5.2.2  rmind 		vp->v_freelisthd = &vnode_free_list;
    934  1.5.2.2  rmind 		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
    935  1.5.2.2  rmind 		mutex_exit(&vnode_free_list_lock);
    936  1.5.2.2  rmind 	}
    937  1.5.2.2  rmind }
    938  1.5.2.2  rmind 
    939  1.5.2.2  rmind /*
    940  1.5.2.2  rmind  * Disassociate the underlying file system from a vnode.
    941  1.5.2.2  rmind  *
    942  1.5.2.2  rmind  * Must be called with the interlock held, and will return with it held.
    943  1.5.2.2  rmind  */
    944  1.5.2.2  rmind void
    945  1.5.2.2  rmind vclean(vnode_t *vp, int flags)
    946  1.5.2.2  rmind {
    947  1.5.2.2  rmind 	lwp_t *l = curlwp;
    948  1.5.2.2  rmind 	bool recycle, active;
    949  1.5.2.2  rmind 	int error;
    950  1.5.2.2  rmind 
    951  1.5.2.2  rmind 	KASSERT(mutex_owned(vp->v_interlock));
    952  1.5.2.2  rmind 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    953  1.5.2.2  rmind 	KASSERT(vp->v_usecount != 0);
    954  1.5.2.2  rmind 
    955  1.5.2.2  rmind 	/* If cleaning is already in progress wait until done and return. */
    956  1.5.2.2  rmind 	if (vp->v_iflag & VI_XLOCK) {
    957  1.5.2.2  rmind 		vwait(vp, VI_XLOCK);
    958  1.5.2.2  rmind 		return;
    959  1.5.2.2  rmind 	}
    960  1.5.2.2  rmind 
    961  1.5.2.2  rmind 	/* If already clean, nothing to do. */
    962  1.5.2.2  rmind 	if ((vp->v_iflag & VI_CLEAN) != 0) {
    963  1.5.2.2  rmind 		return;
    964  1.5.2.2  rmind 	}
    965  1.5.2.2  rmind 
    966  1.5.2.2  rmind 	/*
    967  1.5.2.2  rmind 	 * Prevent the vnode from being recycled or brought into use
    968  1.5.2.2  rmind 	 * while we clean it out.
    969  1.5.2.2  rmind 	 */
    970  1.5.2.2  rmind 	vp->v_iflag |= VI_XLOCK;
    971  1.5.2.2  rmind 	if (vp->v_iflag & VI_EXECMAP) {
    972  1.5.2.2  rmind 		atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
    973  1.5.2.2  rmind 		atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
    974  1.5.2.2  rmind 	}
    975  1.5.2.2  rmind 	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
    976  1.5.2.2  rmind 	active = (vp->v_usecount & VC_MASK) > 1;
    977  1.5.2.2  rmind 
    978  1.5.2.2  rmind 	/* XXXAD should not lock vnode under layer */
    979  1.5.2.2  rmind 	mutex_exit(vp->v_interlock);
    980  1.5.2.2  rmind 	VOP_LOCK(vp, LK_EXCLUSIVE);
    981  1.5.2.2  rmind 
    982  1.5.2.2  rmind 	/*
    983  1.5.2.2  rmind 	 * Clean out any cached data associated with the vnode.
    984  1.5.2.2  rmind 	 * If purging an active vnode, it must be closed and
    985  1.5.2.2  rmind 	 * deactivated before being reclaimed. Note that the
    986  1.5.2.2  rmind 	 * VOP_INACTIVE will unlock the vnode.
    987  1.5.2.2  rmind 	 */
    988  1.5.2.2  rmind 	if (flags & DOCLOSE) {
    989  1.5.2.2  rmind 		error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
    990  1.5.2.2  rmind 		if (error != 0) {
    991  1.5.2.2  rmind 			/* XXX, fix vn_start_write's grab of mp and use that. */
    992  1.5.2.2  rmind 
    993  1.5.2.2  rmind 			if (wapbl_vphaswapbl(vp))
    994  1.5.2.2  rmind 				WAPBL_DISCARD(wapbl_vptomp(vp));
    995  1.5.2.2  rmind 			error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
    996  1.5.2.2  rmind 		}
    997  1.5.2.2  rmind 		KASSERT(error == 0);
    998  1.5.2.2  rmind 		KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
    999  1.5.2.2  rmind 		if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
    1000  1.5.2.2  rmind 			spec_node_revoke(vp);
   1001  1.5.2.2  rmind 		}
   1002  1.5.2.2  rmind 	}
   1003  1.5.2.2  rmind 	if (active) {
   1004  1.5.2.2  rmind 		VOP_INACTIVE(vp, &recycle);
   1005  1.5.2.2  rmind 	} else {
   1006  1.5.2.2  rmind 		/*
   1007  1.5.2.2  rmind 		 * Any other processes trying to obtain this lock must first
   1008  1.5.2.2  rmind 		 * wait for VI_XLOCK to clear, then call the new lock operation.
   1009  1.5.2.2  rmind 		 */
   1010  1.5.2.2  rmind 		VOP_UNLOCK(vp);
   1011  1.5.2.2  rmind 	}
   1012  1.5.2.2  rmind 
   1013  1.5.2.2  rmind 	/* Disassociate the underlying file system from the vnode. */
   1014  1.5.2.2  rmind 	if (VOP_RECLAIM(vp)) {
   1015  1.5.2.2  rmind 		vpanic(vp, "vclean: cannot reclaim");
   1016  1.5.2.2  rmind 	}
   1017  1.5.2.2  rmind 
   1018  1.5.2.2  rmind 	KASSERT(vp->v_uobj.uo_npages == 0);
   1019  1.5.2.2  rmind 	if (vp->v_type == VREG && vp->v_ractx != NULL) {
   1020  1.5.2.2  rmind 		uvm_ra_freectx(vp->v_ractx);
   1021  1.5.2.2  rmind 		vp->v_ractx = NULL;
   1022  1.5.2.2  rmind 	}
   1023  1.5.2.2  rmind 	cache_purge(vp);
   1024  1.5.2.2  rmind 
   1025  1.5.2.2  rmind 	/* Done with purge, notify sleepers of the grim news. */
   1026  1.5.2.2  rmind 	mutex_enter(vp->v_interlock);
   1027  1.5.2.2  rmind 	vp->v_op = dead_vnodeop_p;
   1028  1.5.2.2  rmind 	vp->v_tag = VT_NON;
   1029  1.5.2.2  rmind 	KNOTE(&vp->v_klist, NOTE_REVOKE);
   1030  1.5.2.2  rmind 	vp->v_iflag &= ~VI_XLOCK;
   1031  1.5.2.2  rmind 	vp->v_vflag &= ~VV_LOCKSWORK;
   1032  1.5.2.2  rmind 	if ((flags & DOCLOSE) != 0) {
   1033  1.5.2.2  rmind 		vp->v_iflag |= VI_CLEAN;
   1034  1.5.2.2  rmind 	}
   1035  1.5.2.2  rmind 	cv_broadcast(&vp->v_cv);
   1036  1.5.2.2  rmind 
   1037  1.5.2.2  rmind 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1038  1.5.2.2  rmind }
   1039  1.5.2.2  rmind 
   1040  1.5.2.2  rmind /*
    1041  1.5.2.2  rmind  * Recycle an unused vnode: disassociate it from its file system for reuse.
   1042  1.5.2.2  rmind  * Release the passed interlock if the vnode will be recycled.
   1043  1.5.2.2  rmind  */
   1044  1.5.2.2  rmind int
   1045  1.5.2.2  rmind vrecycle(vnode_t *vp, kmutex_t *inter_lkp, struct lwp *l)
   1046  1.5.2.2  rmind {
   1047  1.5.2.2  rmind 
   1048  1.5.2.2  rmind 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
   1049  1.5.2.2  rmind 
   1050  1.5.2.2  rmind 	mutex_enter(vp->v_interlock);
   1051  1.5.2.2  rmind 	if (vp->v_usecount != 0) {
   1052  1.5.2.2  rmind 		mutex_exit(vp->v_interlock);
   1053  1.5.2.2  rmind 		return 0;
   1054  1.5.2.2  rmind 	}
   1055  1.5.2.2  rmind 	if (inter_lkp) {
   1056  1.5.2.2  rmind 		mutex_exit(inter_lkp);
   1057  1.5.2.2  rmind 	}
   1058  1.5.2.2  rmind 	vremfree(vp);
   1059  1.5.2.2  rmind 	vp->v_usecount = 1;
   1060  1.5.2.2  rmind 	vclean(vp, DOCLOSE);
   1061  1.5.2.2  rmind 	vrelel(vp, 0);
   1062  1.5.2.2  rmind 	return 1;
   1063  1.5.2.2  rmind }
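
/*
 * Illustrative sketch of a vrecycle() caller (hypothetical file system
 * that notices an on-disk object is no longer referenced and wants its
 * vnode recycled early; 'xp' and 'x_nlink' are placeholders):
 *
 *	if (xp->x_nlink == 0)
 *		(void)vrecycle(vp, NULL, curlwp);  // no-op if still in use
 */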
   1064  1.5.2.2  rmind 
   1065  1.5.2.2  rmind /*
   1066  1.5.2.2  rmind  * Eliminate all activity associated with the requested vnode
   1067  1.5.2.2  rmind  * and with all vnodes aliased to the requested vnode.
   1068  1.5.2.2  rmind  */
   1069  1.5.2.2  rmind void
   1070  1.5.2.2  rmind vrevoke(vnode_t *vp)
   1071  1.5.2.2  rmind {
   1072  1.5.2.2  rmind 	vnode_t *vq, **vpp;
   1073  1.5.2.2  rmind 	enum vtype type;
   1074  1.5.2.2  rmind 	dev_t dev;
   1075  1.5.2.2  rmind 
   1076  1.5.2.2  rmind 	KASSERT(vp->v_usecount > 0);
   1077  1.5.2.2  rmind 
   1078  1.5.2.2  rmind 	mutex_enter(vp->v_interlock);
   1079  1.5.2.2  rmind 	if ((vp->v_iflag & VI_CLEAN) != 0) {
   1080  1.5.2.2  rmind 		mutex_exit(vp->v_interlock);
   1081  1.5.2.2  rmind 		return;
   1082  1.5.2.2  rmind 	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
   1083  1.5.2.2  rmind 		atomic_inc_uint(&vp->v_usecount);
   1084  1.5.2.2  rmind 		vclean(vp, DOCLOSE);
   1085  1.5.2.2  rmind 		vrelel(vp, 0);
   1086  1.5.2.2  rmind 		return;
   1087  1.5.2.2  rmind 	} else {
   1088  1.5.2.2  rmind 		dev = vp->v_rdev;
   1089  1.5.2.2  rmind 		type = vp->v_type;
   1090  1.5.2.2  rmind 		mutex_exit(vp->v_interlock);
   1091  1.5.2.2  rmind 	}
   1092  1.5.2.2  rmind 
   1093  1.5.2.2  rmind 	vpp = &specfs_hash[SPECHASH(dev)];
   1094  1.5.2.2  rmind 	mutex_enter(&device_lock);
   1095  1.5.2.2  rmind 	for (vq = *vpp; vq != NULL;) {
   1096  1.5.2.2  rmind 		/* If clean or being cleaned, then ignore it. */
   1097  1.5.2.2  rmind 		mutex_enter(vq->v_interlock);
   1098  1.5.2.2  rmind 		if ((vq->v_iflag & (VI_CLEAN | VI_XLOCK)) != 0 ||
   1099  1.5.2.2  rmind 		    vq->v_rdev != dev || vq->v_type != type) {
   1100  1.5.2.2  rmind 			mutex_exit(vq->v_interlock);
   1101  1.5.2.2  rmind 			vq = vq->v_specnext;
   1102  1.5.2.2  rmind 			continue;
   1103  1.5.2.2  rmind 		}
   1104  1.5.2.2  rmind 		mutex_exit(&device_lock);
   1105  1.5.2.2  rmind 		if (vq->v_usecount == 0) {
   1106  1.5.2.2  rmind 			vremfree(vq);
   1107  1.5.2.2  rmind 			vq->v_usecount = 1;
   1108  1.5.2.2  rmind 		} else {
   1109  1.5.2.2  rmind 			atomic_inc_uint(&vq->v_usecount);
   1110  1.5.2.2  rmind 		}
   1111  1.5.2.2  rmind 		vclean(vq, DOCLOSE);
   1112  1.5.2.2  rmind 		vrelel(vq, 0);
   1113  1.5.2.2  rmind 		mutex_enter(&device_lock);
   1114  1.5.2.2  rmind 		vq = *vpp;
   1115  1.5.2.2  rmind 	}
   1116  1.5.2.2  rmind 	mutex_exit(&device_lock);
   1117  1.5.2.2  rmind }
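
/*
 * Illustrative sketch: vrevoke() backs the revoke(2) path; a generic
 * VOP_REVOKE implementation (genfs_revoke() is essentially of this
 * form) can simply do:
 *
 *	vrevoke(vp);
 *	return 0;
 */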
   1118  1.5.2.2  rmind 
   1119  1.5.2.2  rmind /*
   1120  1.5.2.2  rmind  * Eliminate all activity associated with a vnode in preparation for
   1121  1.5.2.2  rmind  * reuse.  Drops a reference from the vnode.
   1122  1.5.2.2  rmind  */
   1123  1.5.2.2  rmind void
   1124  1.5.2.2  rmind vgone(vnode_t *vp)
   1125  1.5.2.2  rmind {
   1126  1.5.2.2  rmind 
   1127  1.5.2.2  rmind 	mutex_enter(vp->v_interlock);
   1128  1.5.2.2  rmind 	vclean(vp, DOCLOSE);
   1129  1.5.2.2  rmind 	vrelel(vp, 0);
   1130  1.5.2.2  rmind }
   1131  1.5.2.2  rmind 
   1132  1.5.2.2  rmind /*
   1133  1.5.2.2  rmind  * Update outstanding I/O count and do wakeup if requested.
   1134  1.5.2.2  rmind  */
   1135  1.5.2.2  rmind void
   1136  1.5.2.2  rmind vwakeup(struct buf *bp)
   1137  1.5.2.2  rmind {
   1138  1.5.2.2  rmind 	vnode_t *vp;
   1139  1.5.2.2  rmind 
   1140  1.5.2.2  rmind 	if ((vp = bp->b_vp) == NULL)
   1141  1.5.2.2  rmind 		return;
   1142  1.5.2.2  rmind 
   1143  1.5.2.2  rmind 	KASSERT(bp->b_objlock == vp->v_interlock);
   1144  1.5.2.2  rmind 	KASSERT(mutex_owned(bp->b_objlock));
   1145  1.5.2.2  rmind 
   1146  1.5.2.2  rmind 	if (--vp->v_numoutput < 0)
   1147  1.5.2.2  rmind 		panic("vwakeup: neg numoutput, vp %p", vp);
   1148  1.5.2.2  rmind 	if (vp->v_numoutput == 0)
   1149  1.5.2.2  rmind 		cv_broadcast(&vp->v_cv);
   1150  1.5.2.2  rmind }
   1151  1.5.2.2  rmind 
   1152  1.5.2.2  rmind /*
   1153  1.5.2.2  rmind  * Wait for a vnode (typically with VI_XLOCK set) to be cleaned or
   1154  1.5.2.2  rmind  * recycled.
   1155  1.5.2.2  rmind  */
   1156  1.5.2.2  rmind void
   1157  1.5.2.2  rmind vwait(vnode_t *vp, int flags)
   1158  1.5.2.2  rmind {
   1159  1.5.2.2  rmind 
   1160  1.5.2.2  rmind 	KASSERT(mutex_owned(vp->v_interlock));
   1161  1.5.2.2  rmind 	KASSERT(vp->v_usecount != 0);
   1162  1.5.2.2  rmind 
   1163  1.5.2.2  rmind 	while ((vp->v_iflag & flags) != 0)
   1164  1.5.2.2  rmind 		cv_wait(&vp->v_cv, vp->v_interlock);
   1165  1.5.2.2  rmind }
   1166  1.5.2.2  rmind 
   1167  1.5.2.2  rmind int
   1168  1.5.2.2  rmind vfs_drainvnodes(long target)
   1169  1.5.2.2  rmind {
   1170  1.5.2.2  rmind 
   1171  1.5.2.2  rmind 	while (numvnodes > target) {
   1172  1.5.2.2  rmind 		vnode_t *vp;
   1173  1.5.2.2  rmind 
   1174  1.5.2.2  rmind 		mutex_enter(&vnode_free_list_lock);
   1175  1.5.2.2  rmind 		vp = getcleanvnode();
   1176  1.5.2.2  rmind 		if (vp == NULL) {
   1177  1.5.2.2  rmind 			return EBUSY;
   1178  1.5.2.2  rmind 		}
   1179  1.5.2.2  rmind 		ungetnewvnode(vp);
   1180  1.5.2.2  rmind 	}
   1181  1.5.2.2  rmind 	return 0;
   1182  1.5.2.2  rmind }
   1183  1.5.2.2  rmind 
   1184  1.5.2.2  rmind void
   1185  1.5.2.2  rmind vpanic(vnode_t *vp, const char *msg)
   1186  1.5.2.2  rmind {
   1187  1.5.2.2  rmind #ifdef DIAGNOSTIC
   1188  1.5.2.2  rmind 
   1189  1.5.2.2  rmind 	vprint(NULL, vp);
   1190  1.5.2.2  rmind 	panic("%s\n", msg);
   1191  1.5.2.2  rmind #endif
   1192  1.5.2.2  rmind }
   1193