/*	$NetBSD: vfs_vnode.c,v 1.4 2011/04/02 07:33:49 rmind Exp $	*/

/*-
 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * Note on v_usecount and locking:
 *
 * At nearly all points where it is known that v_usecount could be zero,
 * the vnode interlock will be held.
 *
 * To change v_usecount away from zero, the interlock must be held.  To
 * change it from a non-zero value to zero, again the interlock must be
 * held.
 *
 * There is a flag bit, VC_XLOCK, embedded in v_usecount.  To raise
 * v_usecount while the VC_XLOCK bit is set in it, the interlock must be
 * held.  To modify the VC_XLOCK bit, the interlock must be held.  We
 * always keep the usecount (v_usecount & VC_MASK) non-zero while the
 * VC_XLOCK bit is set.
 *
 * Unless the VC_XLOCK bit is set, changing the usecount from one
 * non-zero value to another can safely be done using atomic operations,
 * without the interlock held.  Even if the VC_XLOCK bit is set,
 * decreasing the usecount to a non-zero value can be done using atomic
 * operations, without the interlock held.
 */

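/*
 * A minimal sketch of the rules above (illustrative only; compare the
 * real vtryget() further down): raising a non-zero usecount needs no
 * interlock unless VC_XLOCK is set, so a lock-free grab can be
 * attempted with a compare-and-swap and must fall back to taking the
 * interlock otherwise.
 *
 *	for (use = vp->v_usecount;; use = next) {
 *		if (use == 0 || (use & VC_XLOCK) != 0)
 *			return false;	// caller must take v_interlock
 *		next = atomic_cas_uint(&vp->v_usecount, use, use + 1);
 *		if (next == use)
 *			return true;	// reference gained lock-free
 *	}
 */
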
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.4 2011/04/02 07:33:49 rmind Exp $");

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/wapbl.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

u_int			numvnodes;

static pool_cache_t	vnode_cache;
static kmutex_t		vnode_free_list_lock;

static vnodelst_t	vnode_free_list;
static vnodelst_t	vnode_hold_list;
static vnodelst_t	vrele_list;

static kmutex_t		vrele_lock;
static kcondvar_t	vrele_cv;
static lwp_t *		vrele_lwp;
static int		vrele_pending;
static int		vrele_gen;

static vnode_t *	getcleanvnode(void);
static void		vrele_thread(void *);
static void		vpanic(vnode_t *, const char *);

/* Routines having to do with the management of the vnode table. */
extern int		(**dead_vnodeop_p)(void *);

void
vfs_vnode_sysinit(void)
{
	int error;

	vnode_cache = pool_cache_init(sizeof(vnode_t), 0, 0, 0, "vnodepl",
	    NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vnode_cache != NULL);

	mutex_init(&vnode_free_list_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vrele_list);

	mutex_init(&vrele_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vrele_cv, "vrele");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vrele_thread,
	    NULL, &vrele_lwp, "vrele");
	KASSERT(error == 0);
}

/*
 * Allocate a new, uninitialized vnode.  If 'mp' is non-NULL, this is a
 * marker vnode and we are prepared to wait for the allocation.
 */
vnode_t *
vnalloc(struct mount *mp)
{
	vnode_t *vp;

	vp = pool_cache_get(vnode_cache, (mp != NULL ? PR_WAITOK : PR_NOWAIT));
	if (vp == NULL) {
		return NULL;
	}

	memset(vp, 0, sizeof(*vp));
	UVM_OBJ_INIT(&vp->v_uobj, &uvm_vnodeops, 0);
	cv_init(&vp->v_cv, "vnode");
	/*
	 * Done by memset() above.
	 *	LIST_INIT(&vp->v_nclist);
	 *	LIST_INIT(&vp->v_dnclist);
	 */

	if (mp != NULL) {
		vp->v_mount = mp;
		vp->v_type = VBAD;
		vp->v_iflag = VI_MARKER;
	} else {
		rw_init(&vp->v_lock);
	}

	return vp;
}

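/*
 * A usage sketch for vnalloc()/vnfree() (illustrative only, disabled):
 * a non-NULL 'mp' yields a marker vnode, which may sleep but cannot
 * fail, and is typically used as a stable cursor while walking a
 * mount's vnode list.
 */
#if 0
static void
example_marker_walk(struct mount *mp)
{
	vnode_t *mvp;

	mvp = vnalloc(mp);	/* marker: allocated with PR_WAITOK */
	KASSERT((mvp->v_iflag & VI_MARKER) != 0);
	/* ... insert mvp into mp's vnode list and iterate from it ... */
	vnfree(mvp);
}
#endif
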
/*
 * Free an unused, unreferenced vnode.
 */
void
vnfree(vnode_t *vp)
{

	KASSERT(vp->v_usecount == 0);

	if ((vp->v_iflag & VI_MARKER) == 0) {
		rw_destroy(&vp->v_lock);
		mutex_enter(&vnode_free_list_lock);
		numvnodes--;
		mutex_exit(&vnode_free_list_lock);
	}

	UVM_OBJ_DESTROY(&vp->v_uobj);
	cv_destroy(&vp->v_cv);
	pool_cache_put(vnode_cache, vp);
}

/*
 * getcleanvnode: grab a vnode from freelist and clean it.
 */
static vnode_t *
getcleanvnode(void)
{
	vnode_t *vp;
	vnodelst_t *listhd;

	KASSERT(mutex_owned(&vnode_free_list_lock));
retry:
	listhd = &vnode_free_list;
try_nextlist:
	TAILQ_FOREACH(vp, listhd, v_freelist) {
		/*
		 * It's safe to test v_usecount and v_iflag
		 * without holding the interlock here: referenced
		 * or already-clean vnodes should never appear on
		 * these lists.
		 */
		if (vp->v_usecount != 0) {
			vpanic(vp, "free vnode isn't");
		}
		if ((vp->v_iflag & VI_CLEAN) != 0) {
			vpanic(vp, "clean vnode on freelist");
		}
		if (vp->v_freelisthd != listhd) {
			printf("vnode sez %p, listhd %p\n", vp->v_freelisthd, listhd);
			vpanic(vp, "list head mismatch");
		}
		if (!mutex_tryenter(&vp->v_interlock))
			continue;
		if ((vp->v_iflag & VI_XLOCK) == 0)
			break;
		mutex_exit(&vp->v_interlock);
	}

	if (vp == NULL) {
		if (listhd == &vnode_free_list) {
			listhd = &vnode_hold_list;
			goto try_nextlist;
		}
		mutex_exit(&vnode_free_list_lock);
		return NULL;
	}

	/* Remove it from the freelist. */
	TAILQ_REMOVE(listhd, vp, v_freelist);
	vp->v_freelisthd = NULL;
	mutex_exit(&vnode_free_list_lock);

	KASSERT(vp->v_usecount == 0);

	/*
	 * The vnode is still associated with a file system, so we must
	 * clean it out before reusing it.  We need to add a reference
	 * before doing this.  If the vnode gains another reference while
	 * being cleaned out then we lose - retry.
	 */
	atomic_add_int(&vp->v_usecount, 1 + VC_XLOCK);
	vclean(vp, DOCLOSE);
	KASSERT(vp->v_usecount >= 1 + VC_XLOCK);
	atomic_add_int(&vp->v_usecount, -VC_XLOCK);
	if (vp->v_usecount == 1) {
		/* We're about to dirty it. */
		vp->v_iflag &= ~VI_CLEAN;
		mutex_exit(&vp->v_interlock);
		if (vp->v_type == VBLK || vp->v_type == VCHR) {
			spec_node_destroy(vp);
		}
		vp->v_type = VNON;
	} else {
		/*
		 * Don't return to freelist - the holder of the last
		 * reference will destroy it.
		 */
		vrelel(vp, 0); /* releases vp->v_interlock */
		mutex_enter(&vnode_free_list_lock);
		goto retry;
	}

	if (vp->v_data != NULL || vp->v_uobj.uo_npages != 0 ||
	    !TAILQ_EMPTY(&vp->v_uobj.memq)) {
		vpanic(vp, "cleaned vnode isn't");
	}
	if (vp->v_numoutput != 0) {
		vpanic(vp, "clean vnode has pending I/O's");
	}
	if ((vp->v_iflag & VI_ONWORKLST) != 0) {
		vpanic(vp, "clean vnode on syncer list");
	}

	return vp;
}

/*
 * getnewvnode: return the next vnode from the free list.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
	    vnode_t **vpp)
{
	struct uvm_object *uobj;
	static int toggle;
	vnode_t *vp;
	int error = 0, tryalloc;

try_again:
	if (mp != NULL) {
		/*
		 * Mark filesystem busy while we are creating a vnode.
		 * If unmount is in progress, this will fail.
		 */
		error = vfs_busy(mp, NULL);
		if (error)
			return error;
	}

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one.  The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired, or
	 * there are no vnodes on either free list.  Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list.  If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list.  The toggle ensures that half the time we
	 * will use a vnode from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size.  We are reluctant to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all its
	 * referencing buffers.
	 */

	vp = NULL;

	mutex_enter(&vnode_free_list_lock);

	toggle ^= 1;
	if (numvnodes > 2 * desiredvnodes)
		toggle = 0;

	tryalloc = numvnodes < desiredvnodes ||
	    (TAILQ_FIRST(&vnode_free_list) == NULL &&
	    (TAILQ_FIRST(&vnode_hold_list) == NULL || toggle));

	if (tryalloc) {
		numvnodes++;
		mutex_exit(&vnode_free_list_lock);
		if ((vp = vnalloc(NULL)) == NULL) {
			mutex_enter(&vnode_free_list_lock);
			numvnodes--;
		} else
			vp->v_usecount = 1;
	}

	if (vp == NULL) {
		vp = getcleanvnode();
		if (vp == NULL) {
			if (mp != NULL) {
				vfs_unbusy(mp, false, NULL);
			}
			if (tryalloc) {
				printf("WARNING: unable to allocate new "
				    "vnode, retrying...\n");
				kpause("newvn", false, hz, NULL);
				goto try_again;
			}
			tablefull("vnode", "increase kern.maxvnodes or NVNODE");
			*vpp = NULL;
			return ENFILE;
		}
		vp->v_iflag = 0;
		vp->v_vflag = 0;
		vp->v_uflag = 0;
		vp->v_socket = NULL;
	}

	KASSERT(vp->v_usecount == 1);
	KASSERT(vp->v_freelisthd == NULL);
	KASSERT(LIST_EMPTY(&vp->v_nclist));
	KASSERT(LIST_EMPTY(&vp->v_dnclist));

	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	vfs_insmntque(vp, mp);
	*vpp = vp;
	vp->v_data = NULL;

	/*
	 * Initialize uvm_object within vnode.
	 */

	uobj = &vp->v_uobj;
	KASSERT(uobj->pgops == &uvm_vnodeops);
	KASSERT(uobj->uo_npages == 0);
	KASSERT(TAILQ_FIRST(&uobj->memq) == NULL);
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	if (mp != NULL) {
		if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
			vp->v_vflag |= VV_MPSAFE;
		vfs_unbusy(mp, true, NULL);
	}

	return 0;
}
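
/*
 * Sketch of how a file system's vget-style routine might call
 * getnewvnode() (illustrative only, disabled; the ops vector and the
 * 'ip' argument are hypothetical placeholders).
 */
#if 0
static int
example_newvnode(struct mount *mp, void *ip, vnode_t **vpp)
{
	vnode_t *vp;
	int error;

	error = getnewvnode(VT_UFS, mp, example_vnodeop_p, &vp);
	if (error != 0)
		return error;
	vp->v_type = VREG;	/* set from on-disk metadata */
	vp->v_data = ip;	/* attach the fs-private node */
	*vpp = vp;
	return 0;
}
#endif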

/*
 * This is really just the reverse of getnewvnode().  It is needed by
 * VFS_VGET() implementations that may have to push back a vnode in
 * case of a locking race.
 */
void
ungetnewvnode(vnode_t *vp)
{

	KASSERT(vp->v_usecount == 1);
	KASSERT(vp->v_data == NULL);
	KASSERT(vp->v_freelisthd == NULL);

	mutex_enter(&vp->v_interlock);
	vp->v_iflag |= VI_CLEAN;
	vrelel(vp, 0);
}

/*
 * Remove a vnode from its freelist.
 */
void
vremfree(vnode_t *vp)
{

	KASSERT(mutex_owned(&vp->v_interlock));
	KASSERT(vp->v_usecount == 0);

	/*
	 * Note that the reference count must not change until
	 * the vnode is removed.
	 */
	mutex_enter(&vnode_free_list_lock);
	if (vp->v_holdcnt > 0) {
		KASSERT(vp->v_freelisthd == &vnode_hold_list);
	} else {
		KASSERT(vp->v_freelisthd == &vnode_free_list);
	}
	TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
	vp->v_freelisthd = NULL;
	mutex_exit(&vnode_free_list_lock);
}

/*
 * Try to gain a reference to a vnode, without acquiring its interlock.
 * The caller must hold a lock that will prevent the vnode from being
 * recycled or freed.
 */
bool
vtryget(vnode_t *vp)
{
	u_int use, next;

	/*
	 * If the vnode is being freed, don't make life any harder
	 * for vclean() by adding another reference without waiting.
	 * This is not strictly necessary, but we'll do it anyway.
	 */
	if (__predict_false((vp->v_iflag & VI_XLOCK) != 0)) {
		return false;
	}
	for (use = vp->v_usecount;; use = next) {
		if (use == 0 || __predict_false((use & VC_XLOCK) != 0)) {
			/* Need interlock held if first reference. */
			return false;
		}
		next = atomic_cas_uint(&vp->v_usecount, use, use + 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * vget: get a particular vnode from the free list, increment its reference
 * count and lock it.
 *
 * => Should be called with v_interlock held.
 *
 * If VI_XLOCK is set, the vnode is being eliminated in vgone()/vclean().
 * In that case, we cannot grab the vnode, so the process is awakened when
 * the transition is completed, and an error returned to indicate that the
 * vnode is no longer usable (e.g. changed to a new file system type).
 */
int
vget(vnode_t *vp, int flags)
{
	int error = 0;

	KASSERT((vp->v_iflag & VI_MARKER) == 0);
	KASSERT(mutex_owned(&vp->v_interlock));
	KASSERT((flags & ~(LK_SHARED|LK_EXCLUSIVE|LK_NOWAIT)) == 0);

	/*
	 * Before adding a reference, we must remove the vnode
	 * from its freelist.
	 */
	if (vp->v_usecount == 0) {
		vremfree(vp);
		vp->v_usecount = 1;
	} else {
		atomic_inc_uint(&vp->v_usecount);
	}

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure.  Cleaning is determined by checking if
	 * the VI_XLOCK flag is set.
	 */
	if ((vp->v_iflag & VI_XLOCK) != 0) {
		if ((flags & LK_NOWAIT) != 0) {
			vrelel(vp, 0);
			return EBUSY;
		}
		vwait(vp, VI_XLOCK);
		vrelel(vp, 0);
		return ENOENT;
	}

	/*
	 * Ok, we got it in good shape.  Just locking left.
	 */
	KASSERT((vp->v_iflag & VI_CLEAN) == 0);
	mutex_exit(&vp->v_interlock);
	if (flags & (LK_EXCLUSIVE | LK_SHARED)) {
		error = vn_lock(vp, flags);
		if (error != 0) {
			vrele(vp);
		}
	}
	return error;
}
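
/*
 * Sketch of the vget() calling convention (illustrative only, disabled):
 * the caller takes v_interlock, vget() consumes it, and on success the
 * vnode is referenced and, if requested, locked.
 */
#if 0
static int
example_use_vnode(vnode_t *vp)
{
	int error;

	mutex_enter(&vp->v_interlock);
	error = vget(vp, LK_EXCLUSIVE);	/* consumes the interlock */
	if (error != 0)
		return error;		/* e.g. ENOENT: being reclaimed */
	/* ... operate on the locked, referenced vnode ... */
	vput(vp);			/* unlock and drop the reference */
	return 0;
}
#endif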

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{

	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Try to drop a reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static inline bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = vp->v_usecount;; use = next) {
		if (use == 1) {
			return false;
		}
		KASSERT((use & VC_MASK) > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * Vnode release.  If the reference count drops to zero, call the
 * inactive routine and either return the vnode to the freelist or
 * free it to the pool.
 */
void
vrelel(vnode_t *vp, int flags)
{
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(&vp->v_interlock));
	KASSERT((vp->v_iflag & VI_MARKER) == 0);
	KASSERT(vp->v_freelisthd == NULL);

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    (vp->v_iflag & (VI_CLEAN|VI_XLOCK)) == 0)) {
		vpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count
	 * and unlock.
	 */
	if (vtryrele(vp)) {
		vp->v_iflag |= VI_INACTREDO;
		mutex_exit(&vp->v_interlock);
		return;
	}
	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
		vpanic(vp, "vrelel: bad ref count");
	}

	KASSERT((vp->v_iflag & VI_XLOCK) == 0);

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
retry:
	if ((vp->v_iflag & VI_CLEAN) == 0) {
		recycle = false;
		vp->v_iflag |= VI_INACTNOW;

		/*
		 * XXX This ugly block can be largely eliminated if
		 * locking is pushed down into the file systems.
		 *
		 * Defer vnode release to vrele_thread if caller
		 * requests it explicitly.
		 */
		if ((curlwp == uvm.pagedaemon_lwp) ||
		    (flags & VRELEL_ASYNC_RELE) != 0) {
			/* The pagedaemon can't wait around; defer. */
			defer = true;
		} else if (curlwp == vrele_lwp) {
			/* We have to try harder. */
			vp->v_iflag &= ~VI_INACTREDO;
			mutex_exit(&vp->v_interlock);
			error = vn_lock(vp, LK_EXCLUSIVE);
			if (error != 0) {
				/* XXX */
				vpanic(vp, "vrelel: unable to lock vnode");
			}
			defer = false;
		} else if ((vp->v_iflag & VI_LAYER) != 0) {
			/*
			 * Acquiring the stack's lock in vclean() even
			 * for an honest vput/vrele is dangerous because
			 * our caller may hold other vnode locks; defer.
			 */
			defer = true;
		} else {
			/* If we can't acquire the lock, then defer. */
			vp->v_iflag &= ~VI_INACTREDO;
			mutex_exit(&vp->v_interlock);
			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
			if (error != 0) {
				defer = true;
				mutex_enter(&vp->v_interlock);
			} else {
				defer = false;
			}
		}

		if (defer) {
			/*
			 * Defer reclaim to the kthread; it's not safe to
			 * clean it here.  We donate it our last reference.
			 */
			KASSERT(mutex_owned(&vp->v_interlock));
			KASSERT((vp->v_iflag & VI_INACTPEND) == 0);
			vp->v_iflag &= ~VI_INACTNOW;
			vp->v_iflag |= VI_INACTPEND;
			mutex_enter(&vrele_lock);
			TAILQ_INSERT_TAIL(&vrele_list, vp, v_freelist);
			if (++vrele_pending > (desiredvnodes >> 8))
				cv_signal(&vrele_cv);
			mutex_exit(&vrele_lock);
			mutex_exit(&vp->v_interlock);
			return;
		}

#ifdef DIAGNOSTIC
		if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
		    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
			vprint("vrelel: missing VOP_CLOSE()", vp);
		}
#endif

		/*
		 * The vnode can gain another reference while being
		 * deactivated.  If VOP_INACTIVE() indicates that
		 * the described file has been deleted, then recycle
		 * the vnode irrespective of additional references.
		 * Another thread may be waiting to re-use the on-disk
		 * inode.
		 *
		 * Note that VOP_INACTIVE() will drop the vnode lock.
		 */
		VOP_INACTIVE(vp, &recycle);
		mutex_enter(&vp->v_interlock);
		vp->v_iflag &= ~VI_INACTNOW;
		if (!recycle) {
			if (vtryrele(vp)) {
				mutex_exit(&vp->v_interlock);
				return;
			}

			/*
			 * If we grew another reference while
			 * VOP_INACTIVE() was underway, retry.
			 */
			if ((vp->v_iflag & VI_INACTREDO) != 0) {
				goto retry;
			}
		}

		/* Take care of space accounting. */
		if (vp->v_iflag & VI_EXECMAP) {
			atomic_add_int(&uvmexp.execpages,
			    -vp->v_uobj.uo_npages);
			atomic_add_int(&uvmexp.filepages,
			    vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			vclean(vp, DOCLOSE);
		}
		KASSERT(vp->v_usecount > 0);
	}

	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(&vp->v_interlock);
		return;
	}

	if ((vp->v_iflag & VI_CLEAN) != 0) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		KASSERT(vp->v_holdcnt == 0);
		KASSERT(vp->v_writecount == 0);
		mutex_exit(&vp->v_interlock);
		vfs_insmntque(vp, NULL);
		if (vp->v_type == VBLK || vp->v_type == VCHR) {
			spec_node_destroy(vp);
		}
		vnfree(vp);
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		mutex_enter(&vnode_free_list_lock);
		if (vp->v_holdcnt > 0) {
			vp->v_freelisthd = &vnode_hold_list;
		} else {
			vp->v_freelisthd = &vnode_free_list;
		}
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
		mutex_exit(&vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	if ((vp->v_iflag & VI_INACTNOW) == 0 && vtryrele(vp)) {
		return;
	}
	mutex_enter(&vp->v_interlock);
	vrelel(vp, 0);
}

/*
 * Asynchronous vnode release: the vnode is released in a different
 * context, by the vrele kthread.
 */
void
vrele_async(vnode_t *vp)
{

	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	if ((vp->v_iflag & VI_INACTNOW) == 0 && vtryrele(vp)) {
		return;
	}
	mutex_enter(&vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC_RELE);
}
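
/*
 * Sketch of when vrele_async() is preferable (illustrative only,
 * disabled): a caller that must not sleep, or that holds locks which
 * could deadlock against VOP_INACTIVE(), hands the final release to
 * vrele_thread().
 */
#if 0
	/* From a lock-constrained path: never blocks on the vnode lock. */
	vrele_async(vp);
#endif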

static void
vrele_thread(void *cookie)
{
	vnode_t *vp;

	for (;;) {
		mutex_enter(&vrele_lock);
		while (TAILQ_EMPTY(&vrele_list)) {
			vrele_gen++;
			cv_broadcast(&vrele_cv);
			cv_timedwait(&vrele_cv, &vrele_lock, hz);
		}
		vp = TAILQ_FIRST(&vrele_list);
		TAILQ_REMOVE(&vrele_list, vp, v_freelist);
		vrele_pending--;
		mutex_exit(&vrele_lock);

		/*
		 * If not the last reference, then ignore the vnode
		 * and look for more work.
		 */
		mutex_enter(&vp->v_interlock);
		KASSERT((vp->v_iflag & VI_INACTPEND) != 0);
		vp->v_iflag &= ~VI_INACTPEND;
		vrelel(vp, 0);
	}
}

void
vrele_flush(void)
{
	int gen;

	mutex_enter(&vrele_lock);
	gen = vrele_gen;
	while (vrele_pending && gen == vrele_gen) {
		cv_broadcast(&vrele_cv);
		cv_wait(&vrele_cv, &vrele_lock);
	}
	mutex_exit(&vrele_lock);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 */
void
vref(vnode_t *vp)
{

	KASSERT((vp->v_iflag & VI_MARKER) == 0);
	KASSERT(vp->v_usecount != 0);

	atomic_inc_uint(&vp->v_usecount);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(&vp->v_interlock));
	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0) {
		mutex_enter(&vnode_free_list_lock);
		KASSERT(vp->v_freelisthd == &vnode_free_list);
		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
		vp->v_freelisthd = &vnode_hold_list;
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
	}
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(&vp->v_interlock));
	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	if (vp->v_holdcnt <= 0) {
		vpanic(vp, "holdrelel: holdcnt");
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		mutex_enter(&vnode_free_list_lock);
		KASSERT(vp->v_freelisthd == &vnode_hold_list);
		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
		vp->v_freelisthd = &vnode_free_list;
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
	}
}

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with the interlock held, and will return with it held.
 */
void
vclean(vnode_t *vp, int flags)
{
	lwp_t *l = curlwp;
	bool recycle, active;
	int error;

	KASSERT(mutex_owned(&vp->v_interlock));
	KASSERT((vp->v_iflag & VI_MARKER) == 0);
	KASSERT(vp->v_usecount != 0);

	/* If cleaning is already in progress wait until done and return. */
	if (vp->v_iflag & VI_XLOCK) {
		vwait(vp, VI_XLOCK);
		return;
	}

	/* If already clean, nothing to do. */
	if ((vp->v_iflag & VI_CLEAN) != 0) {
		return;
	}

	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	vp->v_iflag |= VI_XLOCK;
	if (vp->v_iflag & VI_EXECMAP) {
		atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
		atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	active = (vp->v_usecount & VC_MASK) > 1;

	/* XXXAD should not lock vnode under layer */
	mutex_exit(&vp->v_interlock);
	VOP_LOCK(vp, LK_EXCLUSIVE);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.  Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (flags & DOCLOSE) {
		error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
		if (error != 0) {
			/* XXX, fix vn_start_write's grab of mp and use that. */

			if (wapbl_vphaswapbl(vp))
				WAPBL_DISCARD(wapbl_vptomp(vp));
			error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
		}
		KASSERT(error == 0);
		KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
		if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
			spec_node_revoke(vp);
		}
	}
	if (active) {
		VOP_INACTIVE(vp, &recycle);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VI_XLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp);
	}

	/* Disassociate the underlying file system from the vnode. */
	if (VOP_RECLAIM(vp)) {
		vpanic(vp, "vclean: cannot reclaim");
	}

	KASSERT(vp->v_uobj.uo_npages == 0);
	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}
	cache_purge(vp);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(&vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	KNOTE(&vp->v_klist, NOTE_REVOKE);
	vp->v_iflag &= ~VI_XLOCK;
	vp->v_vflag &= ~VV_LOCKSWORK;
	if ((flags & DOCLOSE) != 0) {
		vp->v_iflag |= VI_CLEAN;
	}
	cv_broadcast(&vp->v_cv);

	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vnode_t *vp, kmutex_t *inter_lkp, struct lwp *l)
{

	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	mutex_enter(&vp->v_interlock);
	if (vp->v_usecount != 0) {
		mutex_exit(&vp->v_interlock);
		return 0;
	}
	if (inter_lkp) {
		mutex_exit(inter_lkp);
	}
	vremfree(vp);
	vp->v_usecount = 1;
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
	return 1;
}
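
/*
 * Sketch of vrecycle() from a file system flushing an unused node while
 * holding its own hash lock (illustrative only, disabled; 'hashlock' is
 * a hypothetical lock).  Note the asymmetric unlock: vrecycle() drops
 * the passed lock only when it actually reclaims the vnode.
 */
#if 0
static int
example_flush_node(kmutex_t *hashlock, vnode_t *vp)
{

	/* hashlock is held on entry. */
	if (vrecycle(vp, hashlock, curlwp)) {
		/* Reclaimed; vrecycle() released hashlock for us. */
		return 1;
	}
	mutex_exit(hashlock);
	return 0;
}
#endif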

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	vnode_t *vq, **vpp;
	enum vtype type;
	dev_t dev;

	KASSERT(vp->v_usecount > 0);

	mutex_enter(&vp->v_interlock);
	if ((vp->v_iflag & VI_CLEAN) != 0) {
		mutex_exit(&vp->v_interlock);
		return;
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		vclean(vp, DOCLOSE);
		vrelel(vp, 0);
		return;
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(&vp->v_interlock);
	}

	vpp = &specfs_hash[SPECHASH(dev)];
	mutex_enter(&device_lock);
	for (vq = *vpp; vq != NULL;) {
		/* If clean or being cleaned, then ignore it. */
		mutex_enter(&vq->v_interlock);
		if ((vq->v_iflag & (VI_CLEAN | VI_XLOCK)) != 0 ||
		    vq->v_rdev != dev || vq->v_type != type) {
			mutex_exit(&vq->v_interlock);
			vq = vq->v_specnext;
			continue;
		}
		mutex_exit(&device_lock);
		if (vq->v_usecount == 0) {
			vremfree(vq);
			vq->v_usecount = 1;
		} else {
			atomic_inc_uint(&vq->v_usecount);
		}
		vclean(vq, DOCLOSE);
		vrelel(vq, 0);
		mutex_enter(&device_lock);
		vq = *vpp;
	}
	mutex_exit(&device_lock);
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{

	mutex_enter(&vp->v_interlock);
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(struct buf *bp)
{
	vnode_t *vp;

	if ((vp = bp->b_vp) == NULL)
		return;

	KASSERT(bp->b_objlock == &vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));

	if (--vp->v_numoutput < 0)
		panic("vwakeup: neg numoutput, vp %p", vp);
	if (vp->v_numoutput == 0)
		cv_broadcast(&vp->v_cv);
}

/*
 * Wait for a vnode (typically with VI_XLOCK set) to be cleaned or
 * recycled.
 */
void
vwait(vnode_t *vp, int flags)
{

	KASSERT(mutex_owned(&vp->v_interlock));
	KASSERT(vp->v_usecount != 0);

	while ((vp->v_iflag & flags) != 0)
		cv_wait(&vp->v_cv, &vp->v_interlock);
}

int
vfs_drainvnodes(long target)
{

	while (numvnodes > target) {
		vnode_t *vp;

		mutex_enter(&vnode_free_list_lock);
		vp = getcleanvnode();
		if (vp == NULL) {
			return EBUSY;
		}
		ungetnewvnode(vp);
	}
	return 0;
}

/*
 * vpanic: report a fatally inconsistent vnode (a no-op unless the
 * kernel is built with DIAGNOSTIC).
 */
static void
vpanic(vnode_t *vp, const char *msg)
{
#ifdef DIAGNOSTIC

	vprint(NULL, vp);
	panic("%s\n", msg);
#endif
}