      1 /*	$NetBSD: vfs_vnode.c,v 1.5.2.5 2011/05/30 14:57:48 rmind Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
      9  * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 
     33 /*
     34  * Copyright (c) 1989, 1993
     35  *	The Regents of the University of California.  All rights reserved.
     36  * (c) UNIX System Laboratories, Inc.
     37  * All or some portions of this file are derived from material licensed
     38  * to the University of California by American Telephone and Telegraph
     39  * Co. or Unix System Laboratories, Inc. and are reproduced herein with
     40  * the permission of UNIX System Laboratories, Inc.
     41  *
     42  * Redistribution and use in source and binary forms, with or without
     43  * modification, are permitted provided that the following conditions
     44  * are met:
     45  * 1. Redistributions of source code must retain the above copyright
     46  *    notice, this list of conditions and the following disclaimer.
     47  * 2. Redistributions in binary form must reproduce the above copyright
     48  *    notice, this list of conditions and the following disclaimer in the
     49  *    documentation and/or other materials provided with the distribution.
     50  * 3. Neither the name of the University nor the names of its contributors
     51  *    may be used to endorse or promote products derived from this software
     52  *    without specific prior written permission.
     53  *
     54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     64  * SUCH DAMAGE.
     65  *
     66  *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
     67  */
     68 
     69 /*
     70  * Note on v_usecount and locking:
     71  *
      72  * At nearly all points where it is known that v_usecount could be
      73  * zero, the vnode interlock will be held.
     74  *
     75  * To change v_usecount away from zero, the interlock must be held.  To
     76  * change from a non-zero value to zero, again the interlock must be
     77  * held.
     78  *
     79  * There's a flag bit, VC_XLOCK, embedded in v_usecount.
      80  * To raise v_usecount while the VC_XLOCK bit is set in it, the
      81  * interlock must be held.
     82  * To modify the VC_XLOCK bit, the interlock must be held.
     83  * We always keep the usecount (v_usecount & VC_MASK) non-zero while the
     84  * VC_XLOCK bit is set.
     85  *
     86  * Unless the VC_XLOCK bit is set, changing the usecount from a non-zero
     87  * value to a non-zero value can safely be done using atomic operations,
     88  * without the interlock held.
     89  * Even if the VC_XLOCK bit is set, decreasing the usecount to a non-zero
     90  * value can be done using atomic operations, without the interlock held.
     91  */
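
/*
 * Illustrative sketch of the rules above (grounded in vtryget() later
 * in this file): the only v_usecount transition that is legal without
 * the interlock is non-zero -> non-zero while VC_XLOCK is clear, so a
 * lock-free acquisition is a compare-and-swap loop:
 *
 *	u_int use, next;
 *	for (use = vp->v_usecount;; use = next) {
 *		if (use == 0 || (use & VC_XLOCK) != 0)
 *			return false;	(caller must take v_interlock)
 *		next = atomic_cas_uint(&vp->v_usecount, use, use + 1);
 *		if (next == use)
 *			return true;	(gained a reference lock-free)
 *	}
 */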
     92 
     93 #include <sys/cdefs.h>
     94 __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.5.2.5 2011/05/30 14:57:48 rmind Exp $");
     95 
     96 #include <sys/param.h>
     97 #include <sys/kernel.h>
     98 
     99 #include <sys/atomic.h>
    100 #include <sys/buf.h>
    101 #include <sys/conf.h>
    102 #include <sys/device.h>
    103 #include <sys/kauth.h>
    104 #include <sys/kmem.h>
    105 #include <sys/kthread.h>
    106 #include <sys/module.h>
    107 #include <sys/mount.h>
    108 #include <sys/namei.h>
    109 #include <sys/syscallargs.h>
    110 #include <sys/sysctl.h>
    111 #include <sys/systm.h>
    112 #include <sys/vnode.h>
    113 #include <sys/wapbl.h>
    114 
    115 #include <uvm/uvm.h>
    116 #include <uvm/uvm_readahead.h>
    117 
    118 u_int			numvnodes;
    119 
    120 static pool_cache_t	vnode_cache;
    121 static kmutex_t		vnode_free_list_lock;
    122 
    123 static vnodelst_t	vnode_free_list;
    124 static vnodelst_t	vnode_hold_list;
    125 static vnodelst_t	vrele_list;
    126 
    127 static kmutex_t		vrele_lock;
    128 static kcondvar_t	vrele_cv;
    129 static lwp_t *		vrele_lwp;
    130 static int		vrele_pending;
    131 static int		vrele_gen;
    132 
    133 static vnode_t *	getcleanvnode(void);
    134 static void		vrele_thread(void *);
    135 static void		vpanic(vnode_t *, const char *);
    136 
    137 /* Routines having to do with the management of the vnode table. */
    138 extern int		(**dead_vnodeop_p)(void *);
    139 
    140 void
    141 vfs_vnode_sysinit(void)
    142 {
    143 	int error;
    144 
    145 	vnode_cache = pool_cache_init(sizeof(vnode_t), 0, 0, 0, "vnodepl",
    146 	    NULL, IPL_NONE, NULL, NULL, NULL);
    147 	KASSERT(vnode_cache != NULL);
    148 
    149 	mutex_init(&vnode_free_list_lock, MUTEX_DEFAULT, IPL_NONE);
    150 	TAILQ_INIT(&vnode_free_list);
    151 	TAILQ_INIT(&vnode_hold_list);
    152 	TAILQ_INIT(&vrele_list);
    153 
    154 	mutex_init(&vrele_lock, MUTEX_DEFAULT, IPL_NONE);
    155 	cv_init(&vrele_cv, "vrele");
    156 	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vrele_thread,
    157 	    NULL, &vrele_lwp, "vrele");
    158 	KASSERT(error == 0);
    159 }
    160 
    161 /*
    162  * Allocate a new, uninitialized vnode.  If 'mp' is non-NULL, this is a
    163  * marker vnode and we are prepared to wait for the allocation.
    164  */
    165 vnode_t *
    166 vnalloc(struct mount *mp)
    167 {
    168 	vnode_t *vp;
    169 
    170 	vp = pool_cache_get(vnode_cache, (mp != NULL ? PR_WAITOK : PR_NOWAIT));
    171 	if (vp == NULL) {
    172 		return NULL;
    173 	}
    174 
    175 	memset(vp, 0, sizeof(*vp));
    176 	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
    177 	cv_init(&vp->v_cv, "vnode");
    178 	/*
    179 	 * Done by memset() above.
    180 	 *	LIST_INIT(&vp->v_nclist);
    181 	 *	LIST_INIT(&vp->v_dnclist);
    182 	 */
    183 
    184 	if (mp != NULL) {
    185 		vp->v_mount = mp;
    186 		vp->v_type = VBAD;
    187 		vp->v_iflag = VI_MARKER;
    188 	} else {
    189 		rw_init(&vp->v_lock);
    190 	}
    191 
    192 	return vp;
    193 }
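
/*
 * Illustrative sketch (the list and lock names are those of the wider
 * kernel, shown here only as an example): a marker vnode obtained from
 * vnalloc(mp) is used to hold a position in a mount's vnode list while
 * the list lock is dropped:
 *
 *	vnode_t *mvp = vnalloc(mp);		(marker; may sleep)
 *
 *	mutex_enter(&mntvnode_lock);
 *	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
 *		...insert mvp after vp, drop mntvnode_lock, work on
 *		   vp, then re-enter the lock and resume at mvp...
 *	}
 *	mutex_exit(&mntvnode_lock);
 *	vnfree(mvp);
 *
 * List walkers skip entries with VI_MARKER set, which vnalloc()
 * arranges for marker vnodes.
 */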
    194 
    195 /*
    196  * Free an unused, unreferenced vnode.
    197  */
    198 void
    199 vnfree(vnode_t *vp)
    200 {
    201 
    202 	KASSERT(vp->v_usecount == 0);
    203 
    204 	if ((vp->v_iflag & VI_MARKER) == 0) {
    205 		rw_destroy(&vp->v_lock);
    206 		mutex_enter(&vnode_free_list_lock);
    207 		numvnodes--;
    208 		mutex_exit(&vnode_free_list_lock);
    209 	}
    210 
    211 	/*
    212 	 * Note: the vnode interlock will either be freed, or its reference
    213 	 * dropped (if VI_LOCKSHARE was in use).
    214 	 */
    215 	uvm_obj_destroy(&vp->v_uobj, true);
    216 	cv_destroy(&vp->v_cv);
    217 	pool_cache_put(vnode_cache, vp);
    218 }
    219 
    220 /*
    221  * getcleanvnode: grab a vnode from freelist and clean it.
    222  *
    223  * => Releases vnode_free_list_lock.
    224  * => Returns referenced vnode on success.
    225  */
    226 static vnode_t *
    227 getcleanvnode(void)
    228 {
    229 	vnode_t *vp;
    230 	vnodelst_t *listhd;
    231 
    232 	KASSERT(mutex_owned(&vnode_free_list_lock));
    233 retry:
    234 	listhd = &vnode_free_list;
    235 try_nextlist:
    236 	TAILQ_FOREACH(vp, listhd, v_freelist) {
    237 		/*
    238 		 * It's safe to test v_usecount and v_iflag
    239 		 * without holding the interlock here: vnodes
    240 		 * with a reference, or with VI_CLEAN set,
    241 		 * should never appear on these lists.
    242 		 */
    243 		KASSERT(vp->v_usecount == 0);
    244 		KASSERT((vp->v_iflag & VI_CLEAN) == 0);
    245 		KASSERT(vp->v_freelisthd == listhd);
    246 
    247 		if (!mutex_tryenter(vp->v_interlock))
    248 			continue;
    249 		if ((vp->v_iflag & VI_XLOCK) == 0)
    250 			break;
    251 		mutex_exit(vp->v_interlock);
    252 	}
    253 
    254 	if (vp == NULL) {
    255 		if (listhd == &vnode_free_list) {
    256 			listhd = &vnode_hold_list;
    257 			goto try_nextlist;
    258 		}
    259 		mutex_exit(&vnode_free_list_lock);
    260 		return NULL;
    261 	}
    262 
    263 	/* Remove it from the freelist. */
    264 	TAILQ_REMOVE(listhd, vp, v_freelist);
    265 	vp->v_freelisthd = NULL;
    266 	mutex_exit(&vnode_free_list_lock);
    267 
    268 	KASSERT(vp->v_usecount == 0);
    269 
    270 	/*
    271 	 * The vnode is still associated with a file system, so we must
    272 	 * clean it out before reusing it.  We need to add a reference
    273 	 * before doing this.  If the vnode gains another reference while
    274 	 * being cleaned out then we lose - retry.
    275 	 */
    276 	atomic_add_int(&vp->v_usecount, 1 + VC_XLOCK);
    277 	vclean(vp, DOCLOSE);
    278 	KASSERT(vp->v_usecount >= 1 + VC_XLOCK);
    279 	atomic_add_int(&vp->v_usecount, -VC_XLOCK);
    280 	if (vp->v_usecount == 1) {
    281 		/* We're about to dirty it. */
    282 		vp->v_iflag &= ~VI_CLEAN;
    283 		mutex_exit(vp->v_interlock);
    284 		if (vp->v_type == VBLK || vp->v_type == VCHR) {
    285 			spec_node_destroy(vp);
    286 		}
    287 		vp->v_type = VNON;
    288 	} else {
    289 		/*
    290 		 * Don't return to freelist - the holder of the last
    291 		 * reference will destroy it.
    292 		 */
    293 		vrelel(vp, 0); /* releases vp->v_interlock */
    294 		mutex_enter(&vnode_free_list_lock);
    295 		goto retry;
    296 	}
    297 
    298 	KASSERT(vp->v_data == NULL);
    299 	KASSERT(vp->v_uobj.uo_npages == 0);
    300 	KASSERT(TAILQ_EMPTY(&vp->v_uobj.memq));
    301 	KASSERT(vp->v_numoutput == 0);
    302 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
    303 
    304 	return vp;
    305 }
    306 
    307 /*
    308  * getnewvnode: return the next vnode from the free list.
    309  *
    310  * => Returns referenced vnode, moved into the mount queue.
    311  * => Shares the interlock specified by 'slock', if it is not NULL.
    312  */
    313 int
    314 getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
    315     kmutex_t *slock, vnode_t **vpp)
    316 {
    317 	struct uvm_object *uobj;
    318 	static int toggle;
    319 	vnode_t *vp;
    320 	int error = 0, tryalloc;
    321 
    322 try_again:
    323 	if (mp != NULL) {
    324 		/*
    325 		 * Mark filesystem busy while we are creating a vnode.
    326 		 * If unmount is in progress, this will fail.
    327 		 */
    328 		error = vfs_busy(mp, NULL);
    329 		if (error)
    330 			return error;
    331 	}
    332 
    333 	/*
    334 	 * We must choose whether to allocate a new vnode or recycle an
    335 	 * existing one. The criterion for allocating a new one is that
    336 	 * the total number of vnodes is less than the number desired or
    337 	 * there are no vnodes on either free list. Generally we only
    338 	 * want to recycle vnodes that have no buffers associated with
    339 	 * them, so we look first on the vnode_free_list. If it is empty,
    340 	 * we next consider vnodes with referencing buffers on the
    341 	 * vnode_hold_list. The toggle ensures that half the time we
    342 	 * will use a vnode from the vnode_hold_list, and half the time
    343 	 * we will allocate a new one unless the list has grown to twice
    344 	 * the desired size. We are reluctant to recycle vnodes from the
    345 	 * vnode_hold_list because we will lose the identity of all its
    346 	 * referencing buffers.
    347 	 */
    348 
    349 	vp = NULL;
    350 
    351 	mutex_enter(&vnode_free_list_lock);
    352 
    353 	toggle ^= 1;
    354 	if (numvnodes > 2 * desiredvnodes)
    355 		toggle = 0;
    356 
    357 	tryalloc = numvnodes < desiredvnodes ||
    358 	    (TAILQ_FIRST(&vnode_free_list) == NULL &&
    359 	    (TAILQ_FIRST(&vnode_hold_list) == NULL || toggle));
    360 
    361 	if (tryalloc) {
    362 		/* Allocate a new vnode. */
    363 		numvnodes++;
    364 		mutex_exit(&vnode_free_list_lock);
    365 		if ((vp = vnalloc(NULL)) == NULL) {
    366 			mutex_enter(&vnode_free_list_lock);
    367 			numvnodes--;
    368 		} else
    369 			vp->v_usecount = 1;
    370 	}
    371 
    372 	if (vp == NULL) {
    373 		/* Recycle and get vnode clean. */
    374 		vp = getcleanvnode();
    375 		if (vp == NULL) {
    376 			if (mp != NULL) {
    377 				vfs_unbusy(mp, false, NULL);
    378 			}
    379 			if (tryalloc) {
    380 				printf("WARNING: unable to allocate new "
    381 				    "vnode, retrying...\n");
    382 				kpause("newvn", false, hz, NULL);
    383 				goto try_again;
    384 			}
    385 			tablefull("vnode", "increase kern.maxvnodes or NVNODE");
    386 			*vpp = NULL;
    387 			return ENFILE;
    388 		}
    389 		if ((vp->v_iflag & VI_LOCKSHARE) != 0 || slock) {
    390 			/* We must remove vnode from the old mount point. */
    391 			if (vp->v_mount) {
    392 				vfs_insmntque(vp, NULL);
    393 			}
    394 			/* Allocate a new interlock, if it was shared. */
    395 			if (vp->v_iflag & VI_LOCKSHARE) {
    396 				uvm_obj_setlock(&vp->v_uobj, NULL);
    397 				vp->v_iflag &= ~VI_LOCKSHARE;
    398 			}
    399 		}
    400 		vp->v_iflag = 0;
    401 		vp->v_vflag = 0;
    402 		vp->v_uflag = 0;
    403 		vp->v_socket = NULL;
    404 	}
    405 
    406 	KASSERT(vp->v_usecount == 1);
    407 	KASSERT(vp->v_freelisthd == NULL);
    408 	KASSERT(LIST_EMPTY(&vp->v_nclist));
    409 	KASSERT(LIST_EMPTY(&vp->v_dnclist));
    410 
    411 	/* Initialize vnode. */
    412 	vp->v_type = VNON;
    413 	vp->v_tag = tag;
    414 	vp->v_op = vops;
    415 	vp->v_data = NULL;
    416 
    417 	uobj = &vp->v_uobj;
    418 	KASSERT(uobj->pgops == &uvm_vnodeops);
    419 	KASSERT(uobj->uo_npages == 0);
    420 	KASSERT(TAILQ_FIRST(&uobj->memq) == NULL);
    421 	vp->v_size = vp->v_writesize = VSIZENOTSET;
    422 
    423 	/* Share the vnode_t::v_interlock, if requested. */
    424 	if (slock) {
    425 		/* Set the interlock and mark that it is shared. */
    426 		KASSERT(vp->v_mount == NULL);
    427 		mutex_obj_hold(slock);
    428 		uvm_obj_setlock(&vp->v_uobj, slock);
    429 		KASSERT(vp->v_interlock == slock);
    430 		vp->v_iflag |= VI_LOCKSHARE;
    431 	}
    432 
    433 	/* Finally, move vnode into the mount queue. */
    434 	vfs_insmntque(vp, mp);
    435 
    436 	if (mp != NULL) {
    437 		if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
    438 			vp->v_vflag |= VV_MPSAFE;
    439 		vfs_unbusy(mp, true, NULL);
    440 	}
    441 
    442 	*vpp = vp;
    443 	return 0;
    444 }
    445 
    446 /*
    447  * This is really just the reverse of getnewvnode(). Needed for
    448  * VFS_VGET functions that may need to push back a vnode in case
    449  * of a locking race.
    450  */
    451 void
    452 ungetnewvnode(vnode_t *vp)
    453 {
    454 
    455 	KASSERT(vp->v_usecount == 1);
    456 	KASSERT(vp->v_data == NULL);
    457 	KASSERT(vp->v_freelisthd == NULL);
    458 
    459 	mutex_enter(vp->v_interlock);
    460 	vp->v_iflag |= VI_CLEAN;
    461 	vrelel(vp, 0);
    462 }
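
/*
 * Illustrative sketch (hypothetical file system; the efs_* hash
 * helpers and vnodeop vector are assumed names, and the VT_UFS tag is
 * used purely for illustration): a typical VFS_VGET allocates with
 * getnewvnode() and pushes the vnode back with ungetnewvnode() if
 * another thread won the race to create the file's vnode:
 *
 *	error = getnewvnode(VT_UFS, mp, efs_vnodeop_p, NULL, &vp);
 *	if (error != 0)
 *		return error;
 *	mutex_enter(&efs_hashlock);
 *	if ((ovp = efs_hashget(dev, ino)) != NULL) {
 *		mutex_exit(&efs_hashlock);
 *		ungetnewvnode(vp);	(lost the race; discard ours)
 *		vp = ovp;
 *	} else {
 *		...initialize v_data, publish vp in the hash...
 *		mutex_exit(&efs_hashlock);
 *	}
 */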
    463 
    464 /*
    465  * Remove a vnode from its freelist.
    466  */
    467 void
    468 vremfree(vnode_t *vp)
    469 {
    470 
    471 	KASSERT(mutex_owned(vp->v_interlock));
    472 	KASSERT(vp->v_usecount == 0);
    473 
    474 	/*
    475 	 * Note that the reference count must not change until
    476 	 * the vnode is removed.
    477 	 */
    478 	mutex_enter(&vnode_free_list_lock);
    479 	if (vp->v_holdcnt > 0) {
    480 		KASSERT(vp->v_freelisthd == &vnode_hold_list);
    481 	} else {
    482 		KASSERT(vp->v_freelisthd == &vnode_free_list);
    483 	}
    484 	TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
    485 	vp->v_freelisthd = NULL;
    486 	mutex_exit(&vnode_free_list_lock);
    487 }
    488 
    489 /*
    490  * Try to gain a reference to a vnode, without acquiring its interlock.
    491  * The caller must hold a lock that will prevent the vnode from being
    492  * recycled or freed.
    493  */
    494 bool
    495 vtryget(vnode_t *vp)
    496 {
    497 	u_int use, next;
    498 
    499 	/*
    500 	 * If the vnode is being freed, don't make life any harder
    501 	 * for vclean() by adding another reference without waiting.
    502 	 * This is not strictly necessary, but we'll do it anyway.
    503 	 */
    504 	if (__predict_false((vp->v_iflag & VI_XLOCK) != 0)) {
    505 		return false;
    506 	}
    507 	for (use = vp->v_usecount;; use = next) {
    508 		if (use == 0 || __predict_false((use & VC_XLOCK) != 0)) {
    509 			/* Need interlock held if first reference. */
    510 			return false;
    511 		}
    512 		next = atomic_cas_uint(&vp->v_usecount, use, use + 1);
    513 		if (__predict_true(next == use)) {
    514 			return true;
    515 		}
    516 	}
    517 }
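
/*
 * Illustrative usage sketch: vtryget() serves the fast path where some
 * other lock (for example, a name cache lock) already prevents the
 * vnode from being recycled; on failure, fall back to the interlock:
 *
 *	if (!vtryget(vp)) {
 *		mutex_enter(vp->v_interlock);
 *		error = vget(vp, 0);	(fails if the vnode is being
 *					 cleaned out)
 *	}
 */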
    518 
    519 /*
    520  * vget: grab a particular vnode (from the freelist, if necessary),
    521  * increment its reference count and, optionally, lock it.
    522  *
    523  * => Should be called with v_interlock held.
    524  *
    525  * If VI_XLOCK is set, the vnode is being eliminated in vgone()/vclean().
    526  * In that case, we cannot grab the vnode, so the process is awakened when
    527  * the transition is completed, and an error returned to indicate that the
    528  * vnode is no longer usable (e.g. changed to a new file system type).
    529  */
    530 int
    531 vget(vnode_t *vp, int flags)
    532 {
    533 	int error = 0;
    534 
    535 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    536 	KASSERT(mutex_owned(vp->v_interlock));
    537 	KASSERT((flags & ~(LK_SHARED|LK_EXCLUSIVE|LK_NOWAIT)) == 0);
    538 
    539 	/*
    540 	 * Before adding a reference, we must remove the vnode
    541 	 * from its freelist.
    542 	 */
    543 	if (vp->v_usecount == 0) {
    544 		vremfree(vp);
    545 		vp->v_usecount = 1;
    546 	} else {
    547 		atomic_inc_uint(&vp->v_usecount);
    548 	}
    549 
    550 	/*
    551 	 * If the vnode is in the process of being cleaned out for
    552 	 * another use, we wait for the cleaning to finish and then
    553 	 * return failure.  Cleaning is determined by checking if
    554 	 * the VI_XLOCK flag is set.
    555 	 */
    556 	if ((vp->v_iflag & VI_XLOCK) != 0) {
    557 		if ((flags & LK_NOWAIT) != 0) {
    558 			vrelel(vp, 0);
    559 			return EBUSY;
    560 		}
    561 		vwait(vp, VI_XLOCK);
    562 		vrelel(vp, 0);
    563 		return ENOENT;
    564 	}
    565 
    566 	/*
    567 	 * Ok, we got it in good shape.  Just locking left.
    568 	 */
    569 	KASSERT((vp->v_iflag & VI_CLEAN) == 0);
    570 	mutex_exit(vp->v_interlock);
    571 	if (flags & (LK_EXCLUSIVE | LK_SHARED)) {
    572 		error = vn_lock(vp, flags);
    573 		if (error != 0) {
    574 			vrele(vp);
    575 		}
    576 	}
    577 	return error;
    578 }
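
/*
 * Illustrative usage sketch (the efs_* hash helpers are assumed
 * names): callers typically find the vnode under a hash lock, take the
 * interlock, and retry the lookup when vget() reports that the vnode
 * was reclaimed underneath them:
 *
 * loop:
 *	mutex_enter(&efs_hashlock);
 *	if ((vp = efs_hashget(dev, ino)) != NULL) {
 *		mutex_enter(vp->v_interlock);
 *		mutex_exit(&efs_hashlock);
 *		if (vget(vp, LK_EXCLUSIVE) != 0)
 *			goto loop;	(cleaned out; look again)
 *		return vp;
 *	}
 *	mutex_exit(&efs_hashlock);
 */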
    579 
    580 /*
    581  * vput: unlock and release the reference.
    582  */
    583 void
    584 vput(vnode_t *vp)
    585 {
    586 
    587 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    588 
    589 	VOP_UNLOCK(vp);
    590 	vrele(vp);
    591 }
    592 
    593 /*
    594  * Try to drop a reference on a vnode.  Abort if we are releasing the
    595  * last reference.  Note: this _must_ succeed if not the last reference.
    596  */
    597 static inline bool
    598 vtryrele(vnode_t *vp)
    599 {
    600 	u_int use, next;
    601 
    602 	for (use = vp->v_usecount;; use = next) {
    603 		if (use == 1) {
    604 			return false;
    605 		}
    606 		KASSERT((use & VC_MASK) > 1);
    607 		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
    608 		if (__predict_true(next == use)) {
    609 			return true;
    610 		}
    611 	}
    612 }
    613 
    614 /*
    615  * Vnode release.  If the reference count drops to zero, call the inactive
    616  * routine and either return the vnode to the freelist or free it to the pool.
    617  */
    618 void
    619 vrelel(vnode_t *vp, int flags)
    620 {
    621 	bool recycle, defer;
    622 	int error;
    623 
    624 	KASSERT(mutex_owned(vp->v_interlock));
    625 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    626 	KASSERT(vp->v_freelisthd == NULL);
    627 
    628 	if (__predict_false(vp->v_op == dead_vnodeop_p &&
    629 	    (vp->v_iflag & (VI_CLEAN|VI_XLOCK)) == 0)) {
    630 		vpanic(vp, "dead but not clean");
    631 	}
    632 
    633 	/*
    634 	 * If not the last reference, just drop the reference count
    635 	 * and unlock.
    636 	 */
    637 	if (vtryrele(vp)) {
    638 		vp->v_iflag |= VI_INACTREDO;
    639 		mutex_exit(vp->v_interlock);
    640 		return;
    641 	}
    642 	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
    643 		vpanic(vp, "vrelel: bad ref count");
    644 	}
    645 
    646 	KASSERT((vp->v_iflag & VI_XLOCK) == 0);
    647 
    648 	/*
    649 	 * If not clean, deactivate the vnode, but preserve
    650 	 * our reference across the call to VOP_INACTIVE().
    651 	 */
    652 retry:
    653 	if ((vp->v_iflag & VI_CLEAN) == 0) {
    654 		recycle = false;
    655 		vp->v_iflag |= VI_INACTNOW;
    656 
    657 		/*
    658 		 * XXX This ugly block can be largely eliminated if
    659 		 * locking is pushed down into the file systems.
    660 		 *
    661 		 * Defer vnode release to vrele_thread if caller
    662 		 * requests it explicitly.
    663 		 */
    664 		if ((curlwp == uvm.pagedaemon_lwp) ||
    665 		    (flags & VRELEL_ASYNC_RELE) != 0) {
    666 			/* The pagedaemon can't wait around; defer. */
    667 			defer = true;
    668 		} else if (curlwp == vrele_lwp) {
    669 			/* We have to try harder. */
    670 			vp->v_iflag &= ~VI_INACTREDO;
    671 			mutex_exit(vp->v_interlock);
    672 			error = vn_lock(vp, LK_EXCLUSIVE);
    673 			if (error != 0) {
    674 				/* XXX */
    675 				vpanic(vp, "vrelel: unable to lock vnode");
    676 			}
    677 			defer = false;
    678 		} else if ((vp->v_iflag & VI_LAYER) != 0) {
    679 			/*
    680 			 * Acquiring the stack's lock in vclean() even
    681 			 * for an honest vput/vrele is dangerous because
    682 			 * our caller may hold other vnode locks; defer.
    683 			 */
    684 			defer = true;
    685 		} else {
    686 			/* If we can't acquire the lock, then defer. */
    687 			vp->v_iflag &= ~VI_INACTREDO;
    688 			mutex_exit(vp->v_interlock);
    689 			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
    690 			if (error != 0) {
    691 				defer = true;
    692 				mutex_enter(vp->v_interlock);
    693 			} else {
    694 				defer = false;
    695 			}
    696 		}
    697 
    698 		if (defer) {
    699 			/*
    700 			 * Defer reclaim to the kthread; it's not safe to
    701 			 * clean it here.  We donate it our last reference.
    702 			 */
    703 			KASSERT(mutex_owned(vp->v_interlock));
    704 			KASSERT((vp->v_iflag & VI_INACTPEND) == 0);
    705 			vp->v_iflag &= ~VI_INACTNOW;
    706 			vp->v_iflag |= VI_INACTPEND;
    707 			mutex_enter(&vrele_lock);
    708 			TAILQ_INSERT_TAIL(&vrele_list, vp, v_freelist);
    709 			if (++vrele_pending > (desiredvnodes >> 8))
    710 				cv_signal(&vrele_cv);
    711 			mutex_exit(&vrele_lock);
    712 			mutex_exit(vp->v_interlock);
    713 			return;
    714 		}
    715 
    716 #ifdef DIAGNOSTIC
    717 		if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
    718 		    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
    719 			vprint("vrelel: missing VOP_CLOSE()", vp);
    720 		}
    721 #endif
    722 
    723 		/*
    724 		 * The vnode can gain another reference while being
    725 		 * deactivated.  If VOP_INACTIVE() indicates that
    726 		 * the described file has been deleted, then recycle
    727 		 * the vnode irrespective of additional references.
    728 		 * Another thread may be waiting to re-use the on-disk
    729 		 * inode.
    730 		 *
    731 		 * Note that VOP_INACTIVE() will drop the vnode lock.
    732 		 */
    733 		VOP_INACTIVE(vp, &recycle);
    734 		mutex_enter(vp->v_interlock);
    735 		vp->v_iflag &= ~VI_INACTNOW;
    736 		if (!recycle) {
    737 			if (vtryrele(vp)) {
    738 				mutex_exit(vp->v_interlock);
    739 				return;
    740 			}
    741 
    742 			/*
    743 			 * If we gained another reference while
    744 			 * VOP_INACTIVE() was underway, retry.
    745 			 */
    746 			if ((vp->v_iflag & VI_INACTREDO) != 0) {
    747 				goto retry;
    748 			}
    749 		}
    750 
    751 		/* Take care of space accounting. */
    752 		if (vp->v_iflag & VI_EXECMAP) {
    753 			atomic_add_int(&uvmexp.execpages,
    754 			    -vp->v_uobj.uo_npages);
    755 			atomic_add_int(&uvmexp.filepages,
    756 			    vp->v_uobj.uo_npages);
    757 		}
    758 		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
    759 		vp->v_vflag &= ~VV_MAPPED;
    760 
    761 		/*
    762 		 * Recycle the vnode if the file is now unused (unlinked),
    763 		 * otherwise just free it.
    764 		 */
    765 		if (recycle) {
    766 			vclean(vp, DOCLOSE);
    767 		}
    768 		KASSERT(vp->v_usecount > 0);
    769 	}
    770 
    771 	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
    772 		/* Gained another reference while being reclaimed. */
    773 		mutex_exit(vp->v_interlock);
    774 		return;
    775 	}
    776 
    777 	if ((vp->v_iflag & VI_CLEAN) != 0) {
    778 		/*
    779 		 * It's clean so destroy it.  It isn't referenced
    780 		 * anywhere since it has been reclaimed.
    781 		 */
    782 		KASSERT(vp->v_holdcnt == 0);
    783 		KASSERT(vp->v_writecount == 0);
    784 		mutex_exit(vp->v_interlock);
    785 		vfs_insmntque(vp, NULL);
    786 		if (vp->v_type == VBLK || vp->v_type == VCHR) {
    787 			spec_node_destroy(vp);
    788 		}
    789 		vnfree(vp);
    790 	} else {
    791 		/*
    792 		 * Otherwise, put it back onto the freelist.  It
    793 		 * can't be destroyed while still associated with
    794 		 * a file system.
    795 		 */
    796 		mutex_enter(&vnode_free_list_lock);
    797 		if (vp->v_holdcnt > 0) {
    798 			vp->v_freelisthd = &vnode_hold_list;
    799 		} else {
    800 			vp->v_freelisthd = &vnode_free_list;
    801 		}
    802 		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
    803 		mutex_exit(&vnode_free_list_lock);
    804 		mutex_exit(vp->v_interlock);
    805 	}
    806 }
    807 
    808 void
    809 vrele(vnode_t *vp)
    810 {
    811 
    812 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    813 
    814 	if ((vp->v_iflag & VI_INACTNOW) == 0 && vtryrele(vp)) {
    815 		return;
    816 	}
    817 	mutex_enter(vp->v_interlock);
    818 	vrelel(vp, 0);
    819 }
    820 
    821 /*
    822  * Asynchronous vnode release: the vnode is released in a different context.
    823  */
    824 void
    825 vrele_async(vnode_t *vp)
    826 {
    827 
    828 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    829 
    830 	if ((vp->v_iflag & VI_INACTNOW) == 0 && vtryrele(vp)) {
    831 		return;
    832 	}
    833 	mutex_enter(vp->v_interlock);
    834 	vrelel(vp, VRELEL_ASYNC_RELE);
    835 }
    836 
    837 static void
    838 vrele_thread(void *cookie)
    839 {
    840 	vnode_t *vp;
    841 
    842 	for (;;) {
    843 		mutex_enter(&vrele_lock);
    844 		while (TAILQ_EMPTY(&vrele_list)) {
    845 			vrele_gen++;
    846 			cv_broadcast(&vrele_cv);
    847 			cv_timedwait(&vrele_cv, &vrele_lock, hz);
    848 		}
    849 		vp = TAILQ_FIRST(&vrele_list);
    850 		TAILQ_REMOVE(&vrele_list, vp, v_freelist);
    851 		vrele_pending--;
    852 		mutex_exit(&vrele_lock);
    853 
    854 		/*
    855 		 * If not the last reference, then ignore the vnode
    856 		 * and look for more work.
    857 		 */
    858 		mutex_enter(vp->v_interlock);
    859 		KASSERT((vp->v_iflag & VI_INACTPEND) != 0);
    860 		vp->v_iflag &= ~VI_INACTPEND;
    861 		vrelel(vp, 0);
    862 	}
    863 }
    864 
    865 void
    866 vrele_flush(void)
    867 {
    868 	int gen;
    869 
    870 	mutex_enter(&vrele_lock);
    871 	gen = vrele_gen;
    872 	while (vrele_pending && gen == vrele_gen) {
    873 		cv_broadcast(&vrele_cv);
    874 		cv_wait(&vrele_cv, &vrele_lock);
    875 	}
    876 	mutex_exit(&vrele_lock);
    877 }
    878 
    879 /*
    880  * Vnode reference, where a reference is already held by some other
    881  * object (for example, a file structure).
    882  */
    883 void
    884 vref(vnode_t *vp)
    885 {
    886 
    887 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    888 	KASSERT(vp->v_usecount != 0);
    889 
    890 	atomic_inc_uint(&vp->v_usecount);
    891 }
    892 
    893 /*
    894  * Page or buffer structure gets a reference.
    895  * Called with v_interlock held.
    896  */
    897 void
    898 vholdl(vnode_t *vp)
    899 {
    900 
    901 	KASSERT(mutex_owned(vp->v_interlock));
    902 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    903 
    904 	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0) {
    905 		mutex_enter(&vnode_free_list_lock);
    906 		KASSERT(vp->v_freelisthd == &vnode_free_list);
    907 		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
    908 		vp->v_freelisthd = &vnode_hold_list;
    909 		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
    910 		mutex_exit(&vnode_free_list_lock);
    911 	}
    912 }
    913 
    914 /*
    915  * Page or buffer structure frees a reference.
    916  * Called with v_interlock held.
    917  */
    918 void
    919 holdrelel(vnode_t *vp)
    920 {
    921 
    922 	KASSERT(mutex_owned(vp->v_interlock));
    923 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    924 
    925 	if (vp->v_holdcnt <= 0) {
    926 		vpanic(vp, "holdrelel: bad holdcnt");
    927 	}
    928 
    929 	vp->v_holdcnt--;
    930 	if (vp->v_holdcnt == 0 && vp->v_usecount == 0) {
    931 		mutex_enter(&vnode_free_list_lock);
    932 		KASSERT(vp->v_freelisthd == &vnode_hold_list);
    933 		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
    934 		vp->v_freelisthd = &vnode_free_list;
    935 		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
    936 		mutex_exit(&vnode_free_list_lock);
    937 	}
    938 }
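
/*
 * Illustrative sketch of the hold protocol (modelled on the buffer
 * cache's attach/detach of buffers): one hold reference is taken per
 * attached object, keeping an unreferenced vnode on the hold list so
 * its buffers keep their identity:
 *
 *	mutex_enter(vp->v_interlock);
 *	bp->b_vp = vp;
 *	vholdl(vp);			(attach)
 *	mutex_exit(vp->v_interlock);
 *	...
 *	mutex_enter(vp->v_interlock);
 *	holdrelel(vp);			(detach)
 *	bp->b_vp = NULL;
 *	mutex_exit(vp->v_interlock);
 */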
    939 
    940 /*
    941  * Disassociate the underlying file system from a vnode.
    942  *
    943  * Must be called with the interlock held, and will return with it held.
    944  */
    945 void
    946 vclean(vnode_t *vp, int flags)
    947 {
    948 	lwp_t *l = curlwp;
    949 	bool recycle, active;
    950 	int error;
    951 
    952 	KASSERT(mutex_owned(vp->v_interlock));
    953 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
    954 	KASSERT(vp->v_usecount != 0);
    955 
    956 	/* If cleaning is already in progress, wait until done and return. */
    957 	if (vp->v_iflag & VI_XLOCK) {
    958 		vwait(vp, VI_XLOCK);
    959 		return;
    960 	}
    961 
    962 	/* If already clean, nothing to do. */
    963 	if ((vp->v_iflag & VI_CLEAN) != 0) {
    964 		return;
    965 	}
    966 
    967 	/*
    968 	 * Prevent the vnode from being recycled or brought into use
    969 	 * while we clean it out.
    970 	 */
    971 	vp->v_iflag |= VI_XLOCK;
    972 	if (vp->v_iflag & VI_EXECMAP) {
    973 		atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
    974 		atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
    975 	}
    976 	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
    977 	active = (vp->v_usecount & VC_MASK) > 1;
    978 
    979 	/* XXXAD should not lock vnode under layer */
    980 	mutex_exit(vp->v_interlock);
    981 	VOP_LOCK(vp, LK_EXCLUSIVE);
    982 
    983 	/*
    984 	 * Clean out any cached data associated with the vnode.
    985 	 * If purging an active vnode, it must be closed and
    986 	 * deactivated before being reclaimed. Note that the
    987 	 * VOP_INACTIVE will unlock the vnode.
    988 	 */
    989 	if (flags & DOCLOSE) {
    990 		error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
    991 		if (error != 0) {
    992 			/* XXX, fix vn_start_write's grab of mp and use that. */
    993 
    994 			if (wapbl_vphaswapbl(vp))
    995 				WAPBL_DISCARD(wapbl_vptomp(vp));
    996 			error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
    997 		}
    998 		KASSERT(error == 0);
    999 		KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1000 		if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
   1001 			 spec_node_revoke(vp);
   1002 		}
   1003 	}
   1004 	if (active) {
   1005 		VOP_INACTIVE(vp, &recycle);
   1006 	} else {
   1007 		/*
   1008 		 * Any other processes trying to obtain this lock must first
   1009 		 * wait for VI_XLOCK to clear, then call the new lock operation.
   1010 		 */
   1011 		VOP_UNLOCK(vp);
   1012 	}
   1013 
   1014 	/* Disassociate the underlying file system from the vnode. */
   1015 	if (VOP_RECLAIM(vp)) {
   1016 		vpanic(vp, "vclean: cannot reclaim");
   1017 	}
   1018 
   1019 	KASSERT(vp->v_uobj.uo_npages == 0);
   1020 	if (vp->v_type == VREG && vp->v_ractx != NULL) {
   1021 		uvm_ra_freectx(vp->v_ractx);
   1022 		vp->v_ractx = NULL;
   1023 	}
   1024 	cache_purge(vp);
   1025 
   1026 	/* Done with purge, notify sleepers of the grim news. */
   1027 	mutex_enter(vp->v_interlock);
   1028 	vp->v_op = dead_vnodeop_p;
   1029 	vp->v_tag = VT_NON;
   1030 	KNOTE(&vp->v_klist, NOTE_REVOKE);
   1031 	vp->v_iflag &= ~VI_XLOCK;
   1032 	vp->v_vflag &= ~VV_LOCKSWORK;
   1033 	if ((flags & DOCLOSE) != 0) {
   1034 		vp->v_iflag |= VI_CLEAN;
   1035 	}
   1036 	cv_broadcast(&vp->v_cv);
   1037 
   1038 	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
   1039 }
   1040 
   1041 /*
   1042  * Recycle an unused vnode: clean it out and release it back to the freelist.
   1043  * Release the passed interlock if the vnode will be recycled.
   1044  */
   1045 int
   1046 vrecycle(vnode_t *vp, kmutex_t *inter_lkp, struct lwp *l)
   1047 {
   1048 
   1049 	KASSERT((vp->v_iflag & VI_MARKER) == 0);
   1050 
   1051 	mutex_enter(vp->v_interlock);
   1052 	if (vp->v_usecount != 0) {
   1053 		mutex_exit(vp->v_interlock);
   1054 		return 0;
   1055 	}
   1056 	if (inter_lkp) {
   1057 		mutex_exit(inter_lkp);
   1058 	}
   1059 	vremfree(vp);
   1060 	vp->v_usecount = 1;
   1061 	vclean(vp, DOCLOSE);
   1062 	vrelel(vp, 0);
   1063 	return 1;
   1064 }
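
/*
 * Illustrative usage sketch: a file system that encounters a vnode it
 * knows to be unused can dispose of it on the spot; a non-zero return
 * means the vnode was cleaned out, zero means it was busy and left
 * alone:
 *
 *	if (vrecycle(vp, NULL, curlwp))
 *		...vp has been cleaned and released...
 */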
   1065 
   1066 /*
   1067  * Eliminate all activity associated with the requested vnode
   1068  * and with all vnodes aliased to the requested vnode.
   1069  */
   1070 void
   1071 vrevoke(vnode_t *vp)
   1072 {
   1073 	vnode_t *vq, **vpp;
   1074 	enum vtype type;
   1075 	dev_t dev;
   1076 
   1077 	KASSERT(vp->v_usecount > 0);
   1078 
   1079 	mutex_enter(vp->v_interlock);
   1080 	if ((vp->v_iflag & VI_CLEAN) != 0) {
   1081 		mutex_exit(vp->v_interlock);
   1082 		return;
   1083 	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
   1084 		atomic_inc_uint(&vp->v_usecount);
   1085 		vclean(vp, DOCLOSE);
   1086 		vrelel(vp, 0);
   1087 		return;
   1088 	} else {
   1089 		dev = vp->v_rdev;
   1090 		type = vp->v_type;
   1091 		mutex_exit(vp->v_interlock);
   1092 	}
   1093 
   1094 	vpp = &specfs_hash[SPECHASH(dev)];
   1095 	mutex_enter(&device_lock);
   1096 	for (vq = *vpp; vq != NULL;) {
   1097 		/* If clean or being cleaned, then ignore it. */
   1098 		mutex_enter(vq->v_interlock);
   1099 		if ((vq->v_iflag & (VI_CLEAN | VI_XLOCK)) != 0 ||
   1100 		    vq->v_rdev != dev || vq->v_type != type) {
   1101 			mutex_exit(vq->v_interlock);
   1102 			vq = vq->v_specnext;
   1103 			continue;
   1104 		}
   1105 		mutex_exit(&device_lock);
   1106 		if (vq->v_usecount == 0) {
   1107 			vremfree(vq);
   1108 			vq->v_usecount = 1;
   1109 		} else {
   1110 			atomic_inc_uint(&vq->v_usecount);
   1111 		}
   1112 		vclean(vq, DOCLOSE);
   1113 		vrelel(vq, 0);
   1114 		mutex_enter(&device_lock);
   1115 		vq = *vpp;
   1116 	}
   1117 	mutex_exit(&device_lock);
   1118 }
   1119 
   1120 /*
   1121  * Eliminate all activity associated with a vnode in preparation for
   1122  * reuse.  Drops a reference from the vnode.
   1123  */
   1124 void
   1125 vgone(vnode_t *vp)
   1126 {
   1127 
   1128 	mutex_enter(vp->v_interlock);
   1129 	vclean(vp, DOCLOSE);
   1130 	vrelel(vp, 0);
   1131 }
   1132 
   1133 /*
   1134  * Update outstanding I/O count and do wakeup if requested.
   1135  */
   1136 void
   1137 vwakeup(struct buf *bp)
   1138 {
   1139 	vnode_t *vp;
   1140 
   1141 	if ((vp = bp->b_vp) == NULL)
   1142 		return;
   1143 
   1144 	KASSERT(bp->b_objlock == vp->v_interlock);
   1145 	KASSERT(mutex_owned(bp->b_objlock));
   1146 
   1147 	if (--vp->v_numoutput < 0)
   1148 		panic("vwakeup: neg numoutput, vp %p", vp);
   1149 	if (vp->v_numoutput == 0)
   1150 		cv_broadcast(&vp->v_cv);
   1151 }
   1152 
   1153 /*
   1154  * Wait for a vnode (typically with VI_XLOCK set) to be cleaned or
   1155  * recycled.
   1156  */
   1157 void
   1158 vwait(vnode_t *vp, int flags)
   1159 {
   1160 
   1161 	KASSERT(mutex_owned(vp->v_interlock));
   1162 	KASSERT(vp->v_usecount != 0);
   1163 
   1164 	while ((vp->v_iflag & flags) != 0)
   1165 		cv_wait(&vp->v_cv, vp->v_interlock);
   1166 }
   1167 
   1168 int
   1169 vfs_drainvnodes(long target)
   1170 {
   1171 
   1172 	while (numvnodes > target) {
   1173 		vnode_t *vp;
   1174 
   1175 		mutex_enter(&vnode_free_list_lock);
   1176 		vp = getcleanvnode();
   1177 		if (vp == NULL) {
   1178 			return EBUSY;
   1179 		}
   1180 		ungetnewvnode(vp);
   1181 	}
   1182 	return 0;
   1183 }
   1184 
   1185 static void
   1186 vpanic(vnode_t *vp, const char *msg)
   1187 {
   1188 #ifdef DIAGNOSTIC
   1189 
   1190 	vprint(NULL, vp);
   1191 	panic("%s\n", msg);
   1192 #endif
   1193 }
   1194