      1 /*	$NetBSD: vfs_cache.c,v 1.129 2020/03/23 18:33:43 ad Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 2008, 2019, 2020 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Andrew Doran.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29  * POSSIBILITY OF SUCH DAMAGE.
     30  */
     31 
     32 /*
     33  * Copyright (c) 1989, 1993
     34  *	The Regents of the University of California.  All rights reserved.
     35  *
     36  * Redistribution and use in source and binary forms, with or without
     37  * modification, are permitted provided that the following conditions
     38  * are met:
     39  * 1. Redistributions of source code must retain the above copyright
     40  *    notice, this list of conditions and the following disclaimer.
     41  * 2. Redistributions in binary form must reproduce the above copyright
     42  *    notice, this list of conditions and the following disclaimer in the
     43  *    documentation and/or other materials provided with the distribution.
     44  * 3. Neither the name of the University nor the names of its contributors
     45  *    may be used to endorse or promote products derived from this software
     46  *    without specific prior written permission.
     47  *
     48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     58  * SUCH DAMAGE.
     59  *
     60  *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
     61  */
     62 
     63 /*
     64  * Name caching:
     65  *
     66  *	Names found by directory scans are retained in a cache for future
     67  *	reference.  It is managed LRU, so frequently used names will hang
     68  *	around.  The cache is indexed by hash value obtained from the name.
     69  *
     70  *	The name cache is the brainchild of Robert Elz and was introduced in
     71  *	4.3BSD.  See "Using gprof to Tune the 4.2BSD Kernel", Marshall Kirk
     72  *	McKusick, May 21 1984.
     73  *
     74  * Data structures:
     75  *
     76  *	Most Unix namecaches very sensibly use a global hash table to index
     77  *	names.  The global hash table works well, but can cause concurrency
     78  *	headaches for the kernel hacker.  In the NetBSD 10.0 implementation
     79  *	we are not sensible, and use a per-directory data structure to index
     80  *	names, but the cache otherwise functions the same.
     81  *
     82  *	The index is a red-black tree.  There are no special concurrency
     83  *	requirements placed on it, because it's per-directory and protected
     84  *	by the namecache's per-directory locks.  It should therefore not be
     85  *	difficult to experiment with other types of index.
     86  *
     87  *	Each cached name is stored in a struct namecache, along with a
     88  *	pointer to the associated vnode (nc_vp).  Names longer than a
     89  *	maximum length of NCHNAMLEN are allocated with kmem_alloc(); they
     90  *	occur infrequently, and names shorter than this are stored directly
      91  *	in struct namecache.  If it is a "negative" entry (i.e. for a name
      92  *	that is known NOT to exist), the vnode pointer will be NULL.
     93  *
     94  *	For a directory with 3 cached names for 3 distinct vnodes, the
     95  *	various vnodes and namecache structs would be connected like this
     96  *	(the root is at the bottom of the diagram):
     97  *
     98  *          ...
     99  *           ^
    100  *           |- vi_nc_tree
    101  *           |
    102  *      +----o----+               +---------+               +---------+
    103  *      |  VDIR   |               |  VCHR   |               |  VREG   |
    104  *      |  vnode  o-----+         |  vnode  o-----+         |  vnode  o------+
    105  *      +---------+     |         +---------+     |         +---------+      |
    106  *           ^          |              ^          |              ^           |
    107  *           |- nc_vp   |- vi_nc_list  |- nc_vp   |- vi_nc_list  |- nc_vp    |
    108  *           |          |              |          |              |           |
    109  *      +----o----+     |         +----o----+     |         +----o----+      |
    110  *  +---onamecache|<----+     +---onamecache|<----+     +---onamecache|<-----+
    111  *  |   +---------+           |   +---------+           |   +---------+
    112  *  |        ^                |        ^                |        ^
    113  *  |        |                |        |                |        |
    114  *  |        |  +----------------------+                |        |
    115  *  |-nc_dvp | +-------------------------------------------------+
    116  *  |        |/- vi_nc_tree   |                         |
    117  *  |        |                |- nc_dvp                 |- nc_dvp
    118  *  |   +----o----+           |                         |
    119  *  +-->|  VDIR   |<----------+                         |
    120  *      |  vnode  |<------------------------------------+
    121  *      +---------+
    122  *
    123  *      START HERE
    124  *
    125  * Replacement:
    126  *
    127  *	As the cache becomes full, old and unused entries are purged as new
    128  *	entries are added.  The synchronization overhead in maintaining a
    129  *	strict ordering would be prohibitive, so the VM system's "clock" or
    130  *	"second chance" page replacement algorithm is aped here.  New
    131  *	entries go to the tail of the active list.  After they age out and
    132  *	reach the head of the list, they are moved to the tail of the
    133  *	inactive list.  Any use of the deactivated cache entry reactivates
    134  *	it, saving it from impending doom; if not reactivated, the entry
    135  *	eventually reaches the head of the inactive list and is purged.
    136  *
    137  * Concurrency:
    138  *
    139  *	From a performance perspective, cache_lookup(nameiop == LOOKUP) is
    140  *	what really matters; insertion of new entries with cache_enter() is
    141  *	comparatively infrequent, and overshadowed by the cost of expensive
    142  *	file system metadata operations (which may involve disk I/O).  We
    143  *	therefore want to keep the lookup path as simple as possible.
    144  *
    145  *	struct namecache is mostly stable except for list and tree related
    146  *	entries, changes to which don't affect the cached name or vnode.
    147  *	For changes to name+vnode, entries are purged in preference to
    148  *	modifying them.
    149  *
    150  *	Read access to namecache entries is made via tree, list, or LRU
    151  *	list.  A lock corresponding to the direction of access should be
    152  *	held.  See definition of "struct namecache" in src/sys/namei.src,
    153  *	and the definition of "struct vnode" for the particulars.
    154  *
    155  *	Per-CPU statistics and LRU list totals are read unlocked, since
    156  *	an approximate value is OK.  We maintain 32-bit sized per-CPU
    157  *	counters and 64-bit global counters under the theory that 32-bit
    158  *	sized counters are less likely to be hosed by nonatomic increment
    159  *	(on 32-bit platforms).
    160  *
    161  *	The lock order is:
    162  *
    163  *	1) vi->vi_nc_lock	(tree or parent -> child direction,
    164  *				 used during forward lookup)
    165  *
    166  *	2) vi->vi_nc_listlock	(list or child -> parent direction,
    167  *				 used during reverse lookup)
    168  *
    169  *	3) cache_lru_lock	(LRU list direction, used during reclaim)
    170  *
    171  *	4) vp->v_interlock	(what the cache entry points to)
    172  */
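
        /*
         * As an illustration of the lock order above, a forward lookup
         * (condensed from cache_lookup() below, with error handling and
         * statistics omitted) acquires locks in this sequence:
         *
         *	rw_enter(&dvi->vi_nc_lock, RW_READER);		lock (1)
         *	ncp = cache_lookup_entry(dvp, name, namelen, key);
         *	vp = ncp->nc_vp;
         *	mutex_enter(vp->v_interlock);			lock (4)
         *	rw_exit(&dvi->vi_nc_lock);
         *	error = vcache_tryvget(vp);	releases v_interlock
         *
         * This is only a sketch; the real functions below also handle
         * negative entries, purges and statistics.
         */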
    173 
    174 #include <sys/cdefs.h>
    175 __KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.129 2020/03/23 18:33:43 ad Exp $");
    176 
    177 #define __NAMECACHE_PRIVATE
    178 #ifdef _KERNEL_OPT
    179 #include "opt_ddb.h"
    180 #include "opt_dtrace.h"
    181 #endif
    182 
    183 #include <sys/types.h>
    184 #include <sys/atomic.h>
    185 #include <sys/callout.h>
    186 #include <sys/cpu.h>
    187 #include <sys/errno.h>
    188 #include <sys/evcnt.h>
    189 #include <sys/hash.h>
    190 #include <sys/kernel.h>
    191 #include <sys/mount.h>
    192 #include <sys/mutex.h>
    193 #include <sys/namei.h>
    194 #include <sys/param.h>
    195 #include <sys/pool.h>
    196 #include <sys/sdt.h>
    197 #include <sys/sysctl.h>
    198 #include <sys/systm.h>
    199 #include <sys/time.h>
    200 #include <sys/vnode_impl.h>
    201 
    202 #include <miscfs/genfs/genfs.h>
    203 
    204 static void	cache_activate(struct namecache *);
    205 static void	cache_update_stats(void *);
    206 static int	cache_compare_key(void *, const void *, const void *);
    207 static int	cache_compare_nodes(void *, const void *, const void *);
    208 static void	cache_deactivate(void);
    209 static void	cache_reclaim(void);
    210 static int	cache_stat_sysctl(SYSCTLFN_ARGS);
    211 
    212 /* Global pool cache. */
    213 static pool_cache_t cache_pool __read_mostly;
    214 
    215 /* LRU replacement. */
    216 enum cache_lru_id {
    217 	LRU_ACTIVE,
    218 	LRU_INACTIVE,
    219 	LRU_COUNT
    220 };
    221 
    222 static struct {
    223 	TAILQ_HEAD(, namecache)	list[LRU_COUNT];
    224 	u_int			count[LRU_COUNT];
    225 } cache_lru __cacheline_aligned;
    226 
    227 static kmutex_t cache_lru_lock __cacheline_aligned;
    228 
    229 /* Cache effectiveness statistics.  nchstats holds system-wide total. */
    230 struct nchstats	nchstats;
    231 struct nchstats_percpu _NAMEI_CACHE_STATS(uint32_t);
    232 struct nchcpu {
    233 	struct nchstats_percpu cur;
    234 	struct nchstats_percpu last;
    235 };
    236 static callout_t cache_stat_callout;
    237 static kmutex_t cache_stat_lock __cacheline_aligned;
    238 
    239 #define	COUNT(f)	do { \
    240 	lwp_t *l = curlwp; \
    241 	KPREEMPT_DISABLE(l); \
    242 	((struct nchstats_percpu *)curcpu()->ci_data.cpu_nch)->f++; \
    243 	KPREEMPT_ENABLE(l); \
    244 } while (/* CONSTCOND */ 0)
    245 
    246 #define	UPDATE(nchcpu, f) do { \
    247 	uint32_t cur = atomic_load_relaxed(&nchcpu->cur.f); \
    248 	nchstats.f += cur - nchcpu->last.f; \
    249 	nchcpu->last.f = cur; \
    250 } while (/* CONSTCOND */ 0)
    251 
    252 /*
    253  * Tunables.  cache_maxlen replaces the historical doingcache:
    254  * set it to zero to disable caching for debugging purposes.
    255  */
    256 int cache_lru_maxdeact __read_mostly = 2;	/* max # to deactivate */
    257 int cache_lru_maxscan __read_mostly = 64;	/* max # to scan/reclaim */
    258 int cache_maxlen __read_mostly = USHRT_MAX;	/* max name length to cache */
    259 int cache_stat_interval __read_mostly = 300;	/* in seconds */
    260 
    261 /* sysctl */
    262 static struct	sysctllog *cache_sysctllog;
    263 
    264 /* Red-black tree */
    265 static const rb_tree_ops_t cache_rbtree_ops = {
    266 	.rbto_compare_nodes = cache_compare_nodes,
    267 	.rbto_compare_key = cache_compare_key,
    268 	.rbto_node_offset = offsetof(struct namecache, nc_tree),
    269 	.rbto_context = NULL
    270 };
    271 
    272 /* dtrace hooks */
    273 SDT_PROVIDER_DEFINE(vfs);
    274 
    275 SDT_PROBE_DEFINE1(vfs, namecache, invalidate, done, "struct vnode *");
    276 SDT_PROBE_DEFINE1(vfs, namecache, purge, parents, "struct vnode *");
    277 SDT_PROBE_DEFINE1(vfs, namecache, purge, children, "struct vnode *");
    278 SDT_PROBE_DEFINE2(vfs, namecache, purge, name, "char *", "size_t");
    279 SDT_PROBE_DEFINE1(vfs, namecache, purge, vfs, "struct mount *");
    280 SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *",
    281     "char *", "size_t");
    282 SDT_PROBE_DEFINE3(vfs, namecache, lookup, miss, "struct vnode *",
    283     "char *", "size_t");
    284 SDT_PROBE_DEFINE3(vfs, namecache, lookup, toolong, "struct vnode *",
    285     "char *", "size_t");
    286 SDT_PROBE_DEFINE2(vfs, namecache, revlookup, success, "struct vnode *",
    287      "struct vnode *");
    288 SDT_PROBE_DEFINE2(vfs, namecache, revlookup, fail, "struct vnode *",
    289      "int");
    290 SDT_PROBE_DEFINE2(vfs, namecache, prune, done, "int", "int");
    291 SDT_PROBE_DEFINE3(vfs, namecache, enter, toolong, "struct vnode *",
    292     "char *", "size_t");
    293 SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *",
    294     "char *", "size_t");
    295 
    296 /*
    297  * rbtree: compare two nodes.
    298  */
    299 static int
    300 cache_compare_nodes(void *context, const void *n1, const void *n2)
    301 {
    302 	const struct namecache *nc1 = n1;
    303 	const struct namecache *nc2 = n2;
    304 
    305 	if (nc1->nc_key < nc2->nc_key) {
    306 		return -1;
    307 	}
    308 	if (nc1->nc_key > nc2->nc_key) {
    309 		return 1;
    310 	}
    311 	return 0;
    312 }
    313 
    314 /*
    315  * rbtree: compare a node and a key.
    316  */
    317 static int
    318 cache_compare_key(void *context, const void *n, const void *k)
    319 {
    320 	const struct namecache *ncp = n;
    321 	const uint64_t key = *(const uint64_t *)k;
    322 
    323 	if (ncp->nc_key < key) {
    324 		return -1;
    325 	}
    326 	if (ncp->nc_key > key) {
    327 		return 1;
    328 	}
    329 	return 0;
    330 }
    331 
    332 /*
    333  * Compute a key value for the given name.  The name length is encoded in
    334  * the key value to try and improve uniqueness, and so that length doesn't
    335  * need to be compared separately for string comparisons.
    336  */
    337 static inline uint64_t
    338 cache_key(const char *name, size_t nlen)
    339 {
    340 	uint64_t key;
    341 
    342 	KASSERT(nlen <= USHRT_MAX);
    343 
    344 	key = hash32_buf(name, nlen, HASH32_STR_INIT);
    345 	return (key << 32) | nlen;
    346 }
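
        /*
         * A minimal illustration of the encoding (the exact hash value is
         * whatever hash32_buf() produces, so it is left symbolic):
         *
         *	uint64_t k = cache_key("etc", 3);
         *
         *	(k & 0xffffffff) == 3
         *	(k >> 32) == hash32_buf("etc", 3, HASH32_STR_INIT)
         *
         * Because the length occupies the low 32 bits, two names of
         * different lengths can never produce equal keys.
         */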
    347 
    348 /*
    349  * Like bcmp() but tuned for the use case here which is:
    350  *
    351  * - always of equal length both sides
    352  * - almost always the same string both sides
    353  * - small strings
    354  */
    355 static inline int
    356 cache_namecmp(struct namecache *ncp, const char *name, size_t namelen)
    357 {
    358 	size_t i;
    359 	int d;
    360 
    361 	KASSERT(ncp->nc_nlen == namelen);
    362 	for (d = 0, i = 0; i < namelen; i++) {
    363 		d |= (ncp->nc_name[i] ^ name[i]);
    364 	}
    365 	return d;
    366 }
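
        /*
         * A sketch of the intended equivalence, assuming equal lengths as
         * asserted above: the return value is zero exactly when
         *
         *	memcmp(ncp->nc_name, name, namelen) == 0
         *
         * but the XOR/OR loop avoids a per-byte early-out branch, which
         * suits the common case where both strings match.
         */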
    367 
    368 /*
    369  * Remove an entry from the cache.  vi_nc_lock must be held, and if dir2node
    370  * is true, then we're locking in the conventional direction and the list
    371  * lock will be acquired when removing the entry from the vnode list.
    372  */
    373 static void
    374 cache_remove(struct namecache *ncp, const bool dir2node)
    375 {
    376 	struct vnode *vp, *dvp = ncp->nc_dvp;
    377 	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
    378 
    379 	KASSERT(rw_write_held(&dvi->vi_nc_lock));
    380 	KASSERT(cache_key(ncp->nc_name, ncp->nc_nlen) == ncp->nc_key);
    381 	KASSERT(rb_tree_find_node(&dvi->vi_nc_tree, &ncp->nc_key) == ncp);
    382 
    383 	SDT_PROBE(vfs, namecache, invalidate, done, ncp,
    384 	    0, 0, 0, 0);
    385 
    386 	/* First remove from the directory's rbtree. */
    387 	rb_tree_remove_node(&dvi->vi_nc_tree, ncp);
    388 
    389 	/* Then remove from the LRU lists. */
    390 	mutex_enter(&cache_lru_lock);
    391 	TAILQ_REMOVE(&cache_lru.list[ncp->nc_lrulist], ncp, nc_lru);
    392 	cache_lru.count[ncp->nc_lrulist]--;
    393 	mutex_exit(&cache_lru_lock);
    394 
    395 	/* Then remove from the node's list. */
    396 	if ((vp = ncp->nc_vp) != NULL) {
    397 		vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
    398 		if (__predict_true(dir2node)) {
    399 			rw_enter(&vi->vi_nc_listlock, RW_WRITER);
    400 			TAILQ_REMOVE(&vi->vi_nc_list, ncp, nc_list);
    401 			rw_exit(&vi->vi_nc_listlock);
    402 		} else {
    403 			TAILQ_REMOVE(&vi->vi_nc_list, ncp, nc_list);
    404 		}
    405 	}
    406 
    407 	/* Finally, free it. */
    408 	if (ncp->nc_nlen > NCHNAMLEN) {
    409 		size_t sz = offsetof(struct namecache, nc_name[ncp->nc_nlen]);
    410 		kmem_free(ncp, sz);
    411 	} else {
    412 		pool_cache_put(cache_pool, ncp);
    413 	}
    414 }
    415 
    416 /*
    417  * Find a single cache entry and return it.  vi_nc_lock must be held.
    418  */
    419 static struct namecache * __noinline
    420 cache_lookup_entry(struct vnode *dvp, const char *name, size_t namelen,
    421     uint64_t key)
    422 {
    423 	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
    424 	struct rb_node *node = dvi->vi_nc_tree.rbt_root;
    425 	struct namecache *ncp;
    426 	int lrulist;
    427 
    428 	KASSERT(rw_lock_held(&dvi->vi_nc_lock));
    429 
    430 	/*
    431 	 * Search the RB tree for the key.  This is an inlined lookup
    432 	 * tailored for exactly what's needed here (64-bit key and so on)
    433 	 * that is quite a bit faster than using rb_tree_find_node().
    434 	 */
    435 	for (;;) {
    436 		if (__predict_false(RB_SENTINEL_P(node))) {
    437 			return NULL;
    438 		}
    439 		ncp = (struct namecache *)node;
    440 		KASSERT((void *)&ncp->nc_tree == (void *)ncp);
    441 		KASSERT(ncp->nc_dvp == dvp);
    442 		if (ncp->nc_key == key) {
    443 			break;
    444 		}
    445 		node = node->rb_nodes[ncp->nc_key < key];
    446 	}
    447 
    448 	/* Exclude collisions. */
    449 	if (__predict_false(cache_namecmp(ncp, name, namelen))) {
    450 		return NULL;
    451 	}
    452 
    453 	/*
    454 	 * If the entry is on the wrong LRU list, requeue it.  This is an
    455 	 * unlocked check, but it will rarely be wrong and even then there
    456 	 * will be no harm caused.
    457 	 */
    458 	lrulist = atomic_load_relaxed(&ncp->nc_lrulist);
    459 	if (__predict_false(lrulist != LRU_ACTIVE)) {
    460 		cache_activate(ncp);
    461 	}
    462 	return ncp;
    463 }
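
        /*
         * The hand-rolled descent above computes the same result as the
         * generic call that the comment says it replaces:
         *
         *	ncp = rb_tree_find_node(&dvi->vi_nc_tree, &key);
         *
         * with the cache_namecmp() collision check still needed afterwards.
         */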
    464 
    465 /*
    466  * Look for the name in the cache. We don't do this
    467  * if the segment name is long, simply so the cache can avoid
    468  * holding long names (which would either waste space, or
    469  * add greatly to the complexity).
    470  *
    471  * Lookup is called with DVP pointing to the directory to search,
    472  * and CNP providing the name of the entry being sought: cn_nameptr
    473  * is the name, cn_namelen is its length, and cn_flags is the flags
    474  * word from the namei operation.
    475  *
    476  * DVP must be locked.
    477  *
    478  * There are three possible non-error return states:
    479  *    1. Nothing was found in the cache. Nothing is known about
    480  *       the requested name.
    481  *    2. A negative entry was found in the cache, meaning that the
    482  *       requested name definitely does not exist.
    483  *    3. A positive entry was found in the cache, meaning that the
    484  *       requested name does exist and that we are providing the
    485  *       vnode.
    486  * In these cases the results are:
    487  *    1. 0 returned; VN is set to NULL.
    488  *    2. 1 returned; VN is set to NULL.
    489  *    3. 1 returned; VN is set to the vnode found.
    490  *
    491  * The additional result argument ISWHT is set to zero, unless a
    492  * negative entry is found that was entered as a whiteout, in which
    493  * case ISWHT is set to one.
    494  *
    495  * The ISWHT_RET argument pointer may be null. In this case an
    496  * assertion is made that the whiteout flag is not set. File systems
    497  * that do not support whiteouts can/should do this.
    498  *
    499  * Filesystems that do support whiteouts should add ISWHITEOUT to
    500  * cnp->cn_flags if ISWHT comes back nonzero.
    501  *
    502  * When a vnode is returned, it is unlocked, as per the vnode lookup
    503  * locking protocol.
    504  *
    505  * There is no way for this function to fail, in the sense of
    506  * generating an error that requires aborting the namei operation.
    507  *
    508  * (Prior to October 2012, this function returned an integer status,
    509  * and a vnode, and mucked with the flags word in CNP for whiteouts.
    510  * The integer status was -1 for "nothing found", ENOENT for "a
    511  * negative entry found", 0 for "a positive entry found", and possibly
    512  * other errors, and the value of VN might or might not have been set
    513  * depending on what error occurred.)
    514  */
    515 bool
    516 cache_lookup(struct vnode *dvp, const char *name, size_t namelen,
    517 	     uint32_t nameiop, uint32_t cnflags,
    518 	     int *iswht_ret, struct vnode **vn_ret)
    519 {
    520 	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
    521 	struct namecache *ncp;
    522 	struct vnode *vp;
    523 	uint64_t key;
    524 	int error;
    525 	bool hit;
    526 	krw_t op;
    527 
    528 	/* Establish default result values */
    529 	if (iswht_ret != NULL) {
    530 		*iswht_ret = 0;
    531 	}
    532 	*vn_ret = NULL;
    533 
    534 	if (__predict_false(namelen > cache_maxlen)) {
    535 		SDT_PROBE(vfs, namecache, lookup, toolong, dvp,
    536 		    name, namelen, 0, 0);
    537 		COUNT(ncs_long);
    538 		return false;
    539 	}
    540 
    541 	/* Compute the key up front - don't need the lock. */
    542 	key = cache_key(name, namelen);
    543 
    544 	/* Could the entry be purged below? */
    545 	if ((cnflags & ISLASTCN) != 0 &&
    546 	    ((cnflags & MAKEENTRY) == 0 || nameiop == CREATE)) {
    547 	    	op = RW_WRITER;
    548 	} else {
    549 		op = RW_READER;
    550 	}
    551 
    552 	/* Now look for the name. */
    553 	rw_enter(&dvi->vi_nc_lock, op);
    554 	ncp = cache_lookup_entry(dvp, name, namelen, key);
    555 	if (__predict_false(ncp == NULL)) {
    556 		rw_exit(&dvi->vi_nc_lock);
    557 		COUNT(ncs_miss);
    558 		SDT_PROBE(vfs, namecache, lookup, miss, dvp,
    559 		    name, namelen, 0, 0);
    560 		return false;
    561 	}
    562 	if (__predict_false((cnflags & MAKEENTRY) == 0)) {
    563 		/*
    564 		 * Last component and we are renaming or deleting,
    565 		 * the cache entry is invalid, or otherwise don't
    566 		 * want cache entry to exist.
    567 		 */
    568 		KASSERT((cnflags & ISLASTCN) != 0);
    569 		cache_remove(ncp, true);
    570 		rw_exit(&dvi->vi_nc_lock);
    571 		COUNT(ncs_badhits);
    572 		return false;
    573 	}
    574 	if (ncp->nc_vp == NULL) {
    575 		if (nameiop == CREATE && (cnflags & ISLASTCN) != 0) {
    576 			/*
    577 			 * Last component and we are preparing to create
    578 			 * the named object, so flush the negative cache
    579 			 * entry.
    580 			 */
    581 			COUNT(ncs_badhits);
    582 			cache_remove(ncp, true);
    583 			hit = false;
    584 		} else {
    585 			COUNT(ncs_neghits);
    586 			SDT_PROBE(vfs, namecache, lookup, hit, dvp, name,
    587 			    namelen, 0, 0);
    588 			/* found neg entry; vn is already null from above */
    589 			hit = true;
    590 		}
    591 		if (iswht_ret != NULL) {
    592 			/*
    593 			 * Restore the ISWHITEOUT flag saved earlier.
    594 			 */
    595 			*iswht_ret = ncp->nc_whiteout;
    596 		} else {
    597 			KASSERT(!ncp->nc_whiteout);
    598 		}
    599 		rw_exit(&dvi->vi_nc_lock);
    600 		return hit;
    601 	}
    602 	vp = ncp->nc_vp;
    603 	mutex_enter(vp->v_interlock);
    604 	rw_exit(&dvi->vi_nc_lock);
    605 
    606 	/*
    607 	 * Unlocked except for the vnode interlock.  Call vcache_tryvget().
    608 	 */
    609 	error = vcache_tryvget(vp);
    610 	if (error) {
    611 		KASSERT(error == EBUSY);
    612 		/*
    613 		 * This vnode is being cleaned out.
    614 		 * XXX badhits?
    615 		 */
    616 		COUNT(ncs_falsehits);
    617 		return false;
    618 	}
    619 
    620 	COUNT(ncs_goodhits);
    621 	SDT_PROBE(vfs, namecache, lookup, hit, dvp, name, namelen, 0, 0);
    622 	/* found it */
    623 	*vn_ret = vp;
    624 	return true;
    625 }
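
        /*
         * Illustrative caller sketch (not part of this file): how a file
         * system's lookup routine might consume the three result states
         * documented above.  "xfs_scan_dir" stands in for the file
         * system's own directory scan and is hypothetical; error handling
         * is trimmed.
         *
         *	struct vnode *vp;
         *	int error, iswht;
         *
         *	if (cache_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
         *	    cnp->cn_nameiop, cnp->cn_flags, &iswht, &vp)) {
         *		if (vp == NULL) {
         *			if (iswht)
         *				cnp->cn_flags |= ISWHITEOUT;
         *			return ENOENT;
         *		}
         *		*vpp = vp;
         *		return 0;
         *	}
         *	error = xfs_scan_dir(dvp, cnp, &vp);
         *	if (error == 0 || error == ENOENT)
         *		cache_enter(dvp, error ? NULL : vp, cnp->cn_nameptr,
         *		    cnp->cn_namelen, cnp->cn_flags);
         *
         * A true return with vp == NULL is a negative entry (case 2), with
         * vp != NULL a positive hit (case 3); a false return is a miss
         * (case 1), after which the real directory scan result is fed back
         * via cache_enter().
         */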
    626 
    627 /*
    628  * Version of the above without the nameiop argument, for NFS.
    629  */
    630 bool
    631 cache_lookup_raw(struct vnode *dvp, const char *name, size_t namelen,
    632 		 uint32_t cnflags,
    633 		 int *iswht_ret, struct vnode **vn_ret)
    634 {
    635 
    636 	return cache_lookup(dvp, name, namelen, LOOKUP, cnflags | MAKEENTRY,
    637 	    iswht_ret, vn_ret);
    638 }
    639 
    640 /*
    641  * Used by namei() to walk down a path, component by component by looking up
    642  * names in the cache.  The node locks are chained along the way: a parent's
    643  * lock is not dropped until the child's is acquired.
    644  */
    645 #ifdef notyet
    646 bool
    647 cache_lookup_linked(struct vnode *dvp, const char *name, size_t namelen,
    648 		    struct vnode **vn_ret, krwlock_t **plock,
    649 		    kauth_cred_t cred)
    650 {
    651 	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
    652 	struct namecache *ncp;
    653 	uint64_t key;
    654 	int error;
    655 
    656 	/* Establish default results. */
    657 	*vn_ret = NULL;
    658 
    659 	/* If disabled, or file system doesn't support this, bail out. */
    660 	if (__predict_false(cache_maxlen == 0 ||
    661 	    (dvp->v_mount->mnt_iflag & IMNT_NCLOOKUP) == 0)) {
    662 		return false;
    663 	}
    664 
    665 	if (__predict_false(namelen > USHRT_MAX)) {
    666 		COUNT(ncs_long);
    667 		return false;
    668 	}
    669 
    670 	/* Compute the key up front - don't need the lock. */
    671 	key = cache_key(name, namelen);
    672 
    673 	/*
    674 	 * Acquire the directory lock.  Once we have that, we can drop the
    675 	 * previous one (if any).
    676 	 *
    677 	 * The two lock holds mean that the directory can't go away while
    678 	 * here: the directory must be purged with cache_purge() before
    679 	 * being freed, and both parent & child's vi_nc_lock must be taken
    680 	 * before that point is passed.
    681 	 *
    682 	 * However if there's no previous lock, like at the root of the
    683 	 * chain, then "dvp" must be referenced to prevent dvp going away
    684 	 * before we get its lock.
    685 	 *
    686 	 * Note that the two locks can be the same if looking up a dot, for
    687 	 * example: /usr/bin/.
    688 	 */
    689 	if (*plock != &dvi->vi_nc_lock) {
    690 		rw_enter(&dvi->vi_nc_lock, RW_READER);
    691 		if (*plock != NULL) {
    692 			rw_exit(*plock);
    693 		}
    694 		*plock = &dvi->vi_nc_lock;
    695 	} else if (*plock == NULL) {
    696 		KASSERT(dvp->v_usecount > 0);
    697 	}
    698 
    699 	/*
    700 	 * First up check if the user is allowed to look up files in this
    701 	 * directory.
    702 	 */
    703 	KASSERT(dvi->vi_nc_mode != VNOVAL && dvi->vi_nc_uid != VNOVAL &&
    704 	    dvi->vi_nc_gid != VNOVAL);
    705 	error = kauth_authorize_vnode(cred, KAUTH_ACCESS_ACTION(VEXEC,
    706 	    dvp->v_type, dvi->vi_nc_mode & ALLPERMS), dvp, NULL,
    707 	    genfs_can_access(dvp->v_type, dvi->vi_nc_mode & ALLPERMS,
    708 	    dvi->vi_nc_uid, dvi->vi_nc_gid, VEXEC, cred));
    709 	if (error != 0) {
    710 		COUNT(ncs_denied);
    711 		return false;
    712 	}
    713 
    714 	/*
    715 	 * Now look for a matching cache entry.
    716 	 */
    717 	ncp = cache_lookup_entry(dvp, name, namelen, key);
    718 	if (__predict_false(ncp == NULL)) {
    719 		COUNT(ncs_miss);
    720 		SDT_PROBE(vfs, namecache, lookup, miss, dvp,
    721 		    name, namelen, 0, 0);
    722 		return false;
    723 	}
    724 	if (ncp->nc_vp == NULL) {
    725 		/* found negative entry; vn is already null from above */
    726 		COUNT(ncs_neghits);
    727 		SDT_PROBE(vfs, namecache, lookup, hit, dvp, name, namelen, 0, 0);
    728 		return true;
    729 	}
    730 
    731 	COUNT(ncs_goodhits); /* XXX can be "badhits" */
    732 	SDT_PROBE(vfs, namecache, lookup, hit, dvp, name, namelen, 0, 0);
    733 
    734 	/*
    735 	 * Return with the directory lock still held.  It will either be
    736 	 * returned to us with another call to cache_lookup_linked() when
    737 	 * looking up the next component, or the caller will release it
    738 	 * manually when finished.
    739 	 */
    740 	*vn_ret = ncp->nc_vp;
    741 	return true;
    742 }
    743 #endif /* notyet */
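
        /*
         * Illustrative sketch of the chained locking contract (the caller,
         * its "comp" component array and "ncomp" are hypothetical; negative
         * hits and fallback to the slow path are omitted): the lock named
         * by *plock is handed from one call to the next and released once
         * at the end.
         *
         *	krwlock_t *plock = NULL;
         *	struct vnode *vp;
         *
         *	for (i = 0; i < ncomp; i++) {
         *		if (!cache_lookup_linked(dvp, comp[i].name,
         *		    comp[i].len, &vp, &plock, cred))
         *			break;
         *		dvp = vp;
         *	}
         *	if (plock != NULL)
         *		rw_exit(plock);
         */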
    744 
    745 /*
    746  * Scan cache looking for name of directory entry pointing at vp.
    747  * Will not search for "." or "..".
    748  *
    749  * If the lookup succeeds the vnode is referenced and stored in dvpp.
    750  *
    751  * If bufp is non-NULL, also place the name in the buffer which starts
    752  * at bufp, immediately before *bpp, and move bpp backwards to point
    753  * at the start of it.  (Yes, this is a little baroque, but it's done
    754  * this way to cater to the whims of getcwd).
    755  *
    756  * Returns 0 on success, -1 on cache miss, positive errno on failure.
    757  */
    758 int
    759 cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp,
    760     bool checkaccess, int perms)
    761 {
    762 	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
    763 	struct namecache *ncp;
    764 	struct vnode *dvp;
    765 	int error, nlen, lrulist;
    766 	char *bp;
    767 
    768 	KASSERT(vp != NULL);
    769 
    770 	if (cache_maxlen == 0)
    771 		goto out;
    772 
    773 	rw_enter(&vi->vi_nc_listlock, RW_READER);
    774 	if (checkaccess) {
    775 		/*
    776 		 * Check if the user is allowed to see.  NOTE: this is
    777 		 * checking for access on the "wrong" directory.  getcwd()
    778 		 * wants to see that there is access on every component
    779 		 * along the way, not that there is access to any individual
    780 		 * component.  Don't use this to check you can look in vp.
    781 		 *
    782 		 * I don't like it, I didn't come up with it, don't blame me!
    783 		 */
    784 		KASSERT(vi->vi_nc_mode != VNOVAL && vi->vi_nc_uid != VNOVAL &&
    785 		    vi->vi_nc_gid != VNOVAL);
    786 		error = kauth_authorize_vnode(curlwp->l_cred,
    787 		    KAUTH_ACCESS_ACTION(VEXEC, vp->v_type, vi->vi_nc_mode &
    788 		    ALLPERMS), vp, NULL, genfs_can_access(vp->v_type,
    789 		    vi->vi_nc_mode & ALLPERMS, vi->vi_nc_uid, vi->vi_nc_gid,
    790 		    perms, curlwp->l_cred));
    791 		if (error != 0) {
    792 			rw_exit(&vi->vi_nc_listlock);
    793 			COUNT(ncs_denied);
    794 			return EACCES;
    795 		}
    796 	}
    797 	TAILQ_FOREACH(ncp, &vi->vi_nc_list, nc_list) {
    798 		KASSERT(ncp->nc_vp == vp);
    799 		KASSERT(ncp->nc_dvp != NULL);
    800 		nlen = ncp->nc_nlen;
    801 
    802 		/*
    803 		 * The queue is partially sorted.  Once we hit dots, nothing
    804 		 * else remains but dots and dotdots, so bail out.
    805 		 */
    806 		if (ncp->nc_name[0] == '.') {
    807 			if (nlen == 1 ||
    808 			    (nlen == 2 && ncp->nc_name[1] == '.')) {
    809 			    	break;
    810 			}
    811 		}
    812 
    813 		/* Record a hit on the entry.  This is an unlocked read. */
    814 		lrulist = atomic_load_relaxed(&ncp->nc_lrulist);
    815 		if (lrulist != LRU_ACTIVE) {
    816 			cache_activate(ncp);
    817 		}
    818 
    819 		if (bufp) {
    820 			bp = *bpp;
    821 			bp -= nlen;
    822 			if (bp <= bufp) {
    823 				*dvpp = NULL;
    824 				rw_exit(&vi->vi_nc_listlock);
    825 				SDT_PROBE(vfs, namecache, revlookup,
    826 				    fail, vp, ERANGE, 0, 0, 0);
    827 				return (ERANGE);
    828 			}
    829 			memcpy(bp, ncp->nc_name, nlen);
    830 			*bpp = bp;
    831 		}
    832 
    833 		dvp = ncp->nc_dvp;
    834 		mutex_enter(dvp->v_interlock);
    835 		rw_exit(&vi->vi_nc_listlock);
    836 		error = vcache_tryvget(dvp);
    837 		if (error) {
    838 			KASSERT(error == EBUSY);
    839 			if (bufp)
    840 				(*bpp) += nlen;
    841 			*dvpp = NULL;
    842 			SDT_PROBE(vfs, namecache, revlookup, fail, vp,
    843 			    error, 0, 0, 0);
    844 			return -1;
    845 		}
    846 		*dvpp = dvp;
    847 		SDT_PROBE(vfs, namecache, revlookup, success, vp, dvp,
    848 		    0, 0, 0);
    849 		COUNT(ncs_revhits);
    850 		return (0);
    851 	}
    852 	rw_exit(&vi->vi_nc_listlock);
    853 	COUNT(ncs_revmiss);
    854  out:
    855 	*dvpp = NULL;
    856 	return (-1);
    857 }
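
        /*
         * Illustrative sketch of the buffer convention (the caller below
         * is hypothetical): the name is copied in immediately before *bpp,
         * which then points at it, so repeated calls can assemble a path
         * from the end of the buffer the way getcwd() does.
         *
         *	char buf[MAXPATHLEN];
         *	char *bp = buf + sizeof(buf);
         *	struct vnode *dvp;
         *	int error;
         *
         *	error = cache_revlookup(vp, &dvp, &bp, buf, false, 0);
         *
         * On success, bp points at the (not NUL terminated) name, whose
         * length is (buf + sizeof(buf)) - bp, and dvp holds a referenced
         * parent vnode.
         */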
    858 
    859 /*
    860  * Add an entry to the cache.
    861  */
    862 void
    863 cache_enter(struct vnode *dvp, struct vnode *vp,
    864 	    const char *name, size_t namelen, uint32_t cnflags)
    865 {
    866 	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
    867 	struct namecache *ncp, *oncp;
    868 	int total;
    869 
    870 	/* First, check whether we can/should add a cache entry. */
    871 	if ((cnflags & MAKEENTRY) == 0 ||
    872 	    __predict_false(namelen > cache_maxlen)) {
    873 		SDT_PROBE(vfs, namecache, enter, toolong, vp, name, namelen,
    874 		    0, 0);
    875 		return;
    876 	}
    877 
    878 	SDT_PROBE(vfs, namecache, enter, done, vp, name, namelen, 0, 0);
    879 
    880 	/*
    881 	 * Reclaim some entries if over budget.  This is an unlocked check,
    882 	 * but that is fine: we just need to catch up with things eventually,
    883 	 * and it does no harm to go over budget temporarily.
    884 	 */
    885 	total = atomic_load_relaxed(&cache_lru.count[LRU_ACTIVE]);
    886 	total += atomic_load_relaxed(&cache_lru.count[LRU_INACTIVE]);
    887 	if (__predict_false(total > desiredvnodes)) {
    888 		cache_reclaim();
    889 	}
    890 
    891 	/* Now allocate a fresh entry. */
    892 	if (__predict_true(namelen <= NCHNAMLEN)) {
    893 		ncp = pool_cache_get(cache_pool, PR_WAITOK);
    894 	} else {
    895 		size_t sz = offsetof(struct namecache, nc_name[namelen]);
    896 		ncp = kmem_alloc(sz, KM_SLEEP);
    897 	}
    898 
    899 	/* Fill in cache info. */
    900 	ncp->nc_dvp = dvp;
    901 	ncp->nc_key = cache_key(name, namelen);
    902 	ncp->nc_nlen = namelen;
    903 	memcpy(ncp->nc_name, name, namelen);
    904 
    905 	/*
    906 	 * Insert to the directory.  Concurrent lookups in the same
    907 	 * directory may race for a cache entry.  There can also be hash
    908 	 * value collisions.  If there's an entry there already, purge it.
    909 	 */
    910 	rw_enter(&dvi->vi_nc_lock, RW_WRITER);
    911 	oncp = rb_tree_insert_node(&dvi->vi_nc_tree, ncp);
    912 	if (oncp != ncp) {
    913 		KASSERT(oncp->nc_key == ncp->nc_key);
    914 		KASSERT(oncp->nc_nlen == ncp->nc_nlen);
    915 		if (cache_namecmp(oncp, name, namelen)) {
    916 			COUNT(ncs_collisions);
    917 		}
    918 		cache_remove(oncp, true);
    919 		oncp = rb_tree_insert_node(&dvi->vi_nc_tree, ncp);
    920 		KASSERT(oncp == ncp);
    921 	}
    922 
    923 	/* Then insert to the vnode. */
    924 	if (vp == NULL) {
    925 		/*
    926 		 * For negative hits, save the ISWHITEOUT flag so we can
    927 		 * restore it later when the cache entry is used again.
    928 		 */
    929 		ncp->nc_vp = NULL;
    930 		ncp->nc_whiteout = ((cnflags & ISWHITEOUT) != 0);
    931 	} else {
    932 		/* Partially sort the per-vnode list: dots go to back. */
    933 		vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
    934 		rw_enter(&vi->vi_nc_listlock, RW_WRITER);
    935 		if ((namelen == 1 && name[0] == '.') ||
    936 		    (namelen == 2 && name[0] == '.' && name[1] == '.')) {
    937 			TAILQ_INSERT_TAIL(&vi->vi_nc_list, ncp, nc_list);
    938 		} else {
    939 			TAILQ_INSERT_HEAD(&vi->vi_nc_list, ncp, nc_list);
    940 		}
    941 		rw_exit(&vi->vi_nc_listlock);
    942 		ncp->nc_vp = vp;
    943 		ncp->nc_whiteout = false;
    944 	}
    945 
    946 	/*
    947 	 * Finally, insert to the tail of the ACTIVE LRU list (new) and
    948 	 * with the LRU lock held take the opportunity to incrementally
    949 	 * balance the lists.
    950 	 */
    951 	mutex_enter(&cache_lru_lock);
    952 	ncp->nc_lrulist = LRU_ACTIVE;
    953 	cache_lru.count[LRU_ACTIVE]++;
    954 	TAILQ_INSERT_TAIL(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
    955 	cache_deactivate();
    956 	mutex_exit(&cache_lru_lock);
    957 	rw_exit(&dvi->vi_nc_lock);
    958 }
    959 
    960 /*
    961  * Set identity info in cache for a vnode.  We only care about directories
    962  * so ignore other updates.
    963  */
    964 void
    965 cache_enter_id(struct vnode *vp, mode_t mode, uid_t uid, gid_t gid)
    966 {
    967 	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
    968 
    969 	if (vp->v_type == VDIR) {
    970 		/* Grab both locks, for forward & reverse lookup. */
    971 		rw_enter(&vi->vi_nc_lock, RW_WRITER);
    972 		rw_enter(&vi->vi_nc_listlock, RW_WRITER);
    973 		vi->vi_nc_mode = mode;
    974 		vi->vi_nc_uid = uid;
    975 		vi->vi_nc_gid = gid;
    976 		rw_exit(&vi->vi_nc_listlock);
    977 		rw_exit(&vi->vi_nc_lock);
    978 	}
    979 }
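
        /*
         * Intended to be called by a file system when it loads or updates
         * a directory's attributes; a minimal hypothetical example, where
         * "ip" is the file system's own inode structure:
         *
         *	cache_enter_id(vp, ip->i_mode, ip->i_uid, ip->i_gid);
         */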
    980 
    981 /*
    982  * Return true if we have identity for the given vnode, and use this as an
    983  * opportunity to confirm that everything squares up.
    984  *
    985  * Because of shared code, some file systems could provide partial
    986  * information, missing some updates, so always check the mount flag
    987  * instead of looking for !VNOVAL.
    988  */
    989 #ifdef notyet
    990 bool
    991 cache_have_id(struct vnode *vp)
    992 {
    993 
    994 	if (vp->v_type == VDIR &&
    995 	    (vp->v_mount->mnt_iflag & IMNT_NCLOOKUP) != 0) {
    996 		KASSERT(VNODE_TO_VIMPL(vp)->vi_nc_mode != VNOVAL);
    997 		KASSERT(VNODE_TO_VIMPL(vp)->vi_nc_uid != VNOVAL);
    998 		KASSERT(VNODE_TO_VIMPL(vp)->vi_nc_gid != VNOVAL);
    999 		return true;
   1000 	} else {
   1001 		return false;
   1002 	}
   1003 }
   1004 #endif /* notyet */
   1005 
   1006 /*
   1007  * Name cache initialization, from vfs_init() when the system is booting.
   1008  */
   1009 void
   1010 nchinit(void)
   1011 {
   1012 
   1013 	cache_pool = pool_cache_init(sizeof(struct namecache),
   1014 	    coherency_unit, 0, 0, "nchentry", NULL, IPL_NONE, NULL,
   1015 	    NULL, NULL);
   1016 	KASSERT(cache_pool != NULL);
   1017 
   1018 	mutex_init(&cache_lru_lock, MUTEX_DEFAULT, IPL_NONE);
   1019 	TAILQ_INIT(&cache_lru.list[LRU_ACTIVE]);
   1020 	TAILQ_INIT(&cache_lru.list[LRU_INACTIVE]);
   1021 
   1022 	mutex_init(&cache_stat_lock, MUTEX_DEFAULT, IPL_NONE);
   1023 	callout_init(&cache_stat_callout, CALLOUT_MPSAFE);
   1024 	callout_setfunc(&cache_stat_callout, cache_update_stats, NULL);
   1025 	callout_schedule(&cache_stat_callout, cache_stat_interval * hz);
   1026 
   1027 	KASSERT(cache_sysctllog == NULL);
   1028 	sysctl_createv(&cache_sysctllog, 0, NULL, NULL,
   1029 		       CTLFLAG_PERMANENT,
   1030 		       CTLTYPE_STRUCT, "namecache_stats",
   1031 		       SYSCTL_DESCR("namecache statistics"),
   1032 		       cache_stat_sysctl, 0, NULL, 0,
   1033 		       CTL_VFS, CTL_CREATE, CTL_EOL);
   1034 }
   1035 
   1036 /*
   1037  * Called once for each CPU in the system as attached.
   1038  */
   1039 void
   1040 cache_cpu_init(struct cpu_info *ci)
   1041 {
   1042 	void *p;
   1043 	size_t sz;
   1044 
   1045 	sz = roundup2(sizeof(struct nchstats_percpu), coherency_unit) +
   1046 	    coherency_unit;
   1047 	p = kmem_zalloc(sz, KM_SLEEP);
   1048 	ci->ci_data.cpu_nch = (void *)roundup2((uintptr_t)p, coherency_unit);
   1049 }
   1050 
   1051 /*
   1052  * A vnode is being allocated: set up cache structures.
   1053  */
   1054 void
   1055 cache_vnode_init(struct vnode *vp)
   1056 {
   1057 	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
   1058 
   1059 	rw_init(&vi->vi_nc_lock);
   1060 	rw_init(&vi->vi_nc_listlock);
   1061 	rb_tree_init(&vi->vi_nc_tree, &cache_rbtree_ops);
   1062 	TAILQ_INIT(&vi->vi_nc_list);
   1063 	vi->vi_nc_mode = VNOVAL;
   1064 	vi->vi_nc_uid = VNOVAL;
   1065 	vi->vi_nc_gid = VNOVAL;
   1066 }
   1067 
   1068 /*
   1069  * A vnode is being freed: finish cache structures.
   1070  */
   1071 void
   1072 cache_vnode_fini(struct vnode *vp)
   1073 {
   1074 	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
   1075 
   1076 	KASSERT(RB_TREE_MIN(&vi->vi_nc_tree) == NULL);
   1077 	KASSERT(TAILQ_EMPTY(&vi->vi_nc_list));
   1078 	rw_destroy(&vi->vi_nc_lock);
   1079 	rw_destroy(&vi->vi_nc_listlock);
   1080 }
   1081 
   1082 /*
   1083  * Helper for cache_purge1(): purge cache entries for the given vnode from
   1084  * all directories that the vnode is cached in.
   1085  */
   1086 static void
   1087 cache_purge_parents(struct vnode *vp)
   1088 {
   1089 	vnode_impl_t *dvi, *vi = VNODE_TO_VIMPL(vp);
   1090 	struct vnode *dvp, *blocked;
   1091 	struct namecache *ncp;
   1092 
   1093 	SDT_PROBE(vfs, namecache, purge, parents, vp, 0, 0, 0, 0);
   1094 
   1095 	blocked = NULL;
   1096 
   1097 	rw_enter(&vi->vi_nc_listlock, RW_WRITER);
   1098 	while ((ncp = TAILQ_FIRST(&vi->vi_nc_list)) != NULL) {
   1099 		/*
   1100 		 * Locking in the wrong direction.  Try for a hold on the
   1101 		 * directory node's lock, and if we get it then all good,
   1102 		 * nuke the entry and move on to the next.
   1103 		 */
   1104 		dvp = ncp->nc_dvp;
   1105 		dvi = VNODE_TO_VIMPL(dvp);
   1106 		if (rw_tryenter(&dvi->vi_nc_lock, RW_WRITER)) {
   1107 			cache_remove(ncp, false);
   1108 			rw_exit(&dvi->vi_nc_lock);
   1109 			blocked = NULL;
   1110 			continue;
   1111 		}
   1112 
   1113 		/*
   1114 		 * We can't wait on the directory node's lock with our list
   1115 		 * lock held or the system could deadlock.
   1116 		 *
   1117 		 * Take a hold on the directory vnode to prevent it from
   1118 		 * being freed (taking the vnode & lock with it).  Then
   1119 		 * wait for the lock to become available with no other locks
   1120 		 * held, and retry.
   1121 		 *
   1122 		 * If this happens twice in a row, give the other side a
   1123 		 * breather; we can do nothing until it lets go.
   1124 		 */
   1125 		vhold(dvp);
   1126 		rw_exit(&vi->vi_nc_listlock);
   1127 		rw_enter(&dvi->vi_nc_lock, RW_WRITER);
   1128 		/* Do nothing. */
   1129 		rw_exit(&dvi->vi_nc_lock);
   1130 		holdrele(dvp);
   1131 		if (blocked == dvp) {
   1132 			kpause("ncpurge", false, 1, NULL);
   1133 		}
   1134 		rw_enter(&vi->vi_nc_listlock, RW_WRITER);
   1135 		blocked = dvp;
   1136 	}
   1137 	rw_exit(&vi->vi_nc_listlock);
   1138 }
   1139 
   1140 /*
   1141  * Helper for cache_purge1(): purge all cache entries hanging off the given
   1142  * directory vnode.
   1143  */
   1144 static void
   1145 cache_purge_children(struct vnode *dvp)
   1146 {
   1147 	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
   1148 	struct namecache *ncp;
   1149 
   1150 	SDT_PROBE(vfs, namecache, purge, children, dvp, 0, 0, 0, 0);
   1151 
   1152 	rw_enter(&dvi->vi_nc_lock, RW_WRITER);
   1153 	for (;;) {
   1154 		ncp = rb_tree_iterate(&dvi->vi_nc_tree, NULL, RB_DIR_RIGHT);
   1155 		if (ncp == NULL) {
   1156 			break;
   1157 		}
   1158 		cache_remove(ncp, true);
   1159 	}
   1160 	rw_exit(&dvi->vi_nc_lock);
   1161 }
   1162 
   1163 /*
   1164  * Helper for cache_purge1(): purge cache entry from the given vnode,
   1165  * finding it by name.
   1166  */
   1167 static void
   1168 cache_purge_name(struct vnode *dvp, const char *name, size_t namelen)
   1169 {
   1170 	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
   1171 	struct namecache *ncp;
   1172 	uint64_t key;
   1173 
   1174 	SDT_PROBE(vfs, namecache, purge, name, name, namelen, 0, 0, 0);
   1175 
   1176 	key = cache_key(name, namelen);
   1177 	rw_enter(&dvi->vi_nc_lock, RW_WRITER);
   1178 	ncp = cache_lookup_entry(dvp, name, namelen, key);
   1179 	if (ncp) {
   1180 		cache_remove(ncp, true);
   1181 	}
   1182 	rw_exit(&dvi->vi_nc_lock);
   1183 }
   1184 
   1185 /*
   1186  * Cache flush, a particular vnode; called when a vnode is renamed to
   1187  * hide entries that would now be invalid.
   1188  */
   1189 void
   1190 cache_purge1(struct vnode *vp, const char *name, size_t namelen, int flags)
   1191 {
   1192 
   1193 	if (flags & PURGE_PARENTS) {
   1194 		cache_purge_parents(vp);
   1195 	}
   1196 	if (flags & PURGE_CHILDREN) {
   1197 		cache_purge_children(vp);
   1198 	}
   1199 	if (name != NULL) {
   1200 		cache_purge_name(vp, name, namelen);
   1201 	}
   1202 }
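
        /*
         * Hypothetical example calls, matching the flag handling above:
         *
         *	cache_purge1(dvp, "old", 3, 0);
         *		hide only the (renamed or removed) name "old" in dvp
         *
         *	cache_purge1(vp, NULL, 0, PURGE_PARENTS | PURGE_CHILDREN);
         *		drop every entry naming vp and, if vp is a directory,
         *		every entry cached beneath it
         */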
   1203 
   1204 /*
   1205  * vnode filter for cache_purgevfs().
   1206  */
   1207 static bool
   1208 cache_vdir_filter(void *cookie, vnode_t *vp)
   1209 {
   1210 
   1211 	return vp->v_type == VDIR;
   1212 }
   1213 
   1214 /*
   1215  * Cache flush, a whole filesystem; called when filesys is umounted to
   1216  * remove entries that would now be invalid.
   1217  */
   1218 void
   1219 cache_purgevfs(struct mount *mp)
   1220 {
   1221 	struct vnode_iterator *iter;
   1222 	vnode_t *dvp;
   1223 
   1224 	vfs_vnode_iterator_init(mp, &iter);
   1225 	for (;;) {
   1226 		dvp = vfs_vnode_iterator_next(iter, cache_vdir_filter, NULL);
   1227 		if (dvp == NULL) {
   1228 			break;
   1229 		}
   1230 		cache_purge_children(dvp);
   1231 		vrele(dvp);
   1232 	}
   1233 	vfs_vnode_iterator_destroy(iter);
   1234 }
   1235 
   1236 /*
   1237  * Re-queue an entry onto the correct LRU list, after it has scored a hit.
   1238  */
   1239 static void
   1240 cache_activate(struct namecache *ncp)
   1241 {
   1242 
   1243 	mutex_enter(&cache_lru_lock);
   1244 	/* Put on tail of ACTIVE list, since it just scored a hit. */
   1245 	TAILQ_REMOVE(&cache_lru.list[ncp->nc_lrulist], ncp, nc_lru);
   1246 	TAILQ_INSERT_TAIL(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
   1247 	cache_lru.count[ncp->nc_lrulist]--;
   1248 	cache_lru.count[LRU_ACTIVE]++;
   1249 	ncp->nc_lrulist = LRU_ACTIVE;
   1250 	mutex_exit(&cache_lru_lock);
   1251 }
   1252 
   1253 /*
   1254  * Try to balance the LRU lists.  Pick some victim entries, and re-queue
   1255  * them from the head of the active list to the tail of the inactive list.
   1256  */
   1257 static void
   1258 cache_deactivate(void)
   1259 {
   1260 	struct namecache *ncp;
   1261 	int total, i;
   1262 
   1263 	KASSERT(mutex_owned(&cache_lru_lock));
   1264 
   1265 	/* If we're nowhere near budget yet, don't bother. */
   1266 	total = cache_lru.count[LRU_ACTIVE] + cache_lru.count[LRU_INACTIVE];
   1267 	if (total < (desiredvnodes >> 1)) {
   1268 	    	return;
   1269 	}
   1270 
   1271 	/*
   1272 	 * Aim for a 1:1 ratio of active to inactive.  This is to allow each
   1273 	 * potential victim a reasonable amount of time to cycle through the
   1274 	 * inactive list in order to score a hit and be reactivated, while
   1275 	 * trying not to cause reactivations too frequently.
   1276 	 */
   1277 	if (cache_lru.count[LRU_ACTIVE] < cache_lru.count[LRU_INACTIVE]) {
   1278 		return;
   1279 	}
   1280 
   1281 	/* Move only a few at a time; will catch up eventually. */
   1282 	for (i = 0; i < cache_lru_maxdeact; i++) {
   1283 		ncp = TAILQ_FIRST(&cache_lru.list[LRU_ACTIVE]);
   1284 		if (ncp == NULL) {
   1285 			break;
   1286 		}
   1287 		KASSERT(ncp->nc_lrulist == LRU_ACTIVE);
   1288 		ncp->nc_lrulist = LRU_INACTIVE;
   1289 		TAILQ_REMOVE(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
   1290 		TAILQ_INSERT_TAIL(&cache_lru.list[LRU_INACTIVE], ncp, nc_lru);
   1291 		cache_lru.count[LRU_ACTIVE]--;
   1292 		cache_lru.count[LRU_INACTIVE]++;
   1293 	}
   1294 }
   1295 
   1296 /*
   1297  * Free some entries from the cache, when we have gone over budget.
   1298  *
   1299  * We don't want to cause too much work for any individual caller, and it
   1300  * doesn't matter if we temporarily go over budget.  This is also "just a
   1301  * cache" so it's not a big deal if we screw up and throw out something we
   1302  * shouldn't.  So we take a relaxed attitude to this process to reduce its
   1303  * impact.
   1304  */
   1305 static void
   1306 cache_reclaim(void)
   1307 {
   1308 	struct namecache *ncp;
   1309 	vnode_impl_t *dvi;
   1310 	int toscan;
   1311 
   1312 	/*
   1313 	 * Scan up to a preset maximum number of entries, but no more than
   1314 	 * 0.8% of the total at once (to allow for very small systems).
   1315 	 *
   1316 	 * On bigger systems, do a larger chunk of work to reduce the number
   1317 	 * of times that cache_lru_lock is held for any length of time.
   1318 	 */
   1319 	mutex_enter(&cache_lru_lock);
   1320 	toscan = MIN(cache_lru_maxscan, desiredvnodes >> 7);
   1321 	toscan = MAX(toscan, 1);
   1322 	SDT_PROBE(vfs, namecache, prune, done, cache_lru.count[LRU_ACTIVE] +
   1323 	    cache_lru.count[LRU_INACTIVE], toscan, 0, 0, 0);
   1324 	while (toscan-- != 0) {
   1325 		/* First try to balance the lists. */
   1326 		cache_deactivate();
   1327 
   1328 		/* Now look for a victim on head of inactive list (old). */
   1329 		ncp = TAILQ_FIRST(&cache_lru.list[LRU_INACTIVE]);
   1330 		if (ncp == NULL) {
   1331 			break;
   1332 		}
   1333 		dvi = VNODE_TO_VIMPL(ncp->nc_dvp);
   1334 		KASSERT(ncp->nc_lrulist == LRU_INACTIVE);
   1335 		KASSERT(dvi != NULL);
   1336 
   1337 		/*
   1338 		 * Locking in the wrong direction.  If we can't get the
   1339 		 * lock, the directory is actively busy, and it could also
   1340 		 * cause problems for the next guy in here, so send the
   1341 		 * entry to the back of the list.
   1342 		 */
   1343 		if (!rw_tryenter(&dvi->vi_nc_lock, RW_WRITER)) {
   1344 			TAILQ_REMOVE(&cache_lru.list[LRU_INACTIVE],
   1345 			    ncp, nc_lru);
   1346 			TAILQ_INSERT_TAIL(&cache_lru.list[LRU_INACTIVE],
   1347 			    ncp, nc_lru);
   1348 			continue;
   1349 		}
   1350 
   1351 		/*
   1352 		 * Now have the victim entry locked.  Drop the LRU list
   1353 		 * lock, purge the entry, and start over.  The hold on
   1354 		 * vi_nc_lock will prevent the vnode from vanishing until
   1355 		 * finished (cache_purge() will be called on dvp before it
   1356 		 * disappears, and that will wait on vi_nc_lock).
   1357 		 */
   1358 		mutex_exit(&cache_lru_lock);
   1359 		cache_remove(ncp, true);
   1360 		rw_exit(&dvi->vi_nc_lock);
   1361 		mutex_enter(&cache_lru_lock);
   1362 	}
   1363 	mutex_exit(&cache_lru_lock);
   1364 }
   1365 
   1366 /*
   1367  * For file system code: count a lookup that required a full re-scan of
   1368  * directory metadata.
   1369  */
   1370 void
   1371 namecache_count_pass2(void)
   1372 {
   1373 
   1374 	COUNT(ncs_pass2);
   1375 }
   1376 
   1377 /*
   1378  * For file system code: count a lookup that scored a hit in the directory
   1379  * metadata near the location of the last lookup.
   1380  */
   1381 void
   1382 namecache_count_2passes(void)
   1383 {
   1384 
   1385 	COUNT(ncs_2passes);
   1386 }
   1387 
   1388 /*
   1389  * Sum the stats from all CPUs into nchstats.  This needs to run at least
   1390  * once within every window where a 32-bit counter could roll over.  It's
   1391  * called regularly by timer to ensure this.
   1392  */
   1393 static void
   1394 cache_update_stats(void *cookie)
   1395 {
   1396 	CPU_INFO_ITERATOR cii;
   1397 	struct cpu_info *ci;
   1398 
   1399 	mutex_enter(&cache_stat_lock);
   1400 	for (CPU_INFO_FOREACH(cii, ci)) {
   1401 		struct nchcpu *nchcpu = ci->ci_data.cpu_nch;
   1402 		UPDATE(nchcpu, ncs_goodhits);
   1403 		UPDATE(nchcpu, ncs_neghits);
   1404 		UPDATE(nchcpu, ncs_badhits);
   1405 		UPDATE(nchcpu, ncs_falsehits);
   1406 		UPDATE(nchcpu, ncs_miss);
   1407 		UPDATE(nchcpu, ncs_long);
   1408 		UPDATE(nchcpu, ncs_pass2);
   1409 		UPDATE(nchcpu, ncs_2passes);
   1410 		UPDATE(nchcpu, ncs_revhits);
   1411 		UPDATE(nchcpu, ncs_revmiss);
   1412 		UPDATE(nchcpu, ncs_collisions);
   1413 		UPDATE(nchcpu, ncs_denied);
   1414 	}
   1415 	if (cookie != NULL) {
   1416 		memcpy(cookie, &nchstats, sizeof(nchstats));
   1417 	}
   1418 	/* Reset the timer; arrive back here in N minutes at latest. */
   1419 	callout_schedule(&cache_stat_callout, cache_stat_interval * hz);
   1420 	mutex_exit(&cache_stat_lock);
   1421 }
   1422 
   1423 /*
   1424  * Fetch the current values of the stats for sysctl.  The global totals
   1425  * in nchstats are refreshed on demand by calling cache_update_stats()
   1426  * before they are copied out.
   1427  */
   1428 static int
   1429 cache_stat_sysctl(SYSCTLFN_ARGS)
   1430 {
   1431 	struct nchstats stats;
   1432 
   1433 	if (oldp == NULL) {
   1434 		*oldlenp = sizeof(nchstats);
   1435 		return 0;
   1436 	}
   1437 
   1438 	if (*oldlenp <= 0) {
   1439 		*oldlenp = 0;
   1440 		return 0;
   1441 	}
   1442 
   1443 	/* Refresh the global stats. */
   1444 	sysctl_unlock();
   1445 	cache_update_stats(&stats);
   1446 	sysctl_relock();
   1447 
   1448 	*oldlenp = MIN(sizeof(stats), *oldlenp);
   1449 	return sysctl_copyout(l, &stats, oldp, *oldlenp);
   1450 }
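
        /*
         * The statistics can be read from userland through the
         * "namecache_stats" node created under CTL_VFS in nchinit() above.
         * A minimal sketch, assuming the node resolves by the name
         * "vfs.namecache_stats":
         *
         *	struct nchstats ns;
         *	size_t len = sizeof(ns);
         *
         *	if (sysctlbyname("vfs.namecache_stats", &ns, &len,
         *	    NULL, 0) == 0)
         *		printf("goodhits %" PRIu64 "\n", ns.ncs_goodhits);
         */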
   1451 
   1452 /*
   1453  * For the debugger, given the address of a vnode, print all associated
   1454  * names in the cache.
   1455  */
   1456 #ifdef DDB
   1457 void
   1458 namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
   1459 {
   1460 	struct vnode *dvp = NULL;
   1461 	struct namecache *ncp;
   1462 	enum cache_lru_id id;
   1463 
   1464 	for (id = 0; id < LRU_COUNT; id++) {
   1465 		TAILQ_FOREACH(ncp, &cache_lru.list[id], nc_lru) {
   1466 			if (ncp->nc_vp == vp) {
   1467 				(*pr)("name %.*s\n", ncp->nc_nlen,
   1468 				    ncp->nc_name);
   1469 				dvp = ncp->nc_dvp;
   1470 			}
   1471 		}
   1472 	}
   1473 	if (dvp == NULL) {
   1474 		(*pr)("name not found\n");
   1475 		return;
   1476 	}
   1477 	for (id = 0; id < LRU_COUNT; id++) {
   1478 		TAILQ_FOREACH(ncp, &cache_lru.list[id], nc_lru) {
   1479 			if (ncp->nc_vp == dvp) {
   1480 				(*pr)("parent %.*s\n", ncp->nc_nlen,
   1481 				    ncp->nc_name);
   1482 			}
   1483 		}
   1484 	}
   1485 }
   1486 #endif
   1487