/*	$NetBSD: vfs_cache.c,v 1.159 2024/12/07 02:27:38 riastradh Exp $	*/

/*-
 * Copyright (c) 2008, 2019, 2020, 2023 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */

/*
 * Name caching:
 *
 *	Names found by directory scans are retained in a cache for future
 *	reference.  It is managed LRU, so frequently used names will hang
 *	around.  The cache is indexed by hash value obtained from the name.
 *
 *	The name cache is the brainchild of Robert Elz and was introduced in
 *	4.3BSD.  See "Using gprof to Tune the 4.2BSD Kernel", Marshall Kirk
 *	McKusick, May 21 1984.
 *
 * Data structures:
 *
 *	Most Unix namecaches very sensibly use a global hash table to index
 *	names.  The global hash table works well, but can cause concurrency
 *	headaches for the kernel hacker.  In the NetBSD 10.0 implementation
 *	we are not sensible, and use a per-directory data structure to index
 *	names, but the cache otherwise functions the same.
 *	The index is a red-black tree.  It should not be difficult to
 *	experiment with other types of index; note, however, that a tree
 *	can trivially be made to support lockless lookup.
 *
 *	Each cached name is stored in a struct namecache, along with a
 *	pointer to the associated vnode (nc_vp).  Names longer than
 *	NCHNAMLEN are allocated with kmem_alloc(); they occur infrequently,
 *	and names of NCHNAMLEN or fewer characters are stored directly in
 *	struct namecache.  If it is a "negative" entry (i.e. for a name
 *	that is known NOT to exist), the vnode pointer will be NULL.
 *
 *	In practice this implementation is not any slower than the hash
 *	table that preceded it and in some cases it significantly
 *	outperforms the hash table.  Some reasons why this might be:
 *
 *	- natural partitioning provided by the file system structure, which
 *	  the prior implementation discarded (global hash table).
 *	- worst case tree traversal of O(log n); the hash table could have
 *	  many collisions.
 *	- minimized cache misses & total L2/L3 CPU cache footprint; struct
 *	  namecache and vnode_impl_t are laid out to keep cache footprint
 *	  minimal in the lookup path; no hash table buckets to cache.
 *	- minimized number of conditionals & string comparisons.
 *
 *	For a directory with 3 cached names for 3 distinct vnodes, the
 *	various vnodes and namecache structs would be connected like this
 *	(the root is at the bottom of the diagram):
 *
 *          ...
 *           ^
 *           |- vi_nc_tree
 *           |
 *      +----o----+               +---------+               +---------+
 *      |  VDIR   |               |  VCHR   |               |  VREG   |
 *      |  vnode  o-----+         |  vnode  o-----+         |  vnode  o------+
 *      +---------+     |         +---------+     |         +---------+      |
 *           ^          |              ^          |              ^           |
 *           |- nc_vp   |- vi_nc_list  |- nc_vp   |- vi_nc_list  |- nc_vp    |
 *           |          |              |          |              |           |
 *      +----o----+     |         +----o----+     |         +----o----+      |
 *  +---onamecache|<----+     +---onamecache|<----+     +---onamecache|<-----+
 *  |   +---------+           |   +---------+           |   +---------+
 *  |        ^                |        ^                |        ^
 *  |        |                |        |                |        |
 *  |        |  +----------------------+                |        |
 *  |-nc_dvp | +-------------------------------------------------+
 *  |        |/- vi_nc_tree   |                         |
 *  |        |                |- nc_dvp                 |- nc_dvp
 *  |   +----o----+           |                         |
 *  +-->|  VDIR   |<----------+                         |
 *      |  vnode  |<------------------------------------+
 *      +---------+
 *
 *      START HERE
 *
 * Replacement:
 *
 *	As the cache becomes full, old and unused entries are purged as new
 *	entries are added.  The synchronization overhead in maintaining a
 *	strict ordering would be prohibitive, so the VM system's "clock" or
 *	"second chance" page replacement algorithm is aped here.  New
 *	entries go to the tail of the active list.  After they age out and
 *	reach the head of the list, they are moved to the tail of the
 *	inactive list.  Any use of the deactivated cache entry reactivates
 *	it, saving it from impending doom; if not reactivated, the entry
 *	eventually reaches the head of the inactive list and is purged.
 *
 * Concurrency:
 *
 *	From a performance perspective, cache_lookup(nameiop == LOOKUP) is
 *	what really matters; insertion of new entries with cache_enter() is
 *	comparatively infrequent, and overshadowed by the cost of expensive
 *	file system metadata operations (which may involve disk I/O).  We
 *	therefore want the lookup path to be as simple as possible.
 *
 *	struct namecache is mostly stable except for list and tree related
 *	entries, changes to which don't affect the cached name or vnode.
 *	For changes to name+vnode, entries are purged in preference to
 *	modifying them.
 *
 *	Read access to namecache entries is made via tree, list, or LRU
 *	list.  A lock corresponding to the direction of access should be
 *	held.  See definition of "struct namecache" in src/sys/namei.src,
 *	and the definition of "struct vnode" for the particulars.
 *
 *	Per-CPU statistics and LRU list totals are read unlocked, since an
 *	approximate value is OK.  We maintain 32-bit per-CPU counters and
 *	64-bit global counters: the 32-bit counters can be observed
 *	locklessly, while the global counters are protected by a mutex.
 *
 *	The lock order is:
 *
 *	1) vi->vi_nc_lock	(tree or parent -> child direction,
 *				 used during forward lookup)
 *
 *	2) vi->vi_nc_listlock	(list or child -> parent direction,
 *				 used during reverse lookup)
 *
 *	3) cache_lru_lock	(LRU list direction, used during reclaim)
 */
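
/*
 * Illustrative only: a minimal sketch of the lock order above, in the
 * nesting used by cache_remove() below.  The example_lock_order() helper
 * is hypothetical and not part of the cache.
 */
#if 0
static void
example_lock_order(vnode_impl_t *dvi, vnode_impl_t *vi)
{

	rw_enter(&dvi->vi_nc_lock, RW_WRITER);		/* 1: tree */
	rw_enter(&vi->vi_nc_listlock, RW_WRITER);	/* 2: list */
	rw_exit(&vi->vi_nc_listlock);
	mutex_enter(&cache_lru_lock);			/* 3: LRU */
	mutex_exit(&cache_lru_lock);
	rw_exit(&dvi->vi_nc_lock);
}
#endif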

#define __NAMECACHE_PRIVATE

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.159 2024/12/07 02:27:38 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_dtrace.h"
#endif

#include <sys/param.h>
#include <sys/types.h>

#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/pool.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vnode_impl.h>

#include <miscfs/genfs/genfs.h>

/*
 * Assert that data structure layout hasn't changed unintentionally.
 */
#ifdef _LP64
CTASSERT(sizeof(struct namecache) == 128);
#else
CTASSERT(sizeof(struct namecache) == 64);
#endif
CTASSERT(NC_NLEN_MASK >= MAXPATHLEN);

static void	cache_activate(struct namecache *);
static void	cache_update_stats(void *);
static int	cache_compare_nodes(void *, const void *, const void *);
static void	cache_deactivate(void);
static void	cache_reclaim(void);
static int	cache_stat_sysctl(SYSCTLFN_ARGS);

/*
 * Global pool cache.
 */
static pool_cache_t cache_pool __read_mostly;

/*
 * LRU replacement.
 */
enum cache_lru_id {
	LRU_ACTIVE,
	LRU_INACTIVE,
	LRU_COUNT
};

static struct {
	TAILQ_HEAD(, namecache)	list[LRU_COUNT];
	u_int			count[LRU_COUNT];
} cache_lru __cacheline_aligned;

static kmutex_t cache_lru_lock __cacheline_aligned;

/*
 * Cache effectiveness statistics.  nchstats holds system-wide total.
 */
struct nchstats	nchstats;
struct nchstats_percpu _NAMEI_CACHE_STATS(uint32_t);
struct nchcpu {
	struct nchstats_percpu cur;
	struct nchstats_percpu last;
};
static callout_t cache_stat_callout;
static kmutex_t cache_stat_lock __cacheline_aligned;

#define	COUNT(f) do { \
	lwp_t *l = curlwp; \
	KPREEMPT_DISABLE(l); \
	struct nchcpu *nchcpu = curcpu()->ci_data.cpu_nch; \
	nchcpu->cur.f++; \
	KPREEMPT_ENABLE(l); \
} while (/* CONSTCOND */ 0)

#define	UPDATE(nchcpu, f) do { \
	uint32_t cur = atomic_load_relaxed(&nchcpu->cur.f); \
	nchstats.f += (uint32_t)(cur - nchcpu->last.f); \
	nchcpu->last.f = cur; \
} while (/* CONSTCOND */ 0)
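
/*
 * Worked example of the rollover behaviour: if nchcpu->last.f was
 * 0xfffffff0 at the previous update and the 32-bit per-CPU counter has
 * since wrapped to 0x00000010, then (uint32_t)(cur - last) == 0x20 and
 * the 64-bit global counter still advances by the correct amount.  This
 * holds as long as UPDATE() runs at least once per 2^32 increments,
 * which cache_update_stats() ensures by running on a timer.
 */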

/*
 * Tunables.  cache_maxlen replaces the historical doingcache:
 * set it to zero to disable caching for debugging purposes.
 */
int cache_lru_maxdeact __read_mostly = 2;	/* max # to deactivate */
int cache_lru_maxscan __read_mostly = 64;	/* max # to scan/reclaim */
int cache_maxlen __read_mostly = NC_NLEN_MASK;	/* max name length to cache */
int cache_stat_interval __read_mostly = 300;	/* in seconds */

/*
 * sysctl stuff.
 */
static struct	sysctllog *cache_sysctllog;

/*
 * This is a dummy name that cannot usually occur anywhere in the cache
 * or the file system.  It's used when caching the root vnode of mounted
 * file systems.  The name is attached to the directory that the file
 * system is mounted on.
 */
static const char cache_mp_name[] = "";
static const int cache_mp_nlen = sizeof(cache_mp_name) - 1;

/*
 * Red-black tree stuff.
 */
static const rb_tree_ops_t cache_rbtree_ops = {
	.rbto_compare_nodes = cache_compare_nodes,
	.rbto_compare_key = cache_compare_nodes,
	.rbto_node_offset = offsetof(struct namecache, nc_tree),
	.rbto_context = NULL
};

/*
 * dtrace probes.
 */
SDT_PROBE_DEFINE1(vfs, namecache, invalidate, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, parents, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, children, "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, purge, name, "char *", "size_t");
SDT_PROBE_DEFINE1(vfs, namecache, purge, vfs, "struct mount *");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *",
    "char *", "size_t");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, miss, "struct vnode *",
    "char *", "size_t");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, toolong, "struct vnode *",
    "char *", "size_t");
SDT_PROBE_DEFINE2(vfs, namecache, revlookup, success, "struct vnode *",
     "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, revlookup, fail, "struct vnode *",
     "int");
SDT_PROBE_DEFINE2(vfs, namecache, prune, done, "int", "int");
SDT_PROBE_DEFINE3(vfs, namecache, enter, toolong, "struct vnode *",
    "char *", "size_t");
SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *",
    "char *", "size_t");

/*
 * rbtree: compare two nodes.
 */
static int
cache_compare_nodes(void *context, const void *n1, const void *n2)
{
	const struct namecache *nc1 = n1;
	const struct namecache *nc2 = n2;

	if (nc1->nc_key < nc2->nc_key) {
		return -1;
	}
	if (nc1->nc_key > nc2->nc_key) {
		return 1;
	}
	KASSERT(NC_NLEN(nc1) == NC_NLEN(nc2));
	return memcmp(nc1->nc_name, nc2->nc_name, NC_NLEN(nc1));
}

/*
 * Compute a key value for the given name.  The name length is encoded in
 * the key value to try and improve uniqueness, and so that length doesn't
 * need to be compared separately for string comparisons.
 */
static uintptr_t
cache_key(const char *name, size_t nlen)
{
	uintptr_t key;

	KASSERT((nlen & ~NC_NLEN_MASK) == 0);

	key = hash32_buf(name, nlen, HASH32_STR_INIT);
	return (key << NC_NLEN_BITS) | (uintptr_t)nlen;
}
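
/*
 * For illustration (the width is an assumption, see NC_NLEN_BITS in
 * sys/namei.src): with NC_NLEN_BITS == 11, the 3-character name "etc"
 * hashing to H yields key == (H << 11) | 3.  Two names of different
 * lengths can therefore never produce equal keys, which is why
 * cache_compare_nodes() may assert equal lengths before memcmp().
 */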

/*
 * Remove an entry from the cache.  vi_nc_lock must be held, and if dir2node
 * is true, then we're locking in the conventional direction and the list
 * lock will be acquired when removing the entry from the vnode list.
 */
static void
cache_remove(struct namecache *ncp, const bool dir2node)
{
	struct vnode *vp, *dvp = ncp->nc_dvp;
	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
	size_t namelen = NC_NLEN(ncp);

	KASSERT(rw_write_held(&dvi->vi_nc_lock));
	KASSERT(cache_key(ncp->nc_name, namelen) == ncp->nc_key);
	KASSERT(rb_tree_find_node(&dvi->vi_nc_tree, ncp) == ncp);

	SDT_PROBE(vfs, namecache, invalidate, done, ncp, 0, 0, 0, 0);

	/*
	 * Remove from the vnode's list.  This excludes cache_revlookup(),
	 * and then it's safe to remove from the LRU lists.
	 */
	if ((vp = ncp->nc_vp) != NULL) {
		vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
		if (__predict_true(dir2node)) {
			rw_enter(&vi->vi_nc_listlock, RW_WRITER);
			TAILQ_REMOVE(&vi->vi_nc_list, ncp, nc_list);
			rw_exit(&vi->vi_nc_listlock);
		} else {
			TAILQ_REMOVE(&vi->vi_nc_list, ncp, nc_list);
		}
	}

	/* Remove from the directory's rbtree. */
	rb_tree_remove_node(&dvi->vi_nc_tree, ncp);

	/* Remove from the LRU lists. */
	mutex_enter(&cache_lru_lock);
	TAILQ_REMOVE(&cache_lru.list[ncp->nc_lrulist], ncp, nc_lru);
	cache_lru.count[ncp->nc_lrulist]--;
	mutex_exit(&cache_lru_lock);

	/* Finally, free it. */
	if (namelen > NCHNAMLEN) {
		size_t sz = offsetof(struct namecache, nc_name[namelen]);
		kmem_free(ncp, sz);
	} else {
		pool_cache_put(cache_pool, ncp);
	}
}

/*
 * Find a single cache entry and return it.  vi_nc_lock must be held.
 */
static struct namecache * __noinline
cache_lookup_entry(struct vnode *dvp, const char *name, size_t namelen,
    uintptr_t key)
{
	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
	struct rb_node *node = dvi->vi_nc_tree.rbt_root;
	struct namecache *ncp;
	enum cache_lru_id lrulist;
	int diff;

	KASSERT(namelen <= MAXPATHLEN);
	KASSERT(rw_lock_held(&dvi->vi_nc_lock));

	/*
	 * Search the RB tree for the key.  This is an inlined lookup
	 * tailored to exactly what's needed here, and it turns out to be
	 * quite a bit faster than using rb_tree_find_node().
	 *
	 * For a matching key, memcmp() needs to be called once to confirm
	 * that the correct name has been found.  Very rarely there will be
	 * a key value collision and the search will continue.
	 */
	for (;;) {
		if (__predict_false(RB_SENTINEL_P(node))) {
			return NULL;
		}
		ncp = (struct namecache *)node;
		KASSERT((void *)&ncp->nc_tree == (void *)ncp);
		KASSERT(ncp->nc_dvp == dvp);
		if (ncp->nc_key == key) {
			KASSERT(NC_NLEN(ncp) == namelen);
			diff = memcmp(ncp->nc_name, name, namelen);
			if (__predict_true(diff == 0)) {
				break;
			}
			node = node->rb_nodes[diff < 0];
		} else {
			node = node->rb_nodes[ncp->nc_key < key];
		}
	}

	/*
	 * If the entry is on the wrong LRU list, requeue it.  This is an
	 * unlocked check, but it will rarely be wrong and even then there
	 * will be no harm caused.
	 */
	lrulist = atomic_load_relaxed(&ncp->nc_lrulist);
	if (__predict_false(lrulist != LRU_ACTIVE)) {
		cache_activate(ncp);
	}
	return ncp;
}

/*
 * Look for the name in the cache.  We don't do this if the segment name
 * is long, simply so the cache can avoid holding long names (which would
 * either waste space or add greatly to the complexity).
 *
 * Lookup is called with DVP pointing to the directory to search,
 * and CNP providing the name of the entry being sought: cn_nameptr
 * is the name, cn_namelen is its length, and cn_flags is the flags
 * word from the namei operation.
 *
 * DVP must be locked.
 *
 * There are three possible non-error return states:
 *    1. Nothing was found in the cache. Nothing is known about
 *       the requested name.
 *    2. A negative entry was found in the cache, meaning that the
 *       requested name definitely does not exist.
 *    3. A positive entry was found in the cache, meaning that the
 *       requested name does exist and that we are providing the
 *       vnode.
 * In these cases the results are:
 *    1. 0 returned; VN is set to NULL.
 *    2. 1 returned; VN is set to NULL.
 *    3. 1 returned; VN is set to the vnode found.
 *
 * The additional result argument ISWHT is set to zero, unless a
 * negative entry is found that was entered as a whiteout, in which
 * case ISWHT is set to one.
 *
 * The ISWHT_RET argument pointer may be null. In this case an
 * assertion is made that the whiteout flag is not set. File systems
 * that do not support whiteouts can/should do this.
 *
 * Filesystems that do support whiteouts should add ISWHITEOUT to
 * cnp->cn_flags if ISWHT comes back nonzero.
 *
 * When a vnode is returned, it is locked, as per the vnode lookup
 * locking protocol.
 *
 * There is no way for this function to fail, in the sense of
 * generating an error that requires aborting the namei operation.
 *
 * (Prior to October 2012, this function returned an integer status,
 * and a vnode, and mucked with the flags word in CNP for whiteouts.
 * The integer status was -1 for "nothing found", ENOENT for "a
 * negative entry found", 0 for "a positive entry found", and possibly
 * other errors, and the value of VN might or might not have been set
 * depending on what error occurred.)
 */
bool
cache_lookup(struct vnode *dvp, const char *name, size_t namelen,
	     uint32_t nameiop, uint32_t cnflags,
	     int *iswht_ret, struct vnode **vn_ret)
{
	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
	struct namecache *ncp;
	struct vnode *vp;
	uintptr_t key;
	int error;
	bool hit;
	krw_t op;

	KASSERT(namelen != cache_mp_nlen || name == cache_mp_name);

	/* Establish default result values */
	if (iswht_ret != NULL) {
		*iswht_ret = 0;
	}
	*vn_ret = NULL;

	if (__predict_false(namelen > cache_maxlen)) {
		SDT_PROBE(vfs, namecache, lookup, toolong, dvp,
		    name, namelen, 0, 0);
		COUNT(ncs_long);
		return false;
	}

	/* Compute the key up front - don't need the lock. */
	key = cache_key(name, namelen);

	/* Could the entry be purged below? */
	if ((cnflags & ISLASTCN) != 0 &&
	    ((cnflags & MAKEENTRY) == 0 || nameiop == CREATE)) {
		op = RW_WRITER;
	} else {
		op = RW_READER;
	}

	/* Now look for the name. */
	rw_enter(&dvi->vi_nc_lock, op);
	ncp = cache_lookup_entry(dvp, name, namelen, key);
	if (__predict_false(ncp == NULL)) {
		rw_exit(&dvi->vi_nc_lock);
		COUNT(ncs_miss);
		SDT_PROBE(vfs, namecache, lookup, miss, dvp,
		    name, namelen, 0, 0);
		return false;
	}
	if (__predict_false((cnflags & MAKEENTRY) == 0)) {
		/*
		 * Last component and we are renaming or deleting: the
		 * cache entry is invalid, or we otherwise don't want it
		 * to exist.
		 */
		KASSERT((cnflags & ISLASTCN) != 0);
		cache_remove(ncp, true);
		rw_exit(&dvi->vi_nc_lock);
		COUNT(ncs_badhits);
		return false;
	}
	if ((vp = ncp->nc_vp) == NULL) {
		if (iswht_ret != NULL) {
			/*
			 * Restore the ISWHITEOUT flag saved earlier.
			 */
			*iswht_ret = ncp->nc_whiteout;
		} else {
			KASSERT(!ncp->nc_whiteout);
		}
		if (nameiop == CREATE && (cnflags & ISLASTCN) != 0) {
			/*
			 * Last component and we are preparing to create
			 * the named object, so flush the negative cache
			 * entry.
			 */
			COUNT(ncs_badhits);
			cache_remove(ncp, true);
			hit = false;
		} else {
			COUNT(ncs_neghits);
			SDT_PROBE(vfs, namecache, lookup, hit, dvp, name,
			    namelen, 0, 0);
			/* found neg entry; vn is already null from above */
			hit = true;
		}
		rw_exit(&dvi->vi_nc_lock);
		return hit;
	}
	error = vcache_tryvget(vp);
	rw_exit(&dvi->vi_nc_lock);
	if (error) {
		KASSERT(error == EBUSY);
		/*
		 * This vnode is being cleaned out.
		 * XXX badhits?
		 */
		COUNT(ncs_falsehits);
		return false;
	}

	COUNT(ncs_goodhits);
	SDT_PROBE(vfs, namecache, lookup, hit, dvp, name, namelen, 0, 0);
	/* found it */
	*vn_ret = vp;
	return true;
}
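
/*
 * Illustrative only: a hedged sketch of how a file system lookup routine
 * might consume the three result states above.  example_fs_lookup() and
 * example_fs_scan() are hypothetical; real file systems do this from
 * VOP_LOOKUP and must also follow the vnode locking protocol.
 */
#if 0
static int
example_fs_lookup(struct vnode *dvp, struct componentname *cnp,
    struct vnode **vpp)
{
	int iswht;

	if (cache_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    cnp->cn_nameiop, cnp->cn_flags, &iswht, vpp)) {
		if (*vpp != NULL) {
			/* Positive hit: *vpp is referenced and set. */
			return 0;
		}
		if (iswht) {
			cnp->cn_flags |= ISWHITEOUT;
		}
		/* Negative hit: the name definitely does not exist. */
		return ENOENT;
	}
	/* Miss: scan directory metadata, then cache_enter() the result. */
	return example_fs_scan(dvp, cnp, vpp);
}
#endif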

/*
 * Version of the above without the nameiop argument, for NFS.
 */
bool
cache_lookup_raw(struct vnode *dvp, const char *name, size_t namelen,
		 uint32_t cnflags,
		 int *iswht_ret, struct vnode **vn_ret)
{

	return cache_lookup(dvp, name, namelen, LOOKUP, cnflags | MAKEENTRY,
	    iswht_ret, vn_ret);
}

/*
 * Used by namei() to walk down a path, component by component, by
 * looking up names in the cache.  The node locks are chained along the
 * way: a parent's lock is not dropped until the child's is acquired.
 */
bool
cache_lookup_linked(struct vnode *dvp, const char *name, size_t namelen,
		    struct vnode **vn_ret, krwlock_t **plock,
		    kauth_cred_t cred)
{
	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
	struct namecache *ncp;
	krwlock_t *oldlock, *newlock;
	struct vnode *vp;
	uintptr_t key;
	int error;

	KASSERT(namelen != cache_mp_nlen || name == cache_mp_name);

	/* If disabled, or file system doesn't support this, bail out. */
	if (__predict_false((dvp->v_mount->mnt_iflag & IMNT_NCLOOKUP) == 0)) {
		return false;
	}

	if (__predict_false(namelen > cache_maxlen)) {
		COUNT(ncs_long);
		return false;
	}

	/* Compute the key up front - don't need the lock. */
	key = cache_key(name, namelen);

	/*
	 * Acquire the directory lock.  Once we have that, we can drop the
	 * previous one (if any).
	 *
	 * The two lock holds mean that the directory can't go away while
	 * here: the directory must be purged with cache_purge() before
	 * being freed, and both parent & child's vi_nc_lock must be taken
	 * before that point is passed.
	 *
	 * However if there's no previous lock, like at the root of the
	 * chain, then "dvp" must be referenced to prevent dvp going away
	 * before we get its lock.
	 *
	 * Note that the two locks can be the same if looking up a dot, for
	 * example: /usr/bin/.  If looking up the parent (..) we can't wait
	 * on the lock as child -> parent is the wrong direction.
	 */
	if (*plock != &dvi->vi_nc_lock) {
		oldlock = *plock;
		newlock = &dvi->vi_nc_lock;
		if (!rw_tryenter(&dvi->vi_nc_lock, RW_READER)) {
			return false;
		}
	} else {
		oldlock = NULL;
		newlock = NULL;
		if (*plock == NULL) {
			KASSERT(vrefcnt(dvp) > 0);
		}
	}

	/*
	 * First up, check if the user is allowed to look up files in this
	 * directory.
	 */
	if (cred != FSCRED) {
		if (dvi->vi_nc_mode == VNOVAL) {
			if (newlock != NULL) {
				rw_exit(newlock);
			}
			return false;
		}
		KASSERT(dvi->vi_nc_uid != VNOVAL);
		KASSERT(dvi->vi_nc_gid != VNOVAL);
		error = kauth_authorize_vnode(cred,
		    KAUTH_ACCESS_ACTION(VEXEC,
			dvp->v_type, dvi->vi_nc_mode & ALLPERMS),
		    dvp, NULL,
		    genfs_can_access(dvp, cred, dvi->vi_nc_uid, dvi->vi_nc_gid,
			dvi->vi_nc_mode & ALLPERMS, NULL, VEXEC));
		if (error != 0) {
			if (newlock != NULL) {
				rw_exit(newlock);
			}
			COUNT(ncs_denied);
			return false;
		}
	}

	/*
	 * Now look for a matching cache entry.
	 */
	ncp = cache_lookup_entry(dvp, name, namelen, key);
	if (__predict_false(ncp == NULL)) {
		if (newlock != NULL) {
			rw_exit(newlock);
		}
		COUNT(ncs_miss);
		SDT_PROBE(vfs, namecache, lookup, miss, dvp,
		    name, namelen, 0, 0);
		return false;
	}
	if ((vp = ncp->nc_vp) == NULL) {
		/* found negative entry; vn is already null from above */
		KASSERT(namelen != cache_mp_nlen);
		KASSERT(name != cache_mp_name);
		COUNT(ncs_neghits);
	} else {
		COUNT(ncs_goodhits); /* XXX can be "badhits" */
	}
	SDT_PROBE(vfs, namecache, lookup, hit, dvp, name, namelen, 0, 0);

	/*
	 * Return with the directory lock still held.  It will either be
	 * returned to us with another call to cache_lookup_linked() when
	 * looking up the next component, or the caller will release it
	 * manually when finished.
	 */
	if (oldlock) {
		rw_exit(oldlock);
	}
	if (newlock) {
		*plock = newlock;
	}
	*vn_ret = vp;
	return true;
}
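
/*
 * Illustrative only: a hedged sketch of the lock chaining contract, in
 * the style of namei()'s fast path.  example_walk() is hypothetical.
 * Note that the vnode returned in vp is only stable while the chained
 * lock is held, so it must be referenced before that lock is released.
 */
#if 0
static bool
example_walk(struct vnode *dvp, kauth_cred_t cred, struct vnode **vpp)
{
	static const char * const names[] = { "usr", "bin" };
	krwlock_t *plock = NULL;
	struct vnode *vp = NULL;
	u_int i;

	KASSERT(vrefcnt(dvp) > 0);	/* root of the chain is referenced */
	for (i = 0; i < __arraycount(names); i++) {
		if (!cache_lookup_linked(dvp, names[i], strlen(names[i]),
		    &vp, &plock, cred) || vp == NULL) {
			break;		/* miss or negative: use slow path */
		}
		dvp = vp;
	}
	if (i == __arraycount(names) && vcache_tryvget(vp) == 0) {
		*vpp = vp;		/* referenced: safe after unlock */
	} else {
		*vpp = NULL;
	}
	if (plock != NULL) {
		rw_exit(plock);		/* release the last chained lock */
	}
	return *vpp != NULL;
}
#endif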

/*
 * Scan cache looking for name of directory entry pointing at vp.
 * Will not search for "." or "..".
 *
 * If the lookup succeeds the vnode is referenced and stored in dvpp.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 */
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp,
    bool checkaccess, accmode_t accmode)
{
	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
	struct namecache *ncp;
	enum cache_lru_id lrulist;
	struct vnode *dvp;
	int error, nlen;
	char *bp;

	KASSERT(vp != NULL);

	if (cache_maxlen == 0)
		goto out;

	rw_enter(&vi->vi_nc_listlock, RW_READER);
	if (checkaccess) {
		/*
		 * Check if the user is allowed to see.  NOTE: this is
		 * checking for access on the "wrong" directory.  getcwd()
		 * wants to see that there is access on every component
		 * along the way, not that there is access to any individual
		 * component.  Don't use this to check you can look in vp.
		 *
		 * I don't like it, I didn't come up with it, don't blame me!
		 */
		if (vi->vi_nc_mode == VNOVAL) {
			rw_exit(&vi->vi_nc_listlock);
			return -1;
		}
		KASSERT(vi->vi_nc_uid != VNOVAL);
		KASSERT(vi->vi_nc_gid != VNOVAL);
		error = kauth_authorize_vnode(kauth_cred_get(),
		    KAUTH_ACCESS_ACTION(VEXEC, vp->v_type, vi->vi_nc_mode &
			ALLPERMS),
		    vp, NULL, genfs_can_access(vp, curlwp->l_cred,
			vi->vi_nc_uid, vi->vi_nc_gid,
			vi->vi_nc_mode & ALLPERMS,
			NULL, accmode));
		if (error != 0) {
			rw_exit(&vi->vi_nc_listlock);
			COUNT(ncs_denied);
			return SET_ERROR(EACCES);
		}
	}
	TAILQ_FOREACH(ncp, &vi->vi_nc_list, nc_list) {
		KASSERT(ncp->nc_vp == vp);
		KASSERT(ncp->nc_dvp != NULL);
		nlen = NC_NLEN(ncp);

		/*
		 * Ignore mountpoint entries.
		 */
		if (nlen == cache_mp_nlen) {
			continue;
		}

		/*
		 * The queue is partially sorted.  Once we hit dots, nothing
		 * else remains but dots and dotdots, so bail out.
		 */
		if (ncp->nc_name[0] == '.') {
			if (nlen == 1 ||
			    (nlen == 2 && ncp->nc_name[1] == '.')) {
				break;
			}
		}

		/*
		 * Record a hit on the entry.  This is an unlocked read but
		 * even if wrong it doesn't matter too much.
		 */
		lrulist = atomic_load_relaxed(&ncp->nc_lrulist);
		if (lrulist != LRU_ACTIVE) {
			cache_activate(ncp);
		}

		if (bufp) {
			bp = *bpp;
			bp -= nlen;
			if (bp <= bufp) {
				*dvpp = NULL;
				rw_exit(&vi->vi_nc_listlock);
				SDT_PROBE(vfs, namecache, revlookup,
				    fail, vp, ERANGE, 0, 0, 0);
				return SET_ERROR(ERANGE);
			}
			memcpy(bp, ncp->nc_name, nlen);
			*bpp = bp;
		}

		dvp = ncp->nc_dvp;
		error = vcache_tryvget(dvp);
		rw_exit(&vi->vi_nc_listlock);
		if (error) {
			KASSERT(error == EBUSY);
			if (bufp)
				(*bpp) += nlen;
			*dvpp = NULL;
			SDT_PROBE(vfs, namecache, revlookup, fail, vp,
			    error, 0, 0, 0);
			return -1;
		}
		*dvpp = dvp;
		SDT_PROBE(vfs, namecache, revlookup, success, vp, dvp,
		    0, 0, 0);
		COUNT(ncs_revhits);
		return 0;
	}
	rw_exit(&vi->vi_nc_listlock);
	COUNT(ncs_revmiss);
out:
	*dvpp = NULL;
	return -1;
}
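
/*
 * Illustrative only: a hedged sketch of the getcwd(3)-style contract
 * described above: each hit deposits a name immediately before *bpp and
 * backs bpp up, so repeated calls build a path right to left.
 * example_revwalk() is hypothetical; the real consumer is getcwd.
 */
#if 0
static int
example_revwalk(struct vnode *vp, char *buf, size_t buflen, char **path)
{
	struct vnode *dvp;
	char *bp = buf + buflen;
	int error;

	*--bp = '\0';
	vref(vp);
	while ((vp->v_vflag & VV_ROOT) == 0) {
		error = cache_revlookup(vp, &dvp, &bp, buf, false, 0);
		if (error != 0) {
			/* -1 is a cache miss, not an errno value. */
			vrele(vp);
			return error;
		}
		*--bp = '/';
		vrele(vp);
		vp = dvp;		/* already referenced for us */
	}
	vrele(vp);
	*path = bp;
	return 0;
}
#endif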

/*
 * Add an entry to the cache.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp,
	    const char *name, size_t namelen, uint32_t cnflags)
{
	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
	struct namecache *ncp, *oncp;
	int total;

	KASSERT(namelen != cache_mp_nlen || name == cache_mp_name);

	/* First, check whether we can/should add a cache entry. */
	if ((cnflags & MAKEENTRY) == 0 ||
	    __predict_false(namelen > cache_maxlen)) {
		SDT_PROBE(vfs, namecache, enter, toolong, vp, name, namelen,
		    0, 0);
		return;
	}

	SDT_PROBE(vfs, namecache, enter, done, vp, name, namelen, 0, 0);

	/*
	 * Reclaim some entries if over budget.  This is an unlocked check,
	 * but that's fine: we only need to catch up eventually, and it
	 * does no harm to go over budget temporarily.
	 */
	total = atomic_load_relaxed(&cache_lru.count[LRU_ACTIVE]);
	total += atomic_load_relaxed(&cache_lru.count[LRU_INACTIVE]);
	if (__predict_false(total > desiredvnodes)) {
		cache_reclaim();
	}

	/* Now allocate a fresh entry. */
	if (__predict_true(namelen <= NCHNAMLEN)) {
		ncp = pool_cache_get(cache_pool, PR_WAITOK);
	} else {
		size_t sz = offsetof(struct namecache, nc_name[namelen]);
		ncp = kmem_alloc(sz, KM_SLEEP);
	}

	/*
	 * Fill in cache info.  For negative hits, save the ISWHITEOUT flag
	 * so we can restore it later when the cache entry is used again.
	 */
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;
	ncp->nc_key = cache_key(name, namelen);
	ncp->nc_whiteout = ((cnflags & ISWHITEOUT) != 0);
	memcpy(ncp->nc_name, name, namelen);

	/*
	 * Insert to the directory.  Concurrent lookups may race for a cache
	 * entry.  If there's an entry there already, purge it.
	 */
	rw_enter(&dvi->vi_nc_lock, RW_WRITER);
	oncp = rb_tree_insert_node(&dvi->vi_nc_tree, ncp);
	if (oncp != ncp) {
		KASSERT(oncp->nc_key == ncp->nc_key);
		KASSERT(NC_NLEN(oncp) == NC_NLEN(ncp));
		KASSERT(memcmp(oncp->nc_name, name, namelen) == 0);
		cache_remove(oncp, true);
		oncp = rb_tree_insert_node(&dvi->vi_nc_tree, ncp);
		KASSERT(oncp == ncp);
	}

	/*
	 * With the directory lock still held, insert to the tail of the
	 * ACTIVE LRU list (new) and take the opportunity to incrementally
	 * balance the lists.
	 */
	mutex_enter(&cache_lru_lock);
	ncp->nc_lrulist = LRU_ACTIVE;
	cache_lru.count[LRU_ACTIVE]++;
	TAILQ_INSERT_TAIL(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
	cache_deactivate();
	mutex_exit(&cache_lru_lock);

	/*
	 * Finally, insert to the vnode and unlock.  With everything set up
	 * it's safe to let cache_revlookup() see the entry.  Partially sort
	 * the per-vnode list: dots go to the back so cache_revlookup() doesn't
	 * have to consider them.
	 */
	if (vp != NULL) {
		vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
		rw_enter(&vi->vi_nc_listlock, RW_WRITER);
		if ((namelen == 1 && name[0] == '.') ||
		    (namelen == 2 && name[0] == '.' && name[1] == '.')) {
			TAILQ_INSERT_TAIL(&vi->vi_nc_list, ncp, nc_list);
		} else {
			TAILQ_INSERT_HEAD(&vi->vi_nc_list, ncp, nc_list);
		}
		rw_exit(&vi->vi_nc_listlock);
	}
	rw_exit(&dvi->vi_nc_lock);
}
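
/*
 * Illustrative only: after a directory scan, a file system would
 * typically record both outcomes (sketch, with cnp as in the earlier
 * example):
 *
 *	found:	   cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
 *			cnp->cn_flags);
 *	not found: cache_enter(dvp, NULL, cnp->cn_nameptr, cnp->cn_namelen,
 *			cnp->cn_flags);
 *
 * The NULL vnode creates a negative entry.  cache_enter() itself ignores
 * the call unless MAKEENTRY is set in cn_flags.
 */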

/*
 * Set identity info in cache for a vnode.  We only care about directories,
 * so ignore other updates.  The cached info may be marked invalid if the
 * inode has an ACL.
 */
void
cache_enter_id(struct vnode *vp, mode_t mode, uid_t uid, gid_t gid, bool valid)
{
	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);

	if (vp->v_type == VDIR) {
		/* Grab both locks, for forward & reverse lookup. */
		rw_enter(&vi->vi_nc_lock, RW_WRITER);
		rw_enter(&vi->vi_nc_listlock, RW_WRITER);
		if (valid) {
			vi->vi_nc_mode = mode;
			vi->vi_nc_uid = uid;
			vi->vi_nc_gid = gid;
		} else {
			vi->vi_nc_mode = VNOVAL;
			vi->vi_nc_uid = VNOVAL;
			vi->vi_nc_gid = VNOVAL;
		}
		rw_exit(&vi->vi_nc_listlock);
		rw_exit(&vi->vi_nc_lock);
	}
}

/*
 * Return true if we have identity for the given vnode, and use this as
 * an opportunity to confirm that everything squares up.
 *
 * Because of shared code, some file systems could provide partial
 * information, missing some updates, so check the mount flag too.
 */
bool
cache_have_id(struct vnode *vp)
{

	if (vp->v_type == VDIR &&
	    (vp->v_mount->mnt_iflag & IMNT_NCLOOKUP) != 0 &&
	    atomic_load_relaxed(&VNODE_TO_VIMPL(vp)->vi_nc_mode) != VNOVAL) {
		return true;
	} else {
		return false;
	}
}

/*
 * Enter a mount point.  cvp is the covered vnode, and rvp is the root of
 * the mounted file system.
 */
void
cache_enter_mount(struct vnode *cvp, struct vnode *rvp)
{

	KASSERT(vrefcnt(cvp) > 0);
	KASSERT(vrefcnt(rvp) > 0);
	KASSERT(cvp->v_type == VDIR);
	KASSERT((rvp->v_vflag & VV_ROOT) != 0);

	if (rvp->v_type == VDIR) {
		cache_enter(cvp, rvp, cache_mp_name, cache_mp_nlen, MAKEENTRY);
	}
}

/*
 * Look up a cached mount point.  Used in the strongly locked path.
 */
bool
cache_lookup_mount(struct vnode *dvp, struct vnode **vn_ret)
{
	bool ret;

	ret = cache_lookup(dvp, cache_mp_name, cache_mp_nlen, LOOKUP,
	    MAKEENTRY, NULL, vn_ret);
	KASSERT((*vn_ret != NULL) == ret);
	return ret;
}

/*
 * Try to cross a mount point.  For use with cache_lookup_linked().
 */
bool
cache_cross_mount(struct vnode **dvp, krwlock_t **plock)
{

	return cache_lookup_linked(*dvp, cache_mp_name, cache_mp_nlen,
	   dvp, plock, FSCRED);
}

/*
 * Name cache initialization, from vfs_init() when the system is booting.
 */
void
nchinit(void)
{

	cache_pool = pool_cache_init(sizeof(struct namecache),
	    coherency_unit, 0, 0, "namecache", NULL, IPL_NONE, NULL,
	    NULL, NULL);
	KASSERT(cache_pool != NULL);

	mutex_init(&cache_lru_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&cache_lru.list[LRU_ACTIVE]);
	TAILQ_INIT(&cache_lru.list[LRU_INACTIVE]);

	mutex_init(&cache_stat_lock, MUTEX_DEFAULT, IPL_NONE);
	callout_init(&cache_stat_callout, CALLOUT_MPSAFE);
	callout_setfunc(&cache_stat_callout, cache_update_stats, NULL);
	callout_schedule(&cache_stat_callout, cache_stat_interval * hz);

	KASSERT(cache_sysctllog == NULL);
	sysctl_createv(&cache_sysctllog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "namecache_stats",
		       SYSCTL_DESCR("namecache statistics"),
		       cache_stat_sysctl, 0, NULL, 0,
		       CTL_VFS, CTL_CREATE, CTL_EOL);
}

/*
 * Called once for each CPU in the system as attached.
 */
void
cache_cpu_init(struct cpu_info *ci)
{
	size_t sz;

	sz = roundup2(sizeof(struct nchcpu), coherency_unit);
	ci->ci_data.cpu_nch = kmem_zalloc(sz, KM_SLEEP);
	KASSERT(((uintptr_t)ci->ci_data.cpu_nch & (coherency_unit - 1)) == 0);
}

/*
 * A vnode is being allocated: set up cache structures.
 */
void
cache_vnode_init(struct vnode *vp)
{
	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);

	rw_init(&vi->vi_nc_lock);
	rw_init(&vi->vi_nc_listlock);
	rb_tree_init(&vi->vi_nc_tree, &cache_rbtree_ops);
	TAILQ_INIT(&vi->vi_nc_list);
	vi->vi_nc_mode = VNOVAL;
	vi->vi_nc_uid = VNOVAL;
	vi->vi_nc_gid = VNOVAL;
}

/*
 * A vnode is being freed: finish cache structures.
 */
void
cache_vnode_fini(struct vnode *vp)
{
	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);

	KASSERT(RB_TREE_MIN(&vi->vi_nc_tree) == NULL);
	KASSERT(TAILQ_EMPTY(&vi->vi_nc_list));
	rw_destroy(&vi->vi_nc_lock);
	rw_destroy(&vi->vi_nc_listlock);
}

/*
 * Helper for cache_purge1(): purge cache entries for the given vnode from
 * all directories that the vnode is cached in.
 */
static void
cache_purge_parents(struct vnode *vp)
{
	vnode_impl_t *dvi, *vi = VNODE_TO_VIMPL(vp);
	struct vnode *dvp, *blocked;
	struct namecache *ncp;

	SDT_PROBE(vfs, namecache, purge, parents, vp, 0, 0, 0, 0);

	blocked = NULL;

	rw_enter(&vi->vi_nc_listlock, RW_WRITER);
	while ((ncp = TAILQ_FIRST(&vi->vi_nc_list)) != NULL) {
		/*
		 * Locking in the wrong direction.  Try for a hold on the
		 * directory node's lock, and if we get it, all good:
		 * nuke the entry and move on to the next.
		 */
		dvp = ncp->nc_dvp;
		dvi = VNODE_TO_VIMPL(dvp);
		if (rw_tryenter(&dvi->vi_nc_lock, RW_WRITER)) {
			cache_remove(ncp, false);
			rw_exit(&dvi->vi_nc_lock);
			blocked = NULL;
			continue;
		}

		/*
		 * We can't wait on the directory node's lock with our list
		 * lock held or the system could deadlock.
		 *
		 * Take a hold on the directory vnode to prevent it from
		 * being freed (taking the vnode & lock with it).  Then
		 * wait for the lock to become available with no other locks
		 * held, and retry.
		 *
		 * If this happens twice in a row, give the other side a
		 * breather; we can do nothing until it lets go.
		 */
		vhold(dvp);
		rw_exit(&vi->vi_nc_listlock);
		rw_enter(&dvi->vi_nc_lock, RW_WRITER);
		/* Do nothing. */
		rw_exit(&dvi->vi_nc_lock);
		holdrele(dvp);
		if (blocked == dvp) {
			kpause("ncpurge", false, 1, NULL);
		}
		rw_enter(&vi->vi_nc_listlock, RW_WRITER);
		blocked = dvp;
	}
	rw_exit(&vi->vi_nc_listlock);
}

/*
 * Helper for cache_purge1(): purge all cache entries hanging off the given
 * directory vnode.
 */
static void
cache_purge_children(struct vnode *dvp)
{
	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
	struct namecache *ncp;

	SDT_PROBE(vfs, namecache, purge, children, dvp, 0, 0, 0, 0);

	rw_enter(&dvi->vi_nc_lock, RW_WRITER);
	while ((ncp = RB_TREE_MIN(&dvi->vi_nc_tree)) != NULL) {
		cache_remove(ncp, true);
	}
	rw_exit(&dvi->vi_nc_lock);
}

/*
 * Helper for cache_purge1(): purge cache entry from the given vnode,
 * finding it by name.
 */
static void
cache_purge_name(struct vnode *dvp, const char *name, size_t namelen)
{
	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
	struct namecache *ncp;
	uintptr_t key;

	SDT_PROBE(vfs, namecache, purge, name, name, namelen, 0, 0, 0);

	key = cache_key(name, namelen);
	rw_enter(&dvi->vi_nc_lock, RW_WRITER);
	ncp = cache_lookup_entry(dvp, name, namelen, key);
	if (ncp) {
		cache_remove(ncp, true);
	}
	rw_exit(&dvi->vi_nc_lock);
}

/*
 * Cache flush, a particular vnode; called when a vnode is renamed to
 * hide entries that would now be invalid.
 */
void
cache_purge1(struct vnode *vp, const char *name, size_t namelen, int flags)
{

	if (flags & PURGE_PARENTS) {
		cache_purge_parents(vp);
	}
	if (flags & PURGE_CHILDREN) {
		cache_purge_children(vp);
	}
	if (name != NULL) {
		cache_purge_name(vp, name, namelen);
	}
}
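
/*
 * Illustrative only: the common case is the cache_purge() wrapper from
 * sys/namei.h, which purges both directions at once, e.g. when a vnode
 * is recycled or renamed:
 *
 *	cache_purge(vp);	== cache_purge1(vp, NULL, 0,
 *				   PURGE_PARENTS | PURGE_CHILDREN)
 */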

/*
 * vnode filter for cache_purgevfs().
 */
static bool
cache_vdir_filter(void *cookie, vnode_t *vp)
{

	return vp->v_type == VDIR;
}

/*
 * Cache flush, a whole filesystem; called when filesys is umounted to
 * remove entries that would now be invalid.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct vnode_iterator *iter;
	vnode_t *dvp;

	vfs_vnode_iterator_init(mp, &iter);
	for (;;) {
		dvp = vfs_vnode_iterator_next(iter, cache_vdir_filter, NULL);
		if (dvp == NULL) {
			break;
		}
		cache_purge_children(dvp);
		vrele(dvp);
	}
	vfs_vnode_iterator_destroy(iter);
}

/*
 * Re-queue an entry onto the tail of the active LRU list, after it has
 * scored a hit.
 */
static void
cache_activate(struct namecache *ncp)
{

	mutex_enter(&cache_lru_lock);
	TAILQ_REMOVE(&cache_lru.list[ncp->nc_lrulist], ncp, nc_lru);
	TAILQ_INSERT_TAIL(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
	cache_lru.count[ncp->nc_lrulist]--;
	cache_lru.count[LRU_ACTIVE]++;
	ncp->nc_lrulist = LRU_ACTIVE;
	mutex_exit(&cache_lru_lock);
}

/*
 * Try to balance the LRU lists.  Pick some victim entries, and re-queue
 * them from the head of the active list to the tail of the inactive list.
 */
static void
cache_deactivate(void)
{
	struct namecache *ncp;
	int total, i;

	KASSERT(mutex_owned(&cache_lru_lock));

	/* If we're nowhere near budget yet, don't bother. */
	total = cache_lru.count[LRU_ACTIVE] + cache_lru.count[LRU_INACTIVE];
	if (total < (desiredvnodes >> 1)) {
		return;
	}

	/*
	 * Aim for a 1:1 ratio of active to inactive.  This is to allow each
	 * potential victim a reasonable amount of time to cycle through the
	 * inactive list in order to score a hit and be reactivated, while
	 * trying not to cause reactivations too frequently.
	 */
	if (cache_lru.count[LRU_ACTIVE] < cache_lru.count[LRU_INACTIVE]) {
		return;
	}

	/* Move only a few at a time; will catch up eventually. */
	for (i = 0; i < cache_lru_maxdeact; i++) {
		ncp = TAILQ_FIRST(&cache_lru.list[LRU_ACTIVE]);
		if (ncp == NULL) {
			break;
		}
		KASSERT(ncp->nc_lrulist == LRU_ACTIVE);
		ncp->nc_lrulist = LRU_INACTIVE;
		TAILQ_REMOVE(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
		TAILQ_INSERT_TAIL(&cache_lru.list[LRU_INACTIVE], ncp, nc_lru);
		cache_lru.count[LRU_ACTIVE]--;
		cache_lru.count[LRU_INACTIVE]++;
	}
}

/*
 * Free some entries from the cache, when we have gone over budget.
 *
 * We don't want to cause too much work for any individual caller, and it
 * doesn't matter if we temporarily go over budget.  This is also "just a
 * cache" so it's not a big deal if we screw up and throw out something we
 * shouldn't.  So we take a relaxed attitude to this process to reduce its
 * impact.
 */
static void
cache_reclaim(void)
{
	struct namecache *ncp;
	vnode_impl_t *dvi;
	int toscan;

	/*
	 * Scan up to a preset maximum number of entries, but no more than
	 * 0.8% of the total at once (to allow for very small systems).
	 *
	 * On bigger systems, do a larger chunk of work to reduce the number
	 * of times that cache_lru_lock is held for any length of time.
	 */
	mutex_enter(&cache_lru_lock);
	toscan = MIN(cache_lru_maxscan, desiredvnodes >> 7);
	toscan = MAX(toscan, 1);
	SDT_PROBE(vfs, namecache, prune, done, cache_lru.count[LRU_ACTIVE] +
	    cache_lru.count[LRU_INACTIVE], toscan, 0, 0, 0);
	while (toscan-- != 0) {
		/* First try to balance the lists. */
		cache_deactivate();

		/* Now look for a victim on head of inactive list (old). */
		ncp = TAILQ_FIRST(&cache_lru.list[LRU_INACTIVE]);
		if (ncp == NULL) {
			break;
		}
		dvi = VNODE_TO_VIMPL(ncp->nc_dvp);
		KASSERT(ncp->nc_lrulist == LRU_INACTIVE);
		KASSERT(dvi != NULL);

		/*
		 * Locking in the wrong direction.  If we can't get the
		 * lock, the directory is actively busy, and it could also
		 * cause problems for the next guy in here, so send the
		 * entry to the back of the list.
		 */
		if (!rw_tryenter(&dvi->vi_nc_lock, RW_WRITER)) {
			TAILQ_REMOVE(&cache_lru.list[LRU_INACTIVE],
			    ncp, nc_lru);
			TAILQ_INSERT_TAIL(&cache_lru.list[LRU_INACTIVE],
			    ncp, nc_lru);
			continue;
		}

		/*
		 * Now have the victim entry locked.  Drop the LRU list
		 * lock, purge the entry, and start over.  The hold on
		 * vi_nc_lock will prevent the vnode from vanishing until
		 * finished (cache_purge() will be called on dvp before it
		 * disappears, and that will wait on vi_nc_lock).
		 */
		mutex_exit(&cache_lru_lock);
		cache_remove(ncp, true);
		rw_exit(&dvi->vi_nc_lock);
		mutex_enter(&cache_lru_lock);
	}
	mutex_exit(&cache_lru_lock);
}

/*
 * For file system code: count a lookup that required a full re-scan of
 * directory metadata.
 */
void
namecache_count_pass2(void)
{

	COUNT(ncs_pass2);
}

/*
 * For file system code: count a lookup that scored a hit in the directory
 * metadata near the location of the last lookup.
 */
void
namecache_count_2passes(void)
{

	COUNT(ncs_2passes);
}

/*
 * Sum the stats from all CPUs into nchstats.  This needs to run at least
 * once within every window where a 32-bit counter could roll over.  It's
 * called regularly by a timer to ensure this.
 */
static void
cache_update_stats(void *cookie)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	mutex_enter(&cache_stat_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		struct nchcpu *nchcpu = ci->ci_data.cpu_nch;
		UPDATE(nchcpu, ncs_goodhits);
		UPDATE(nchcpu, ncs_neghits);
		UPDATE(nchcpu, ncs_badhits);
		UPDATE(nchcpu, ncs_falsehits);
		UPDATE(nchcpu, ncs_miss);
		UPDATE(nchcpu, ncs_long);
		UPDATE(nchcpu, ncs_pass2);
		UPDATE(nchcpu, ncs_2passes);
		UPDATE(nchcpu, ncs_revhits);
		UPDATE(nchcpu, ncs_revmiss);
		UPDATE(nchcpu, ncs_denied);
	}
	if (cookie != NULL) {
		memcpy(cookie, &nchstats, sizeof(nchstats));
	}
	/* Reset the timer; arrive back here in N minutes at latest. */
	callout_schedule(&cache_stat_callout, cache_stat_interval * hz);
	mutex_exit(&cache_stat_lock);
}

/*
 * Fetch the current values of the stats for sysctl.
 */
static int
cache_stat_sysctl(SYSCTLFN_ARGS)
{
	struct nchstats stats;

	if (oldp == NULL) {
		*oldlenp = sizeof(nchstats);
		return 0;
	}

	if (*oldlenp == 0) {
		*oldlenp = 0;
		return 0;
	}

	/* Refresh the global stats. */
	sysctl_unlock();
	cache_update_stats(&stats);
	sysctl_relock();

	*oldlenp = MIN(sizeof(stats), *oldlenp);
	return sysctl_copyout(l, &stats, oldp, *oldlenp);
}
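
/*
 * Illustrative only: userland can read the node created in nchinit()
 * with sysctl(3).  A hedged sketch, assuming the node appears as
 * "vfs.namecache_stats":
 *
 *	struct nchstats ns;
 *	size_t len = sizeof(ns);
 *	if (sysctlbyname("vfs.namecache_stats", &ns, &len, NULL, 0) == 0)
 *		printf("goodhits %" PRIu64 "\n", ns.ncs_goodhits);
 */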

/*
 * For the debugger, given the address of a vnode, print all associated
 * names in the cache.
 */
#ifdef DDB
void
namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
{
	struct vnode *dvp = NULL;
	struct namecache *ncp;
	enum cache_lru_id id;

	for (id = 0; id < LRU_COUNT; id++) {
		TAILQ_FOREACH(ncp, &cache_lru.list[id], nc_lru) {
			if (ncp->nc_vp == vp) {
				(*pr)("name %.*s\n", (int)NC_NLEN(ncp),
				    ncp->nc_name);
				dvp = ncp->nc_dvp;
			}
		}
	}
	if (dvp == NULL) {
		(*pr)("name not found\n");
		return;
	}
	for (id = 0; id < LRU_COUNT; id++) {
		TAILQ_FOREACH(ncp, &cache_lru.list[id], nc_lru) {
			if (ncp->nc_vp == dvp) {
				(*pr)("parent %.*s\n", (int)NC_NLEN(ncp),
				    ncp->nc_name);
			}
		}
	}
}
#endif