/*	$NetBSD: vfs_cache.c,v 1.149 2020/12/12 18:41:13 christos Exp $	*/

/*-
 * Copyright (c) 2008, 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */

/*
 * Name caching:
 *
 *	Names found by directory scans are retained in a cache for future
 *	reference.  It is managed LRU, so frequently used names will hang
 *	around.  The cache is indexed by hash value obtained from the name.
 *
 *	The name cache is the brainchild of Robert Elz and was introduced in
 *	4.3BSD.  See "Using gprof to Tune the 4.2BSD Kernel", Marshall Kirk
 *	McKusick, May 21 1984.
 *
 * Data structures:
 *
 *	Most Unix namecaches very sensibly use a global hash table to index
 *	names.  The global hash table works well, but can cause concurrency
 *	headaches for the kernel hacker.  In the NetBSD 10.0 implementation
 *	we are not sensible, and use a per-directory data structure to index
 *	names, but the cache otherwise functions the same.
 *
 *	The index is a red-black tree.  There are no special concurrency
 *	requirements placed on it, because it's per-directory and protected
 *	by the namecache's per-directory locks.  It should therefore not be
 *	difficult to experiment with other types of index.
 *
 *	Each cached name is stored in a struct namecache, along with a
 *	pointer to the associated vnode (nc_vp).  Names longer than
 *	NCHNAMLEN are allocated with kmem_alloc(); they occur infrequently,
 *	and names of NCHNAMLEN characters or fewer are stored directly in
 *	struct namecache.  For a "negative" entry (i.e. a name that is
 *	known NOT to exist) the vnode pointer will be NULL.
 *
 *	For a directory with 3 cached names for 3 distinct vnodes, the
 *	various vnodes and namecache structs would be connected like this
 *	(the root is at the bottom of the diagram):
 *          ...
 *           ^
 *           |- vi_nc_tree
 *           |
 *      +----o----+               +---------+               +---------+
 *      |  VDIR   |               |  VCHR   |               |  VREG   |
 *      |  vnode  o-----+         |  vnode  o-----+         |  vnode  o------+
 *      +---------+     |         +---------+     |         +---------+      |
 *           ^          |              ^          |              ^           |
 *           |- nc_vp   |- vi_nc_list  |- nc_vp   |- vi_nc_list  |- nc_vp    |
 *           |          |              |          |              |           |
 *      +----o----+     |         +----o----+     |         +----o----+      |
 *  +---onamecache|<----+     +---onamecache|<----+     +---onamecache|<-----+
 *  |   +---------+           |   +---------+           |   +---------+
 *  |        ^                |        ^                |        ^
 *  |        |                |        |                |        |
 *  |        |  +----------------------+                |        |
 *  |-nc_dvp | +-------------------------------------------------+
 *  |        |/- vi_nc_tree   |                         |
 *  |        |                |- nc_dvp                 |- nc_dvp
 *  |   +----o----+           |                         |
 *  +-->|  VDIR   |<----------+                         |
 *      |  vnode  |<------------------------------------+
 *      +---------+
 *
 *      START HERE
 *
 * Replacement:
 *
 *	As the cache becomes full, old and unused entries are purged as new
 *	entries are added.  The synchronization overhead in maintaining a
 *	strict ordering would be prohibitive, so the VM system's "clock" or
 *	"second chance" page replacement algorithm is aped here.  New
 *	entries go to the tail of the active list.  After they age out and
 *	reach the head of the list, they are moved to the tail of the
 *	inactive list.  Any use of the deactivated cache entry reactivates
 *	it, saving it from impending doom; if not reactivated, the entry
 *	eventually reaches the head of the inactive list and is purged.
 *
 * Concurrency:
 *
 *	From a performance perspective, cache_lookup(nameiop == LOOKUP) is
 *	what really matters; insertion of new entries with cache_enter() is
 *	comparatively infrequent, and overshadowed by the cost of expensive
 *	file system metadata operations (which may involve disk I/O).  We
 *	therefore want to keep everything in the lookup path as simple as
 *	possible.
 *
 *	struct namecache is mostly stable except for list and tree related
 *	entries, changes to which don't affect the cached name or vnode.
 *	For changes to name+vnode, entries are purged in preference to
 *	modifying them.
 *
 *	Read access to namecache entries is made via tree, list, or LRU
 *	list.  A lock corresponding to the direction of access should be
 *	held.  See the definition of "struct namecache" in src/sys/namei.src,
 *	and the definition of "struct vnode" for the particulars.
 *
 *	Per-CPU statistics and LRU list totals are read unlocked, since
 *	an approximate value is OK.  We maintain 32-bit sized per-CPU
 *	counters and 64-bit global counters under the theory that 32-bit
 *	sized counters are less likely to be hosed by nonatomic increment
 *	(on 32-bit platforms).
 *
 *	The lock order is:
 *
 *	1) vi->vi_nc_lock	(tree or parent -> child direction,
 *				 used during forward lookup)
 *
 *	2) vi->vi_nc_listlock	(list or child -> parent direction,
 *				 used during reverse lookup)
 *
 *	3) cache_lru_lock	(LRU list direction, used during reclaim)
 *
 *	4) vp->v_interlock	(what the cache entry points to)
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.149 2020/12/12 18:41:13 christos Exp $");

#define __NAMECACHE_PRIVATE
#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_dtrace.h"
#endif

#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/param.h>
#include <sys/pool.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vnode_impl.h>

#include <miscfs/genfs/genfs.h>

static void	cache_activate(struct namecache *);
static void	cache_update_stats(void *);
static int	cache_compare_nodes(void *, const void *, const void *);
static void	cache_deactivate(void);
static void	cache_reclaim(void);
static int	cache_stat_sysctl(SYSCTLFN_ARGS);

/*
 * Global pool cache.
 */
static pool_cache_t cache_pool __read_mostly;

/*
 * LRU replacement.
 */
enum cache_lru_id {
	LRU_ACTIVE,
	LRU_INACTIVE,
	LRU_COUNT
};

static struct {
	TAILQ_HEAD(, namecache)	list[LRU_COUNT];
	u_int			count[LRU_COUNT];
} cache_lru __cacheline_aligned;

static kmutex_t cache_lru_lock __cacheline_aligned;

/*
 * Cache effectiveness statistics.  nchstats holds the system-wide total.
 */
struct nchstats	nchstats;
struct nchstats_percpu _NAMEI_CACHE_STATS(uint32_t);
struct nchcpu {
	struct nchstats_percpu cur;
	struct nchstats_percpu last;
};
static callout_t cache_stat_callout;
static kmutex_t cache_stat_lock __cacheline_aligned;

#define	COUNT(f) do { \
	lwp_t *l = curlwp; \
	KPREEMPT_DISABLE(l); \
	struct nchcpu *nchcpu = curcpu()->ci_data.cpu_nch; \
	nchcpu->cur.f++; \
	KPREEMPT_ENABLE(l); \
} while (/* CONSTCOND */ 0)

#define	UPDATE(nchcpu, f) do { \
	uint32_t cur = atomic_load_relaxed(&nchcpu->cur.f); \
	nchstats.f += (uint32_t)(cur - nchcpu->last.f); \
	nchcpu->last.f = cur; \
} while (/* CONSTCOND */ 0)
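
/*
 * A standalone sketch (not part of the build) of the rollover-safe
 * subtraction in UPDATE() above: because the delta is computed in
 * unsigned 32-bit arithmetic, the 64-bit sum stays correct across
 * counter wrap, as long as a counter never advances by 2^32 between
 * updates.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

static uint64_t total;		/* 64-bit global, as in nchstats */
static uint32_t last;		/* last 32-bit snapshot, as in nchcpu */

static void
fold(uint32_t cur)
{

	total += (uint32_t)(cur - last);	/* correct even across wrap */
	last = cur;
}

int
main(void)
{

	fold(UINT32_MAX);	/* counter just short of wrapping */
	fold(9);		/* wrapped; the delta is 10, not ~2^32 */
	printf("%llu\n", (unsigned long long)total);	/* 4294967305 */
	return 0;
}
#endif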

/*
 * Tunables.  cache_maxlen replaces the historical doingcache:
 * set it to zero to disable caching for debugging purposes.
 */
int cache_lru_maxdeact __read_mostly = 2;	/* max # to deactivate */
int cache_lru_maxscan __read_mostly = 64;	/* max # to scan/reclaim */
int cache_maxlen __read_mostly = USHRT_MAX;	/* max name length to cache */
int cache_stat_interval __read_mostly = 300;	/* in seconds */

/*
 * sysctl stuff.
 */
static struct	sysctllog *cache_sysctllog;

/*
 * This is a dummy name that cannot normally occur anywhere in the cache
 * or the file system.  It's used when caching the root vnode of mounted
 * file systems.  The name is attached to the directory that the file
 * system is mounted on.
 */
static const char cache_mp_name[] = "";
static const int cache_mp_nlen = sizeof(cache_mp_name) - 1;

/*
 * Red-black tree stuff.
 */
static const rb_tree_ops_t cache_rbtree_ops = {
	.rbto_compare_nodes = cache_compare_nodes,
	.rbto_compare_key = cache_compare_nodes,
	.rbto_node_offset = offsetof(struct namecache, nc_tree),
	.rbto_context = NULL
};
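
/*
 * Since rbto_compare_key is the same function as rbto_compare_nodes,
 * any "key" handed to rb_tree_find_node() must itself be a struct
 * namecache with nc_key, nc_nlen and nc_name filled in; cache_remove()
 * relies on this when it asserts that a node can be found by passing
 * the node itself as the key.
 */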

/*
 * dtrace probes.
 */
SDT_PROVIDER_DEFINE(vfs);

SDT_PROBE_DEFINE1(vfs, namecache, invalidate, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, parents, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, children, "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, purge, name, "char *", "size_t");
SDT_PROBE_DEFINE1(vfs, namecache, purge, vfs, "struct mount *");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *",
    "char *", "size_t");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, miss, "struct vnode *",
    "char *", "size_t");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, toolong, "struct vnode *",
    "char *", "size_t");
SDT_PROBE_DEFINE2(vfs, namecache, revlookup, success, "struct vnode *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, revlookup, fail, "struct vnode *",
    "int");
SDT_PROBE_DEFINE2(vfs, namecache, prune, done, "int", "int");
SDT_PROBE_DEFINE3(vfs, namecache, enter, toolong, "struct vnode *",
    "char *", "size_t");
SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *",
    "char *", "size_t");

/*
 * rbtree: compare two nodes.
 */
static int
cache_compare_nodes(void *context, const void *n1, const void *n2)
{
	const struct namecache *nc1 = n1;
	const struct namecache *nc2 = n2;

	if (nc1->nc_key < nc2->nc_key) {
		return -1;
	}
	if (nc1->nc_key > nc2->nc_key) {
		return 1;
	}
	KASSERT(nc1->nc_nlen == nc2->nc_nlen);
	return memcmp(nc1->nc_name, nc2->nc_name, nc1->nc_nlen);
}

/*
 * Compute a key value for the given name.  The name length is encoded in
 * the key value to try to improve uniqueness, and so that the length
 * doesn't need to be compared separately for string comparisons.
 */
static inline uint64_t
cache_key(const char *name, size_t nlen)
{
	uint64_t key;

	KASSERT(nlen <= USHRT_MAX);

	key = hash32_buf(name, nlen, HASH32_STR_INIT);
	return (key << 32) | nlen;
}
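
/*
 * A standalone sketch (not part of the build) of the key construction
 * above: because the low 32 bits hold the length, names of different
 * lengths can never share a key, and cache_compare_nodes() only has to
 * fall back to memcmp() on a full 64-bit collision.  The hash below is
 * FNV-1a, a stand-in for hash32_buf().
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static uint32_t
toy_hash(const char *buf, size_t len)
{
	uint32_t h = 2166136261u;

	while (len-- > 0)
		h = (h ^ (uint8_t)*buf++) * 16777619u;
	return h;
}

static uint64_t
toy_key(const char *name, size_t nlen)
{

	/* toy_key("a", 1) != toy_key("ab", 2), whatever the hash does. */
	return ((uint64_t)toy_hash(name, nlen) << 32) | nlen;
}
#endif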

/*
 * Remove an entry from the cache.  vi_nc_lock must be held, and if dir2node
 * is true, then we're locking in the conventional direction and the list
 * lock will be acquired when removing the entry from the vnode list.
 */
static void
cache_remove(struct namecache *ncp, const bool dir2node)
{
	struct vnode *vp, *dvp = ncp->nc_dvp;
	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);

	KASSERT(rw_write_held(&dvi->vi_nc_lock));
	KASSERT(cache_key(ncp->nc_name, ncp->nc_nlen) == ncp->nc_key);
	KASSERT(rb_tree_find_node(&dvi->vi_nc_tree, ncp) == ncp);

	SDT_PROBE(vfs, namecache, invalidate, done, ncp,
	    0, 0, 0, 0);

	/*
	 * Remove from the vnode's list.  This excludes cache_revlookup(),
	 * and then it's safe to remove from the LRU lists.
	 */
	if ((vp = ncp->nc_vp) != NULL) {
		vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
		if (__predict_true(dir2node)) {
			rw_enter(&vi->vi_nc_listlock, RW_WRITER);
			TAILQ_REMOVE(&vi->vi_nc_list, ncp, nc_list);
			rw_exit(&vi->vi_nc_listlock);
		} else {
			TAILQ_REMOVE(&vi->vi_nc_list, ncp, nc_list);
		}
	}

	/* Remove from the directory's rbtree. */
	rb_tree_remove_node(&dvi->vi_nc_tree, ncp);

	/* Remove from the LRU lists. */
	mutex_enter(&cache_lru_lock);
	TAILQ_REMOVE(&cache_lru.list[ncp->nc_lrulist], ncp, nc_lru);
	cache_lru.count[ncp->nc_lrulist]--;
	mutex_exit(&cache_lru_lock);

	/* Finally, free it. */
	if (ncp->nc_nlen > NCHNAMLEN) {
		size_t sz = offsetof(struct namecache, nc_name[ncp->nc_nlen]);
		kmem_free(ncp, sz);
	} else {
		pool_cache_put(cache_pool, ncp);
	}
}

/*
 * Find a single cache entry and return it.  vi_nc_lock must be held.
 */
static struct namecache * __noinline
cache_lookup_entry(struct vnode *dvp, const char *name, size_t namelen,
    uint64_t key)
{
	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
	struct rb_node *node = dvi->vi_nc_tree.rbt_root;
	struct namecache *ncp;
	int lrulist, diff;

	KASSERT(rw_lock_held(&dvi->vi_nc_lock));

	/*
	 * Search the RB tree for the key.  This is an inlined lookup
	 * tailored for exactly what's needed here (64-bit key and so on)
	 * that is quite a bit faster than using rb_tree_find_node().
	 *
	 * For a matching key memcmp() needs to be called once to confirm
	 * that the correct name has been found.  Very rarely there will be
	 * a key value collision and the search will continue.
	 */
	for (;;) {
		if (__predict_false(RB_SENTINEL_P(node))) {
			return NULL;
		}
		ncp = (struct namecache *)node;
		KASSERT((void *)&ncp->nc_tree == (void *)ncp);
		KASSERT(ncp->nc_dvp == dvp);
		if (ncp->nc_key == key) {
			KASSERT(ncp->nc_nlen == namelen);
			diff = memcmp(ncp->nc_name, name, namelen);
			if (__predict_true(diff == 0)) {
				break;
			}
			node = node->rb_nodes[diff < 0];
		} else {
			node = node->rb_nodes[ncp->nc_key < key];
		}
	}

	/*
	 * If the entry is on the wrong LRU list, requeue it.  This is an
	 * unlocked check, but it will rarely be wrong and even then there
	 * will be no harm caused.
	 */
	lrulist = atomic_load_relaxed(&ncp->nc_lrulist);
	if (__predict_false(lrulist != LRU_ACTIVE)) {
		cache_activate(ncp);
	}
	return ncp;
}

/*
 * Look for the name in the cache.  We don't do this if the segment name
 * is long, simply so the cache can avoid holding long names (which would
 * either waste space, or add greatly to the complexity).
 *
 * Lookup is called with DVP pointing to the directory to search,
 * and CNP providing the name of the entry being sought: cn_nameptr
 * is the name, cn_namelen is its length, and cn_flags is the flags
 * word from the namei operation.
 *
 * DVP must be locked.
 *
 * There are three possible non-error return states:
 *    1. Nothing was found in the cache. Nothing is known about
 *       the requested name.
 *    2. A negative entry was found in the cache, meaning that the
 *       requested name definitely does not exist.
 *    3. A positive entry was found in the cache, meaning that the
 *       requested name does exist and that we are providing the
 *       vnode.
 * In these cases the results are:
 *    1. 0 returned; VN is set to NULL.
 *    2. 1 returned; VN is set to NULL.
 *    3. 1 returned; VN is set to the vnode found.
 *
 * The additional result argument ISWHT is set to zero, unless a
 * negative entry is found that was entered as a whiteout, in which
 * case ISWHT is set to one.
 *
 * The ISWHT_RET argument pointer may be null. In this case an
 * assertion is made that the whiteout flag is not set. File systems
 * that do not support whiteouts can/should do this.
 *
 * Filesystems that do support whiteouts should add ISWHITEOUT to
 * cnp->cn_flags if ISWHT comes back nonzero.
 *
 * When a vnode is returned, it is referenced (but not locked), as per
 * the vnode lookup locking protocol.
 *
 * There is no way for this function to fail, in the sense of
 * generating an error that requires aborting the namei operation.
 *
 * (Prior to October 2012, this function returned an integer status,
 * and a vnode, and mucked with the flags word in CNP for whiteouts.
 * The integer status was -1 for "nothing found", ENOENT for "a
 * negative entry found", 0 for "a positive entry found", and possibly
 * other errors, and the value of VN might or might not have been set
 * depending on what error occurred.)
 */
bool
cache_lookup(struct vnode *dvp, const char *name, size_t namelen,
	     uint32_t nameiop, uint32_t cnflags,
	     int *iswht_ret, struct vnode **vn_ret)
{
	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
	struct namecache *ncp;
	struct vnode *vp;
	uint64_t key;
	int error;
	bool hit;
	krw_t op;

	KASSERT(namelen != cache_mp_nlen || name == cache_mp_name);

	/* Establish default result values. */
	if (iswht_ret != NULL) {
		*iswht_ret = 0;
	}
	*vn_ret = NULL;

	if (__predict_false(namelen > cache_maxlen)) {
		SDT_PROBE(vfs, namecache, lookup, toolong, dvp,
		    name, namelen, 0, 0);
		COUNT(ncs_long);
		return false;
	}

	/* Compute the key up front - don't need the lock. */
	key = cache_key(name, namelen);

	/* Could the entry be purged below? */
	if ((cnflags & ISLASTCN) != 0 &&
	    ((cnflags & MAKEENTRY) == 0 || nameiop == CREATE)) {
		op = RW_WRITER;
	} else {
		op = RW_READER;
	}

	/* Now look for the name. */
	rw_enter(&dvi->vi_nc_lock, op);
	ncp = cache_lookup_entry(dvp, name, namelen, key);
	if (__predict_false(ncp == NULL)) {
		rw_exit(&dvi->vi_nc_lock);
		COUNT(ncs_miss);
		SDT_PROBE(vfs, namecache, lookup, miss, dvp,
		    name, namelen, 0, 0);
		return false;
	}
	if (__predict_false((cnflags & MAKEENTRY) == 0)) {
		/*
		 * It's the last component and we are renaming or
		 * deleting, the cache entry is invalid, or we otherwise
		 * don't want the cache entry to exist.
		 */
		KASSERT((cnflags & ISLASTCN) != 0);
		cache_remove(ncp, true);
		rw_exit(&dvi->vi_nc_lock);
		COUNT(ncs_badhits);
		return false;
	}
	if (ncp->nc_vp == NULL) {
		if (iswht_ret != NULL) {
			/*
			 * Restore the ISWHITEOUT flag saved earlier.
			 */
			*iswht_ret = ncp->nc_whiteout;
		} else {
			KASSERT(!ncp->nc_whiteout);
		}
		if (nameiop == CREATE && (cnflags & ISLASTCN) != 0) {
			/*
			 * Last component and we are preparing to create
			 * the named object, so flush the negative cache
			 * entry.
			 */
			COUNT(ncs_badhits);
			cache_remove(ncp, true);
			hit = false;
		} else {
			COUNT(ncs_neghits);
			SDT_PROBE(vfs, namecache, lookup, hit, dvp, name,
			    namelen, 0, 0);
			/* found neg entry; vn is already null from above */
			hit = true;
		}
		rw_exit(&dvi->vi_nc_lock);
		return hit;
	}
	vp = ncp->nc_vp;
	error = vcache_tryvget(vp);
	rw_exit(&dvi->vi_nc_lock);
	if (error) {
		KASSERT(error == EBUSY);
		/*
		 * This vnode is being cleaned out.
		 * XXX badhits?
		 */
		COUNT(ncs_falsehits);
		return false;
	}

	COUNT(ncs_goodhits);
	SDT_PROBE(vfs, namecache, lookup, hit, dvp, name, namelen, 0, 0);
	/* found it */
	*vn_ret = vp;
	return true;
}
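
/*
 * A sketch (not part of the build) of how a file system's lookup
 * routine typically consumes the three result states described above.
 * "myfs_scan_dir" is a hypothetical helper standing in for the real
 * directory scan.
 */
#if 0
	int iswht;
	struct vnode *vp;

	if (cache_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    cnp->cn_nameiop, cnp->cn_flags, &iswht, &vp)) {
		if (vp == NULL) {
			/* State 2: cached negative entry. */
			if (iswht)
				cnp->cn_flags |= ISWHITEOUT;
			return ENOENT;
		}
		/* State 3: positive hit; vp comes back referenced. */
		*vpp = vp;
		return 0;
	}
	/* State 1: nothing known; fall back to scanning the directory. */
	return myfs_scan_dir(dvp, cnp, vpp);
#endif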

/*
 * Version of the above without the nameiop argument, for NFS.
 */
bool
cache_lookup_raw(struct vnode *dvp, const char *name, size_t namelen,
		 uint32_t cnflags,
		 int *iswht_ret, struct vnode **vn_ret)
{

	return cache_lookup(dvp, name, namelen, LOOKUP, cnflags | MAKEENTRY,
	    iswht_ret, vn_ret);
}

/*
 * Used by namei() to walk down a path, component by component, by looking
 * up names in the cache.  The node locks are chained along the way: a
 * parent's lock is not dropped until the child's is acquired.
 */
bool
cache_lookup_linked(struct vnode *dvp, const char *name, size_t namelen,
		    struct vnode **vn_ret, krwlock_t **plock,
		    kauth_cred_t cred)
{
	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
	struct namecache *ncp;
	krwlock_t *oldlock, *newlock;
	uint64_t key;
	int error;

	KASSERT(namelen != cache_mp_nlen || name == cache_mp_name);

	/* If disabled, or file system doesn't support this, bail out. */
	if (__predict_false((dvp->v_mount->mnt_iflag & IMNT_NCLOOKUP) == 0)) {
		return false;
	}

	if (__predict_false(namelen > cache_maxlen)) {
		COUNT(ncs_long);
		return false;
	}

	/* Compute the key up front - don't need the lock. */
	key = cache_key(name, namelen);

	/*
	 * Acquire the directory lock.  Once we have that, we can drop the
	 * previous one (if any).
	 *
	 * The two lock holds mean that the directory can't go away while
	 * here: the directory must be purged with cache_purge() before
	 * being freed, and both parent & child's vi_nc_lock must be taken
	 * before that point is passed.
	 *
	 * However if there's no previous lock, like at the root of the
	 * chain, then "dvp" must be referenced to prevent dvp going away
	 * before we get its lock.
	 *
	 * Note that the two locks can be the same if looking up a dot, for
	 * example: /usr/bin/.  If looking up the parent (..) we can't wait
	 * on the lock, as child -> parent is the wrong direction.
	 */
	if (*plock == NULL) {
		KASSERT(vrefcnt(dvp) > 0);
	}
	if (*plock != &dvi->vi_nc_lock) {
		oldlock = *plock;
		newlock = &dvi->vi_nc_lock;
		if (!rw_tryenter(&dvi->vi_nc_lock, RW_READER)) {
			return false;
		}
	} else {
		oldlock = NULL;
		newlock = NULL;
	}

	/*
	 * First up, check if the user is allowed to look up files in this
	 * directory.
	 */
	if (cred != FSCRED) {
		if (dvi->vi_nc_mode == VNOVAL) {
			if (newlock != NULL) {
				rw_exit(newlock);
			}
			return false;
		}
		KASSERT(dvi->vi_nc_uid != VNOVAL && dvi->vi_nc_gid != VNOVAL);
		error = kauth_authorize_vnode(cred, KAUTH_ACCESS_ACTION(VEXEC,
		    dvp->v_type, dvi->vi_nc_mode & ALLPERMS), dvp, NULL,
		    genfs_can_access(dvp, cred, dvi->vi_nc_uid, dvi->vi_nc_gid,
		    dvi->vi_nc_mode & ALLPERMS, NULL, VEXEC));
		if (error != 0) {
			if (newlock != NULL) {
				rw_exit(newlock);
			}
			COUNT(ncs_denied);
			return false;
		}
	}

	/*
	 * Now look for a matching cache entry.
	 */
	ncp = cache_lookup_entry(dvp, name, namelen, key);
	if (__predict_false(ncp == NULL)) {
		if (newlock != NULL) {
			rw_exit(newlock);
		}
		COUNT(ncs_miss);
		SDT_PROBE(vfs, namecache, lookup, miss, dvp,
		    name, namelen, 0, 0);
		return false;
	}
	if (ncp->nc_vp == NULL) {
		/* found negative entry; vn is already null from above */
		KASSERT(namelen != cache_mp_nlen && name != cache_mp_name);
		COUNT(ncs_neghits);
	} else {
		COUNT(ncs_goodhits); /* XXX can be "badhits" */
	}
	SDT_PROBE(vfs, namecache, lookup, hit, dvp, name, namelen, 0, 0);

	/*
	 * Return with the directory lock still held.  It will either be
	 * returned to us with another call to cache_lookup_linked() when
	 * looking up the next component, or the caller will release it
	 * manually when finished.
	 */
	if (oldlock) {
		rw_exit(oldlock);
	}
	if (newlock) {
		*plock = newlock;
	}
	*vn_ret = ncp->nc_vp;
	return true;
}
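
/*
 * A sketch (not part of the build) of the lock hand-over: walking
 * "a/b/c" from a referenced starting directory ("startdvp" and "cred"
 * are assumed to come from the caller) without taking vnode locks or
 * references on the intermediate components.  On failure, control goes
 * to a slow path, which must release *plock if it is held.
 */
#if 0
	krwlock_t *plock = NULL;
	struct vnode *vp;
	int error;

	if (!cache_lookup_linked(startdvp, "a", 1, &vp, &plock, cred))
		goto slowpath;
	if (!cache_lookup_linked(vp, "b", 1, &vp, &plock, cred))
		goto slowpath;
	if (!cache_lookup_linked(vp, "c", 1, &vp, &plock, cred))
		goto slowpath;
	/* vp is only stable while *plock is held: reference, then drop. */
	error = vcache_tryvget(vp);
	rw_exit(plock);
#endif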

/*
 * Scan cache looking for name of directory entry pointing at vp.
 * Will not search for "." or "..".
 *
 * If the lookup succeeds the vnode is referenced and stored in dvpp.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 */
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp,
    bool checkaccess, accmode_t accmode)
{
	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
	struct namecache *ncp;
	struct vnode *dvp;
	int error, nlen, lrulist;
	char *bp;

	KASSERT(vp != NULL);

	if (cache_maxlen == 0)
		goto out;

	rw_enter(&vi->vi_nc_listlock, RW_READER);
	if (checkaccess) {
		/*
		 * Check if the user is allowed to see.  NOTE: this is
		 * checking for access on the "wrong" directory.  getcwd()
		 * wants to see that there is access on every component
		 * along the way, not that there is access to any individual
		 * component.  Don't use this to check you can look in vp.
		 *
		 * I don't like it, I didn't come up with it, don't blame me!
		 */
		if (vi->vi_nc_mode == VNOVAL) {
			rw_exit(&vi->vi_nc_listlock);
			return -1;
		}
		KASSERT(vi->vi_nc_uid != VNOVAL && vi->vi_nc_gid != VNOVAL);
		error = kauth_authorize_vnode(curlwp->l_cred,
		    KAUTH_ACCESS_ACTION(VEXEC, vp->v_type, vi->vi_nc_mode &
		    ALLPERMS), vp, NULL, genfs_can_access(vp, curlwp->l_cred,
		    vi->vi_nc_uid, vi->vi_nc_gid, vi->vi_nc_mode & ALLPERMS,
		    NULL, accmode));
		if (error != 0) {
			rw_exit(&vi->vi_nc_listlock);
			COUNT(ncs_denied);
			return EACCES;
		}
	}
	TAILQ_FOREACH(ncp, &vi->vi_nc_list, nc_list) {
		KASSERT(ncp->nc_vp == vp);
		KASSERT(ncp->nc_dvp != NULL);
		nlen = ncp->nc_nlen;

		/*
		 * Ignore mountpoint entries.
		 */
		if (ncp->nc_nlen == cache_mp_nlen) {
			continue;
		}

		/*
		 * The queue is partially sorted.  Once we hit dots, nothing
		 * else remains but dots and dotdots, so bail out.
		 */
		if (ncp->nc_name[0] == '.') {
			if (nlen == 1 ||
			    (nlen == 2 && ncp->nc_name[1] == '.')) {
				break;
			}
		}

		/*
		 * Record a hit on the entry.  This is an unlocked read but
		 * even if wrong it doesn't matter too much.
		 */
		lrulist = atomic_load_relaxed(&ncp->nc_lrulist);
		if (lrulist != LRU_ACTIVE) {
			cache_activate(ncp);
		}

		if (bufp) {
			bp = *bpp;
			bp -= nlen;
			if (bp <= bufp) {
				*dvpp = NULL;
				rw_exit(&vi->vi_nc_listlock);
				SDT_PROBE(vfs, namecache, revlookup,
				    fail, vp, ERANGE, 0, 0, 0);
				return (ERANGE);
			}
			memcpy(bp, ncp->nc_name, nlen);
			*bpp = bp;
		}

		dvp = ncp->nc_dvp;
		error = vcache_tryvget(dvp);
		rw_exit(&vi->vi_nc_listlock);
		if (error) {
			KASSERT(error == EBUSY);
			if (bufp)
				(*bpp) += nlen;
			*dvpp = NULL;
			SDT_PROBE(vfs, namecache, revlookup, fail, vp,
			    error, 0, 0, 0);
			return -1;
		}
		*dvpp = dvp;
		SDT_PROBE(vfs, namecache, revlookup, success, vp, dvp,
		    0, 0, 0);
		COUNT(ncs_revhits);
		return (0);
	}
	rw_exit(&vi->vi_nc_listlock);
	COUNT(ncs_revmiss);
 out:
	*dvpp = NULL;
	return (-1);
}
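
/*
 * A sketch (not part of the build) of the backwards-filling buffer
 * convention, in the style of getcwd().  Error handling is elided;
 * "vp" is assumed to be a referenced directory vnode supplied by the
 * caller.
 */
#if 0
	char buf[MAXPATHLEN];
	char *bp = buf + MAXPATHLEN;
	struct vnode *dvp;

	*(--bp) = '\0';
	while ((vp->v_vflag & VV_ROOT) == 0) {
		if (cache_revlookup(vp, &dvp, &bp, buf, false, 0) != 0)
			break;		/* miss: fall back to VOP_READDIR */
		*(--bp) = '/';		/* separator goes in front of name */
		vrele(vp);
		vp = dvp;		/* dvp came back referenced */
	}
	/* bp now points at the path built so far. */
#endif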

/*
 * Add an entry to the cache.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp,
	    const char *name, size_t namelen, uint32_t cnflags)
{
	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
	struct namecache *ncp, *oncp;
	int total;

	KASSERT(namelen != cache_mp_nlen || name == cache_mp_name);

	/* First, check whether we can/should add a cache entry. */
	if ((cnflags & MAKEENTRY) == 0 ||
	    __predict_false(namelen > cache_maxlen)) {
		SDT_PROBE(vfs, namecache, enter, toolong, vp, name, namelen,
		    0, 0);
		return;
	}

	SDT_PROBE(vfs, namecache, enter, done, vp, name, namelen, 0, 0);

	/*
	 * Reclaim some entries if over budget.  This is an unlocked check,
	 * but it doesn't matter: we just need to catch up eventually, and
	 * a temporary overshoot is harmless.
	 */
	total = atomic_load_relaxed(&cache_lru.count[LRU_ACTIVE]);
	total += atomic_load_relaxed(&cache_lru.count[LRU_INACTIVE]);
	if (__predict_false(total > desiredvnodes)) {
		cache_reclaim();
	}

	/* Now allocate a fresh entry. */
	if (__predict_true(namelen <= NCHNAMLEN)) {
		ncp = pool_cache_get(cache_pool, PR_WAITOK);
	} else {
		size_t sz = offsetof(struct namecache, nc_name[namelen]);
		ncp = kmem_alloc(sz, KM_SLEEP);
	}

	/*
	 * Fill in cache info.  For negative hits, save the ISWHITEOUT flag
	 * so we can restore it later when the cache entry is used again.
	 */
	ncp->nc_vp = vp;
	ncp->nc_dvp = dvp;
	ncp->nc_key = cache_key(name, namelen);
	ncp->nc_nlen = namelen;
	ncp->nc_whiteout = ((cnflags & ISWHITEOUT) != 0);
	memcpy(ncp->nc_name, name, namelen);

	/*
	 * Insert to the directory.  Concurrent lookups may race for a cache
	 * entry.  If there's an entry there already, purge it.
	 */
	rw_enter(&dvi->vi_nc_lock, RW_WRITER);
	oncp = rb_tree_insert_node(&dvi->vi_nc_tree, ncp);
	if (oncp != ncp) {
		KASSERT(oncp->nc_key == ncp->nc_key);
		KASSERT(oncp->nc_nlen == ncp->nc_nlen);
		KASSERT(memcmp(oncp->nc_name, name, namelen) == 0);
		cache_remove(oncp, true);
		oncp = rb_tree_insert_node(&dvi->vi_nc_tree, ncp);
		KASSERT(oncp == ncp);
	}

	/*
	 * With the directory lock still held, insert to the tail of the
	 * ACTIVE LRU list (new) and take the opportunity to incrementally
	 * balance the lists.
	 */
	mutex_enter(&cache_lru_lock);
	ncp->nc_lrulist = LRU_ACTIVE;
	cache_lru.count[LRU_ACTIVE]++;
	TAILQ_INSERT_TAIL(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
	cache_deactivate();
	mutex_exit(&cache_lru_lock);

	/*
	 * Finally, insert to the vnode and unlock.  With everything set up
	 * it's safe to let cache_revlookup() see the entry.  Partially sort
	 * the per-vnode list: dots go to back so cache_revlookup() doesn't
	 * have to consider them.
	 */
	if (vp != NULL) {
		vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
		rw_enter(&vi->vi_nc_listlock, RW_WRITER);
		if ((namelen == 1 && name[0] == '.') ||
		    (namelen == 2 && name[0] == '.' && name[1] == '.')) {
			TAILQ_INSERT_TAIL(&vi->vi_nc_list, ncp, nc_list);
		} else {
			TAILQ_INSERT_HEAD(&vi->vi_nc_list, ncp, nc_list);
		}
		rw_exit(&vi->vi_nc_listlock);
	}
	rw_exit(&dvi->vi_nc_lock);
}
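
/*
 * A sketch (not part of the build): a file system would typically call
 * cache_enter() at the end of its lookup routine, both for names it
 * found and, with vp == NULL, for names it confirmed do not exist:
 *
 *	cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
 *	    cnp->cn_flags);
 */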

/*
 * Set identity info in cache for a vnode.  We only care about directories,
 * so ignore other updates.  The cached info may be marked invalid if the
 * inode has an ACL.
 */
void
cache_enter_id(struct vnode *vp, mode_t mode, uid_t uid, gid_t gid, bool valid)
{
	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);

	if (vp->v_type == VDIR) {
		/* Grab both locks, for forward & reverse lookup. */
		rw_enter(&vi->vi_nc_lock, RW_WRITER);
		rw_enter(&vi->vi_nc_listlock, RW_WRITER);
		if (valid) {
			vi->vi_nc_mode = mode;
			vi->vi_nc_uid = uid;
			vi->vi_nc_gid = gid;
		} else {
			vi->vi_nc_mode = VNOVAL;
			vi->vi_nc_uid = VNOVAL;
			vi->vi_nc_gid = VNOVAL;
		}
		rw_exit(&vi->vi_nc_listlock);
		rw_exit(&vi->vi_nc_lock);
	}
}

/*
 * Return true if we have identity for the given vnode, and use this as an
 * opportunity to confirm that everything squares up.
 *
 * Because of shared code, some file systems could provide partial
 * information, missing some updates, so check the mount flag too.
 */
bool
cache_have_id(struct vnode *vp)
{

	if (vp->v_type == VDIR &&
	    (vp->v_mount->mnt_iflag & IMNT_NCLOOKUP) != 0 &&
	    atomic_load_relaxed(&VNODE_TO_VIMPL(vp)->vi_nc_mode) != VNOVAL) {
		return true;
	} else {
		return false;
	}
}

/*
 * Enter a mount point.  cvp is the covered vnode, and rvp is the root of
 * the mounted file system.
 */
void
cache_enter_mount(struct vnode *cvp, struct vnode *rvp)
{

	KASSERT(vrefcnt(cvp) > 0);
	KASSERT(vrefcnt(rvp) > 0);
	KASSERT(cvp->v_type == VDIR);
	KASSERT((rvp->v_vflag & VV_ROOT) != 0);

	if (rvp->v_type == VDIR) {
		cache_enter(cvp, rvp, cache_mp_name, cache_mp_nlen, MAKEENTRY);
	}
}

/*
 * Look up a cached mount point.  Used in the strongly locked path.
 */
bool
cache_lookup_mount(struct vnode *dvp, struct vnode **vn_ret)
{
	bool ret;

	ret = cache_lookup(dvp, cache_mp_name, cache_mp_nlen, LOOKUP,
	    MAKEENTRY, NULL, vn_ret);
	KASSERT((*vn_ret != NULL) == ret);
	return ret;
}

/*
 * Try to cross a mount point.  For use with cache_lookup_linked().
 */
bool
cache_cross_mount(struct vnode **dvp, krwlock_t **plock)
{

	return cache_lookup_linked(*dvp, cache_mp_name, cache_mp_nlen,
	    dvp, plock, FSCRED);
}

/*
 * Name cache initialization, from vfs_init() when the system is booting.
 */
void
nchinit(void)
{

	cache_pool = pool_cache_init(sizeof(struct namecache),
	    coherency_unit, 0, 0, "namecache", NULL, IPL_NONE, NULL,
	    NULL, NULL);
	KASSERT(cache_pool != NULL);

	mutex_init(&cache_lru_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&cache_lru.list[LRU_ACTIVE]);
	TAILQ_INIT(&cache_lru.list[LRU_INACTIVE]);

	mutex_init(&cache_stat_lock, MUTEX_DEFAULT, IPL_NONE);
	callout_init(&cache_stat_callout, CALLOUT_MPSAFE);
	callout_setfunc(&cache_stat_callout, cache_update_stats, NULL);
	callout_schedule(&cache_stat_callout, cache_stat_interval * hz);

	KASSERT(cache_sysctllog == NULL);
	sysctl_createv(&cache_sysctllog, 0, NULL, NULL,
	    CTLFLAG_PERMANENT,
	    CTLTYPE_STRUCT, "namecache_stats",
	    SYSCTL_DESCR("namecache statistics"),
	    cache_stat_sysctl, 0, NULL, 0,
	    CTL_VFS, CTL_CREATE, CTL_EOL);
}

/*
 * Called once for each CPU in the system as attached.
 */
void
cache_cpu_init(struct cpu_info *ci)
{
	void *p;
	size_t sz;

	sz = roundup2(sizeof(struct nchcpu), coherency_unit) + coherency_unit;
	p = kmem_zalloc(sz, KM_SLEEP);
	ci->ci_data.cpu_nch = (void *)roundup2((uintptr_t)p, coherency_unit);
}
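
/*
 * A note on the allocation above: kmem_zalloc() makes no cache line
 * alignment guarantee, so the allocation is padded by one coherency
 * unit and the pointer is rounded up, wasting at most coherency_unit - 1
 * bytes but keeping each CPU's counters on their own cache lines.  In
 * general, for a power-of-two "align":
 *
 *	p = kmem_zalloc(size + align, KM_SLEEP);
 *	aligned = (void *)roundup2((uintptr_t)p, align);
 *
 * The raw pointer is not kept, as this per-CPU memory is never freed.
 */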

/*
 * A vnode is being allocated: set up cache structures.
 */
void
cache_vnode_init(struct vnode *vp)
{
	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);

	rw_init(&vi->vi_nc_lock);
	rw_init(&vi->vi_nc_listlock);
	rb_tree_init(&vi->vi_nc_tree, &cache_rbtree_ops);
	TAILQ_INIT(&vi->vi_nc_list);
	vi->vi_nc_mode = VNOVAL;
	vi->vi_nc_uid = VNOVAL;
	vi->vi_nc_gid = VNOVAL;
}

/*
 * A vnode is being freed: finish cache structures.
 */
void
cache_vnode_fini(struct vnode *vp)
{
	vnode_impl_t *vi = VNODE_TO_VIMPL(vp);

	KASSERT(RB_TREE_MIN(&vi->vi_nc_tree) == NULL);
	KASSERT(TAILQ_EMPTY(&vi->vi_nc_list));
	rw_destroy(&vi->vi_nc_lock);
	rw_destroy(&vi->vi_nc_listlock);
}

/*
 * Helper for cache_purge1(): purge cache entries for the given vnode from
 * all directories that the vnode is cached in.
 */
static void
cache_purge_parents(struct vnode *vp)
{
	vnode_impl_t *dvi, *vi = VNODE_TO_VIMPL(vp);
	struct vnode *dvp, *blocked;
	struct namecache *ncp;

	SDT_PROBE(vfs, namecache, purge, parents, vp, 0, 0, 0, 0);

	blocked = NULL;

	rw_enter(&vi->vi_nc_listlock, RW_WRITER);
	while ((ncp = TAILQ_FIRST(&vi->vi_nc_list)) != NULL) {
		/*
		 * Locking in the wrong direction.  Try for a hold on the
		 * directory node's lock, and if we get it then all good,
		 * nuke the entry and move on to the next.
		 */
		dvp = ncp->nc_dvp;
		dvi = VNODE_TO_VIMPL(dvp);
		if (rw_tryenter(&dvi->vi_nc_lock, RW_WRITER)) {
			cache_remove(ncp, false);
			rw_exit(&dvi->vi_nc_lock);
			blocked = NULL;
			continue;
		}

		/*
		 * We can't wait on the directory node's lock with our list
		 * lock held or the system could deadlock.
		 *
		 * Take a hold on the directory vnode to prevent it from
		 * being freed (taking the vnode & lock with it).  Then
		 * wait for the lock to become available with no other locks
		 * held, and retry.
		 *
		 * If this happens twice in a row, give the other side a
		 * breather; we can do nothing until it lets go.
		 */
		vhold(dvp);
		rw_exit(&vi->vi_nc_listlock);
		rw_enter(&dvi->vi_nc_lock, RW_WRITER);
		/* Do nothing. */
		rw_exit(&dvi->vi_nc_lock);
		holdrele(dvp);
		if (blocked == dvp) {
			kpause("ncpurge", false, 1, NULL);
		}
		rw_enter(&vi->vi_nc_listlock, RW_WRITER);
		blocked = dvp;
	}
	rw_exit(&vi->vi_nc_listlock);
}

/*
 * Helper for cache_purge1(): purge all cache entries hanging off the given
 * directory vnode.
 */
static void
cache_purge_children(struct vnode *dvp)
{
	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
	struct namecache *ncp;

	SDT_PROBE(vfs, namecache, purge, children, dvp, 0, 0, 0, 0);

	rw_enter(&dvi->vi_nc_lock, RW_WRITER);
	while ((ncp = RB_TREE_MIN(&dvi->vi_nc_tree)) != NULL) {
		cache_remove(ncp, true);
	}
	rw_exit(&dvi->vi_nc_lock);
}

/*
 * Helper for cache_purge1(): purge a cache entry from the given directory
 * vnode, finding it by name.
 */
static void
cache_purge_name(struct vnode *dvp, const char *name, size_t namelen)
{
	vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
	struct namecache *ncp;
	uint64_t key;

	SDT_PROBE(vfs, namecache, purge, name, name, namelen, 0, 0, 0);

	key = cache_key(name, namelen);
	rw_enter(&dvi->vi_nc_lock, RW_WRITER);
	ncp = cache_lookup_entry(dvp, name, namelen, key);
	if (ncp) {
		cache_remove(ncp, true);
	}
	rw_exit(&dvi->vi_nc_lock);
}

/*
 * Cache flush, for a particular vnode; called when a vnode is renamed to
 * hide entries that would now be invalid.
 */
void
cache_purge1(struct vnode *vp, const char *name, size_t namelen, int flags)
{

	if (flags & PURGE_PARENTS) {
		cache_purge_parents(vp);
	}
	if (flags & PURGE_CHILDREN) {
		cache_purge_children(vp);
	}
	if (name != NULL) {
		cache_purge_name(vp, name, namelen);
	}
}

/*
 * vnode filter for cache_purgevfs().
 */
static bool
cache_vdir_filter(void *cookie, vnode_t *vp)
{

	return vp->v_type == VDIR;
}

/*
 * Cache flush, for a whole file system; called when a file system is
 * unmounted to remove entries that would now be invalid.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct vnode_iterator *iter;
	vnode_t *dvp;

	vfs_vnode_iterator_init(mp, &iter);
	for (;;) {
		dvp = vfs_vnode_iterator_next(iter, cache_vdir_filter, NULL);
		if (dvp == NULL) {
			break;
		}
		cache_purge_children(dvp);
		vrele(dvp);
	}
	vfs_vnode_iterator_destroy(iter);
}

/*
 * Re-queue an entry onto the tail of the active LRU list, after it has
 * scored a hit.
 */
static void
cache_activate(struct namecache *ncp)
{

	mutex_enter(&cache_lru_lock);
	TAILQ_REMOVE(&cache_lru.list[ncp->nc_lrulist], ncp, nc_lru);
	TAILQ_INSERT_TAIL(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
	cache_lru.count[ncp->nc_lrulist]--;
	cache_lru.count[LRU_ACTIVE]++;
	ncp->nc_lrulist = LRU_ACTIVE;
	mutex_exit(&cache_lru_lock);
}

/*
 * Try to balance the LRU lists.  Pick some victim entries, and re-queue
 * them from the head of the active list to the tail of the inactive list.
 */
static void
cache_deactivate(void)
{
	struct namecache *ncp;
	int total, i;

	KASSERT(mutex_owned(&cache_lru_lock));

	/* If we're nowhere near budget yet, don't bother. */
	total = cache_lru.count[LRU_ACTIVE] + cache_lru.count[LRU_INACTIVE];
	if (total < (desiredvnodes >> 1)) {
		return;
	}

	/*
	 * Aim for a 1:1 ratio of active to inactive.  This is to allow each
	 * potential victim a reasonable amount of time to cycle through the
	 * inactive list in order to score a hit and be reactivated, while
	 * trying not to cause reactivations too frequently.
	 */
	if (cache_lru.count[LRU_ACTIVE] < cache_lru.count[LRU_INACTIVE]) {
		return;
	}

	/* Move only a few at a time; will catch up eventually. */
	for (i = 0; i < cache_lru_maxdeact; i++) {
		ncp = TAILQ_FIRST(&cache_lru.list[LRU_ACTIVE]);
		if (ncp == NULL) {
			break;
		}
		KASSERT(ncp->nc_lrulist == LRU_ACTIVE);
		ncp->nc_lrulist = LRU_INACTIVE;
		TAILQ_REMOVE(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
		TAILQ_INSERT_TAIL(&cache_lru.list[LRU_INACTIVE], ncp, nc_lru);
		cache_lru.count[LRU_ACTIVE]--;
		cache_lru.count[LRU_INACTIVE]++;
	}
}

/*
 * Free some entries from the cache, when we have gone over budget.
 *
 * We don't want to cause too much work for any individual caller, and it
 * doesn't matter if we temporarily go over budget.  This is also "just a
 * cache" so it's not a big deal if we screw up and throw out something we
 * shouldn't.  So we take a relaxed attitude to this process to reduce its
 * impact.
 */
static void
cache_reclaim(void)
{
	struct namecache *ncp;
	vnode_impl_t *dvi;
	int toscan;

	/*
	 * Scan up to a preset maximum number of entries, but no more than
	 * 0.8% of the total at once (to allow for very small systems).
	 *
	 * On bigger systems, do a larger chunk of work to reduce the number
	 * of times that cache_lru_lock is held for any length of time.
	 */
	mutex_enter(&cache_lru_lock);
	toscan = MIN(cache_lru_maxscan, desiredvnodes >> 7);
	toscan = MAX(toscan, 1);
	SDT_PROBE(vfs, namecache, prune, done, cache_lru.count[LRU_ACTIVE] +
	    cache_lru.count[LRU_INACTIVE], toscan, 0, 0, 0);
	while (toscan-- != 0) {
		/* First try to balance the lists. */
		cache_deactivate();

		/* Now look for a victim on head of inactive list (old). */
		ncp = TAILQ_FIRST(&cache_lru.list[LRU_INACTIVE]);
		if (ncp == NULL) {
			break;
		}
		dvi = VNODE_TO_VIMPL(ncp->nc_dvp);
		KASSERT(ncp->nc_lrulist == LRU_INACTIVE);
		KASSERT(dvi != NULL);

		/*
		 * Locking in the wrong direction.  If we can't get the
		 * lock, the directory is actively busy, and it could also
		 * cause problems for the next guy in here, so send the
		 * entry to the back of the list.
		 */
		if (!rw_tryenter(&dvi->vi_nc_lock, RW_WRITER)) {
			TAILQ_REMOVE(&cache_lru.list[LRU_INACTIVE],
			    ncp, nc_lru);
			TAILQ_INSERT_TAIL(&cache_lru.list[LRU_INACTIVE],
			    ncp, nc_lru);
			continue;
		}

		/*
		 * Now have the victim entry locked.  Drop the LRU list
		 * lock, purge the entry, and start over.  The hold on
		 * vi_nc_lock will prevent the vnode from vanishing until
		 * finished (cache_purge() will be called on dvp before it
		 * disappears, and that will wait on vi_nc_lock).
		 */
		mutex_exit(&cache_lru_lock);
		cache_remove(ncp, true);
		rw_exit(&dvi->vi_nc_lock);
		mutex_enter(&cache_lru_lock);
	}
	mutex_exit(&cache_lru_lock);
}

/*
 * For file system code: count a lookup that required a full re-scan of
 * directory metadata.
 */
void
namecache_count_pass2(void)
{

	COUNT(ncs_pass2);
}

/*
 * For file system code: count a lookup that scored a hit in the directory
 * metadata near the location of the last lookup.
 */
void
namecache_count_2passes(void)
{

	COUNT(ncs_2passes);
}

/*
 * Sum the stats from all CPUs into nchstats.  This needs to run at least
 * once within every window where a 32-bit counter could roll over.  It's
 * called regularly by timer to ensure this.
 */
static void
cache_update_stats(void *cookie)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	mutex_enter(&cache_stat_lock);
	for (CPU_INFO_FOREACH(cii, ci)) {
		struct nchcpu *nchcpu = ci->ci_data.cpu_nch;
		UPDATE(nchcpu, ncs_goodhits);
		UPDATE(nchcpu, ncs_neghits);
		UPDATE(nchcpu, ncs_badhits);
		UPDATE(nchcpu, ncs_falsehits);
		UPDATE(nchcpu, ncs_miss);
		UPDATE(nchcpu, ncs_long);
		UPDATE(nchcpu, ncs_pass2);
		UPDATE(nchcpu, ncs_2passes);
		UPDATE(nchcpu, ncs_revhits);
		UPDATE(nchcpu, ncs_revmiss);
		UPDATE(nchcpu, ncs_denied);
	}
	if (cookie != NULL) {
		memcpy(cookie, &nchstats, sizeof(nchstats));
	}
	/* Reset the timer; arrive back here in N minutes at latest. */
	callout_schedule(&cache_stat_callout, cache_stat_interval * hz);
	mutex_exit(&cache_stat_lock);
}
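
/*
 * To put the rollover window in perspective: with the default
 * cache_stat_interval of 300 seconds, a per-CPU counter would have to
 * advance by more than 2^32 between updates (a sustained rate of
 * roughly 14.3 million increments per second on one CPU) before a
 * wrap could be missed.
 */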

/*
 * Fetch the current values of the stats for sysctl.
 */
static int
cache_stat_sysctl(SYSCTLFN_ARGS)
{
	struct nchstats stats;

	if (oldp == NULL) {
		*oldlenp = sizeof(nchstats);
		return 0;
	}

	if (*oldlenp == 0) {
		return 0;
	}

	/* Refresh the global stats. */
	sysctl_unlock();
	cache_update_stats(&stats);
	sysctl_relock();

	*oldlenp = MIN(sizeof(stats), *oldlenp);
	return sysctl_copyout(l, &stats, oldp, *oldlenp);
}
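
/*
 * A standalone sketch (not part of the build) of reading these stats
 * from userland.  The node is created under CTL_VFS with a dynamically
 * assigned number, so it is looked up by name; "vfs.namecache_stats"
 * here is inferred from the sysctl_createv() call in nchinit() above.
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/namei.h>
#include <stdio.h>

int
main(void)
{
	struct nchstats stats;
	size_t len = sizeof(stats);

	if (sysctlbyname("vfs.namecache_stats", &stats, &len, NULL, 0) == -1)
		return 1;
	printf("goodhits %llu neghits %llu miss %llu\n",
	    (unsigned long long)stats.ncs_goodhits,
	    (unsigned long long)stats.ncs_neghits,
	    (unsigned long long)stats.ncs_miss);
	return 0;
}
#endif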

/*
 * For the debugger, given the address of a vnode, print all associated
 * names in the cache.
 */
#ifdef DDB
void
namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
{
	struct vnode *dvp = NULL;
	struct namecache *ncp;
	enum cache_lru_id id;

	for (id = 0; id < LRU_COUNT; id++) {
		TAILQ_FOREACH(ncp, &cache_lru.list[id], nc_lru) {
			if (ncp->nc_vp == vp) {
				(*pr)("name %.*s\n", ncp->nc_nlen,
				    ncp->nc_name);
				dvp = ncp->nc_dvp;
			}
		}
	}
	if (dvp == NULL) {
		(*pr)("name not found\n");
		return;
	}
	for (id = 0; id < LRU_COUNT; id++) {
		TAILQ_FOREACH(ncp, &cache_lru.list[id], nc_lru) {
			if (ncp->nc_vp == dvp) {
				(*pr)("parent %.*s\n", ncp->nc_nlen,
				    ncp->nc_name);
			}
		}
	}
}
#endif