/*	$NetBSD: vfs_cache.c,v 1.100 2014/11/30 04:11:03 dennis Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.100 2014/11/30 04:11:03 dennis Exp $");

#include "opt_ddb.h"
#include "opt_revcache.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <sys/kmem.h>	/* for kmem_zalloc() in cache_cpu_init() */
#include <sys/pool.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/kthread.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>

#define NAMECACHE_ENTER_REVERSE
/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  The cache is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by a hash value
 * obtained from (dvp, name), where dvp refers to the directory
 * containing name.
 *
 * For simplicity (and economy of storage), names longer than
 * a maximum length of NCHNAMLEN are not cached; they occur
 * infrequently in any case, and are almost never of interest.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 * The entry is also dropped when it was not possible to lock
 * the cached vnode, either because vget() failed or because the
 * generation number changed while waiting for the lock.
 */

/*
 * The locking in this subsystem works as follows:
 *
 * When an entry is added to the cache, via cache_enter(),
 * namecache_lock is taken to exclude other writers.  The new
 * entry is added to the hash list in a way which permits
 * concurrent lookups and invalidations in the cache done on
 * other CPUs to continue in parallel.
 *
 * When a lookup is done in the cache, via cache_lookup() or
 * cache_lookup_raw(), the per-cpu lock below is taken.  This
 * protects calls to cache_lookup_entry() and cache_invalidate()
 * against cache_reclaim(), and protects the per-cpu stats against
 * modification in both cache_reclaim() and cache_stat_sysctl(),
 * but allows lookups to continue in parallel with cache_enter().
 *
 * cache_revlookup() takes namecache_lock to exclude cache_enter()
 * and cache_reclaim() since the list it operates on is not
 * maintained to allow concurrent reads.
 *
 * When cache_reclaim() is called namecache_lock is held to hold
 * off calls to cache_enter() and each of the per-cpu locks is
 * taken to hold off lookups.  Holding all these locks essentially
 * idles the subsystem, ensuring there are no concurrent references
 * to the cache entries being freed.  As a side effect, once the
 * per-cpu locks are held, the per-cpu stats are added to the
 * subsystem totals and then zeroed.  cache_stat_sysctl() similarly
 * takes all locks to collect the per-cpu stats (though it perhaps
 * could avoid this by living with stats that were a second out
 * of date?).
 *
 * The per-cpu namecache data is defined below.  cpu_lock is used
 * to protect cpu_stats updates and to exclude cache_reclaim()
 * during lookups.
 */
struct nchcpu {
	kmutex_t	cpu_lock;
	struct nchstats	cpu_stats;
};
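
/*
 * Illustrative sketch of the protocol above (not additional API): a
 * lookup serializes only against reclaim, via its own CPU's lock,
 * while cache_reclaim() must take namecache_lock plus every per-cpu
 * lock.  The reader side is simply:
 *
 *	cpup = curcpu()->ci_data.cpu_nch;
 *	mutex_enter(&cpup->cpu_lock);
 *	ncp = cache_lookup_entry(dvp, name, namelen);
 *	...use and then unlock ncp...
 *	mutex_exit(&cpup->cpu_lock);
 */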

/*
 * The type for the hash code.  While the hash function generates a
 * u32, the hash code has historically been passed around as a u_long,
 * and the value is modified by xor'ing a uintptr_t, so it's not
 * entirely clear what the best type is.  For now I'll leave it
 * unchanged as u_long.
 */

typedef u_long nchash_t;

/*
 * Structures associated with name caching.
 */

static kmutex_t *namecache_lock __read_mostly;
static pool_cache_t namecache_cache __read_mostly;
static TAILQ_HEAD(, namecache) nclruhead __cacheline_aligned;

static LIST_HEAD(nchashhead, namecache) *nchashtbl __read_mostly;
static u_long	nchash __read_mostly;

#define	NCHASH2(hash, dvp)	\
	(((hash) ^ ((uintptr_t)(dvp) >> 3)) & nchash)

static LIST_HEAD(ncvhashhead, namecache) *ncvhashtbl __read_mostly;
static u_long	ncvhash __read_mostly;

#define	NCVHASH(vp)		(((uintptr_t)(vp) >> 3) & ncvhash)

/* Number of cache entries allocated. */
static long	numcache __cacheline_aligned;

/* Garbage collection queue and number of entries pending in it. */
static void	*cache_gcqueue;
static u_int	cache_gcpend;

/* Cache effectiveness statistics. */
struct nchstats	nchstats __cacheline_aligned;
#define	COUNT(c,x)	(c.x++)

static const int cache_lowat = 95;
static const int cache_hiwat = 98;
static const int cache_hottime = 5;	/* number of seconds */
static int doingcache = 1;		/* 1 => enable the cache */

static struct evcnt cache_ev_scan;
static struct evcnt cache_ev_gc;
static struct evcnt cache_ev_over;
static struct evcnt cache_ev_under;
static struct evcnt cache_ev_forced;

static void cache_invalidate(struct namecache *);
static struct namecache *cache_lookup_entry(
    const struct vnode *, const char *, size_t);
static void cache_thread(void *);
static void cache_disassociate(struct namecache *);
static void cache_reclaim(void);
static int cache_ctor(void *, void *, int);
static void cache_dtor(void *, void *);

/*
 * Compute the hash for an entry.
 *
 * (This is for now a wrapper around namei_hash, whose interface is
 * for the time being slightly inconvenient.)
 */
static nchash_t
cache_hash(const char *name, size_t namelen)
{
	const char *endptr;

	endptr = name + namelen;
	return namei_hash(name, &endptr);
}

/*
 * Invalidate a cache entry and enqueue it for garbage collection.
 * The caller needs to hold namecache_lock or a per-cpu lock to hold
 * off cache_reclaim().
 */
static void
cache_invalidate(struct namecache *ncp)
{
	void *head;

	KASSERT(mutex_owned(&ncp->nc_lock));

	if (ncp->nc_dvp != NULL) {
		ncp->nc_vp = NULL;
		ncp->nc_dvp = NULL;
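		/*
		 * Lock-free push of the dead entry onto the LIFO
		 * garbage collection queue: retry the compare-and-swap
		 * until we succeed in making the entry the new head.
		 */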
		do {
			head = cache_gcqueue;
			ncp->nc_gcqueue = head;
		} while (atomic_cas_ptr(&cache_gcqueue, head, ncp) != head);
		atomic_inc_uint(&cache_gcpend);
	}
}

/*
 * Disassociate a namecache entry from any vnodes it is attached to,
 * and remove from the global LRU list.
 */
static void
cache_disassociate(struct namecache *ncp)
{

	KASSERT(mutex_owned(namecache_lock));
	KASSERT(ncp->nc_dvp == NULL);

	if (ncp->nc_lru.tqe_prev != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		ncp->nc_lru.tqe_prev = NULL;
	}
	if (ncp->nc_vhash.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vhash);
		ncp->nc_vhash.le_prev = NULL;
	}
	if (ncp->nc_vlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vlist);
		ncp->nc_vlist.le_prev = NULL;
	}
	if (ncp->nc_dvlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_dvlist);
		ncp->nc_dvlist.le_prev = NULL;
	}
}

/*
 * Lock all CPUs to prevent any cache lookup activity.  Conceptually,
 * this locks out all "readers".  While the locks are held, the
 * per-cpu stats are folded into the subsystem totals and zeroed;
 * the UPDATE() macro below handles one counter.
 */
#define	UPDATE(f) do { \
	nchstats.f += cpup->cpu_stats.f; \
	cpup->cpu_stats.f = 0; \
} while (/* CONSTCOND */ 0)

static void
cache_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_enter(&cpup->cpu_lock);
		UPDATE(ncs_goodhits);
		UPDATE(ncs_neghits);
		UPDATE(ncs_badhits);
		UPDATE(ncs_falsehits);
		UPDATE(ncs_miss);
		UPDATE(ncs_long);
		UPDATE(ncs_pass2);
		UPDATE(ncs_2passes);
		UPDATE(ncs_revhits);
		UPDATE(ncs_revmiss);
	}
}

#undef UPDATE

/*
 * Release all CPU locks.
 */
static void
cache_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_exit(&cpup->cpu_lock);
	}
}

/*
 * Find a single cache entry and return it locked.
 * The caller needs to hold namecache_lock or a per-cpu lock to hold
 * off cache_reclaim().
 */
static struct namecache *
cache_lookup_entry(const struct vnode *dvp, const char *name, size_t namelen)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;
	nchash_t hash;

	KASSERT(dvp != NULL);
	hash = cache_hash(name, namelen);
	ncpp = &nchashtbl[NCHASH2(hash, dvp)];

	LIST_FOREACH(ncp, ncpp, nc_hash) {
		/* XXX Needs barrier for Alpha here */
		if (ncp->nc_dvp != dvp ||
		    ncp->nc_nlen != namelen ||
		    memcmp(ncp->nc_name, name, (u_int)ncp->nc_nlen))
			continue;
		mutex_enter(&ncp->nc_lock);
		if (__predict_true(ncp->nc_dvp == dvp)) {
			ncp->nc_hittime = hardclock_ticks;
			return ncp;
		}
		/* Raced: entry has been nullified. */
		mutex_exit(&ncp->nc_lock);
	}

	return NULL;
}

/*
 * Look for the name in the cache.  We don't do this
 * if the segment name is long, simply so the cache can avoid
 * holding long names (which would either waste space, or
 * add greatly to the complexity).
 *
 * Lookup is called with DVP pointing to the directory to search,
 * NAME providing the name of the entry being sought, NAMELEN giving
 * its length, and CNFLAGS being the flags word from the namei
 * operation.
 *
 * DVP must be locked.
 *
 * There are three possible non-error return states:
 *    1. Nothing was found in the cache. Nothing is known about
 *       the requested name.
 *    2. A negative entry was found in the cache, meaning that the
 *       requested name definitely does not exist.
 *    3. A positive entry was found in the cache, meaning that the
 *       requested name does exist and that we are providing the
 *       vnode.
 * In these cases the results are:
 *    1. 0 returned; *VN_RET is set to NULL.
 *    2. 1 returned; *VN_RET is set to NULL.
 *    3. 1 returned; *VN_RET is set to the vnode found.
 *
 * The additional result *ISWHT_RET is set to zero, unless a
 * negative entry is found that was entered as a whiteout, in which
 * case *ISWHT_RET is set to one.
 *
 * The ISWHT_RET argument pointer may be null. In this case an
 * assertion is made that the whiteout flag is not set. File systems
 * that do not support whiteouts can/should do this.
 *
 * Filesystems that do support whiteouts should add ISWHITEOUT to
 * cnp->cn_flags if *ISWHT_RET comes back nonzero.
 *
 * When a vnode is returned, it is locked, as per the vnode lookup
 * locking protocol.
 *
 * There is no way for this function to fail, in the sense of
 * generating an error that requires aborting the namei operation.
 *
 * (Prior to October 2012, this function returned an integer status,
 * and a vnode, and mucked with the flags word in CNP for whiteouts.
 * The integer status was -1 for "nothing found", ENOENT for "a
 * negative entry found", 0 for "a positive entry found", and possibly
 * other errors, and the value of VN might or might not have been set
 * depending on what error occurred.)
 */
int
cache_lookup(struct vnode *dvp, const char *name, size_t namelen,
	     uint32_t nameiop, uint32_t cnflags,
	     int *iswht_ret, struct vnode **vn_ret)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error, ret_value;

	/* Establish default result values */
	if (iswht_ret != NULL) {
		*iswht_ret = 0;
	}
	*vn_ret = NULL;

	if (__predict_false(!doingcache)) {
		return 0;
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(namelen > NCHNAMLEN)) {
		COUNT(cpup->cpu_stats, ncs_long);
		mutex_exit(&cpup->cpu_lock);
		/* found nothing */
		return 0;
	}

	ncp = cache_lookup_entry(dvp, name, namelen);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup->cpu_stats, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		/* found nothing */
		return 0;
	}
	if ((cnflags & MAKEENTRY) == 0) {
		COUNT(cpup->cpu_stats, ncs_badhits);
		/*
		 * Last component and we are renaming or deleting,
		 * the cache entry is invalid, or otherwise don't
		 * want cache entry to exist.
		 */
		cache_invalidate(ncp);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		/* found nothing */
		return 0;
	}
	if (ncp->nc_vp == NULL) {
		if (iswht_ret != NULL) {
			/*
			 * Restore the ISWHITEOUT flag saved earlier.
			 */
			KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
			*iswht_ret = (ncp->nc_flags & ISWHITEOUT) != 0;
		} else {
			KASSERT(ncp->nc_flags == 0);
		}

		if (__predict_true(nameiop != CREATE ||
		    (cnflags & ISLASTCN) == 0)) {
			COUNT(cpup->cpu_stats, ncs_neghits);
			/* found neg entry; vn is already null from above */
			ret_value = 1;
		} else {
			COUNT(cpup->cpu_stats, ncs_badhits);
			/*
			 * Last component and we are renaming or
			 * deleting, the cache entry is invalid,
			 * or otherwise don't want cache entry to
			 * exist.
			 */
			cache_invalidate(ncp);
			/* found nothing */
			ret_value = 0;
		}
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		return ret_value;
	}

	vp = ncp->nc_vp;
	mutex_enter(vp->v_interlock);
	mutex_exit(&ncp->nc_lock);

	/*
	 * Drop per-cpu lock across the call to vget(), take it
	 * again for the sake of the stats update.
	 */
	mutex_exit(&cpup->cpu_lock);
	error = vget(vp, LK_NOWAIT);
	mutex_enter(&cpup->cpu_lock);
	if (error) {
		KASSERT(error == EBUSY);
		/*
		 * This vnode is being cleaned out.
		 * XXX badhits?
		 */
		COUNT(cpup->cpu_stats, ncs_falsehits);
		/* found nothing */
		ret_value = 0;
	} else {
		COUNT(cpup->cpu_stats, ncs_goodhits);
		/* found it */
		*vn_ret = vp;
		ret_value = 1;
	}
	mutex_exit(&cpup->cpu_lock);

	return ret_value;
}
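
/*
 * Illustrative sketch (not part of this file): a file system's lookup
 * VOP would typically consult the cache first, along these lines.  The
 * surrounding locking and the miss path are elided.
 *
 *	struct vnode *vp;
 *	int iswhiteout, found;
 *
 *	found = cache_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
 *	    cnp->cn_nameiop, cnp->cn_flags, &iswhiteout, &vp);
 *	if (found) {
 *		if (iswhiteout)
 *			cnp->cn_flags |= ISWHITEOUT;
 *		if (vp == NULL)
 *			return ENOENT;	(negative entry)
 *		*vpp = vp;
 *		return 0;
 *	}
 *	(fall through to a real directory scan, then cache_enter())
 */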

/*
 * Cut-'n-pasted version of the above without the nameiop argument.
 */
int
cache_lookup_raw(struct vnode *dvp, const char *name, size_t namelen,
		 uint32_t cnflags,
		 int *iswht_ret, struct vnode **vn_ret)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error, ret_value;

	/* Establish default results. */
	if (iswht_ret != NULL) {
		*iswht_ret = 0;
	}
	*vn_ret = NULL;

	if (__predict_false(!doingcache)) {
		/* found nothing */
		return 0;
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(namelen > NCHNAMLEN)) {
		COUNT(cpup->cpu_stats, ncs_long);
		mutex_exit(&cpup->cpu_lock);
		/* found nothing */
		return 0;
	}
	ncp = cache_lookup_entry(dvp, name, namelen);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup->cpu_stats, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		/* found nothing */
		return 0;
	}
	vp = ncp->nc_vp;
	if (vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		if (iswht_ret != NULL) {
			KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
			*iswht_ret = (ncp->nc_flags & ISWHITEOUT) != 0;
		}
		COUNT(cpup->cpu_stats, ncs_neghits);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		/* found negative entry; vn is already null from above */
		return 1;
	}
	mutex_enter(vp->v_interlock);
	mutex_exit(&ncp->nc_lock);

	/*
	 * Drop per-cpu lock across the call to vget(), take it
	 * again for the sake of the stats update.
	 */
	mutex_exit(&cpup->cpu_lock);
	error = vget(vp, LK_NOWAIT);
	mutex_enter(&cpup->cpu_lock);
	if (error) {
		KASSERT(error == EBUSY);
		/*
		 * This vnode is being cleaned out.
		 * XXX badhits?
		 */
		COUNT(cpup->cpu_stats, ncs_falsehits);
		/* found nothing */
		ret_value = 0;
	} else {
		COUNT(cpup->cpu_stats, ncs_goodhits); /* XXX can be "badhits" */
		/* found it */
		*vn_ret = vp;
		ret_value = 1;
	}
	mutex_exit(&cpup->cpu_lock);

	return ret_value;
}

/*
 * Scan cache looking for name of directory entry pointing at vp.
 *
 * If the lookup succeeds the vnode is referenced and stored in dvpp.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 */
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
{
	struct namecache *ncp;
	struct vnode *dvp;
	struct nchcpu *cpup;
	struct ncvhashhead *nvcpp;
	char *bp;
	int error, nlen;

	if (!doingcache)
		goto out;

	nvcpp = &ncvhashtbl[NCVHASH(vp)];
	cpup = curcpu()->ci_data.cpu_nch;

	mutex_enter(namecache_lock);
	LIST_FOREACH(ncp, nvcpp, nc_vhash) {
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_vp == vp &&
		    (dvp = ncp->nc_dvp) != NULL &&
		    dvp != vp) {		/* avoid pesky . entries.. */

#ifdef DIAGNOSTIC
			if (ncp->nc_nlen == 1 &&
			    ncp->nc_name[0] == '.')
				panic("cache_revlookup: found entry for .");

			if (ncp->nc_nlen == 2 &&
			    ncp->nc_name[0] == '.' &&
			    ncp->nc_name[1] == '.')
				panic("cache_revlookup: found entry for ..");
#endif
			mutex_enter(&cpup->cpu_lock);
			COUNT(cpup->cpu_stats, ncs_revhits);
			mutex_exit(&cpup->cpu_lock);
			nlen = ncp->nc_nlen;

			if (bufp) {
				bp = *bpp;
				bp -= nlen;
				if (bp <= bufp) {
					*dvpp = NULL;
					mutex_exit(&ncp->nc_lock);
					mutex_exit(namecache_lock);
					return (ERANGE);
				}
				memcpy(bp, ncp->nc_name, nlen);
				*bpp = bp;
			}

			mutex_enter(dvp->v_interlock);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(namecache_lock);
			error = vget(dvp, LK_NOWAIT);
			if (error) {
				KASSERT(error == EBUSY);
				if (bufp)
					(*bpp) += nlen;
				*dvpp = NULL;
				return -1;
			}
			*dvpp = dvp;
			return (0);
		}
		mutex_exit(&ncp->nc_lock);
	}
	mutex_enter(&cpup->cpu_lock);
	COUNT(cpup->cpu_stats, ncs_revmiss);
	mutex_exit(&cpup->cpu_lock);
	mutex_exit(namecache_lock);
 out:
	*dvpp = NULL;
	return (-1);
}
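
/*
 * Illustrative sketch (not part of this file): getcwd-style code walks
 * from a vnode toward the root, filling a caller-supplied buffer from
 * the end backwards, roughly like this.  Error handling is elided.
 *
 *	char buf[MAXPATHLEN];
 *	char *bp = buf + sizeof(buf);	(names grow downwards from here)
 *	struct vnode *dvp;
 *
 *	while (vp is not the root) {
 *		if (cache_revlookup(vp, &dvp, &bp, buf) != 0)
 *			break;		(miss: fall back to scanning "..")
 *		*--bp = '/';
 *		vrele(vp);
 *		vp = dvp;
 *	}
 */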

/*
 * Add an entry to the cache.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp,
	    const char *name, size_t namelen, uint32_t cnflags)
{
	struct namecache *ncp;
	struct namecache *oncp;
	struct nchashhead *ncpp;
	struct ncvhashhead *nvcpp;
	nchash_t hash;

	/* First, check whether we can/should add a cache entry. */
	if ((cnflags & MAKEENTRY) == 0 ||
	    __predict_false(namelen > NCHNAMLEN || !doingcache)) {
		return;
	}

	if (numcache > desiredvnodes) {
		mutex_enter(namecache_lock);
		cache_ev_forced.ev_count++;
		cache_reclaim();
		mutex_exit(namecache_lock);
	}

	ncp = pool_cache_get(namecache_cache, PR_WAITOK);
	mutex_enter(namecache_lock);
	numcache++;

	/*
	 * Concurrent lookups in the same directory may race for a
	 * cache entry.  If there's a duplicated entry, free it.
	 */
	oncp = cache_lookup_entry(dvp, name, namelen);
	if (oncp) {
		cache_invalidate(oncp);
		mutex_exit(&oncp->nc_lock);
	}

	/* Initialize the new entry under its lock. */
	mutex_enter(&ncp->nc_lock);
	ncp->nc_vp = vp;
	ncp->nc_flags = 0;
	ncp->nc_hittime = 0;
	ncp->nc_gcqueue = NULL;
	if (vp == NULL) {
		/*
		 * For negative hits, save the ISWHITEOUT flag so we can
		 * restore it later when the cache entry is used again.
		 */
		ncp->nc_flags = cnflags & ISWHITEOUT;
	}

	/* Fill in cache info. */
	ncp->nc_dvp = dvp;
	LIST_INSERT_HEAD(&dvp->v_dnclist, ncp, nc_dvlist);
	if (vp)
		LIST_INSERT_HEAD(&vp->v_nclist, ncp, nc_vlist);
	else {
		ncp->nc_vlist.le_prev = NULL;
		ncp->nc_vlist.le_next = NULL;
	}
	KASSERT(namelen <= NCHNAMLEN);
	ncp->nc_nlen = namelen;
	memcpy(ncp->nc_name, name, (unsigned)ncp->nc_nlen);
	TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	hash = cache_hash(name, namelen);
	ncpp = &nchashtbl[NCHASH2(hash, dvp)];

	/*
	 * Flush updates before making visible in table.  No need for a
	 * memory barrier on the other side: to see modifications the
	 * list must be followed, meaning a dependent pointer load.
	 * The below is LIST_INSERT_HEAD() inlined, with the memory
	 * barrier included in the correct place.
	 */
	if ((ncp->nc_hash.le_next = ncpp->lh_first) != NULL)
		ncpp->lh_first->nc_hash.le_prev = &ncp->nc_hash.le_next;
	ncp->nc_hash.le_prev = &ncpp->lh_first;
	membar_producer();
	ncpp->lh_first = ncp;

	ncp->nc_vhash.le_prev = NULL;
	ncp->nc_vhash.le_next = NULL;

	/*
	 * Create reverse-cache entries (used in getcwd) for directories.
	 * (and in linux procfs exe node)
	 */
	if (vp != NULL &&
	    vp != dvp &&
#ifndef NAMECACHE_ENTER_REVERSE
	    vp->v_type == VDIR &&
#endif
	    (ncp->nc_nlen > 2 ||
	    (ncp->nc_nlen > 1 && ncp->nc_name[1] != '.') ||
	    (/* ncp->nc_nlen > 0 && */ ncp->nc_name[0] != '.'))) {
		nvcpp = &ncvhashtbl[NCVHASH(vp)];
		LIST_INSERT_HEAD(nvcpp, ncp, nc_vhash);
	}
	mutex_exit(&ncp->nc_lock);
	mutex_exit(namecache_lock);
}
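
/*
 * Illustrative sketch (not part of this file): after a real directory
 * scan, a file system's lookup code would record the result, hit or
 * miss, so the next lookup can be answered from the cache:
 *
 *	(found: "vp" is the vnode for the name)
 *	cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
 *	    cnp->cn_flags);
 *
 *	(not found: enter a negative entry by passing a NULL vnode)
 *	cache_enter(dvp, NULL, cnp->cn_nameptr, cnp->cn_namelen,
 *	    cnp->cn_flags);
 */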

/*
 * Name cache initialization, from vfs_init() when we are booting.
 */
void
nchinit(void)
{
	int error;

	TAILQ_INIT(&nclruhead);
	namecache_cache = pool_cache_init(sizeof(struct namecache),
	    coherency_unit, 0, 0, "ncache", NULL, IPL_NONE, cache_ctor,
	    cache_dtor, NULL);
	KASSERT(namecache_cache != NULL);

	namecache_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	nchashtbl = hashinit(desiredvnodes, HASH_LIST, true, &nchash);
	ncvhashtbl =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &ncvhash);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &ncvhash);
#endif

	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, cache_thread,
	    NULL, NULL, "cachegc");
	if (error != 0)
		panic("nchinit %d", error);

	evcnt_attach_dynamic(&cache_ev_scan, EVCNT_TYPE_MISC, NULL,
	   "namecache", "entries scanned");
	evcnt_attach_dynamic(&cache_ev_gc, EVCNT_TYPE_MISC, NULL,
	   "namecache", "entries collected");
	evcnt_attach_dynamic(&cache_ev_over, EVCNT_TYPE_MISC, NULL,
	   "namecache", "over scan target");
	evcnt_attach_dynamic(&cache_ev_under, EVCNT_TYPE_MISC, NULL,
	   "namecache", "under scan target");
	evcnt_attach_dynamic(&cache_ev_forced, EVCNT_TYPE_MISC, NULL,
	   "namecache", "forced reclaims");
}

static int
cache_ctor(void *arg, void *obj, int flag)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_init(&ncp->nc_lock, MUTEX_DEFAULT, IPL_NONE);

	return 0;
}

static void
cache_dtor(void *arg, void *obj)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_destroy(&ncp->nc_lock);
}

/*
 * Called once for each CPU in the system as attached.
 */
void
cache_cpu_init(struct cpu_info *ci)
{
	struct nchcpu *cpup;
	size_t sz;

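	/*
	 * Over-allocate and round the pointer up so the per-cpu data
	 * occupies its own cache line(s); this avoids false sharing of
	 * the lock and stats between CPUs.
	 */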
	sz = roundup2(sizeof(*cpup), coherency_unit) + coherency_unit;
	cpup = kmem_zalloc(sz, KM_SLEEP);
	cpup = (void *)roundup2((uintptr_t)cpup, coherency_unit);
	mutex_init(&cpup->cpu_lock, MUTEX_DEFAULT, IPL_NONE);
	ci->ci_data.cpu_nch = cpup;
}

/*
 * Name cache reinitialization, for when the maximum number of vnodes
 * increases.
 */
void
nchreinit(void)
{
	struct namecache *ncp;
	struct nchashhead *oldhash1, *hash1;
	struct ncvhashhead *oldhash2, *hash2;
	u_long i, oldmask1, oldmask2, mask1, mask2;

	hash1 = hashinit(desiredvnodes, HASH_LIST, true, &mask1);
	hash2 =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &mask2);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &mask2);
#endif
	mutex_enter(namecache_lock);
	cache_lock_cpus();
	oldhash1 = nchashtbl;
	oldmask1 = nchash;
	nchashtbl = hash1;
	nchash = mask1;
	oldhash2 = ncvhashtbl;
	oldmask2 = ncvhash;
	ncvhashtbl = hash2;
	ncvhash = mask2;
	for (i = 0; i <= oldmask1; i++) {
		while ((ncp = LIST_FIRST(&oldhash1[i])) != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
	}
	for (i = 0; i <= oldmask2; i++) {
		while ((ncp = LIST_FIRST(&oldhash2[i])) != NULL) {
			LIST_REMOVE(ncp, nc_vhash);
			ncp->nc_vhash.le_prev = NULL;
		}
	}
	cache_unlock_cpus();
	mutex_exit(namecache_lock);
	hashdone(oldhash1, HASH_LIST, oldmask1);
	hashdone(oldhash2, HASH_LIST, oldmask2);
}

/*
 * Cache flush, a particular vnode; called when a vnode is renamed to
 * hide entries that would now be invalid.
 */
void
cache_purge1(struct vnode *vp, const char *name, size_t namelen, int flags)
{
	struct namecache *ncp, *ncnext;

	mutex_enter(namecache_lock);
	if (flags & PURGE_PARENTS) {
		for (ncp = LIST_FIRST(&vp->v_nclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_vlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (flags & PURGE_CHILDREN) {
		for (ncp = LIST_FIRST(&vp->v_dnclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_dvlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (name != NULL) {
		ncp = cache_lookup_entry(vp, name, namelen);
		if (ncp) {
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	mutex_exit(namecache_lock);
}

/*
 * Cache flush, a whole filesystem; called when filesys is umounted to
 * remove entries that would now be invalid.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct namecache *ncp, *nxtcp;

	mutex_enter(namecache_lock);
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL && ncp->nc_dvp->v_mount == mp) {
			/* Free the resources we had. */
			cache_invalidate(ncp);
			cache_disassociate(ncp);
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_reclaim();
	mutex_exit(namecache_lock);
}

/*
 * Scan global list invalidating entries until we meet a preset target.
 * Prefer to invalidate entries that have not scored a hit within
 * cache_hottime seconds.  We sort the LRU list only for this routine's
 * benefit.
 */
static void
cache_prune(int incache, int target)
{
	struct namecache *ncp, *nxtcp, *sentinel;
	int items, recent, tryharder;

	KASSERT(mutex_owned(namecache_lock));

	items = 0;
	tryharder = 0;
	recent = hardclock_ticks - hz * cache_hottime;
	sentinel = NULL;
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		if (incache <= target)
			break;
		items++;
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		if (ncp == sentinel) {
			/*
			 * If we looped back on ourself, then ignore
			 * recent entries and purge whatever we find.
			 */
			tryharder = 1;
		}
		if (ncp->nc_dvp == NULL)
			continue;
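		/*
		 * Note: the subtraction below, rather than a direct
		 * comparison, keeps the "recently hit" test correct
		 * when hardclock_ticks wraps around.
		 */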
		if (!tryharder && (ncp->nc_hittime - recent) > 0) {
			if (sentinel == NULL)
				sentinel = ncp;
			TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
			TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
			continue;
		}
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			incache--;
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_ev_scan.ev_count += items;
}

/*
 * Collect dead cache entries from all CPUs and garbage collect.
 */
static void
cache_reclaim(void)
{
	struct namecache *ncp, *next;
	int items;

	KASSERT(mutex_owned(namecache_lock));

	/*
	 * If the number of extant entries not awaiting garbage collection
	 * exceeds the high water mark, then reclaim stale entries until we
	 * reach our low water mark.
	 */
	items = numcache - cache_gcpend;
	if (items > (uint64_t)desiredvnodes * cache_hiwat / 100) {
		cache_prune(items, (int)((uint64_t)desiredvnodes *
		    cache_lowat / 100));
		cache_ev_over.ev_count++;
	} else
		cache_ev_under.ev_count++;

	/*
	 * Stop forward lookup activity on all CPUs and garbage collect dead
	 * entries.
	 */
	cache_lock_cpus();
	ncp = cache_gcqueue;
	cache_gcqueue = NULL;
	items = cache_gcpend;
	cache_gcpend = 0;
	while (ncp != NULL) {
		next = ncp->nc_gcqueue;
		cache_disassociate(ncp);
		KASSERT(ncp->nc_dvp == NULL);
		if (ncp->nc_hash.le_prev != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
		pool_cache_put(namecache_cache, ncp);
		ncp = next;
	}
	cache_unlock_cpus();
	numcache -= items;
	cache_ev_gc.ev_count += items;
}

/*
 * Cache maintenance thread, awakening once per second to:
 *
 * => keep number of entries below the high water mark
 * => sort pseudo-LRU list
 * => garbage collect dead entries
 */
static void
cache_thread(void *arg)
{

	mutex_enter(namecache_lock);
	for (;;) {
		cache_reclaim();
		kpause("cachegc", false, hz, namecache_lock);
	}
}

#ifdef DDB
void
namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
{
	struct vnode *dvp = NULL;
	struct namecache *ncp;

	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp && ncp->nc_dvp != NULL) {
			(*pr)("name %.*s\n", ncp->nc_nlen, ncp->nc_name);
			dvp = ncp->nc_dvp;
		}
	}
	if (dvp == NULL) {
		(*pr)("name not found\n");
		return;
	}
	vp = dvp;
	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp) {
			(*pr)("parent %.*s\n", ncp->nc_nlen, ncp->nc_name);
		}
	}
}
#endif

void
namecache_count_pass2(void)
{
	struct nchcpu *cpup = curcpu()->ci_data.cpu_nch;

	mutex_enter(&cpup->cpu_lock);
	COUNT(cpup->cpu_stats, ncs_pass2);
	mutex_exit(&cpup->cpu_lock);
}

void
namecache_count_2passes(void)
{
	struct nchcpu *cpup = curcpu()->ci_data.cpu_nch;

	mutex_enter(&cpup->cpu_lock);
	COUNT(cpup->cpu_stats, ncs_2passes);
	mutex_exit(&cpup->cpu_lock);
}

static int
cache_stat_sysctl(SYSCTLFN_ARGS)
{
	struct nchstats_sysctl stats;

	if (oldp == NULL) {
		*oldlenp = sizeof(stats);
		return 0;
	}

	if (*oldlenp < sizeof(stats)) {
		*oldlenp = 0;
		return 0;
	}

	memset(&stats, 0, sizeof(stats));

	sysctl_unlock();
	cache_lock_cpus();
	stats.ncs_goodhits = nchstats.ncs_goodhits;
	stats.ncs_neghits = nchstats.ncs_neghits;
	stats.ncs_badhits = nchstats.ncs_badhits;
	stats.ncs_falsehits = nchstats.ncs_falsehits;
	stats.ncs_miss = nchstats.ncs_miss;
	stats.ncs_long = nchstats.ncs_long;
	stats.ncs_pass2 = nchstats.ncs_pass2;
	stats.ncs_2passes = nchstats.ncs_2passes;
	stats.ncs_revhits = nchstats.ncs_revhits;
	stats.ncs_revmiss = nchstats.ncs_revmiss;
	cache_unlock_cpus();
	sysctl_relock();

	*oldlenp = sizeof(stats);
	return sysctl_copyout(l, &stats, oldp, sizeof(stats));
}
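
/*
 * Illustrative sketch (not part of this file): userland can read these
 * statistics through sysctl(3), assuming struct nchstats_sysctl is
 * visible to the caller via the appropriate header:
 *
 *	struct nchstats_sysctl ncs;
 *	size_t len = sizeof(ncs);
 *
 *	if (sysctlbyname("vfs.namecache_stats", &ncs, &len, NULL, 0) == 0)
 *		(inspect ncs.ncs_goodhits, ncs.ncs_miss, ...)
 */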

SYSCTL_SETUP(sysctl_cache_stat_setup, "vfs.namecache_stats subtree setup")
{
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "namecache_stats",
		       SYSCTL_DESCR("namecache statistics"),
		       cache_stat_sysctl, 0, NULL, 0,
		       CTL_VFS, CTL_CREATE, CTL_EOL);
}