/*	$NetBSD: vfs_cache.c,v 1.81 2009/01/16 06:59:21 yamt Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.81 2009/01/16 06:59:21 yamt Exp $");

#include "opt_ddb.h"
#include "opt_revcache.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/kthread.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>

#define NAMECACHE_ENTER_REVERSE
/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  Cache is indexed by hash value
 * obtained from (dvp, name) where dvp refers to the directory
 * containing name.
 *
 * For simplicity (and economy of storage), names longer than
 * a maximum length of NCHNAMLEN are not cached; they occur
 * infrequently in any case, and are almost never of interest.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 * The entry is dropped also when it was not possible to lock
 * the cached vnode, either because vget() failed or the generation
 * number has changed while waiting for the lock.
 */
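
/*
 * A minimal sketch of how a file system's lookup path typically uses
 * this cache.  This is illustrative only: the names and control flow
 * are hypothetical and simplified, and real callers also manage vnode
 * locking and the MAKEENTRY/ISLASTCN flags.
 *
 *	error = cache_lookup(dvp, &vp, cnp);
 *	if (error >= 0)
 *		return error;	(0: hit, vp is valid; ENOENT: negative hit)
 *	(-1: miss; scan the directory the slow way, then publish the
 *	result so the next lookup hits:)
 *	cache_enter(dvp, found_vp, cnp);
 */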

/*
 * Per-cpu namecache data.
 */
struct nchcpu {
	kmutex_t	cpu_lock;
	struct nchstats	cpu_stats;
};

/*
 * Structures associated with name caching.
 */
LIST_HEAD(nchashhead, namecache) *nchashtbl;
u_long	nchash;				/* size of hash table - 1 */
#define	NCHASH(cnp, dvp)	\
	(((cnp)->cn_hash ^ ((uintptr_t)(dvp) >> 3)) & nchash)

LIST_HEAD(ncvhashhead, namecache) *ncvhashtbl;
u_long	ncvhash;			/* size of hash table - 1 */
#define	NCVHASH(vp)		(((uintptr_t)(vp) >> 3) & ncvhash)
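
/*
 * Both hash macros shift the vnode pointer right by 3 before masking:
 * vnodes are heap allocated and at least 8 byte aligned, so the low
 * bits of the pointer carry no information.
 */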

long	numcache;			/* number of cache entries allocated */
static u_int	cache_gcpend;		/* number of entries pending GC */
static void	*cache_gcqueue;		/* garbage collection queue */

TAILQ_HEAD(, namecache) nclruhead =		/* LRU chain */
	TAILQ_HEAD_INITIALIZER(nclruhead);
#define	COUNT(c,x)	(c.x++)
struct	nchstats nchstats;		/* cache effectiveness statistics */

static pool_cache_t namecache_cache;

MALLOC_DEFINE(M_CACHE, "namecache", "Dynamically allocated cache entries");

int cache_lowat = 95;
int cache_hiwat = 98;
int cache_hottime = 5;			/* number of seconds */
int doingcache = 1;			/* 1 => enable the cache */

static struct evcnt cache_ev_scan;
static struct evcnt cache_ev_gc;
static struct evcnt cache_ev_over;
static struct evcnt cache_ev_under;
static struct evcnt cache_ev_forced;

/* A single lock to serialize modifications. */
static kmutex_t *namecache_lock;

static void cache_invalidate(struct namecache *);
static inline struct namecache *cache_lookup_entry(
    const struct vnode *, const struct componentname *);
static void cache_thread(void *);
static void cache_disassociate(struct namecache *);
static void cache_reclaim(void);
static int cache_ctor(void *, void *, int);
static void cache_dtor(void *, void *);

/*
 * Invalidate a cache entry and enqueue it for garbage collection.
 */
static void
cache_invalidate(struct namecache *ncp)
{
	void *head;

	KASSERT(mutex_owned(&ncp->nc_lock));

	if (ncp->nc_dvp != NULL) {
		ncp->nc_vp = NULL;
		ncp->nc_dvp = NULL;
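		/*
		 * Push the entry onto the garbage collection queue,
		 * a lock-free singly linked stack updated with
		 * compare-and-swap; retry until the swap succeeds.
		 */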
		do {
			head = cache_gcqueue;
			ncp->nc_gcqueue = head;
		} while (atomic_cas_ptr(&cache_gcqueue, head, ncp) != head);
		atomic_inc_uint(&cache_gcpend);
	}
}

/*
 * Disassociate a namecache entry from any vnodes it is attached to,
 * and remove from the global LRU list.
 */
static void
cache_disassociate(struct namecache *ncp)
{

	KASSERT(mutex_owned(namecache_lock));
	KASSERT(ncp->nc_dvp == NULL);

	if (ncp->nc_lru.tqe_prev != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		ncp->nc_lru.tqe_prev = NULL;
	}
	if (ncp->nc_vhash.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vhash);
		ncp->nc_vhash.le_prev = NULL;
	}
	if (ncp->nc_vlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vlist);
		ncp->nc_vlist.le_prev = NULL;
	}
	if (ncp->nc_dvlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_dvlist);
		ncp->nc_dvlist.le_prev = NULL;
	}
}

/*
 * Lock all CPUs to prevent any cache lookup activity.  Conceptually,
 * this locks out all "readers".
 */
static void
cache_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;
	long *s, *d, *m;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_enter(&cpup->cpu_lock);

		/* Collate statistics. */
		d = (long *)&nchstats;
		s = (long *)&cpup->cpu_stats;
		m = s + sizeof(nchstats) / sizeof(long);
		for (; s < m; s++, d++) {
			*d += *s;
			*s = 0;
		}
	}
}

/*
 * Release all CPU locks.
 */
static void
cache_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_exit(&cpup->cpu_lock);
	}
}

/*
 * Find a single cache entry and return it locked.  'namecache_lock' or
 * at least one of the per-CPU locks must be held.
 */
static struct namecache *
cache_lookup_entry(const struct vnode *dvp, const struct componentname *cnp)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;

	ncpp = &nchashtbl[NCHASH(cnp, dvp)];

	LIST_FOREACH(ncp, ncpp, nc_hash) {
		if (ncp->nc_dvp != dvp ||
		    ncp->nc_nlen != cnp->cn_namelen ||
		    memcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen))
			continue;
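		/*
		 * Candidate match: take the entry lock and re-check
		 * nc_dvp, since the entry may have been invalidated
		 * between the unlocked compare and the mutex_enter().
		 */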
		mutex_enter(&ncp->nc_lock);
		if (__predict_true(ncp->nc_dvp == dvp)) {
			ncp->nc_hittime = hardclock_ticks;
			return ncp;
		}
		/* Raced: entry has been nullified. */
		mutex_exit(&ncp->nc_lock);
	}

	return NULL;
}

/*
 * Look for the name in the cache.  We don't do this if the segment
 * name is long, simply so the cache can avoid holding long names
 * (which would either waste space or add greatly to the complexity).
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp->cn_nameptr pointing to the name of the entry being sought,
 * cnp->cn_namelen telling the length of the name, and cnp->cn_hash
 * containing a hash of the name.  If the lookup succeeds, the vnode
 * is locked, stored in *vpp, and a status of zero is returned.  If
 * the locking fails for whatever reason, the vnode is released and
 * the lookup is treated as a miss.  If the lookup determines that
 * the name does not exist (negative caching), a status of ENOENT is
 * returned.  If the lookup fails, a status of -1 is returned.
 */
int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error;

	if (__predict_false(!doingcache)) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return -1;
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(cnp->cn_namelen > NCHNAMLEN)) {
		COUNT(cpup->cpu_stats, ncs_long);
		cnp->cn_flags &= ~MAKEENTRY;
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	ncp = cache_lookup_entry(dvp, cnp);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup->cpu_stats, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		COUNT(cpup->cpu_stats, ncs_badhits);
		/*
		 * Last component and we are renaming or deleting,
		 * the cache entry is invalid, or otherwise don't
		 * want cache entry to exist.
		 */
		cache_invalidate(ncp);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	} else if (ncp->nc_vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
		cnp->cn_flags |= ncp->nc_flags;
		if (__predict_true(cnp->cn_nameiop != CREATE ||
		    (cnp->cn_flags & ISLASTCN) == 0)) {
			COUNT(cpup->cpu_stats, ncs_neghits);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(&cpup->cpu_lock);
			return ENOENT;
		} else {
			COUNT(cpup->cpu_stats, ncs_badhits);
			/*
			 * Last component and we are renaming or
			 * deleting, the cache entry is invalid,
			 * or otherwise don't want cache entry to
			 * exist.
			 */
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(&cpup->cpu_lock);
			*vpp = NULL;
			return -1;
		}
	}

	vp = ncp->nc_vp;
	if (vtryget(vp)) {
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
	} else {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		error = vget(vp, LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			KASSERT(error == EBUSY);
			/*
			 * This vnode is being cleaned out.
			 * XXX badhits?
			 */
			COUNT(cpup->cpu_stats, ncs_falsehits);
			*vpp = NULL;
			return -1;
		}
	}

#ifdef DEBUG
	/*
	 * Since we released ncp->nc_lock, we can't use this pointer
	 * any more.
	 */
	ncp = NULL;
#endif /* DEBUG */

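	/*
	 * Lock the vnode found.  For "..", the vnode sought is dvp's
	 * parent, so dvp is unlocked first: vnode locks are taken
	 * parent-before-child, and taking the parent's lock while
	 * holding the child's would risk a deadlock.
	 */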
	if (vp == dvp) {	/* lookup on "." */
		error = 0;
	} else if (cnp->cn_flags & ISDOTDOT) {
		VOP_UNLOCK(dvp, 0);
		error = vn_lock(vp, LK_EXCLUSIVE);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		error = vn_lock(vp, LK_EXCLUSIVE);
	}

	/*
	 * Check that the lock succeeded.
	 */
	if (error) {
		/* Unlocked, but only for stats. */
		COUNT(cpup->cpu_stats, ncs_badhits);
		vrele(vp);
		*vpp = NULL;
		return -1;
	}

	/* Unlocked, but only for stats. */
	COUNT(cpup->cpu_stats, ncs_goodhits);
	*vpp = vp;
	return 0;
}

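/*
 * Variant of cache_lookup() for callers that do not want the vnode
 * locked: on a hit the vnode is returned referenced but unlocked, and
 * the CREATE/ISLASTCN special case for negative entries is not
 * applied.  Return values are as for cache_lookup().
 */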
int
cache_lookup_raw(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error;

	if (__predict_false(!doingcache)) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return (-1);
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(cnp->cn_namelen > NCHNAMLEN)) {
		COUNT(cpup->cpu_stats, ncs_long);
		cnp->cn_flags &= ~MAKEENTRY;
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	ncp = cache_lookup_entry(dvp, cnp);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup->cpu_stats, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	vp = ncp->nc_vp;
	if (vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
		cnp->cn_flags |= ncp->nc_flags;
		COUNT(cpup->cpu_stats, ncs_neghits);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		return ENOENT;
	}
	if (vtryget(vp)) {
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
	} else {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		error = vget(vp, LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			KASSERT(error == EBUSY);
			/*
			 * This vnode is being cleaned out.
			 * XXX badhits?
			 */
			COUNT(cpup->cpu_stats, ncs_falsehits);
			*vpp = NULL;
			return -1;
		}
	}

	/* Unlocked, but only for stats. */
	COUNT(cpup->cpu_stats, ncs_goodhits); /* XXX can be "badhits" */
	*vpp = vp;
	return 0;
}

/*
 * Scan cache looking for name of directory entry pointing at vp.
 *
 * Fill in dvpp.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 */
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
{
	struct namecache *ncp;
	struct vnode *dvp;
	struct ncvhashhead *nvcpp;
	char *bp;

	if (!doingcache)
		goto out;

	nvcpp = &ncvhashtbl[NCVHASH(vp)];

	mutex_enter(namecache_lock);
	LIST_FOREACH(ncp, nvcpp, nc_vhash) {
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_vp == vp &&
		    (dvp = ncp->nc_dvp) != NULL &&
		    dvp != vp) { 		/* avoid pesky . entries.. */

#ifdef DIAGNOSTIC
			if (ncp->nc_nlen == 1 &&
			    ncp->nc_name[0] == '.')
				panic("cache_revlookup: found entry for .");

			if (ncp->nc_nlen == 2 &&
			    ncp->nc_name[0] == '.' &&
			    ncp->nc_name[1] == '.')
				panic("cache_revlookup: found entry for ..");
#endif
			COUNT(nchstats, ncs_revhits);

			if (bufp) {
				bp = *bpp;
				bp -= ncp->nc_nlen;
				if (bp <= bufp) {
					*dvpp = NULL;
					mutex_exit(&ncp->nc_lock);
					mutex_exit(namecache_lock);
					return (ERANGE);
				}
				memcpy(bp, ncp->nc_name, ncp->nc_nlen);
				*bpp = bp;
			}

			/* XXX MP: how do we know dvp won't evaporate? */
			*dvpp = dvp;
			mutex_exit(&ncp->nc_lock);
			mutex_exit(namecache_lock);
			return (0);
		}
		mutex_exit(&ncp->nc_lock);
	}
	COUNT(nchstats, ncs_revmiss);
	mutex_exit(namecache_lock);
 out:
	*dvpp = NULL;
	return (-1);
}
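
/*
 * A hypothetical sketch of the getcwd(3)-style walk this interface is
 * shaped for: starting from a vnode, prepend one name per step into
 * the tail end of a caller-supplied buffer until the root is reached
 * (locking, references and error handling omitted):
 *
 *	char buf[MAXPATHLEN], *bp = buf + MAXPATHLEN;
 *	while (vp is not the root) {
 *		if (cache_revlookup(vp, &dvp, &bp, buf) != 0)
 *			break;	(miss or overflow: fall back to a
 *				 directory scan)
 *		*--bp = '/';
 *		vp = dvp;
 *	}
 */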

/*
 * Add an entry to the cache.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct namecache *oncp;
	struct nchashhead *ncpp;
	struct ncvhashhead *nvcpp;

#ifdef DIAGNOSTIC
	if (cnp->cn_namelen > NCHNAMLEN)
		panic("cache_enter: name too long");
#endif
	if (!doingcache)
		return;

	if (numcache > desiredvnodes) {
		mutex_enter(namecache_lock);
		cache_ev_forced.ev_count++;
		cache_reclaim();
		mutex_exit(namecache_lock);
	}

	ncp = pool_cache_get(namecache_cache, PR_WAITOK);
	mutex_enter(namecache_lock);
	numcache++;

	/*
	 * Concurrent lookups in the same directory may race for a
	 * cache entry.  If there's a duplicated entry, free it.
	 */
	oncp = cache_lookup_entry(dvp, cnp);
	if (oncp) {
		cache_invalidate(oncp);
		mutex_exit(&oncp->nc_lock);
	}

	/* Grab the vnode we just found. */
	mutex_enter(&ncp->nc_lock);
	ncp->nc_vp = vp;
	ncp->nc_flags = 0;
	ncp->nc_hittime = 0;
	ncp->nc_gcqueue = NULL;
	if (vp == NULL) {
		/*
		 * For negative hits, save the ISWHITEOUT flag so we can
		 * restore it later when the cache entry is used again.
		 */
		ncp->nc_flags = cnp->cn_flags & ISWHITEOUT;
	}
	/* Fill in cache info. */
	ncp->nc_dvp = dvp;
	LIST_INSERT_HEAD(&dvp->v_dnclist, ncp, nc_dvlist);
	if (vp)
		LIST_INSERT_HEAD(&vp->v_nclist, ncp, nc_vlist);
	else {
		ncp->nc_vlist.le_prev = NULL;
		ncp->nc_vlist.le_next = NULL;
	}
	ncp->nc_nlen = cnp->cn_namelen;
	TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	memcpy(ncp->nc_name, cnp->cn_nameptr, (unsigned)ncp->nc_nlen);
	ncpp = &nchashtbl[NCHASH(cnp, dvp)];

	/*
	 * Flush updates before making visible in table.  No need for a
	 * memory barrier on the other side: to see modifications the
	 * list must be followed, meaning a dependent pointer load.
	 * The below is LIST_INSERT_HEAD() inlined, with the memory
	 * barrier included in the correct place.
	 */
	if ((ncp->nc_hash.le_next = ncpp->lh_first) != NULL)
		ncpp->lh_first->nc_hash.le_prev = &ncp->nc_hash.le_next;
	ncp->nc_hash.le_prev = &ncpp->lh_first;
	membar_producer();
	ncpp->lh_first = ncp;

	ncp->nc_vhash.le_prev = NULL;
	ncp->nc_vhash.le_next = NULL;

	/*
	 * Create reverse-cache entries (used by getcwd, and by the
	 * Linux-compatible procfs "exe" node) for directories.
	 */
	if (vp != NULL &&
	    vp != dvp &&
#ifndef NAMECACHE_ENTER_REVERSE
	    vp->v_type == VDIR &&
#endif
	    (ncp->nc_nlen > 2 ||
	    (ncp->nc_nlen > 1 && ncp->nc_name[1] != '.') ||
	    (/* ncp->nc_nlen > 0 && */ ncp->nc_name[0] != '.'))) {
		nvcpp = &ncvhashtbl[NCVHASH(vp)];
		LIST_INSERT_HEAD(nvcpp, ncp, nc_vhash);
	}
	mutex_exit(&ncp->nc_lock);
	mutex_exit(namecache_lock);
}

/*
 * Name cache initialization, from vfs_init() when we are booting.
 */
void
nchinit(void)
{
	int error;

	namecache_cache = pool_cache_init(sizeof(struct namecache),
	    coherency_unit, 0, 0, "ncache", NULL, IPL_NONE, cache_ctor,
	    cache_dtor, NULL);
	KASSERT(namecache_cache != NULL);

	namecache_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	nchashtbl = hashinit(desiredvnodes, HASH_LIST, true, &nchash);
	ncvhashtbl =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &ncvhash);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &ncvhash);
#endif

	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, cache_thread,
	    NULL, NULL, "cachegc");
	if (error != 0)
		panic("nchinit %d", error);

	evcnt_attach_dynamic(&cache_ev_scan, EVCNT_TYPE_MISC, NULL,
	   "namecache", "entries scanned");
	evcnt_attach_dynamic(&cache_ev_gc, EVCNT_TYPE_MISC, NULL,
	   "namecache", "entries collected");
	evcnt_attach_dynamic(&cache_ev_over, EVCNT_TYPE_MISC, NULL,
	   "namecache", "over scan target");
	evcnt_attach_dynamic(&cache_ev_under, EVCNT_TYPE_MISC, NULL,
	   "namecache", "under scan target");
	evcnt_attach_dynamic(&cache_ev_forced, EVCNT_TYPE_MISC, NULL,
	   "namecache", "forced reclaims");
}

static int
cache_ctor(void *arg, void *obj, int flag)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_init(&ncp->nc_lock, MUTEX_DEFAULT, IPL_NONE);

	return 0;
}

static void
cache_dtor(void *arg, void *obj)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_destroy(&ncp->nc_lock);
}

/*
 * Called once for each CPU in the system as attached.
 */
void
cache_cpu_init(struct cpu_info *ci)
{
	struct nchcpu *cpup;
	size_t sz;

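	/*
	 * Over-allocate by one coherency unit so that the structure
	 * can be aligned to a cache line boundary below; each CPU's
	 * lock and statistics then sit on their own cache line.
	 */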
	sz = roundup2(sizeof(*cpup), coherency_unit) + coherency_unit;
	cpup = kmem_zalloc(sz, KM_SLEEP);
	cpup = (void *)roundup2((uintptr_t)cpup, coherency_unit);
	mutex_init(&cpup->cpu_lock, MUTEX_DEFAULT, IPL_NONE);
	ci->ci_data.cpu_nch = cpup;
}

/*
 * Name cache reinitialization, for when the maximum number of vnodes
 * increases.
 */
void
nchreinit(void)
{
	struct namecache *ncp;
	struct nchashhead *oldhash1, *hash1;
	struct ncvhashhead *oldhash2, *hash2;
	u_long i, oldmask1, oldmask2, mask1, mask2;

	hash1 = hashinit(desiredvnodes, HASH_LIST, true, &mask1);
	hash2 =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &mask2);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &mask2);
#endif
	mutex_enter(namecache_lock);
	cache_lock_cpus();
	oldhash1 = nchashtbl;
	oldmask1 = nchash;
	nchashtbl = hash1;
	nchash = mask1;
	oldhash2 = ncvhashtbl;
	oldmask2 = ncvhash;
	ncvhashtbl = hash2;
	ncvhash = mask2;
	for (i = 0; i <= oldmask1; i++) {
		while ((ncp = LIST_FIRST(&oldhash1[i])) != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
	}
	for (i = 0; i <= oldmask2; i++) {
		while ((ncp = LIST_FIRST(&oldhash2[i])) != NULL) {
			LIST_REMOVE(ncp, nc_vhash);
			ncp->nc_vhash.le_prev = NULL;
		}
	}
	cache_unlock_cpus();
	mutex_exit(namecache_lock);
	hashdone(oldhash1, HASH_LIST, oldmask1);
	hashdone(oldhash2, HASH_LIST, oldmask2);
}

/*
 * Cache flush, a particular vnode; called when a vnode is renamed to
 * hide entries that would now be invalid.
 */
void
cache_purge1(struct vnode *vp, const struct componentname *cnp, int flags)
{
	struct namecache *ncp, *ncnext;

	mutex_enter(namecache_lock);
	if (flags & PURGE_PARENTS) {
		for (ncp = LIST_FIRST(&vp->v_nclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_vlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (flags & PURGE_CHILDREN) {
		for (ncp = LIST_FIRST(&vp->v_dnclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_dvlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (cnp != NULL) {
		ncp = cache_lookup_entry(vp, cnp);
		if (ncp) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			mutex_exit(&ncp->nc_lock);
		}
	}
	mutex_exit(namecache_lock);
}

/*
 * Cache flush, a whole filesystem; called when filesys is umounted to
 * remove entries that would now be invalid.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct namecache *ncp, *nxtcp;

	mutex_enter(namecache_lock);
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL && ncp->nc_dvp->v_mount == mp) {
			/* Free the resources we had. */
			cache_invalidate(ncp);
			cache_disassociate(ncp);
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_reclaim();
	mutex_exit(namecache_lock);
}

/*
 * Scan global list invalidating entries until we meet a preset target.
 * Prefer to invalidate entries that have not scored a hit within
 * cache_hottime seconds.  We sort the LRU list only for this routine's
 * benefit.
 */
static void
cache_prune(int incache, int target)
{
	struct namecache *ncp, *nxtcp, *sentinel;
	int items, recent, tryharder;

	KASSERT(mutex_owned(namecache_lock));

	items = 0;
	tryharder = 0;
	recent = hardclock_ticks - hz * cache_hottime;
	sentinel = NULL;
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		if (incache <= target)
			break;
		items++;
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		if (ncp->nc_dvp == NULL)
			continue;
		if (ncp == sentinel) {
			/*
			 * If we looped back on ourself, then ignore
			 * recent entries and purge whatever we find.
			 */
			tryharder = 1;
		}
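		/*
		 * Entries that scored a hit within the last
		 * cache_hottime seconds are kept: move them to the
		 * tail of the LRU list and revisit them only if the
		 * scan wraps around.
		 */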
		if (!tryharder && (ncp->nc_hittime - recent) > 0) {
			if (sentinel == NULL)
				sentinel = ncp;
			TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
			TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
			continue;
		}
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			incache--;
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_ev_scan.ev_count += items;
}

/*
 * Collect dead cache entries from all CPUs and garbage collect.
 */
static void
cache_reclaim(void)
{
	struct namecache *ncp, *next;
	int items;

	KASSERT(mutex_owned(namecache_lock));

	/*
	 * If the number of extant entries not awaiting garbage collection
	 * exceeds the high water mark, then reclaim stale entries until we
	 * reach our low water mark.
	 */
	items = numcache - cache_gcpend;
	if (items > (uint64_t)desiredvnodes * cache_hiwat / 100) {
		cache_prune(items, (int)((uint64_t)desiredvnodes *
		    cache_lowat / 100));
		cache_ev_over.ev_count++;
	} else
		cache_ev_under.ev_count++;

	/*
	 * Stop forward lookup activity on all CPUs and garbage collect dead
	 * entries.
	 */
	cache_lock_cpus();
	ncp = cache_gcqueue;
	cache_gcqueue = NULL;
	items = cache_gcpend;
	cache_gcpend = 0;
	while (ncp != NULL) {
		next = ncp->nc_gcqueue;
		cache_disassociate(ncp);
		KASSERT(ncp->nc_dvp == NULL);
		if (ncp->nc_hash.le_prev != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
		pool_cache_put(namecache_cache, ncp);
		ncp = next;
	}
	cache_unlock_cpus();
	numcache -= items;
	cache_ev_gc.ev_count += items;
}

/*
 * Cache maintenance thread, awakening once per second to:
 *
 * => keep number of entries below the high water mark
 * => sort pseudo-LRU list
 * => garbage collect dead entries
 */
static void
cache_thread(void *arg)
{

	mutex_enter(namecache_lock);
	for (;;) {
		cache_reclaim();
		kpause("cachegc", false, hz, namecache_lock);
	}
}

#ifdef DDB
void
namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
{
	struct vnode *dvp = NULL;
	struct namecache *ncp;

	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp && ncp->nc_dvp != NULL) {
			(*pr)("name %.*s\n", ncp->nc_nlen, ncp->nc_name);
			dvp = ncp->nc_dvp;
		}
	}
	if (dvp == NULL) {
		(*pr)("name not found\n");
		return;
	}
	vp = dvp;
	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp) {
			(*pr)("parent %.*s\n", ncp->nc_nlen, ncp->nc_name);
		}
	}
}
#endif