/*	$NetBSD: vfs_cache.c,v 1.78 2008/08/20 15:34:59 pooka Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.78 2008/08/20 15:34:59 pooka Exp $");

#include "opt_ddb.h"
#include "opt_revcache.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/kthread.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>

#define NAMECACHE_ENTER_REVERSE
/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by hash value
 * obtained from (dvp, name) where dvp refers to the directory
 * containing name.
 *
 * For simplicity (and economy of storage), names longer than
 * a maximum length of NCHNAMLEN are not cached; they occur
 * infrequently in any case, and are almost never of interest.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 * The entry is also dropped when it was not possible to lock
 * the cached vnode, either because vget() failed or the generation
 * number has changed while waiting for the lock.
 */
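
/*
 * Illustrative sketch only: a file system's VOP_LOOKUP typically
 * consults the cache before scanning the directory and enters the
 * result of a scan afterwards.  Error handling and file system
 * specifics are omitted:
 *
 *	error = cache_lookup(dvp, vpp, cnp);
 *	if (error >= 0)
 *		return error;	(hit: 0, or ENOENT for a negative entry)
 *	...scan the directory, find (or fail to find) vp...
 *	cache_enter(dvp, vp, cnp);	(vp == NULL enters a negative)
 */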

/*
 * Per-cpu namecache data.
 */
struct nchcpu {
	kmutex_t	cpu_lock;
	struct nchstats	cpu_stats;
};

/*
 * Structures associated with name caching.
 */
LIST_HEAD(nchashhead, namecache) *nchashtbl;
u_long	nchash;				/* size of hash table - 1 */
#define	NCHASH(cnp, dvp)	\
	(((cnp)->cn_hash ^ ((uintptr_t)(dvp) >> 3)) & nchash)

LIST_HEAD(ncvhashhead, namecache) *ncvhashtbl;
u_long	ncvhash;			/* size of hash table - 1 */
#define	NCVHASH(vp)		(((uintptr_t)(vp) >> 3) & ncvhash)

long	numcache;			/* number of cache entries allocated */
static u_int	cache_gcpend;		/* number of entries pending GC */
static void	*cache_gcqueue;		/* garbage collection queue */

TAILQ_HEAD(, namecache) nclruhead =		/* LRU chain */
	TAILQ_HEAD_INITIALIZER(nclruhead);
#define	COUNT(c,x)	(c.x++)
struct	nchstats nchstats;		/* cache effectiveness statistics */

static pool_cache_t namecache_cache;

MALLOC_DEFINE(M_CACHE, "namecache", "Dynamically allocated cache entries");

int cache_lowat = 95;
int cache_hiwat = 98;
int cache_hottime = 5;			/* number of seconds */
int doingcache = 1;			/* 1 => enable the cache */

static struct evcnt cache_ev_scan;
static struct evcnt cache_ev_gc;
static struct evcnt cache_ev_over;
static struct evcnt cache_ev_under;
static struct evcnt cache_ev_forced;

/* A single lock to serialize modifications. */
static kmutex_t *namecache_lock;

static void cache_invalidate(struct namecache *);
static inline struct namecache *cache_lookup_entry(
    const struct vnode *, const struct componentname *);
static void cache_thread(void *);
static void cache_disassociate(struct namecache *);
static void cache_reclaim(void);
static int cache_ctor(void *, void *, int);
static void cache_dtor(void *, void *);

/*
 * Invalidate a cache entry and enqueue it for garbage collection.
 */
static void
cache_invalidate(struct namecache *ncp)
{
	void *head;

	KASSERT(mutex_owned(&ncp->nc_lock));

	if (ncp->nc_dvp != NULL) {
		ncp->nc_vp = NULL;
		ncp->nc_dvp = NULL;
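		/*
		 * Lock-free push onto the garbage collection queue:
		 * retry until this entry is installed as the new
		 * head without the head changing underneath us.
		 */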
		do {
			head = cache_gcqueue;
			ncp->nc_gcqueue = head;
		} while (atomic_cas_ptr(&cache_gcqueue, head, ncp) != head);
		atomic_inc_uint(&cache_gcpend);
	}
}

/*
 * Disassociate a namecache entry from any vnodes it is attached to,
 * and remove from the global LRU list.
 */
static void
cache_disassociate(struct namecache *ncp)
{

	KASSERT(mutex_owned(namecache_lock));
	KASSERT(ncp->nc_dvp == NULL);

	if (ncp->nc_lru.tqe_prev != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		ncp->nc_lru.tqe_prev = NULL;
	}
	if (ncp->nc_vhash.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vhash);
		ncp->nc_vhash.le_prev = NULL;
	}
	if (ncp->nc_vlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vlist);
		ncp->nc_vlist.le_prev = NULL;
	}
	if (ncp->nc_dvlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_dvlist);
		ncp->nc_dvlist.le_prev = NULL;
	}
}

/*
 * Lock all CPUs to prevent any cache lookup activity.  Conceptually,
 * this locks out all "readers".
 */
static void
cache_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;
	long *s, *d, *m;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_enter(&cpup->cpu_lock);

		/* Collate statistics. */
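		/*
		 * The stats structure is effectively an array of
		 * long counters, so both copies are walked as long[]:
		 * each per-CPU counter is added to the global total
		 * and then zeroed.
		 */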
		d = (long *)&nchstats;
		s = (long *)&cpup->cpu_stats;
		m = s + sizeof(nchstats) / sizeof(long);
		for (; s < m; s++, d++) {
			*d += *s;
			*s = 0;
		}
	}
}

/*
 * Release all CPU locks.
 */
static void
cache_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_exit(&cpup->cpu_lock);
	}
}

/*
 * Find a single cache entry and return it locked.  'namecache_lock' or
 * at least one of the per-CPU locks must be held.
 */
static struct namecache *
cache_lookup_entry(const struct vnode *dvp, const struct componentname *cnp)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;

	ncpp = &nchashtbl[NCHASH(cnp, dvp)];

	LIST_FOREACH(ncp, ncpp, nc_hash) {
		if (ncp->nc_dvp != dvp ||
		    ncp->nc_nlen != cnp->cn_namelen ||
		    memcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen))
			continue;
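		/*
		 * The entry was found without holding its lock, so
		 * check nc_dvp again once the lock is held: a racing
		 * cache_invalidate() may have nullified it meanwhile.
		 */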
		mutex_enter(&ncp->nc_lock);
		if (__predict_true(ncp->nc_dvp == dvp)) {
			ncp->nc_hittime = hardclock_ticks;
			return ncp;
		}
		/* Raced: entry has been nullified. */
		mutex_exit(&ncp->nc_lock);
	}

	return NULL;
}

/*
 * Look for the name in the cache.  We don't do this
 * if the segment name is long, simply so the cache can avoid
 * holding long names (which would either waste space, or
 * add greatly to the complexity).
 *
 * Lookup is called with dvp pointing to the directory to search,
 * and cnp describing the entry being sought: cn_nameptr points to
 * the name, cn_namelen gives its length, and cn_hash contains a
 * hash of the name.  If the lookup succeeds, the vnode is locked,
 * stored in *vpp and a status of zero is returned.  If the locking
 * fails for whatever reason, the vnode is unlocked and the error is
 * returned to the caller.  If the lookup determines that the name
 * does not exist (negative caching), a status of ENOENT is returned.
 * If the lookup fails, a status of -1 is returned.
 */
int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error;

	if (__predict_false(!doingcache)) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return -1;
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(cnp->cn_namelen > NCHNAMLEN)) {
		COUNT(cpup->cpu_stats, ncs_long);
		cnp->cn_flags &= ~MAKEENTRY;
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	ncp = cache_lookup_entry(dvp, cnp);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup->cpu_stats, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		COUNT(cpup->cpu_stats, ncs_badhits);
		/*
		 * This is the last component and we are renaming or
		 * deleting, the cache entry is invalid, or we
		 * otherwise don't want this entry to exist.
		 */
		cache_invalidate(ncp);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	} else if (ncp->nc_vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		cnp->cn_flags |= ncp->nc_flags;
		if (__predict_true(cnp->cn_nameiop != CREATE ||
		    (cnp->cn_flags & ISLASTCN) == 0)) {
			COUNT(cpup->cpu_stats, ncs_neghits);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(&cpup->cpu_lock);
			return ENOENT;
		} else {
			COUNT(cpup->cpu_stats, ncs_badhits);
			/*
			 * This is the last component and we are
			 * renaming or deleting, the cache entry is
			 * invalid, or we otherwise don't want this
			 * entry to exist.
			 */
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(&cpup->cpu_lock);
			*vpp = NULL;
			return -1;
		}
	}

	vp = ncp->nc_vp;
	if (vtryget(vp)) {
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
	} else {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		error = vget(vp, LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			KASSERT(error == EBUSY);
			/*
			 * This vnode is being cleaned out.
			 * XXX badhits?
			 */
			COUNT(cpup->cpu_stats, ncs_falsehits);
			*vpp = NULL;
			return -1;
		}
	}

#ifdef DEBUG
	/*
	 * Since we released ncp->nc_lock,
	 * we can't use this pointer any more.
	 */
	ncp = NULL;
#endif /* DEBUG */

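	/*
	 * Lock the vnode we found.  For "..", vp is dvp's parent in
	 * the tree, so dvp is unlocked first: taking vp's lock while
	 * holding dvp would invert the usual parent-before-child
	 * lock order and could deadlock.  dvp is re-locked after.
	 */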
	if (vp == dvp) {	/* lookup on "." */
		error = 0;
	} else if (cnp->cn_flags & ISDOTDOT) {
		VOP_UNLOCK(dvp, 0);
		error = vn_lock(vp, LK_EXCLUSIVE);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		error = vn_lock(vp, LK_EXCLUSIVE);
	}

	/*
	 * Check that the lock succeeded.
	 */
	if (error) {
		/* Unlocked, but only for stats. */
		COUNT(cpup->cpu_stats, ncs_badhits);
		vrele(vp);
		*vpp = NULL;
		return -1;
	}

	/* Unlocked, but only for stats. */
	COUNT(cpup->cpu_stats, ncs_goodhits);
	*vpp = vp;
	return 0;
}

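/*
 * Like cache_lookup(), but returns the vnode referenced and unlocked,
 * and skips the MAKEENTRY and nameiop policy checks: a "raw" lookup
 * for callers that do their own locking.
 */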
int
cache_lookup_raw(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error;

	if (__predict_false(!doingcache)) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return (-1);
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(cnp->cn_namelen > NCHNAMLEN)) {
		COUNT(cpup->cpu_stats, ncs_long);
		cnp->cn_flags &= ~MAKEENTRY;
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	ncp = cache_lookup_entry(dvp, cnp);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup->cpu_stats, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	vp = ncp->nc_vp;
	if (vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		cnp->cn_flags |= ncp->nc_flags;
		COUNT(cpup->cpu_stats, ncs_neghits);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		return ENOENT;
	}
	if (vtryget(vp)) {
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
	} else {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		error = vget(vp, LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			KASSERT(error == EBUSY);
			/*
			 * This vnode is being cleaned out.
			 * XXX badhits?
			 */
			COUNT(cpup->cpu_stats, ncs_falsehits);
			*vpp = NULL;
			return -1;
		}
	}

	*vpp = vp;
	return 0;
}


/*
 * Scan cache looking for name of directory entry pointing at vp.
 *
 * Fill in dvpp.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
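 *
 * Illustrative sketch of the intended calling pattern (the names
 * here are assumptions for illustration): a getcwd-style caller
 * fills components in from the end of its buffer:
 *
 *	bp = bufp + bufsiz;
 *	error = cache_revlookup(vp, &dvp, &bp, bufp);
 *	(on success the last component now sits at bp, dvp is the
 *	parent, and the next call prepends the previous component)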
 */
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
{
	struct namecache *ncp;
	struct vnode *dvp;
	struct ncvhashhead *nvcpp;
	char *bp;

	if (!doingcache)
		goto out;

	nvcpp = &ncvhashtbl[NCVHASH(vp)];

	mutex_enter(namecache_lock);
	LIST_FOREACH(ncp, nvcpp, nc_vhash) {
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_vp == vp &&
		    (dvp = ncp->nc_dvp) != NULL &&
		    dvp != vp) { 		/* avoid pesky . entries.. */

#ifdef DIAGNOSTIC
			if (ncp->nc_nlen == 1 &&
			    ncp->nc_name[0] == '.')
				panic("cache_revlookup: found entry for .");

			if (ncp->nc_nlen == 2 &&
			    ncp->nc_name[0] == '.' &&
			    ncp->nc_name[1] == '.')
				panic("cache_revlookup: found entry for ..");
#endif
			COUNT(nchstats, ncs_revhits);

			if (bufp) {
				bp = *bpp;
				bp -= ncp->nc_nlen;
				if (bp <= bufp) {
					*dvpp = NULL;
					mutex_exit(&ncp->nc_lock);
					mutex_exit(namecache_lock);
					return (ERANGE);
				}
				memcpy(bp, ncp->nc_name, ncp->nc_nlen);
				*bpp = bp;
			}

			/* XXX MP: how do we know dvp won't evaporate? */
			*dvpp = dvp;
			mutex_exit(&ncp->nc_lock);
			mutex_exit(namecache_lock);
			return (0);
		}
		mutex_exit(&ncp->nc_lock);
	}
	COUNT(nchstats, ncs_revmiss);
	mutex_exit(namecache_lock);
 out:
	*dvpp = NULL;
	return (-1);
}

/*
 * Add an entry to the cache.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct namecache *oncp;
	struct nchashhead *ncpp;
	struct ncvhashhead *nvcpp;

#ifdef DIAGNOSTIC
	if (cnp->cn_namelen > NCHNAMLEN)
		panic("cache_enter: name too long");
#endif
	if (!doingcache)
		return;

	if (numcache > desiredvnodes) {
		mutex_enter(namecache_lock);
		cache_ev_forced.ev_count++;
		cache_reclaim();
		mutex_exit(namecache_lock);
	}

	ncp = pool_cache_get(namecache_cache, PR_WAITOK);
	mutex_enter(namecache_lock);
	numcache++;

	/*
	 * Concurrent lookups in the same directory may race for a
	 * cache entry.  If there is a duplicated entry, invalidate it.
	 */
	oncp = cache_lookup_entry(dvp, cnp);
	if (oncp) {
		cache_invalidate(oncp);
		mutex_exit(&oncp->nc_lock);
	}

	/* Initialize the new entry. */
	mutex_enter(&ncp->nc_lock);
	ncp->nc_vp = vp;
	ncp->nc_flags = 0;
	ncp->nc_hittime = 0;
	ncp->nc_gcqueue = NULL;
	if (vp == NULL) {
		/*
		 * For negative hits, save the ISWHITEOUT flag so we can
		 * restore it later when the cache entry is used again.
		 */
		ncp->nc_flags = cnp->cn_flags & ISWHITEOUT;
	}
	/* Fill in cache info. */
	ncp->nc_dvp = dvp;
	LIST_INSERT_HEAD(&dvp->v_dnclist, ncp, nc_dvlist);
	if (vp)
		LIST_INSERT_HEAD(&vp->v_nclist, ncp, nc_vlist);
	else {
		ncp->nc_vlist.le_prev = NULL;
		ncp->nc_vlist.le_next = NULL;
	}
	ncp->nc_nlen = cnp->cn_namelen;
	TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	memcpy(ncp->nc_name, cnp->cn_nameptr, (unsigned)ncp->nc_nlen);
	ncpp = &nchashtbl[NCHASH(cnp, dvp)];

	/*
	 * Flush updates before making visible in table.  No need for a
	 * memory barrier on the other side: to see modifications the
	 * list must be followed, meaning a dependent pointer load.
	 * The below is LIST_INSERT_HEAD() inlined, with the memory
	 * barrier included in the correct place.
	 */
	if ((ncp->nc_hash.le_next = ncpp->lh_first) != NULL)
		ncpp->lh_first->nc_hash.le_prev = &ncp->nc_hash.le_next;
	ncp->nc_hash.le_prev = &ncpp->lh_first;
	membar_producer();
	ncpp->lh_first = ncp;

	ncp->nc_vhash.le_prev = NULL;
	ncp->nc_vhash.le_next = NULL;

	/*
	 * Create reverse-cache entries (used in getcwd, and by the
	 * Linux procfs exe node) for directories.
	 */
	if (vp != NULL &&
	    vp != dvp &&
#ifndef NAMECACHE_ENTER_REVERSE
	    vp->v_type == VDIR &&
#endif
	    (ncp->nc_nlen > 2 ||
	    (ncp->nc_nlen > 1 && ncp->nc_name[1] != '.') ||
	    (/* ncp->nc_nlen > 0 && */ ncp->nc_name[0] != '.'))) {
		nvcpp = &ncvhashtbl[NCVHASH(vp)];
		LIST_INSERT_HEAD(nvcpp, ncp, nc_vhash);
	}
	mutex_exit(&ncp->nc_lock);
	mutex_exit(namecache_lock);
}

/*
 * Name cache initialization, from vfs_init() when we are booting.
 */
void
nchinit(void)
{
	int error;

	namecache_cache = pool_cache_init(sizeof(struct namecache),
	    coherency_unit, 0, 0, "ncache", NULL, IPL_NONE, cache_ctor,
	    cache_dtor, NULL);
	KASSERT(namecache_cache != NULL);

	namecache_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	nchashtbl = hashinit(desiredvnodes, HASH_LIST, true, &nchash);
	ncvhashtbl =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &ncvhash);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &ncvhash);
#endif

	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, cache_thread,
	    NULL, NULL, "cachegc");
	if (error != 0)
		panic("nchinit %d", error);

	evcnt_attach_dynamic(&cache_ev_scan, EVCNT_TYPE_MISC, NULL,
	   "namecache", "entries scanned");
	evcnt_attach_dynamic(&cache_ev_gc, EVCNT_TYPE_MISC, NULL,
	   "namecache", "entries collected");
	evcnt_attach_dynamic(&cache_ev_over, EVCNT_TYPE_MISC, NULL,
	   "namecache", "over scan target");
	evcnt_attach_dynamic(&cache_ev_under, EVCNT_TYPE_MISC, NULL,
	   "namecache", "under scan target");
	evcnt_attach_dynamic(&cache_ev_forced, EVCNT_TYPE_MISC, NULL,
	   "namecache", "forced reclaims");
}

static int
cache_ctor(void *arg, void *obj, int flag)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_init(&ncp->nc_lock, MUTEX_DEFAULT, IPL_NONE);

	return 0;
}

static void
cache_dtor(void *arg, void *obj)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_destroy(&ncp->nc_lock);
}

/*
 * Called once for each CPU in the system as attached.
 */
void
cache_cpu_init(struct cpu_info *ci)
{
	struct nchcpu *cpup;
	size_t sz;

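	/*
	 * Pad and align the allocation to a cache line boundary so
	 * that each CPU's lock and statistics live on their own line
	 * and do not false-share with another CPU's data.
	 */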
	sz = roundup2(sizeof(*cpup), coherency_unit) + coherency_unit;
	cpup = kmem_zalloc(sz, KM_SLEEP);
	cpup = (void *)roundup2((uintptr_t)cpup, coherency_unit);
	mutex_init(&cpup->cpu_lock, MUTEX_DEFAULT, IPL_NONE);
	ci->ci_data.cpu_nch = cpup;
}

/*
 * Name cache reinitialization, for when the maximum number of vnodes
 * increases.
 */
void
nchreinit(void)
{
	struct namecache *ncp;
	struct nchashhead *oldhash1, *hash1;
	struct ncvhashhead *oldhash2, *hash2;
	u_long i, oldmask1, oldmask2, mask1, mask2;

	hash1 = hashinit(desiredvnodes, HASH_LIST, true, &mask1);
	hash2 =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &mask2);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &mask2);
#endif
	mutex_enter(namecache_lock);
	cache_lock_cpus();
	oldhash1 = nchashtbl;
	oldmask1 = nchash;
	nchashtbl = hash1;
	nchash = mask1;
	oldhash2 = ncvhashtbl;
	oldmask2 = ncvhash;
	ncvhashtbl = hash2;
	ncvhash = mask2;
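	/*
	 * Entries from the old tables are simply unhashed, not
	 * rehashed: they stay on the LRU and per-vnode lists, and
	 * will eventually be pruned or purged through the usual
	 * paths.
	 */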
	for (i = 0; i <= oldmask1; i++) {
		while ((ncp = LIST_FIRST(&oldhash1[i])) != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
	}
	for (i = 0; i <= oldmask2; i++) {
		while ((ncp = LIST_FIRST(&oldhash2[i])) != NULL) {
			LIST_REMOVE(ncp, nc_vhash);
			ncp->nc_vhash.le_prev = NULL;
		}
	}
	cache_unlock_cpus();
	mutex_exit(namecache_lock);
	hashdone(oldhash1, HASH_LIST, oldmask1);
	hashdone(oldhash2, HASH_LIST, oldmask2);
}

/*
 * Cache flush, for a particular vnode: called when a vnode is renamed,
 * to hide entries that would now be invalid.
 */
void
cache_purge1(struct vnode *vp, const struct componentname *cnp, int flags)
{
	struct namecache *ncp, *ncnext;

	mutex_enter(namecache_lock);
	if (flags & PURGE_PARENTS) {
		for (ncp = LIST_FIRST(&vp->v_nclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_vlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (flags & PURGE_CHILDREN) {
		for (ncp = LIST_FIRST(&vp->v_dnclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_dvlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (cnp != NULL) {
		ncp = cache_lookup_entry(vp, cnp);
		if (ncp) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			mutex_exit(&ncp->nc_lock);
		}
	}
	mutex_exit(namecache_lock);
}

/*
 * Cache flush, for a whole file system: called when a file system is
 * unmounted, to remove entries that would now be invalid.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct namecache *ncp, *nxtcp;

	mutex_enter(namecache_lock);
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL && ncp->nc_dvp->v_mount == mp) {
			/* Free the resources we had. */
			cache_invalidate(ncp);
			cache_disassociate(ncp);
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_reclaim();
	mutex_exit(namecache_lock);
}

/*
 * Scan global list invalidating entries until we meet a preset target.
 * Prefer to invalidate entries that have not scored a hit within
 * cache_hottime seconds.  We sort the LRU list only for this routine's
 * benefit.
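 *
 * Recently hit ("hot") entries are rotated to the tail of the list
 * rather than purged; the first rotated entry doubles as a sentinel,
 * so that encountering it again means the whole list has been
 * traversed once, after which hot entries are purged as well.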
 */
static void
cache_prune(int incache, int target)
{
	struct namecache *ncp, *nxtcp, *sentinel;
	int items, recent, tryharder;

	KASSERT(mutex_owned(namecache_lock));

	items = 0;
	tryharder = 0;
	recent = hardclock_ticks - hz * cache_hottime;
	sentinel = NULL;
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		if (incache <= target)
			break;
		items++;
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		if (ncp->nc_dvp == NULL)
			continue;
		if (ncp == sentinel) {
			/*
			 * If we looped back on ourself, then ignore
			 * recent entries and purge whatever we find.
			 */
			tryharder = 1;
		}
		if (!tryharder && ncp->nc_hittime > recent) {
			if (sentinel == NULL)
				sentinel = ncp;
			TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
			TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
			continue;
		}
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			incache--;
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_ev_scan.ev_count += items;
}

/*
 * Collect dead cache entries from all CPUs and garbage collect.
 */
static void
cache_reclaim(void)
{
	struct namecache *ncp, *next;
	int items;

	KASSERT(mutex_owned(namecache_lock));

	/*
	 * If the number of extant entries not awaiting garbage collection
	 * exceeds the high water mark, then reclaim stale entries until we
	 * reach our low water mark.
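	 *
	 * For example, with the default cache_hiwat of 98 and
	 * cache_lowat of 95, and desiredvnodes at 10000, pruning
	 * starts once more than 9800 live entries exist and stops
	 * when we are down to 9500.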
	 */
	items = numcache - cache_gcpend;
	if (items > (uint64_t)desiredvnodes * cache_hiwat / 100) {
		cache_prune(items, (int)((uint64_t)desiredvnodes *
		    cache_lowat / 100));
		cache_ev_over.ev_count++;
	} else
		cache_ev_under.ev_count++;

	/*
	 * Stop forward lookup activity on all CPUs and garbage collect dead
	 * entries.
	 */
	cache_lock_cpus();
	ncp = cache_gcqueue;
	cache_gcqueue = NULL;
	items = cache_gcpend;
	cache_gcpend = 0;
	while (ncp != NULL) {
		next = ncp->nc_gcqueue;
		cache_disassociate(ncp);
		KASSERT(ncp->nc_dvp == NULL);
		if (ncp->nc_hash.le_prev != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
		pool_cache_put(namecache_cache, ncp);
		ncp = next;
	}
	cache_unlock_cpus();
	numcache -= items;
	cache_ev_gc.ev_count += items;
}

/*
 * Cache maintenance thread, awakening once per second to:
 *
 * => keep number of entries below the high water mark
 * => sort pseudo-LRU list
 * => garbage collect dead entries
 */
static void
cache_thread(void *arg)
{

	mutex_enter(namecache_lock);
	for (;;) {
		cache_reclaim();
		kpause("cachegc", false, hz, namecache_lock);
	}
}

#ifdef DDB
void
namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
{
	struct vnode *dvp = NULL;
	struct namecache *ncp;

	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp && ncp->nc_dvp != NULL) {
			(*pr)("name %.*s\n", ncp->nc_nlen, ncp->nc_name);
			dvp = ncp->nc_dvp;
		}
	}
	if (dvp == NULL) {
		(*pr)("name not found\n");
		return;
	}
	vp = dvp;
	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp) {
			(*pr)("parent %.*s\n", ncp->nc_nlen, ncp->nc_name);
		}
	}
}
#endif