/*	$NetBSD: vfs_cache.c,v 1.73 2008/04/11 15:25:24 ad Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.73 2008/04/11 15:25:24 ad Exp $");

#include "opt_ddb.h"
#include "opt_revcache.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/kthread.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>

#define NAMECACHE_ENTER_REVERSE
/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache for future
 * reference.  The cache is managed LRU, so frequently used names will
 * hang around.  The cache is indexed by a hash value obtained from
 * (dvp, name), where dvp refers to the directory containing name.
 *
 * For simplicity (and economy of storage), names longer than
 * a maximum length of NCHNAMLEN are not cached; they occur
 * infrequently in any case, and are almost never of interest.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.  The entry
 * is also dropped when it was not possible to lock the cached
 * vnode, either because vget() failed or because the generation
 * number changed while waiting for the lock.
 */
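
/*
 * Illustrative sketch of a typical consumer (an assumption about
 * callers, not part of this file): a file system's lookup routine
 * consults the cache first and falls back to a directory scan,
 * entering the result for next time.  scan_directory() is a
 * hypothetical helper; cache_lookup() returns 0 or ENOENT on a hit
 * and -1 on a miss.
 *
 *	error = cache_lookup(dvp, vpp, cnp);
 *	if (error != -1)
 *		return error;
 *	error = scan_directory(dvp, vpp, cnp);
 *	if (error == 0 || error == ENOENT)
 *		cache_enter(dvp, (error == 0 ? *vpp : NULL), cnp);
 *	return error;
 */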

/*
 * Structures associated with name caching.
 */
LIST_HEAD(nchashhead, namecache) *nchashtbl;
u_long	nchash;				/* size of hash table - 1 */
#define	NCHASH(cnp, dvp)	\
	(((cnp)->cn_hash ^ ((uintptr_t)(dvp) >> 3)) & nchash)

LIST_HEAD(ncvhashhead, namecache) *ncvhashtbl;
u_long	ncvhash;			/* size of hash table - 1 */
#define	NCVHASH(vp)		(((uintptr_t)(vp) >> 3) & ncvhash)
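
/*
 * For example, a forward lookup selects its hash chain as
 * cache_lookup_entry() does below:
 *
 *	struct nchashhead *ncpp = &nchashtbl[NCHASH(cnp, dvp)];
 *
 * cnp->cn_hash is the name hash computed during namei; mixing in the
 * directory vnode pointer spreads identical names in different
 * directories across different chains.
 */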

long	numcache;			/* number of cache entries allocated */
static u_int	cache_gcpend;		/* number of entries pending GC */
static void	*cache_gcqueue;		/* garbage collection queue */

TAILQ_HEAD(, namecache) nclruhead =		/* LRU chain */
	TAILQ_HEAD_INITIALIZER(nclruhead);
#define	COUNT(x)	nchstats.x++
struct	nchstats nchstats;		/* cache effectiveness statistics */

static pool_cache_t namecache_cache;

MALLOC_DEFINE(M_CACHE, "namecache", "Dynamically allocated cache entries");

int cache_lowat = 95;
int cache_hiwat = 98;
int cache_hottime = 5;			/* number of seconds */
int doingcache = 1;			/* 1 => enable the cache */

static struct evcnt cache_ev_scan;
static struct evcnt cache_ev_gc;
static struct evcnt cache_ev_over;
static struct evcnt cache_ev_under;
static struct evcnt cache_ev_forced;

/* A single lock to serialize modifications. */
static kmutex_t *namecache_lock;

static void cache_invalidate(struct namecache *);
static inline struct namecache *cache_lookup_entry(
    const struct vnode *, const struct componentname *);
static void cache_thread(void *);
static void cache_disassociate(struct namecache *);
static void cache_reclaim(void);
static int cache_ctor(void *, void *, int);
static void cache_dtor(void *, void *);

/*
 * Invalidate a cache entry and enqueue it for garbage collection.
 */
static void
cache_invalidate(struct namecache *ncp)
{
	void *head;

	KASSERT(mutex_owned(&ncp->nc_lock));

	if (ncp->nc_dvp != NULL) {
		ncp->nc_vp = NULL;
		ncp->nc_dvp = NULL;
		do {
			head = cache_gcqueue;
			ncp->nc_gcqueue = head;
		} while (atomic_cas_ptr(&cache_gcqueue, head, ncp) != head);
		atomic_inc_uint(&cache_gcpend);
	}
}
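
/*
 * The CAS loop above is a lock-free stack push, so entries can be
 * queued while holding only a per-CPU lock.  The queue has a single
 * consumer: cache_reclaim() drains it with all CPU locks held, along
 * the lines of:
 *
 *	ncp = cache_gcqueue;
 *	cache_gcqueue = NULL;
 *	while (ncp != NULL) {
 *		next = ncp->nc_gcqueue;
 *		...unhash and free the entry...
 *		ncp = next;
 *	}
 */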

/*
 * Disassociate a namecache entry from any vnodes it is attached to,
 * and remove from the global LRU list.
 */
static void
cache_disassociate(struct namecache *ncp)
{

	KASSERT(mutex_owned(namecache_lock));
	KASSERT(ncp->nc_dvp == NULL);

	if (ncp->nc_lru.tqe_prev != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		ncp->nc_lru.tqe_prev = NULL;
	}
	if (ncp->nc_vhash.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vhash);
		ncp->nc_vhash.le_prev = NULL;
	}
	if (ncp->nc_vlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vlist);
		ncp->nc_vlist.le_prev = NULL;
	}
	if (ncp->nc_dvlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_dvlist);
		ncp->nc_dvlist.le_prev = NULL;
	}
}

/*
 * Lock all CPUs to prevent any cache lookup activity.  Conceptually,
 * this locks out all "readers".
 */
static void
cache_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		mutex_enter(ci->ci_data.cpu_cachelock);
	}
}

/*
 * Release all CPU locks.
 */
static void
cache_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;

	for (CPU_INFO_FOREACH(cii, ci)) {
		mutex_exit(ci->ci_data.cpu_cachelock);
	}
}
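
/*
 * The per-CPU locks implement a simple reader/writer scheme: a lookup
 * ("reader") takes only its own CPU's lock, while code that must
 * exclude all lookups takes every CPU's lock via cache_lock_cpus().
 * The reader side, as it appears in cache_lookup() below:
 *
 *	cpulock = curcpu()->ci_data.cpu_cachelock;
 *	mutex_enter(cpulock);
 *	ncp = cache_lookup_entry(dvp, cnp);
 *	...
 *	mutex_exit(cpulock);
 */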

/*
 * Find a single cache entry and return it locked.  'namecache_lock' or
 * at least one of the per-CPU locks must be held.
 */
static struct namecache *
cache_lookup_entry(const struct vnode *dvp, const struct componentname *cnp)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;

	ncpp = &nchashtbl[NCHASH(cnp, dvp)];

	LIST_FOREACH(ncp, ncpp, nc_hash) {
		if (ncp->nc_dvp != dvp ||
		    ncp->nc_nlen != cnp->cn_namelen ||
		    memcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen))
			continue;
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp == dvp) {
			ncp->nc_hittime = hardclock_ticks;
			return ncp;
		}
		/* Raced: entry has been nullified. */
		mutex_exit(&ncp->nc_lock);
	}

	return NULL;
}

/*
 * Look for the name in the cache.  We don't do this if the segment
 * name is long, simply so the cache can avoid holding long names
 * (which would either waste space or add greatly to the complexity).
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp->cn_nameptr pointing to the name of the entry being sought,
 * cnp->cn_namelen telling the length of the name, and cnp->cn_hash
 * containing a hash of the name.  If the lookup succeeds, the vnode
 * is locked, stored in *vpp, and a status of zero is returned.  If
 * the locking fails for whatever reason, the vnode is unlocked and
 * the error is returned to the caller.  If the lookup determines that
 * the name does not exist (negative caching), a status of ENOENT is
 * returned.  If the lookup fails, a status of -1 is returned.
 */
int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct vnode *vp;
	kmutex_t *cpulock;
	int error;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return (-1);
	}

	if (cnp->cn_namelen > NCHNAMLEN) {
		/* Unlocked, but only for stats. */
		COUNT(ncs_long);
		cnp->cn_flags &= ~MAKEENTRY;
		goto fail;
	}
	cpulock = curcpu()->ci_data.cpu_cachelock;
	mutex_enter(cpulock);
	ncp = cache_lookup_entry(dvp, cnp);
	if (ncp == NULL) {
		COUNT(ncs_miss);
		goto fail_wlock;
	}
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		COUNT(ncs_badhits);
		goto remove;
	} else if (ncp->nc_vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		cnp->cn_flags |= ncp->nc_flags;
		if (cnp->cn_nameiop != CREATE ||
		    (cnp->cn_flags & ISLASTCN) == 0) {
			COUNT(ncs_neghits);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(cpulock);
			return (ENOENT);
		} else {
			COUNT(ncs_badhits);
			goto remove;
		}
	}

	vp = ncp->nc_vp;
	mutex_enter(&vp->v_interlock);
	mutex_exit(&ncp->nc_lock);
	mutex_exit(cpulock);
	error = vget(vp, LK_NOWAIT | LK_INTERLOCK);

#ifdef DEBUG
	/*
	 * Since we released ncp->nc_lock, we can't use this pointer
	 * any more.
	 */
	ncp = NULL;
#endif /* DEBUG */

	if (error) {
		KASSERT(error == EBUSY);
		/*
		 * This vnode is being cleaned out.
		 */
		COUNT(ncs_falsehits); /* XXX badhits? */
		goto fail;
	}

	if (vp == dvp) {	/* lookup on "." */
		error = 0;
	} else if (cnp->cn_flags & ISDOTDOT) {
		VOP_UNLOCK(dvp, 0);
		error = vn_lock(vp, LK_EXCLUSIVE);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		error = vn_lock(vp, LK_EXCLUSIVE);
	}

	/*
	 * Check that the lock succeeded.
	 */
	if (error) {
		/* Unlocked, but only for stats. */
		COUNT(ncs_badhits);
		*vpp = NULL;
		return (-1);
	}

	/* Unlocked, but only for stats. */
	COUNT(ncs_goodhits);
	*vpp = vp;
	return (0);

remove:
	/*
	 * Last component and we are renaming or deleting, the cache
	 * entry is invalid, or we otherwise don't want the cache
	 * entry to exist.
	 */
	cache_invalidate(ncp);
	mutex_exit(&ncp->nc_lock);
fail_wlock:
	mutex_exit(cpulock);
fail:
	*vpp = NULL;
	return (-1);
}
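
/*
 * A note on the ".." case above: vnode locks are normally taken in
 * parent-to-child order, but ".." is the parent of dvp, so dvp must
 * be unlocked first to avoid deadlocking against a lookup descending
 * the other way:
 *
 *	VOP_UNLOCK(dvp, 0);
 *	error = vn_lock(vp, LK_EXCLUSIVE);
 *	vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
 */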

int
cache_lookup_raw(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp)
{
	struct namecache *ncp;
	struct vnode *vp;
	kmutex_t *cpulock;
	int error;

	if (!doingcache) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return (-1);
	}

	if (cnp->cn_namelen > NCHNAMLEN) {
		/* Unlocked, but only for stats. */
		COUNT(ncs_long);
		cnp->cn_flags &= ~MAKEENTRY;
		goto fail;
	}
	cpulock = curcpu()->ci_data.cpu_cachelock;
	mutex_enter(cpulock);
	ncp = cache_lookup_entry(dvp, cnp);
	if (ncp == NULL) {
		COUNT(ncs_miss);
		goto fail_wlock;
	}
	vp = ncp->nc_vp;
	if (vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		cnp->cn_flags |= ncp->nc_flags;
		COUNT(ncs_neghits);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(cpulock);
		return (ENOENT);
	}
	mutex_enter(&vp->v_interlock);
	mutex_exit(&ncp->nc_lock);
	mutex_exit(cpulock);
	error = vget(vp, LK_NOWAIT | LK_INTERLOCK);

	if (error) {
		KASSERT(error == EBUSY);
		/*
		 * This vnode is being cleaned out.
		 */
		COUNT(ncs_falsehits); /* XXX badhits? */
		goto fail;
	}

	*vpp = vp;

	return 0;

fail_wlock:
	mutex_exit(cpulock);
fail:
	*vpp = NULL;
	return -1;
}
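
/*
 * Unlike cache_lookup(), the raw variant above returns the vnode
 * referenced (via vget()) but unlocked, leaving lock management to
 * the caller.  A minimal caller sketch (an assumption, not from this
 * file):
 *
 *	error = cache_lookup_raw(dvp, &vp, cnp);
 *	if (error == 0) {
 *		...use vp, locking it if required...
 *		vrele(vp);
 *	}
 */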

/*
 * Scan cache looking for name of directory entry pointing at vp.
 *
 * Fill in dvpp.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 */
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
{
	struct namecache *ncp;
	struct vnode *dvp;
	struct ncvhashhead *nvcpp;
	char *bp;

	if (!doingcache)
		goto out;

	nvcpp = &ncvhashtbl[NCVHASH(vp)];

	mutex_enter(namecache_lock);
	LIST_FOREACH(ncp, nvcpp, nc_vhash) {
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_vp == vp &&
		    (dvp = ncp->nc_dvp) != NULL &&
		    dvp != vp) { 		/* avoid pesky . entries.. */

#ifdef DIAGNOSTIC
			if (ncp->nc_nlen == 1 &&
			    ncp->nc_name[0] == '.')
				panic("cache_revlookup: found entry for .");

			if (ncp->nc_nlen == 2 &&
			    ncp->nc_name[0] == '.' &&
			    ncp->nc_name[1] == '.')
				panic("cache_revlookup: found entry for ..");
#endif
			COUNT(ncs_revhits);

			if (bufp) {
				bp = *bpp;
				bp -= ncp->nc_nlen;
				if (bp <= bufp) {
					*dvpp = NULL;
					mutex_exit(&ncp->nc_lock);
					mutex_exit(namecache_lock);
					return (ERANGE);
				}
				memcpy(bp, ncp->nc_name, ncp->nc_nlen);
				*bpp = bp;
			}

			/* XXX MP: how do we know dvp won't evaporate? */
			*dvpp = dvp;
			mutex_exit(&ncp->nc_lock);
			mutex_exit(namecache_lock);
			return (0);
		}
		mutex_exit(&ncp->nc_lock);
	}
	COUNT(ncs_revmiss);
	mutex_exit(namecache_lock);
 out:
	*dvpp = NULL;
	return (-1);
}
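
/*
 * A sketch of the getcwd-style use of cache_revlookup() (an
 * illustration, not from this file): the caller supplies a buffer and
 * a cursor that starts at its end; each hit prepends one name and
 * moves the cursor backwards, returning ERANGE if the buffer is too
 * small:
 *
 *	char buf[MAXPATHLEN];
 *	char *bp = &buf[MAXPATHLEN - 1];
 *	*bp = '\0';
 *	error = cache_revlookup(vp, &dvp, &bp, buf);
 *	...on success, bp points at the start of the name...
 */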

/*
 * Add an entry to the cache
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct namecache *oncp;
	struct nchashhead *ncpp;
	struct ncvhashhead *nvcpp;

#ifdef DIAGNOSTIC
	if (cnp->cn_namelen > NCHNAMLEN)
		panic("cache_enter: name too long");
#endif
	if (!doingcache)
		return;

	if (numcache > desiredvnodes) {
		mutex_enter(namecache_lock);
		cache_ev_forced.ev_count++;
		cache_reclaim();
		mutex_exit(namecache_lock);
	}

	ncp = pool_cache_get(namecache_cache, PR_WAITOK);
	mutex_enter(namecache_lock);
	numcache++;

	/*
	 * Concurrent lookups in the same directory may race for a
	 * cache entry.  If there's a duplicated entry, free it.
	 */
	oncp = cache_lookup_entry(dvp, cnp);
	if (oncp) {
		cache_invalidate(oncp);
		mutex_exit(&oncp->nc_lock);
	}

	/* Initialize the new entry. */
	mutex_enter(&ncp->nc_lock);
	ncp->nc_vp = vp;
	ncp->nc_flags = 0;
	ncp->nc_hittime = 0;
	ncp->nc_gcqueue = NULL;
	if (vp == NULL) {
		/*
		 * For negative hits, save the ISWHITEOUT flag so we can
		 * restore it later when the cache entry is used again.
		 */
		ncp->nc_flags = cnp->cn_flags & ISWHITEOUT;
	}
	/* Fill in cache info. */
	ncp->nc_dvp = dvp;
	LIST_INSERT_HEAD(&dvp->v_dnclist, ncp, nc_dvlist);
	if (vp)
		LIST_INSERT_HEAD(&vp->v_nclist, ncp, nc_vlist);
	else {
		ncp->nc_vlist.le_prev = NULL;
		ncp->nc_vlist.le_next = NULL;
	}
	ncp->nc_nlen = cnp->cn_namelen;
	TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	memcpy(ncp->nc_name, cnp->cn_nameptr, (unsigned)ncp->nc_nlen);
	ncpp = &nchashtbl[NCHASH(cnp, dvp)];

	/*
	 * Flush updates before making visible in table.  No need for a
	 * memory barrier on the other side: to see modifications the
	 * list must be followed, meaning a dependent pointer load.
	 */
	membar_producer();
	LIST_INSERT_HEAD(ncpp, ncp, nc_hash);

	ncp->nc_vhash.le_prev = NULL;
	ncp->nc_vhash.le_next = NULL;

	/*
	 * Create reverse-cache entries (used in getcwd) for directories.
	 * (and in linux procfs exe node)
	 */
	if (vp != NULL &&
	    vp != dvp &&
#ifndef NAMECACHE_ENTER_REVERSE
	    vp->v_type == VDIR &&
#endif
	    (ncp->nc_nlen > 2 ||
	    (ncp->nc_nlen > 1 && ncp->nc_name[1] != '.') ||
	    (/* ncp->nc_nlen > 0 && */ ncp->nc_name[0] != '.'))) {
		nvcpp = &ncvhashtbl[NCVHASH(vp)];
		LIST_INSERT_HEAD(nvcpp, ncp, nc_vhash);
	}
	mutex_exit(&ncp->nc_lock);
	mutex_exit(namecache_lock);
}
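
/*
 * A sketch of the usual call site (an assumption about callers, not
 * from this file): a file system's lookup routine, once a directory
 * scan has resolved the name, enters the result when namei asked for
 * it.  Passing vp == NULL records a negative (ENOENT) entry:
 *
 *	if ((cnp->cn_flags & MAKEENTRY) != 0)
 *		cache_enter(dvp, vp, cnp);
 */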

/*
 * Name cache initialization, from vfs_init() when we are booting
 */
void
nchinit(void)
{
	int error;

	namecache_cache = pool_cache_init(sizeof(struct namecache),
	    coherency_unit, 0, 0, "ncache", NULL, IPL_NONE, cache_ctor,
	    cache_dtor, NULL);
	KASSERT(namecache_cache != NULL);

	namecache_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	nchashtbl =
	    hashinit(desiredvnodes, HASH_LIST, M_CACHE, M_WAITOK, &nchash);
	ncvhashtbl =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, M_CACHE, M_WAITOK, &ncvhash);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, M_CACHE, M_WAITOK, &ncvhash);
#endif

	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, cache_thread,
	    NULL, NULL, "cachegc");
	if (error != 0)
		panic("nchinit %d", error);

	evcnt_attach_dynamic(&cache_ev_scan, EVCNT_TYPE_MISC, NULL,
	   "namecache", "entries scanned");
	evcnt_attach_dynamic(&cache_ev_gc, EVCNT_TYPE_MISC, NULL,
	   "namecache", "entries collected");
	evcnt_attach_dynamic(&cache_ev_over, EVCNT_TYPE_MISC, NULL,
	   "namecache", "over scan target");
	evcnt_attach_dynamic(&cache_ev_under, EVCNT_TYPE_MISC, NULL,
	   "namecache", "under scan target");
	evcnt_attach_dynamic(&cache_ev_forced, EVCNT_TYPE_MISC, NULL,
	   "namecache", "forced reclaims");
}

static int
cache_ctor(void *arg, void *obj, int flag)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_init(&ncp->nc_lock, MUTEX_DEFAULT, IPL_NONE);

	return 0;
}

static void
cache_dtor(void *arg, void *obj)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_destroy(&ncp->nc_lock);
}

/*
 * Called once for each CPU in the system as attached.
 */
void
cache_cpu_init(struct cpu_info *ci)
{

	ci->ci_data.cpu_cachelock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
}

/*
 * Name cache reinitialization, for when the maximum number of vnodes increases.
 */
void
nchreinit(void)
{
	struct namecache *ncp;
	struct nchashhead *oldhash1, *hash1;
	struct ncvhashhead *oldhash2, *hash2;
	u_long i, oldmask1, oldmask2, mask1, mask2;

	hash1 = hashinit(desiredvnodes, HASH_LIST, M_CACHE, M_WAITOK, &mask1);
	hash2 =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, M_CACHE, M_WAITOK, &mask2);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, M_CACHE, M_WAITOK, &mask2);
#endif
	mutex_enter(namecache_lock);
	cache_lock_cpus();
	oldhash1 = nchashtbl;
	oldmask1 = nchash;
	nchashtbl = hash1;
	nchash = mask1;
	oldhash2 = ncvhashtbl;
	oldmask2 = ncvhash;
	ncvhashtbl = hash2;
	ncvhash = mask2;
	for (i = 0; i <= oldmask1; i++) {
		while ((ncp = LIST_FIRST(&oldhash1[i])) != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
	}
	for (i = 0; i <= oldmask2; i++) {
		while ((ncp = LIST_FIRST(&oldhash2[i])) != NULL) {
			LIST_REMOVE(ncp, nc_vhash);
			ncp->nc_vhash.le_prev = NULL;
		}
	}
	cache_unlock_cpus();
	mutex_exit(namecache_lock);
	hashdone(oldhash1, M_CACHE);
	hashdone(oldhash2, M_CACHE);
}

/*
 * Cache flush for a particular vnode: called when a vnode is renamed,
 * to hide entries that would now be invalid.
 */
void
cache_purge1(struct vnode *vp, const struct componentname *cnp, int flags)
{
	struct namecache *ncp, *ncnext;

	mutex_enter(namecache_lock);
	if (flags & PURGE_PARENTS) {
		for (ncp = LIST_FIRST(&vp->v_nclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_vlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (flags & PURGE_CHILDREN) {
		for (ncp = LIST_FIRST(&vp->v_dnclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_dvlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (cnp != NULL) {
		ncp = cache_lookup_entry(vp, cnp);
		if (ncp) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			mutex_exit(&ncp->nc_lock);
		}
	}
	mutex_exit(namecache_lock);
}
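
/*
 * Callers usually reach cache_purge1() through a wrapper; cache_purge(vp)
 * in particular is expected to expand to the full flush (an assumption
 * about the companion header, not visible in this file):
 *
 *	cache_purge1(vp, NULL, PURGE_PARENTS | PURGE_CHILDREN);
 */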

/*
 * Cache flush for a whole file system: called when a file system is
 * unmounted, to remove entries that would now be invalid.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct namecache *ncp, *nxtcp;

	mutex_enter(namecache_lock);
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL && ncp->nc_dvp->v_mount == mp) {
			/* Free the resources we had. */
			cache_invalidate(ncp);
			cache_disassociate(ncp);
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_reclaim();
	mutex_exit(namecache_lock);
}

/*
 * Scan global list invalidating entries until we meet a preset target.
 * Prefer to invalidate entries that have not scored a hit within
 * cache_hottime seconds.  We sort the LRU list only for this routine's
 * benefit.
 */
static void
cache_prune(int incache, int target)
{
	struct namecache *ncp, *nxtcp, *sentinel;
	int items, recent, tryharder;

	KASSERT(mutex_owned(namecache_lock));

	items = 0;
	tryharder = 0;
	recent = hardclock_ticks - hz * cache_hottime;
	sentinel = NULL;
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		if (incache <= target)
			break;
		items++;
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		if (ncp->nc_dvp == NULL)
			continue;
		if (ncp == sentinel) {
			/*
			 * If we looped back on ourself, then ignore
			 * recent entries and purge whatever we find.
			 */
			tryharder = 1;
		}
		if (!tryharder && ncp->nc_hittime > recent) {
			if (sentinel == NULL)
				sentinel = ncp;
			TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
			TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
			continue;
		}
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			incache--;
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_ev_scan.ev_count += items;
}

/*
 * Collect dead cache entries from all CPUs and garbage collect.
 */
static void
cache_reclaim(void)
{
	struct namecache *ncp, *next;
	int items;

	KASSERT(mutex_owned(namecache_lock));

	/*
	 * If the number of extant entries not awaiting garbage collection
	 * exceeds the high water mark, then reclaim stale entries until we
	 * reach our low water mark.
	 */
	items = numcache - cache_gcpend;
	if (items > (uint64_t)desiredvnodes * cache_hiwat / 100) {
		cache_prune(items, (int)((uint64_t)desiredvnodes *
		    cache_lowat / 100));
		cache_ev_over.ev_count++;
	} else
		cache_ev_under.ev_count++;

	/*
	 * Stop forward lookup activity on all CPUs and garbage collect dead
	 * entries.
	 */
	cache_lock_cpus();
	ncp = cache_gcqueue;
	cache_gcqueue = NULL;
	items = cache_gcpend;
	cache_gcpend = 0;
	while (ncp != NULL) {
		next = ncp->nc_gcqueue;
		cache_disassociate(ncp);
		KASSERT(ncp->nc_dvp == NULL);
		if (ncp->nc_hash.le_prev != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
		pool_cache_put(namecache_cache, ncp);
		ncp = next;
	}
	cache_unlock_cpus();
	numcache -= items;
	cache_ev_gc.ev_count += items;
}

/*
 * Cache maintenance thread, awakening once per second to:
 *
 * => keep number of entries below the high water mark
 * => sort pseudo-LRU list
 * => garbage collect dead entries
 */
static void
cache_thread(void *arg)
{

	mutex_enter(namecache_lock);
	for (;;) {
		cache_reclaim();
		kpause("cachegc", false, hz, namecache_lock);
	}
}

#ifdef DDB
void
namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
{
	struct vnode *dvp = NULL;
	struct namecache *ncp;

	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp && ncp->nc_dvp != NULL) {
			(*pr)("name %.*s\n", ncp->nc_nlen, ncp->nc_name);
			dvp = ncp->nc_dvp;
		}
	}
	if (dvp == NULL) {
		(*pr)("name not found\n");
		return;
	}
	vp = dvp;
	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp) {
			(*pr)("parent %.*s\n", ncp->nc_nlen, ncp->nc_name);
		}
	}
}
#endif