/*	$NetBSD: vfs_cache.c,v 1.86 2010/07/21 09:01:36 hannken Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.86 2010/07/21 09:01:36 hannken Exp $");

#include "opt_ddb.h"
#include "opt_revcache.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/kthread.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>

#define NAMECACHE_ENTER_REVERSE
/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by a
 * hash value obtained from (dvp, name), where dvp refers to
 * the directory containing name.
 *
 * For simplicity (and economy of storage), names longer than
 * a maximum length of NCHNAMLEN are not cached; they occur
 * infrequently in any case, and are almost never of interest.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 * An entry is also dropped when it was not possible to lock
 * the cached vnode, either because vget() failed or the generation
 * number changed while waiting for the lock.
 */
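
/*
 * Illustrative sketch (not part of the original file): the protocol a
 * file system's lookup path typically follows against this cache.
 * Real callers (e.g. ufs_lookup()) wrap this in their own locking and
 * access checks.
 *
 *	error = cache_lookup(dvp, &vp, cnp);
 *	if (error == 0)		-> hit: vp is locked and referenced
 *	if (error == ENOENT)	-> cached negative entry
 *	if (error == -1)	-> miss: scan the directory on disk,
 *				   then cache_enter(dvp, vp, cnp)
 */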

/*
 * Per-cpu namecache data.
 */
struct nchcpu {
	kmutex_t	cpu_lock;
	struct nchstats	cpu_stats;
};

/*
 * Structures associated with name caching.
 */
LIST_HEAD(nchashhead, namecache) *nchashtbl;
u_long	nchash;				/* size of hash table - 1 */
#define	NCHASH(cnp, dvp)	\
	(((cnp)->cn_hash ^ ((uintptr_t)(dvp) >> 3)) & nchash)

LIST_HEAD(ncvhashhead, namecache) *ncvhashtbl;
u_long	ncvhash;			/* size of hash table - 1 */
#define	NCVHASH(vp)		(((uintptr_t)(vp) >> 3) & ncvhash)

long	numcache;			/* number of cache entries allocated */
static u_int	cache_gcpend;		/* number of entries pending GC */
static void	*cache_gcqueue;		/* garbage collection queue */

TAILQ_HEAD(, namecache) nclruhead =		/* LRU chain */
	TAILQ_HEAD_INITIALIZER(nclruhead);
#define	COUNT(c,x)	(c.x++)
struct	nchstats nchstats;		/* cache effectiveness statistics */

static pool_cache_t namecache_cache;

int cache_lowat = 95;
int cache_hiwat = 98;
int cache_hottime = 5;			/* number of seconds */
int doingcache = 1;			/* 1 => enable the cache */

static struct evcnt cache_ev_scan;
static struct evcnt cache_ev_gc;
static struct evcnt cache_ev_over;
static struct evcnt cache_ev_under;
static struct evcnt cache_ev_forced;

/* A single lock to serialize modifications. */
static kmutex_t *namecache_lock;

static void cache_invalidate(struct namecache *);
static inline struct namecache *cache_lookup_entry(
    const struct vnode *, const struct componentname *);
static void cache_thread(void *);
static void cache_disassociate(struct namecache *);
static void cache_reclaim(void);
static int cache_ctor(void *, void *, int);
static void cache_dtor(void *, void *);

/*
 * Invalidate a cache entry and enqueue it for garbage collection.
 */
static void
cache_invalidate(struct namecache *ncp)
{
	void *head;

	KASSERT(mutex_owned(&ncp->nc_lock));

	if (ncp->nc_dvp != NULL) {
		ncp->nc_vp = NULL;
		ncp->nc_dvp = NULL;
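		/*
		 * Push the entry onto the garbage collection queue
		 * with a lock-free CAS loop: if another thread swapped
		 * in a new head between our read and our CAS, retry.
		 */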
		do {
			head = cache_gcqueue;
			ncp->nc_gcqueue = head;
		} while (atomic_cas_ptr(&cache_gcqueue, head, ncp) != head);
		atomic_inc_uint(&cache_gcpend);
	}
}

/*
 * Disassociate a namecache entry from any vnodes it is attached to,
 * and remove from the global LRU list.
 */
static void
cache_disassociate(struct namecache *ncp)
{

	KASSERT(mutex_owned(namecache_lock));
	KASSERT(ncp->nc_dvp == NULL);

	if (ncp->nc_lru.tqe_prev != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		ncp->nc_lru.tqe_prev = NULL;
	}
	if (ncp->nc_vhash.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vhash);
		ncp->nc_vhash.le_prev = NULL;
	}
	if (ncp->nc_vlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vlist);
		ncp->nc_vlist.le_prev = NULL;
	}
	if (ncp->nc_dvlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_dvlist);
		ncp->nc_dvlist.le_prev = NULL;
	}
}

/*
 * Lock all CPUs to prevent any cache lookup activity.  Conceptually,
 * this locks out all "readers".
 */
static void
cache_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;
	long *s, *d, *m;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_enter(&cpup->cpu_lock);

		/* Collate statistics. */
		d = (long *)&nchstats;
		s = (long *)&cpup->cpu_stats;
		m = s + sizeof(nchstats) / sizeof(long);
		for (; s < m; s++, d++) {
			*d += *s;
			*s = 0;
		}
	}
}

/*
 * Release all CPU locks.
 */
static void
cache_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_exit(&cpup->cpu_lock);
	}
}

/*
 * Find a single cache entry and return it locked.  'namecache_lock' or
 * at least one of the per-CPU locks must be held.
 */
static struct namecache *
cache_lookup_entry(const struct vnode *dvp, const struct componentname *cnp)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;

	KASSERT(dvp != NULL);
	ncpp = &nchashtbl[NCHASH(cnp, dvp)];

	LIST_FOREACH(ncp, ncpp, nc_hash) {
		if (ncp->nc_dvp != dvp ||
		    ncp->nc_nlen != cnp->cn_namelen ||
		    memcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen))
			continue;
		mutex_enter(&ncp->nc_lock);
		if (__predict_true(ncp->nc_dvp == dvp)) {
			ncp->nc_hittime = hardclock_ticks;
			return ncp;
		}
		/* Raced: entry has been nullified. */
		mutex_exit(&ncp->nc_lock);
	}

	return NULL;
}

/*
 * Look for the name in the cache.  We don't do this if the segment
 * name is long, simply so the cache can avoid holding long names
 * (which would either waste space, or add greatly to the complexity).
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp->cn_nameptr pointing to the name of the entry being sought,
 * cnp->cn_namelen telling the length of the name, and cnp->cn_hash
 * containing a hash of the name.  If the lookup succeeds, the vnode
 * is locked and referenced, stored in *vpp, and a status of zero is
 * returned.  If locking the vnode fails for whatever reason, the
 * vnode is released and a status of -1 is returned, as for a miss.
 * If the lookup determines that the name does not exist (negative
 * caching), a status of ENOENT is returned.  If the lookup fails,
 * a status of -1 is returned.
 */
int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error;

	if (__predict_false(!doingcache)) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return -1;
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(cnp->cn_namelen > NCHNAMLEN)) {
		COUNT(cpup->cpu_stats, ncs_long);
		cnp->cn_flags &= ~MAKEENTRY;
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	ncp = cache_lookup_entry(dvp, cnp);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup->cpu_stats, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		COUNT(cpup->cpu_stats, ncs_badhits);
		/*
		 * Last component and we are renaming or deleting,
		 * the cache entry is invalid, or otherwise don't
		 * want cache entry to exist.
		 */
		cache_invalidate(ncp);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	} else if (ncp->nc_vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
		cnp->cn_flags |= ncp->nc_flags;
		if (__predict_true(cnp->cn_nameiop != CREATE ||
		    (cnp->cn_flags & ISLASTCN) == 0)) {
			COUNT(cpup->cpu_stats, ncs_neghits);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(&cpup->cpu_lock);
			return ENOENT;
		} else {
			COUNT(cpup->cpu_stats, ncs_badhits);
			/*
			 * Last component and we are renaming or
			 * deleting, the cache entry is invalid,
			 * or otherwise don't want cache entry to
			 * exist.
			 */
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(&cpup->cpu_lock);
			*vpp = NULL;
			return -1;
		}
	}

	vp = ncp->nc_vp;
	if (vtryget(vp)) {
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
	} else {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		error = vget(vp, LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			KASSERT(error == EBUSY);
			/*
			 * This vnode is being cleaned out.
			 * XXX badhits?
			 */
			COUNT(cpup->cpu_stats, ncs_falsehits);
			*vpp = NULL;
			return -1;
		}
	}

#ifdef DEBUG
	/*
	 * Since we released ncp->nc_lock, we can't use this
	 * pointer any more.
	 */
	ncp = NULL;
#endif /* DEBUG */

	if (vp == dvp) {	/* lookup on "." */
		error = 0;
	} else if (cnp->cn_flags & ISDOTDOT) {
		VOP_UNLOCK(dvp);
		error = vn_lock(vp, LK_EXCLUSIVE);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		error = vn_lock(vp, LK_EXCLUSIVE);
	}

	/*
	 * Check that the lock succeeded.
	 */
	if (error) {
		/* Unlocked, but only for stats. */
		COUNT(cpup->cpu_stats, ncs_badhits);
		vrele(vp);
		*vpp = NULL;
		return -1;
	}

	/* Unlocked, but only for stats. */
	COUNT(cpup->cpu_stats, ncs_goodhits);
	*vpp = vp;
	return 0;
}
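
/*
 * Example (illustrative only, not part of the kernel): how a file
 * system lookup routine might consume the protocol above.  The
 * directory-scan call is a hypothetical stand-in for the on-disk
 * search.
 *
 *	switch (cache_lookup(dvp, vpp, cnp)) {
 *	case 0:
 *		return 0;	(hit: *vpp locked and referenced)
 *	case ENOENT:
 *		return ENOENT;	(cached negative entry)
 *	default:
 *		break;		(miss: fall through to disk)
 *	}
 *	error = fs_scan_directory(dvp, vpp, cnp);	(hypothetical)
 *	if ((cnp->cn_flags & MAKEENTRY) != 0)
 *		cache_enter(dvp, error ? NULL : *vpp, cnp);
 */
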
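/*
 * Variant of cache_lookup() for file systems that do their own
 * locking: on a hit the vnode is returned referenced but unlocked,
 * and a hit is not invalidated when MAKEENTRY is clear.  Return
 * values are otherwise as for cache_lookup().
 */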
int
cache_lookup_raw(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error;

	if (__predict_false(!doingcache)) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return -1;
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(cnp->cn_namelen > NCHNAMLEN)) {
		COUNT(cpup->cpu_stats, ncs_long);
		cnp->cn_flags &= ~MAKEENTRY;
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	ncp = cache_lookup_entry(dvp, cnp);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup->cpu_stats, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	vp = ncp->nc_vp;
	if (vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
		cnp->cn_flags |= ncp->nc_flags;
		COUNT(cpup->cpu_stats, ncs_neghits);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		return ENOENT;
	}
	if (vtryget(vp)) {
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
	} else {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		error = vget(vp, LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			KASSERT(error == EBUSY);
			/*
			 * This vnode is being cleaned out.
			 * XXX badhits?
			 */
			COUNT(cpup->cpu_stats, ncs_falsehits);
			*vpp = NULL;
			return -1;
		}
	}

	/* Unlocked, but only for stats. */
	COUNT(cpup->cpu_stats, ncs_goodhits); /* XXX can be "badhits" */
	*vpp = vp;
	return 0;
}

/*
 * Scan the cache looking for the name of a directory entry pointing
 * at vp.
 *
 * If the lookup succeeds, the parent vnode is referenced and stored
 * in *dvpp.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 */
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
{
	struct namecache *ncp;
	struct vnode *dvp;
	struct ncvhashhead *nvcpp;
	char *bp;
	int error, nlen;

	if (!doingcache)
		goto out;

	nvcpp = &ncvhashtbl[NCVHASH(vp)];

	mutex_enter(namecache_lock);
	LIST_FOREACH(ncp, nvcpp, nc_vhash) {
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_vp == vp &&
		    (dvp = ncp->nc_dvp) != NULL &&
		    dvp != vp) {		/* avoid pesky . entries.. */

#ifdef DIAGNOSTIC
			if (ncp->nc_nlen == 1 &&
			    ncp->nc_name[0] == '.')
				panic("cache_revlookup: found entry for .");

			if (ncp->nc_nlen == 2 &&
			    ncp->nc_name[0] == '.' &&
			    ncp->nc_name[1] == '.')
				panic("cache_revlookup: found entry for ..");
#endif
			COUNT(nchstats, ncs_revhits);
			nlen = ncp->nc_nlen;

			if (bufp) {
				bp = *bpp;
				bp -= nlen;
				if (bp <= bufp) {
					*dvpp = NULL;
					mutex_exit(&ncp->nc_lock);
					mutex_exit(namecache_lock);
					return ERANGE;
				}
				memcpy(bp, ncp->nc_name, nlen);
				*bpp = bp;
			}

			if (vtryget(dvp)) {
				mutex_exit(&ncp->nc_lock);
				mutex_exit(namecache_lock);
			} else {
				mutex_enter(&dvp->v_interlock);
				mutex_exit(&ncp->nc_lock);
				mutex_exit(namecache_lock);
				error = vget(dvp, LK_NOWAIT | LK_INTERLOCK);
				if (error) {
					KASSERT(error == EBUSY);
					if (bufp)
						(*bpp) += nlen;
					*dvpp = NULL;
					return -1;
				}
			}
			*dvpp = dvp;
			return 0;
		}
		mutex_exit(&ncp->nc_lock);
	}
	COUNT(nchstats, ncs_revmiss);
	mutex_exit(namecache_lock);
 out:
	*dvpp = NULL;
	return -1;
}
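
/*
 * Example (illustrative): how a getcwd()-style walk might use
 * cache_revlookup() to build a path right-to-left in a caller-
 * supplied buffer.  Error handling is elided; on success, bp points
 * at the start of the last component of the path of vp within buf.
 *
 *	char buf[MAXPATHLEN], *bp = buf + sizeof(buf);
 *	struct vnode *dvp;
 *
 *	if (cache_revlookup(vp, &dvp, &bp, buf) == 0) {
 *		(dvp is referenced; buf..bp holds the name; prepend
 *		 "/" and repeat on dvp to extend the path upwards)
 *	}
 */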

/*
 * Add an entry to the cache.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct namecache *oncp;
	struct nchashhead *ncpp;
	struct ncvhashhead *nvcpp;

#ifdef DIAGNOSTIC
	if (cnp->cn_namelen > NCHNAMLEN)
		panic("cache_enter: name too long");
#endif
	if (!doingcache)
		return;

	if (numcache > desiredvnodes) {
		mutex_enter(namecache_lock);
		cache_ev_forced.ev_count++;
		cache_reclaim();
		mutex_exit(namecache_lock);
	}

	ncp = pool_cache_get(namecache_cache, PR_WAITOK);
	mutex_enter(namecache_lock);
	numcache++;

	/*
	 * Concurrent lookups in the same directory may race for a
	 * cache entry.  If there's a duplicated entry, free it.
	 */
	oncp = cache_lookup_entry(dvp, cnp);
	if (oncp) {
		cache_invalidate(oncp);
		mutex_exit(&oncp->nc_lock);
	}

	/* Set up the new entry. */
	mutex_enter(&ncp->nc_lock);
	ncp->nc_vp = vp;
	ncp->nc_flags = 0;
	ncp->nc_hittime = 0;
	ncp->nc_gcqueue = NULL;
	if (vp == NULL) {
		/*
		 * For negative hits, save the ISWHITEOUT flag so we can
		 * restore it later when the cache entry is used again.
		 */
		ncp->nc_flags = cnp->cn_flags & ISWHITEOUT;
	}
	/* Fill in cache info. */
	ncp->nc_dvp = dvp;
	LIST_INSERT_HEAD(&dvp->v_dnclist, ncp, nc_dvlist);
	if (vp)
		LIST_INSERT_HEAD(&vp->v_nclist, ncp, nc_vlist);
	else {
		ncp->nc_vlist.le_prev = NULL;
		ncp->nc_vlist.le_next = NULL;
	}
	ncp->nc_nlen = cnp->cn_namelen;
	TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	memcpy(ncp->nc_name, cnp->cn_nameptr, (unsigned)ncp->nc_nlen);
	ncpp = &nchashtbl[NCHASH(cnp, dvp)];

	/*
	 * Flush updates before making the entry visible in the table.
	 * No need for a memory barrier on the other side: to see
	 * modifications the list must be followed, meaning a dependent
	 * pointer load.  The below is LIST_INSERT_HEAD() inlined, with
	 * the memory barrier included in the correct place.
	 */
	if ((ncp->nc_hash.le_next = ncpp->lh_first) != NULL)
		ncpp->lh_first->nc_hash.le_prev = &ncp->nc_hash.le_next;
	ncp->nc_hash.le_prev = &ncpp->lh_first;
	membar_producer();
	ncpp->lh_first = ncp;

	ncp->nc_vhash.le_prev = NULL;
	ncp->nc_vhash.le_next = NULL;

	/*
	 * Create reverse-cache entries (used in getcwd, and in the
	 * linux procfs exe node) for directories.
	 */
	if (vp != NULL &&
	    vp != dvp &&
#ifndef NAMECACHE_ENTER_REVERSE
	    vp->v_type == VDIR &&
#endif
	    (ncp->nc_nlen > 2 ||
	    (ncp->nc_nlen > 1 && ncp->nc_name[1] != '.') ||
	    (/* ncp->nc_nlen > 0 && */ ncp->nc_name[0] != '.'))) {
		nvcpp = &ncvhashtbl[NCVHASH(vp)];
		LIST_INSERT_HEAD(nvcpp, ncp, nc_vhash);
	}
	mutex_exit(&ncp->nc_lock);
	mutex_exit(namecache_lock);
}
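
/*
 * Usage sketch (illustrative): after a directory scan resolves a
 * component, the file system remembers both positive and negative
 * results, so the next lookup can skip the scan entirely:
 *
 *	(found on disk)
 *	cache_enter(dvp, vp, cnp);	(positive entry)
 *
 *	(not found on disk)
 *	cache_enter(dvp, NULL, cnp);	(negative entry -> later ENOENT)
 */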

/*
 * Name cache initialization, from vfs_init() when we are booting
 */
void
nchinit(void)
{
	int error;

	namecache_cache = pool_cache_init(sizeof(struct namecache),
	    coherency_unit, 0, 0, "ncache", NULL, IPL_NONE, cache_ctor,
	    cache_dtor, NULL);
	KASSERT(namecache_cache != NULL);

	namecache_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	nchashtbl = hashinit(desiredvnodes, HASH_LIST, true, &nchash);
	ncvhashtbl =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &ncvhash);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &ncvhash);
#endif

	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, cache_thread,
	    NULL, NULL, "cachegc");
	if (error != 0)
		panic("nchinit %d", error);

	evcnt_attach_dynamic(&cache_ev_scan, EVCNT_TYPE_MISC, NULL,
	   "namecache", "entries scanned");
	evcnt_attach_dynamic(&cache_ev_gc, EVCNT_TYPE_MISC, NULL,
	   "namecache", "entries collected");
	evcnt_attach_dynamic(&cache_ev_over, EVCNT_TYPE_MISC, NULL,
	   "namecache", "over scan target");
	evcnt_attach_dynamic(&cache_ev_under, EVCNT_TYPE_MISC, NULL,
	   "namecache", "under scan target");
	evcnt_attach_dynamic(&cache_ev_forced, EVCNT_TYPE_MISC, NULL,
	   "namecache", "forced reclaims");
}

static int
cache_ctor(void *arg, void *obj, int flag)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_init(&ncp->nc_lock, MUTEX_DEFAULT, IPL_NONE);

	return 0;
}

static void
cache_dtor(void *arg, void *obj)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_destroy(&ncp->nc_lock);
}

/*
 * Called once for each CPU in the system as attached.
 */
void
cache_cpu_init(struct cpu_info *ci)
{
	struct nchcpu *cpup;
	size_t sz;

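	/*
	 * Allocate with slack and round the pointer up to a cache line
	 * boundary, so each CPU's lock and statistics live on their own
	 * line and CPUs do not falsely share cache lines.
	 */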
	sz = roundup2(sizeof(*cpup), coherency_unit) + coherency_unit;
	cpup = kmem_zalloc(sz, KM_SLEEP);
	cpup = (void *)roundup2((uintptr_t)cpup, coherency_unit);
	mutex_init(&cpup->cpu_lock, MUTEX_DEFAULT, IPL_NONE);
	ci->ci_data.cpu_nch = cpup;
}

/*
 * Name cache reinitialization, for when the maximum number of vnodes
 * increases.
 */
void
nchreinit(void)
{
	struct namecache *ncp;
	struct nchashhead *oldhash1, *hash1;
	struct ncvhashhead *oldhash2, *hash2;
	u_long i, oldmask1, oldmask2, mask1, mask2;

	hash1 = hashinit(desiredvnodes, HASH_LIST, true, &mask1);
	hash2 =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &mask2);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &mask2);
#endif
	mutex_enter(namecache_lock);
	cache_lock_cpus();
	oldhash1 = nchashtbl;
	oldmask1 = nchash;
	nchashtbl = hash1;
	nchash = mask1;
	oldhash2 = ncvhashtbl;
	oldmask2 = ncvhash;
	ncvhashtbl = hash2;
	ncvhash = mask2;
	for (i = 0; i <= oldmask1; i++) {
		while ((ncp = LIST_FIRST(&oldhash1[i])) != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
	}
	for (i = 0; i <= oldmask2; i++) {
		while ((ncp = LIST_FIRST(&oldhash2[i])) != NULL) {
			LIST_REMOVE(ncp, nc_vhash);
			ncp->nc_vhash.le_prev = NULL;
		}
	}
	cache_unlock_cpus();
	mutex_exit(namecache_lock);
	hashdone(oldhash1, HASH_LIST, oldmask1);
	hashdone(oldhash2, HASH_LIST, oldmask2);
}

/*
 * Cache flush, for a particular vnode: called when a vnode is renamed
 * to purge entries that would now be invalid.
 */
void
cache_purge1(struct vnode *vp, const struct componentname *cnp, int flags)
{
	struct namecache *ncp, *ncnext;

	mutex_enter(namecache_lock);
	if (flags & PURGE_PARENTS) {
		for (ncp = LIST_FIRST(&vp->v_nclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_vlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (flags & PURGE_CHILDREN) {
		for (ncp = LIST_FIRST(&vp->v_dnclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_dvlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (cnp != NULL) {
		ncp = cache_lookup_entry(vp, cnp);
		if (ncp) {
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	mutex_exit(namecache_lock);
}
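
/*
 * Note (assumption, see sys/namei.h): callers normally reach this
 * through wrapper macros such as cache_purge(vp), which at the time
 * of this revision expanded to
 * cache_purge1(vp, NULL, PURGE_PARENTS | PURGE_CHILDREN).
 */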

/*
 * Cache flush, for a whole filesystem: called when a filesystem is
 * unmounted to remove entries that would now be invalid.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct namecache *ncp, *nxtcp;

	mutex_enter(namecache_lock);
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL && ncp->nc_dvp->v_mount == mp) {
			/* Free the resources we had. */
			cache_invalidate(ncp);
			cache_disassociate(ncp);
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_reclaim();
	mutex_exit(namecache_lock);
}

/*
 * Scan global list invalidating entries until we meet a preset target.
 * Prefer to invalidate entries that have not scored a hit within
 * cache_hottime seconds.  We sort the LRU list only for this routine's
 * benefit.
 */
static void
cache_prune(int incache, int target)
{
	struct namecache *ncp, *nxtcp, *sentinel;
	int items, recent, tryharder;

	KASSERT(mutex_owned(namecache_lock));

	items = 0;
	tryharder = 0;
	recent = hardclock_ticks - hz * cache_hottime;
	sentinel = NULL;
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		if (incache <= target)
			break;
		items++;
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		if (ncp->nc_dvp == NULL)
			continue;
		if (ncp == sentinel) {
			/*
			 * If we looped back on ourself, then ignore
			 * recent entries and purge whatever we find.
			 */
			tryharder = 1;
		}
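		/*
		 * The subtraction below compares hit times in a way
		 * that is meant to tolerate hardclock_ticks wrapping
		 * around: entries hit within the last cache_hottime
		 * seconds are kept on the first pass.
		 */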
		if (!tryharder && (ncp->nc_hittime - recent) > 0) {
			if (sentinel == NULL)
				sentinel = ncp;
			TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
			TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
			continue;
		}
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			incache--;
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_ev_scan.ev_count += items;
}

/*
 * Collect dead cache entries from all CPUs and garbage collect.
 */
static void
cache_reclaim(void)
{
	struct namecache *ncp, *next;
	int items;

	KASSERT(mutex_owned(namecache_lock));

	/*
	 * If the number of extant entries not awaiting garbage collection
	 * exceeds the high water mark, then reclaim stale entries until we
	 * reach our low water mark.
	 */
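	/*
	 * For example, with desiredvnodes = 100000 and the default
	 * cache_hiwat = 98 and cache_lowat = 95, pruning starts once
	 * more than 98000 live entries exist and stops at 95000.
	 */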
	items = numcache - cache_gcpend;
	if (items > (uint64_t)desiredvnodes * cache_hiwat / 100) {
		cache_prune(items, (int)((uint64_t)desiredvnodes *
		    cache_lowat / 100));
		cache_ev_over.ev_count++;
	} else
		cache_ev_under.ev_count++;

	/*
	 * Stop forward lookup activity on all CPUs and garbage collect dead
	 * entries.
	 */
	cache_lock_cpus();
	ncp = cache_gcqueue;
	cache_gcqueue = NULL;
	items = cache_gcpend;
	cache_gcpend = 0;
	while (ncp != NULL) {
		next = ncp->nc_gcqueue;
		cache_disassociate(ncp);
		KASSERT(ncp->nc_dvp == NULL);
		if (ncp->nc_hash.le_prev != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
		pool_cache_put(namecache_cache, ncp);
		ncp = next;
	}
	cache_unlock_cpus();
	numcache -= items;
	cache_ev_gc.ev_count += items;
}

/*
 * Cache maintenance thread, awakening once per second to:
 *
 * => keep number of entries below the high water mark
 * => sort pseudo-LRU list
 * => garbage collect dead entries
 */
static void
cache_thread(void *arg)
{

	mutex_enter(namecache_lock);
	for (;;) {
		cache_reclaim();
		kpause("cachegc", false, hz, namecache_lock);
	}
}

#ifdef DDB
void
namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
{
	struct vnode *dvp = NULL;
	struct namecache *ncp;

	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp && ncp->nc_dvp != NULL) {
			(*pr)("name %.*s\n", ncp->nc_nlen, ncp->nc_name);
			dvp = ncp->nc_dvp;
		}
	}
	if (dvp == NULL) {
		(*pr)("name not found\n");
		return;
	}
	vp = dvp;
	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp) {
			(*pr)("parent %.*s\n", ncp->nc_nlen, ncp->nc_name);
		}
	}
}
#endif