vfs_cache.c revision 1.97
      1  1.97     joerg /*	$NetBSD: vfs_cache.c,v 1.97 2014/06/03 21:16:15 joerg Exp $	*/
      2  1.73        ad 
      3  1.73        ad /*-
      4  1.73        ad  * Copyright (c) 2008 The NetBSD Foundation, Inc.
      5  1.73        ad  * All rights reserved.
      6  1.73        ad  *
      7  1.73        ad  * Redistribution and use in source and binary forms, with or without
      8  1.73        ad  * modification, are permitted provided that the following conditions
      9  1.73        ad  * are met:
     10  1.73        ad  * 1. Redistributions of source code must retain the above copyright
     11  1.73        ad  *    notice, this list of conditions and the following disclaimer.
     12  1.73        ad  * 2. Redistributions in binary form must reproduce the above copyright
     13  1.73        ad  *    notice, this list of conditions and the following disclaimer in the
     14  1.73        ad  *    documentation and/or other materials provided with the distribution.
     15  1.73        ad  *
     16  1.73        ad  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     17  1.73        ad  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     18  1.73        ad  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     19  1.73        ad  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     20  1.73        ad  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     21  1.73        ad  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     22  1.73        ad  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     23  1.73        ad  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     24  1.73        ad  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     25  1.73        ad  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     26  1.73        ad  * POSSIBILITY OF SUCH DAMAGE.
     27  1.73        ad  */
     28   1.6       cgd 
     29   1.1       cgd /*
     30   1.5   mycroft  * Copyright (c) 1989, 1993
     31   1.5   mycroft  *	The Regents of the University of California.  All rights reserved.
     32   1.1       cgd  *
     33   1.1       cgd  * Redistribution and use in source and binary forms, with or without
     34   1.1       cgd  * modification, are permitted provided that the following conditions
     35   1.1       cgd  * are met:
     36   1.1       cgd  * 1. Redistributions of source code must retain the above copyright
     37   1.1       cgd  *    notice, this list of conditions and the following disclaimer.
     38   1.1       cgd  * 2. Redistributions in binary form must reproduce the above copyright
     39   1.1       cgd  *    notice, this list of conditions and the following disclaimer in the
     40   1.1       cgd  *    documentation and/or other materials provided with the distribution.
     41  1.51       agc  * 3. Neither the name of the University nor the names of its contributors
     42   1.1       cgd  *    may be used to endorse or promote products derived from this software
     43   1.1       cgd  *    without specific prior written permission.
     44   1.1       cgd  *
     45   1.1       cgd  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     46   1.1       cgd  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     47   1.1       cgd  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     48   1.1       cgd  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     49   1.1       cgd  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     50   1.1       cgd  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     51   1.1       cgd  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     52   1.1       cgd  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     53   1.1       cgd  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     54   1.1       cgd  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     55   1.1       cgd  * SUCH DAMAGE.
     56   1.1       cgd  *
     57  1.10   mycroft  *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
     58   1.1       cgd  */
     59  1.32     lukem 
     60  1.32     lukem #include <sys/cdefs.h>
     61  1.97     joerg __KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.97 2014/06/03 21:16:15 joerg Exp $");
     62   1.1       cgd 
     63  1.28       chs #include "opt_ddb.h"
     64  1.29      fvdl #include "opt_revcache.h"
     65  1.28       chs 
     66   1.4   mycroft #include <sys/param.h>
     67   1.4   mycroft #include <sys/systm.h>
     68  1.97     joerg #include <sys/sysctl.h>
     69   1.4   mycroft #include <sys/time.h>
     70   1.4   mycroft #include <sys/mount.h>
     71   1.4   mycroft #include <sys/vnode.h>
     72   1.4   mycroft #include <sys/namei.h>
     73   1.4   mycroft #include <sys/errno.h>
     74  1.18   thorpej #include <sys/pool.h>
     75  1.68        ad #include <sys/mutex.h>
     76  1.73        ad #include <sys/atomic.h>
     77  1.73        ad #include <sys/kthread.h>
     78  1.73        ad #include <sys/kernel.h>
     79  1.73        ad #include <sys/cpu.h>
     80  1.73        ad #include <sys/evcnt.h>
     81   1.1       cgd 
     82  1.66  christos #define NAMECACHE_ENTER_REVERSE
     83   1.1       cgd /*
     84   1.1       cgd  * Name caching works as follows:
     85   1.1       cgd  *
     86   1.1       cgd  * Names found by directory scans are retained in a cache
     87   1.1       cgd  * for future reference.  It is managed LRU, so frequently
      88   1.1       cgd  * used names will hang around.  The cache is indexed by a hash value
     89  1.20  jdolecek  * obtained from (dvp, name) where dvp refers to the directory
     90   1.1       cgd  * containing name.
     91   1.1       cgd  *
     92   1.1       cgd  * For simplicity (and economy of storage), names longer than
     93   1.1       cgd  * a maximum length of NCHNAMLEN are not cached; they occur
     94   1.1       cgd  * infrequently in any case, and are almost never of interest.
     95   1.1       cgd  *
     96   1.1       cgd  * Upon reaching the last segment of a path, if the reference
     97   1.1       cgd  * is for DELETE, or NOCACHE is set (rewrite), and the
     98   1.1       cgd  * name is located in the cache, it will be dropped.
      99  1.20  jdolecek  * The entry is also dropped when it was not possible to lock
     100  1.20  jdolecek  * the cached vnode, either because vget() failed or because the
     101  1.20  jdolecek  * generation number changed while waiting for the lock.
    102   1.1       cgd  */
    103   1.1       cgd 
    104   1.1       cgd /*
    105  1.77        ad  * Per-cpu namecache data.
    106  1.77        ad  */
    107  1.77        ad struct nchcpu {
    108  1.77        ad 	kmutex_t	cpu_lock;
    109  1.77        ad 	struct nchstats	cpu_stats;
    110  1.77        ad };
    111  1.77        ad 
    112  1.77        ad /*
    113  1.90  dholland  * The type for the hash code. While the hash function generates a
    114  1.90  dholland  * u32, the hash code has historically been passed around as a u_long,
    115  1.90  dholland  * and the value is modified by xor'ing a uintptr_t, so it's not
    116  1.90  dholland  * entirely clear what the best type is. For now I'll leave it
    117  1.90  dholland  * unchanged as u_long.
    118  1.90  dholland  */
    119  1.90  dholland 
    120  1.90  dholland typedef u_long nchash_t;
    121  1.90  dholland 
    122  1.90  dholland /*
     123   1.1       cgd  * Structures associated with name caching.
    124   1.1       cgd  */
    125  1.89     rmind 
    126  1.89     rmind static kmutex_t *namecache_lock __read_mostly;
    127  1.89     rmind static pool_cache_t namecache_cache __read_mostly;
    128  1.89     rmind static TAILQ_HEAD(, namecache) nclruhead __cacheline_aligned;
    129  1.89     rmind 
    130  1.89     rmind static LIST_HEAD(nchashhead, namecache) *nchashtbl __read_mostly;
    131  1.89     rmind static u_long	nchash __read_mostly;
    132  1.89     rmind 
    133  1.90  dholland #define	NCHASH2(hash, dvp)	\
    134  1.90  dholland 	(((hash) ^ ((uintptr_t)(dvp) >> 3)) & nchash)
    135  1.19  sommerfe 
    136  1.89     rmind static LIST_HEAD(ncvhashhead, namecache) *ncvhashtbl __read_mostly;
    137  1.89     rmind static u_long	ncvhash __read_mostly;
    138  1.89     rmind 
    139  1.48      yamt #define	NCVHASH(vp)		(((uintptr_t)(vp) >> 3) & ncvhash)
    140  1.19  sommerfe 
    141  1.89     rmind /* Number of cache entries allocated. */
    142  1.89     rmind static long	numcache __cacheline_aligned;
    143  1.73        ad 
    144  1.89     rmind /* Garbage collection queue and number of entries pending in it. */
    145  1.89     rmind static void	*cache_gcqueue;
    146  1.89     rmind static u_int	cache_gcpend;
    147  1.89     rmind 
    148  1.89     rmind /* Cache effectiveness statistics. */
    149  1.89     rmind struct nchstats	nchstats __cacheline_aligned;
    150  1.77        ad #define	COUNT(c,x)	(c.x++)
    151  1.38   thorpej 
    152  1.89     rmind static const int cache_lowat = 95;
    153  1.89     rmind static const int cache_hiwat = 98;
    154  1.89     rmind static const int cache_hottime = 5;	/* number of seconds */
    155  1.89     rmind static int doingcache = 1;		/* 1 => enable the cache */
    156   1.1       cgd 
    157  1.73        ad static struct evcnt cache_ev_scan;
    158  1.73        ad static struct evcnt cache_ev_gc;
    159  1.73        ad static struct evcnt cache_ev_over;
    160  1.73        ad static struct evcnt cache_ev_under;
    161  1.73        ad static struct evcnt cache_ev_forced;
    162  1.73        ad 
    163  1.73        ad static void cache_invalidate(struct namecache *);
    164  1.89     rmind static struct namecache *cache_lookup_entry(
    165  1.91  dholland     const struct vnode *, const char *, size_t);
    166  1.73        ad static void cache_thread(void *);
    168  1.73        ad static void cache_disassociate(struct namecache *);
    169  1.73        ad static void cache_reclaim(void);
    170  1.73        ad static int cache_ctor(void *, void *, int);
    171  1.73        ad static void cache_dtor(void *, void *);
    172  1.46      yamt 
    173  1.73        ad /*
    174  1.90  dholland  * Compute the hash for an entry.
    175  1.90  dholland  *
    176  1.90  dholland  * (This is for now a wrapper around namei_hash, whose interface is
    177  1.90  dholland  * for the time being slightly inconvenient.)
    178  1.90  dholland  */
    179  1.90  dholland static nchash_t
    180  1.91  dholland cache_hash(const char *name, size_t namelen)
    181  1.90  dholland {
    182  1.90  dholland 	const char *endptr;
    183  1.90  dholland 
    184  1.91  dholland 	endptr = name + namelen;
    185  1.91  dholland 	return namei_hash(name, &endptr);
    186  1.90  dholland }
    187  1.90  dholland 
    188  1.90  dholland /*
    189  1.73        ad  * Invalidate a cache entry and enqueue it for garbage collection.
    190  1.73        ad  */
    191  1.46      yamt static void
    192  1.73        ad cache_invalidate(struct namecache *ncp)
    193  1.46      yamt {
    194  1.73        ad 	void *head;
    195  1.46      yamt 
    196  1.73        ad 	KASSERT(mutex_owned(&ncp->nc_lock));
    197  1.46      yamt 
    198  1.73        ad 	if (ncp->nc_dvp != NULL) {
    199  1.73        ad 		ncp->nc_vp = NULL;
    200  1.73        ad 		ncp->nc_dvp = NULL;
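                         		/*
                         		 * Push the entry onto the lock-free garbage
                         		 * collection queue: retry the compare-and-swap
                         		 * until no other thread has changed the list
                         		 * head underneath us.
                         		 */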
    201  1.73        ad 		do {
    202  1.73        ad 			head = cache_gcqueue;
    203  1.73        ad 			ncp->nc_gcqueue = head;
    204  1.73        ad 		} while (atomic_cas_ptr(&cache_gcqueue, head, ncp) != head);
    205  1.73        ad 		atomic_inc_uint(&cache_gcpend);
    206  1.73        ad 	}
    207  1.73        ad }
    208  1.46      yamt 
    209  1.73        ad /*
    210  1.73        ad  * Disassociate a namecache entry from any vnodes it is attached to,
     211  1.73        ad  * and remove it from the global LRU list.
    212  1.73        ad  */
    213  1.73        ad static void
    214  1.73        ad cache_disassociate(struct namecache *ncp)
    215  1.73        ad {
    216  1.73        ad 
    217  1.73        ad 	KASSERT(mutex_owned(namecache_lock));
    218  1.73        ad 	KASSERT(ncp->nc_dvp == NULL);
    219  1.73        ad 
    220  1.73        ad 	if (ncp->nc_lru.tqe_prev != NULL) {
    221  1.73        ad 		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
    222  1.73        ad 		ncp->nc_lru.tqe_prev = NULL;
    223  1.46      yamt 	}
    224  1.46      yamt 	if (ncp->nc_vhash.le_prev != NULL) {
    225  1.46      yamt 		LIST_REMOVE(ncp, nc_vhash);
    226  1.46      yamt 		ncp->nc_vhash.le_prev = NULL;
    227  1.46      yamt 	}
    228  1.46      yamt 	if (ncp->nc_vlist.le_prev != NULL) {
    229  1.46      yamt 		LIST_REMOVE(ncp, nc_vlist);
    230  1.46      yamt 		ncp->nc_vlist.le_prev = NULL;
    231  1.46      yamt 	}
    232  1.46      yamt 	if (ncp->nc_dvlist.le_prev != NULL) {
    233  1.46      yamt 		LIST_REMOVE(ncp, nc_dvlist);
    234  1.46      yamt 		ncp->nc_dvlist.le_prev = NULL;
    235  1.46      yamt 	}
    236  1.46      yamt }
    237  1.46      yamt 
    238  1.73        ad /*
     239  1.73        ad  * Lock all CPUs to prevent any cache lookup activity.  Conceptually,
     240  1.73        ad  * this locks out all "readers"; per-CPU stats also fold into nchstats.
    241  1.73        ad  */
    242  1.96     joerg #define	UPDATE(f) do { \
    243  1.96     joerg 	nchstats.f += cpup->cpu_stats.f; \
    244  1.96     joerg 	cpup->cpu_stats.f = 0; \
    245  1.96     joerg } while (/* CONSTCOND */ 0)
    246  1.96     joerg 
    247  1.46      yamt static void
    248  1.73        ad cache_lock_cpus(void)
    249  1.46      yamt {
    250  1.73        ad 	CPU_INFO_ITERATOR cii;
    251  1.73        ad 	struct cpu_info *ci;
    252  1.77        ad 	struct nchcpu *cpup;
    253  1.46      yamt 
    254  1.73        ad 	for (CPU_INFO_FOREACH(cii, ci)) {
    255  1.77        ad 		cpup = ci->ci_data.cpu_nch;
    256  1.77        ad 		mutex_enter(&cpup->cpu_lock);
    257  1.96     joerg 		UPDATE(ncs_goodhits);
    258  1.96     joerg 		UPDATE(ncs_neghits);
    259  1.96     joerg 		UPDATE(ncs_badhits);
    260  1.96     joerg 		UPDATE(ncs_falsehits);
    261  1.96     joerg 		UPDATE(ncs_miss);
    262  1.96     joerg 		UPDATE(ncs_long);
    263  1.96     joerg 		UPDATE(ncs_pass2);
    264  1.96     joerg 		UPDATE(ncs_2passes);
    265  1.96     joerg 		UPDATE(ncs_revhits);
    266  1.96     joerg 		UPDATE(ncs_revmiss);
    267  1.73        ad 	}
    268  1.46      yamt }
    269  1.46      yamt 
    270  1.96     joerg #undef UPDATE
    271  1.96     joerg 
    272  1.73        ad /*
    273  1.73        ad  * Release all CPU locks.
    274  1.73        ad  */
    275  1.73        ad static void
    276  1.73        ad cache_unlock_cpus(void)
    277  1.73        ad {
    278  1.73        ad 	CPU_INFO_ITERATOR cii;
    279  1.73        ad 	struct cpu_info *ci;
    280  1.77        ad 	struct nchcpu *cpup;
    281  1.73        ad 
    282  1.73        ad 	for (CPU_INFO_FOREACH(cii, ci)) {
    283  1.77        ad 		cpup = ci->ci_data.cpu_nch;
    284  1.77        ad 		mutex_exit(&cpup->cpu_lock);
    285  1.73        ad 	}
    286  1.73        ad }
    287  1.73        ad 
    288  1.73        ad /*
    289  1.73        ad  * Find a single cache entry and return it locked.  'namecache_lock' or
    290  1.73        ad  * at least one of the per-CPU locks must be held.
    291  1.73        ad  */
    292  1.73        ad static struct namecache *
    293  1.91  dholland cache_lookup_entry(const struct vnode *dvp, const char *name, size_t namelen)
    294  1.55      yamt {
    295  1.55      yamt 	struct nchashhead *ncpp;
    296  1.55      yamt 	struct namecache *ncp;
    297  1.90  dholland 	nchash_t hash;
    298  1.55      yamt 
    299  1.84      yamt 	KASSERT(dvp != NULL);
    300  1.91  dholland 	hash = cache_hash(name, namelen);
    301  1.90  dholland 	ncpp = &nchashtbl[NCHASH2(hash, dvp)];
    302  1.55      yamt 
    303  1.55      yamt 	LIST_FOREACH(ncp, ncpp, nc_hash) {
    304  1.73        ad 		if (ncp->nc_dvp != dvp ||
    305  1.91  dholland 		    ncp->nc_nlen != namelen ||
    306  1.91  dholland 		    memcmp(ncp->nc_name, name, (u_int)ncp->nc_nlen))
    307  1.73        ad 		    	continue;
    308  1.73        ad 	    	mutex_enter(&ncp->nc_lock);
    309  1.77        ad 		if (__predict_true(ncp->nc_dvp == dvp)) {
    310  1.73        ad 			ncp->nc_hittime = hardclock_ticks;
    311  1.73        ad 			return ncp;
    312  1.73        ad 		}
    313  1.73        ad 		/* Raced: entry has been nullified. */
    314  1.73        ad 		mutex_exit(&ncp->nc_lock);
    315  1.55      yamt 	}
    316  1.55      yamt 
    317  1.73        ad 	return NULL;
    318  1.55      yamt }
    319  1.55      yamt 
    320   1.1       cgd /*
     321   1.1       cgd  * Look for the name in the cache. We don't do this
    322   1.1       cgd  * if the segment name is long, simply so the cache can avoid
    323   1.1       cgd  * holding long names (which would either waste space, or
    324   1.1       cgd  * add greatly to the complexity).
    325   1.1       cgd  *
    326  1.90  dholland  * Lookup is called with DVP pointing to the directory to search,
    327  1.90  dholland  * and CNP providing the name of the entry being sought: cn_nameptr
    328  1.90  dholland  * is the name, cn_namelen is its length, and cn_flags is the flags
    329  1.90  dholland  * word from the namei operation.
    330  1.90  dholland  *
    331  1.90  dholland  * DVP must be locked.
    332  1.90  dholland  *
    333  1.90  dholland  * There are three possible non-error return states:
    334  1.90  dholland  *    1. Nothing was found in the cache. Nothing is known about
    335  1.90  dholland  *       the requested name.
    336  1.90  dholland  *    2. A negative entry was found in the cache, meaning that the
    337  1.90  dholland  *       requested name definitely does not exist.
    338  1.90  dholland  *    3. A positive entry was found in the cache, meaning that the
    339  1.90  dholland  *       requested name does exist and that we are providing the
    340  1.90  dholland  *       vnode.
    341  1.90  dholland  * In these cases the results are:
    342  1.90  dholland  *    1. 0 returned; VN is set to NULL.
    343  1.90  dholland  *    2. 1 returned; VN is set to NULL.
    344  1.90  dholland  *    3. 1 returned; VN is set to the vnode found.
    345  1.90  dholland  *
    346  1.90  dholland  * The additional result argument ISWHT is set to zero, unless a
    347  1.90  dholland  * negative entry is found that was entered as a whiteout, in which
    348  1.90  dholland  * case ISWHT is set to one.
    349  1.90  dholland  *
    350  1.90  dholland  * The ISWHT_RET argument pointer may be null. In this case an
    351  1.90  dholland  * assertion is made that the whiteout flag is not set. File systems
    352  1.90  dholland  * that do not support whiteouts can/should do this.
    353  1.90  dholland  *
    354  1.90  dholland  * Filesystems that do support whiteouts should add ISWHITEOUT to
    355  1.90  dholland  * cnp->cn_flags if ISWHT comes back nonzero.
    356  1.90  dholland  *
     357  1.90  dholland  * When a vnode is returned, it is unlocked, as per the vnode lookup
    358  1.90  dholland  * locking protocol.
    359  1.90  dholland  *
    360  1.90  dholland  * There is no way for this function to fail, in the sense of
    361  1.90  dholland  * generating an error that requires aborting the namei operation.
    362  1.90  dholland  *
    363  1.90  dholland  * (Prior to October 2012, this function returned an integer status,
    364  1.90  dholland  * and a vnode, and mucked with the flags word in CNP for whiteouts.
    365  1.90  dholland  * The integer status was -1 for "nothing found", ENOENT for "a
    366  1.90  dholland  * negative entry found", 0 for "a positive entry found", and possibly
    367  1.90  dholland  * other errors, and the value of VN might or might not have been set
    368  1.90  dholland  * depending on what error occurred.)
    369   1.1       cgd  */
    370   1.5   mycroft int
    371  1.91  dholland cache_lookup(struct vnode *dvp, const char *name, size_t namelen,
    372  1.91  dholland 	     uint32_t nameiop, uint32_t cnflags,
    373  1.90  dholland 	     int *iswht_ret, struct vnode **vn_ret)
    374   1.1       cgd {
    375  1.23  augustss 	struct namecache *ncp;
    376  1.20  jdolecek 	struct vnode *vp;
    377  1.77        ad 	struct nchcpu *cpup;
    378  1.36   thorpej 	int error;
    379   1.1       cgd 
    380  1.90  dholland 	/* Establish default result values */
    381  1.90  dholland 	if (iswht_ret != NULL) {
    382  1.90  dholland 		*iswht_ret = 0;
    383  1.90  dholland 	}
    384  1.90  dholland 	*vn_ret = NULL;
    385  1.90  dholland 
    386  1.77        ad 	if (__predict_false(!doingcache)) {
    387  1.90  dholland 		return 0;
    388   1.8       cgd 	}
    389  1.39        pk 
    390  1.77        ad 	cpup = curcpu()->ci_data.cpu_nch;
    391  1.77        ad 	mutex_enter(&cpup->cpu_lock);
    392  1.91  dholland 	if (__predict_false(namelen > NCHNAMLEN)) {
    393  1.77        ad 		COUNT(cpup->cpu_stats, ncs_long);
    394  1.77        ad 		mutex_exit(&cpup->cpu_lock);
    395  1.90  dholland 		/* found nothing */
    396  1.90  dholland 		return 0;
    397   1.1       cgd 	}
    398  1.91  dholland 	ncp = cache_lookup_entry(dvp, name, namelen);
    399  1.77        ad 	if (__predict_false(ncp == NULL)) {
    400  1.77        ad 		COUNT(cpup->cpu_stats, ncs_miss);
    401  1.77        ad 		mutex_exit(&cpup->cpu_lock);
    402  1.90  dholland 		/* found nothing */
    403  1.90  dholland 		return 0;
    404   1.1       cgd 	}
    405  1.91  dholland 	if ((cnflags & MAKEENTRY) == 0) {
    406  1.77        ad 		COUNT(cpup->cpu_stats, ncs_badhits);
    407  1.77        ad 		/*
     408  1.77        ad 		 * This is the last component and we are renaming
     409  1.77        ad 		 * or deleting, the cache entry is invalid, or we
     410  1.77        ad 		 * otherwise don't want the entry to exist.
    411  1.77        ad 		 */
    412  1.77        ad 		cache_invalidate(ncp);
    413  1.77        ad 		mutex_exit(&ncp->nc_lock);
    414  1.77        ad 		mutex_exit(&cpup->cpu_lock);
    415  1.90  dholland 		/* found nothing */
    416  1.90  dholland 		return 0;
    417  1.90  dholland 	}
    418  1.90  dholland 	if (ncp->nc_vp == NULL) {
    419  1.90  dholland 		if (iswht_ret != NULL) {
    420  1.90  dholland 			/*
    421  1.90  dholland 			 * Restore the ISWHITEOUT flag saved earlier.
    422  1.90  dholland 			 */
    423  1.90  dholland 			KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
    424  1.90  dholland 			*iswht_ret = (ncp->nc_flags & ISWHITEOUT) != 0;
    425  1.90  dholland 		} else {
    426  1.90  dholland 			KASSERT(ncp->nc_flags == 0);
    427  1.90  dholland 		}
    428  1.90  dholland 
    429  1.91  dholland 		if (__predict_true(nameiop != CREATE ||
    430  1.91  dholland 		    (cnflags & ISLASTCN) == 0)) {
    431  1.77        ad 			COUNT(cpup->cpu_stats, ncs_neghits);
    432  1.73        ad 			mutex_exit(&ncp->nc_lock);
    433  1.77        ad 			mutex_exit(&cpup->cpu_lock);
    434  1.90  dholland 			/* found neg entry; vn is already null from above */
    435  1.90  dholland 			return 1;
    436  1.20  jdolecek 		} else {
    437  1.77        ad 			COUNT(cpup->cpu_stats, ncs_badhits);
    438  1.77        ad 			/*
     439  1.77        ad 			 * This is the last component and we are
     440  1.77        ad 			 * renaming or deleting, the cache entry
     441  1.77        ad 			 * is invalid, or we otherwise don't want
     442  1.77        ad 			 * the entry to exist.
    443  1.77        ad 			 */
    444  1.77        ad 			cache_invalidate(ncp);
    445  1.77        ad 			mutex_exit(&ncp->nc_lock);
    446  1.77        ad 			mutex_exit(&cpup->cpu_lock);
    447  1.90  dholland 			/* found nothing */
    448  1.90  dholland 			return 0;
    449  1.20  jdolecek 		}
    450  1.20  jdolecek 	}
    451  1.20  jdolecek 
    452  1.20  jdolecek 	vp = ncp->nc_vp;
    453  1.92   hannken 	mutex_enter(vp->v_interlock);
    454  1.92   hannken 	mutex_exit(&ncp->nc_lock);
    455  1.92   hannken 	mutex_exit(&cpup->cpu_lock);
    456  1.92   hannken 	error = vget(vp, LK_NOWAIT);
    457  1.92   hannken 	if (error) {
    458  1.92   hannken 		KASSERT(error == EBUSY);
    459  1.92   hannken 		/*
    460  1.92   hannken 		 * This vnode is being cleaned out.
    461  1.92   hannken 		 * XXX badhits?
    462  1.92   hannken 		 */
    463  1.92   hannken 		COUNT(cpup->cpu_stats, ncs_falsehits);
    464  1.92   hannken 		/* found nothing */
    465  1.92   hannken 		return 0;
    466  1.77        ad 	}
    467  1.39        pk 
    468  1.52      yamt #ifdef DEBUG
    469  1.52      yamt 	/*
     470  1.73        ad 	 * since we released ncp->nc_lock,
    471  1.52      yamt 	 * we can't use this pointer any more.
    472  1.52      yamt 	 */
    473  1.52      yamt 	ncp = NULL;
    474  1.52      yamt #endif /* DEBUG */
    475  1.52      yamt 
    476  1.90  dholland 	/* We don't have the right lock, but this is only for stats. */
    477  1.77        ad 	COUNT(cpup->cpu_stats, ncs_goodhits);
    478  1.90  dholland 
    479  1.90  dholland 	/* found it */
    480  1.90  dholland 	*vn_ret = vp;
    481  1.90  dholland 	return 1;
    482   1.1       cgd }
    483   1.1       cgd 
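                         /*
                          * Example (an illustrative sketch, not part of this file): a file
                          * system's lookup routine might consume the three result states
                          * like so; example_fs_lookup() and its locals are hypothetical.
                          *
                          *	int
                          *	example_fs_lookup(struct vnode *dvp, struct vnode **vpp,
                          *	    struct componentname *cnp)
                          *	{
                          *		int iswht;
                          *
                          *		if (cache_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
                          *		    cnp->cn_nameiop, cnp->cn_flags, &iswht, vpp)) {
                          *			if (*vpp != NULL)
                          *				return 0;		(positive hit)
                          *			if (iswht)
                          *				cnp->cn_flags |= ISWHITEOUT;
                          *			return ENOENT;		(negative hit)
                          *		}
                          *		(nothing known: scan the directory on disk and
                          *		 cache_enter() whatever is learned)
                          *	}
                          */
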
    484  1.61      yamt int
    485  1.91  dholland cache_lookup_raw(struct vnode *dvp, const char *name, size_t namelen,
    486  1.91  dholland 		 uint32_t cnflags,
    487  1.90  dholland 		 int *iswht_ret, struct vnode **vn_ret)
    488  1.61      yamt {
    489  1.61      yamt 	struct namecache *ncp;
    490  1.61      yamt 	struct vnode *vp;
    491  1.77        ad 	struct nchcpu *cpup;
    492  1.61      yamt 	int error;
    493  1.61      yamt 
    494  1.90  dholland 	/* Establish default results. */
    495  1.90  dholland 	if (iswht_ret != NULL) {
    496  1.90  dholland 		*iswht_ret = 0;
    497  1.90  dholland 	}
    498  1.90  dholland 	*vn_ret = NULL;
    499  1.90  dholland 
    500  1.77        ad 	if (__predict_false(!doingcache)) {
    501  1.90  dholland 		/* found nothing */
    502  1.90  dholland 		return 0;
    503  1.61      yamt 	}
    504  1.61      yamt 
    505  1.77        ad 	cpup = curcpu()->ci_data.cpu_nch;
    506  1.77        ad 	mutex_enter(&cpup->cpu_lock);
    507  1.91  dholland 	if (__predict_false(namelen > NCHNAMLEN)) {
    508  1.77        ad 		COUNT(cpup->cpu_stats, ncs_long);
    509  1.77        ad 		mutex_exit(&cpup->cpu_lock);
    510  1.90  dholland 		/* found nothing */
    511  1.90  dholland 		return 0;
    512  1.61      yamt 	}
    513  1.91  dholland 	ncp = cache_lookup_entry(dvp, name, namelen);
    514  1.77        ad 	if (__predict_false(ncp == NULL)) {
    515  1.77        ad 		COUNT(cpup->cpu_stats, ncs_miss);
    516  1.77        ad 		mutex_exit(&cpup->cpu_lock);
    517  1.90  dholland 		/* found nothing */
    518  1.90  dholland 		return 0;
    519  1.61      yamt 	}
    520  1.61      yamt 	vp = ncp->nc_vp;
    521  1.61      yamt 	if (vp == NULL) {
    522  1.61      yamt 		/*
    523  1.61      yamt 		 * Restore the ISWHITEOUT flag saved earlier.
    524  1.61      yamt 		 */
    525  1.90  dholland 		if (iswht_ret != NULL) {
    526  1.90  dholland 			KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
    527  1.90  dholland 			/*cnp->cn_flags |= ncp->nc_flags;*/
    528  1.90  dholland 			*iswht_ret = (ncp->nc_flags & ISWHITEOUT) != 0;
    529  1.90  dholland 		}
    530  1.77        ad 		COUNT(cpup->cpu_stats, ncs_neghits);
    531  1.73        ad 		mutex_exit(&ncp->nc_lock);
    532  1.77        ad 		mutex_exit(&cpup->cpu_lock);
    533  1.90  dholland 		/* found negative entry; vn is already null from above */
    534  1.90  dholland 		return 1;
    535  1.61      yamt 	}
    536  1.92   hannken 	mutex_enter(vp->v_interlock);
    537  1.92   hannken 	mutex_exit(&ncp->nc_lock);
    538  1.92   hannken 	mutex_exit(&cpup->cpu_lock);
    539  1.92   hannken 	error = vget(vp, LK_NOWAIT);
    540  1.92   hannken 	if (error) {
    541  1.92   hannken 		KASSERT(error == EBUSY);
    542  1.92   hannken 		/*
    543  1.92   hannken 		 * This vnode is being cleaned out.
    544  1.92   hannken 		 * XXX badhits?
    545  1.92   hannken 		 */
    546  1.92   hannken 		COUNT(cpup->cpu_stats, ncs_falsehits);
    547  1.92   hannken 		/* found nothing */
    548  1.92   hannken 		return 0;
    549  1.61      yamt 	}
    550  1.61      yamt 
    551  1.80      yamt 	/* Unlocked, but only for stats. */
    552  1.80      yamt 	COUNT(cpup->cpu_stats, ncs_goodhits); /* XXX can be "badhits" */
    553  1.90  dholland 
    554  1.90  dholland 	/* found it */
    555  1.90  dholland 	*vn_ret = vp;
    556  1.90  dholland 	return 1;
    557  1.61      yamt }
    558  1.61      yamt 
    559   1.1       cgd /*
    560  1.19  sommerfe  * Scan cache looking for name of directory entry pointing at vp.
    561  1.19  sommerfe  *
    562  1.86   hannken  * If the lookup succeeds the vnode is referenced and stored in dvpp.
    563  1.19  sommerfe  *
    564  1.19  sommerfe  * If bufp is non-NULL, also place the name in the buffer which starts
    565  1.19  sommerfe  * at bufp, immediately before *bpp, and move bpp backwards to point
    566  1.19  sommerfe  * at the start of it.  (Yes, this is a little baroque, but it's done
    567  1.19  sommerfe  * this way to cater to the whims of getcwd).
    568  1.19  sommerfe  *
    569  1.19  sommerfe  * Returns 0 on success, -1 on cache miss, positive errno on failure.
    570  1.19  sommerfe  */
    571  1.19  sommerfe int
    572  1.34     enami cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
    573  1.19  sommerfe {
    574  1.19  sommerfe 	struct namecache *ncp;
    575  1.19  sommerfe 	struct vnode *dvp;
    576  1.95     joerg 	struct nchcpu *cpup;
    577  1.19  sommerfe 	struct ncvhashhead *nvcpp;
    578  1.34     enami 	char *bp;
    579  1.86   hannken 	int error, nlen;
    580  1.34     enami 
    581  1.19  sommerfe 	if (!doingcache)
    582  1.19  sommerfe 		goto out;
    583  1.19  sommerfe 
    584  1.30       chs 	nvcpp = &ncvhashtbl[NCVHASH(vp)];
    585  1.95     joerg 	cpup = curcpu()->ci_data.cpu_nch;
    586  1.19  sommerfe 
    587  1.73        ad 	mutex_enter(namecache_lock);
    588  1.27       chs 	LIST_FOREACH(ncp, nvcpp, nc_vhash) {
    589  1.73        ad 		mutex_enter(&ncp->nc_lock);
    590  1.34     enami 		if (ncp->nc_vp == vp &&
    591  1.34     enami 		    (dvp = ncp->nc_dvp) != NULL &&
    592  1.47      yamt 		    dvp != vp) { 		/* avoid pesky . entries.. */
    593  1.34     enami 
    594  1.19  sommerfe #ifdef DIAGNOSTIC
    595  1.34     enami 			if (ncp->nc_nlen == 1 &&
    596  1.34     enami 			    ncp->nc_name[0] == '.')
    597  1.19  sommerfe 				panic("cache_revlookup: found entry for .");
    598  1.19  sommerfe 
    599  1.34     enami 			if (ncp->nc_nlen == 2 &&
    600  1.34     enami 			    ncp->nc_name[0] == '.' &&
    601  1.34     enami 			    ncp->nc_name[1] == '.')
    602  1.19  sommerfe 				panic("cache_revlookup: found entry for ..");
    603  1.19  sommerfe #endif
    604  1.95     joerg 			mutex_enter(&cpup->cpu_lock);
    605  1.95     joerg 			COUNT(cpup->cpu_stats, ncs_revhits);
    606  1.95     joerg 			mutex_exit(&cpup->cpu_lock);
    607  1.86   hannken 			nlen = ncp->nc_nlen;
    608  1.19  sommerfe 
    609  1.19  sommerfe 			if (bufp) {
    610  1.19  sommerfe 				bp = *bpp;
    611  1.86   hannken 				bp -= nlen;
    612  1.19  sommerfe 				if (bp <= bufp) {
    613  1.34     enami 					*dvpp = NULL;
    614  1.73        ad 					mutex_exit(&ncp->nc_lock);
    615  1.73        ad 					mutex_exit(namecache_lock);
    616  1.34     enami 					return (ERANGE);
    617  1.19  sommerfe 				}
    618  1.86   hannken 				memcpy(bp, ncp->nc_name, nlen);
    619  1.19  sommerfe 				*bpp = bp;
    620  1.19  sommerfe 			}
    621  1.34     enami 
    622  1.92   hannken 			mutex_enter(dvp->v_interlock);
    623  1.92   hannken 			mutex_exit(&ncp->nc_lock);
    624  1.92   hannken 			mutex_exit(namecache_lock);
    625  1.92   hannken 			error = vget(dvp, LK_NOWAIT);
    626  1.92   hannken 			if (error) {
    627  1.92   hannken 				KASSERT(error == EBUSY);
    628  1.92   hannken 				if (bufp)
    629  1.92   hannken 					(*bpp) += nlen;
    630  1.92   hannken 				*dvpp = NULL;
    631  1.92   hannken 				return -1;
    632  1.86   hannken 			}
    633  1.19  sommerfe 			*dvpp = dvp;
    634  1.34     enami 			return (0);
    635  1.19  sommerfe 		}
    636  1.73        ad 		mutex_exit(&ncp->nc_lock);
    637  1.19  sommerfe 	}
    638  1.95     joerg 	mutex_enter(&cpup->cpu_lock);
    639  1.95     joerg 	COUNT(cpup->cpu_stats, ncs_revmiss);
    640  1.95     joerg 	mutex_exit(&cpup->cpu_lock);
    641  1.73        ad 	mutex_exit(namecache_lock);
    642  1.19  sommerfe  out:
    643  1.34     enami 	*dvpp = NULL;
    644  1.34     enami 	return (-1);
    645  1.19  sommerfe }
    646  1.19  sommerfe 
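                         /*
                          * Example (an illustrative sketch, not part of this file): a
                          * getcwd-style caller builds the path backwards from the end
                          * of a buffer:
                          *
                          *	char buf[MAXPATHLEN];
                          *	char *bp = buf + sizeof(buf);
                          *	struct vnode *dvp;
                          *
                          *	if (cache_revlookup(vp, &dvp, &bp, buf) == 0) {
                          *		(bp now points at the component name and dvp
                          *		 holds a referenced parent vnode; repeat with
                          *		 vp = dvp, prepending '/' before each component,
                          *		 to recover a full path)
                          *	}
                          */
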
    647  1.19  sommerfe /*
    648   1.1       cgd  * Add an entry to the cache
    649   1.1       cgd  */
    650  1.13  christos void
    651  1.91  dholland cache_enter(struct vnode *dvp, struct vnode *vp,
    652  1.91  dholland 	    const char *name, size_t namelen, uint32_t cnflags)
    653   1.1       cgd {
    654  1.23  augustss 	struct namecache *ncp;
    655  1.59      yamt 	struct namecache *oncp;
    656  1.23  augustss 	struct nchashhead *ncpp;
    657  1.23  augustss 	struct ncvhashhead *nvcpp;
    658  1.90  dholland 	nchash_t hash;
    659   1.1       cgd 
    660  1.89     rmind 	/* First, check whether we can/should add a cache entry. */
    661  1.91  dholland 	if ((cnflags & MAKEENTRY) == 0 ||
    662  1.91  dholland 	    __predict_false(namelen > NCHNAMLEN || !doingcache)) {
    663   1.1       cgd 		return;
    664  1.89     rmind 	}
    665  1.58      yamt 
    666  1.73        ad 	if (numcache > desiredvnodes) {
    667  1.73        ad 		mutex_enter(namecache_lock);
    668  1.73        ad 		cache_ev_forced.ev_count++;
    669  1.73        ad 		cache_reclaim();
    670  1.73        ad 		mutex_exit(namecache_lock);
    671  1.39        pk 	}
    672  1.57        pk 
    673  1.73        ad 	ncp = pool_cache_get(namecache_cache, PR_WAITOK);
    674  1.73        ad 	mutex_enter(namecache_lock);
    675  1.73        ad 	numcache++;
    676  1.73        ad 
    677  1.59      yamt 	/*
    678  1.59      yamt 	 * Concurrent lookups in the same directory may race for a
     679  1.59      yamt 	 * cache entry.  If there's a duplicated entry, free it.
    680  1.59      yamt 	 */
    681  1.91  dholland 	oncp = cache_lookup_entry(dvp, name, namelen);
    682  1.59      yamt 	if (oncp) {
    683  1.73        ad 		cache_invalidate(oncp);
    684  1.73        ad 		mutex_exit(&oncp->nc_lock);
    685  1.59      yamt 	}
    686  1.59      yamt 
    687  1.34     enami 	/* Grab the vnode we just found. */
    688  1.73        ad 	mutex_enter(&ncp->nc_lock);
    689   1.5   mycroft 	ncp->nc_vp = vp;
    690  1.73        ad 	ncp->nc_flags = 0;
    691  1.73        ad 	ncp->nc_hittime = 0;
    692  1.73        ad 	ncp->nc_gcqueue = NULL;
    693  1.47      yamt 	if (vp == NULL) {
    694  1.11   mycroft 		/*
    695  1.11   mycroft 		 * For negative hits, save the ISWHITEOUT flag so we can
    696  1.11   mycroft 		 * restore it later when the cache entry is used again.
    697  1.11   mycroft 		 */
    698  1.91  dholland 		ncp->nc_flags = cnflags & ISWHITEOUT;
    699  1.11   mycroft 	}
    700  1.89     rmind 
    701  1.34     enami 	/* Fill in cache info. */
    702   1.5   mycroft 	ncp->nc_dvp = dvp;
    703  1.46      yamt 	LIST_INSERT_HEAD(&dvp->v_dnclist, ncp, nc_dvlist);
    704  1.46      yamt 	if (vp)
    705  1.46      yamt 		LIST_INSERT_HEAD(&vp->v_nclist, ncp, nc_vlist);
    706  1.73        ad 	else {
    707  1.73        ad 		ncp->nc_vlist.le_prev = NULL;
    708  1.73        ad 		ncp->nc_vlist.le_next = NULL;
    709  1.73        ad 	}
    710  1.91  dholland 	KASSERT(namelen <= NCHNAMLEN);
    711  1.91  dholland 	ncp->nc_nlen = namelen;
    712  1.91  dholland 	memcpy(ncp->nc_name, name, (unsigned)ncp->nc_nlen);
    713  1.73        ad 	TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
    714  1.91  dholland 	hash = cache_hash(name, namelen);
    715  1.90  dholland 	ncpp = &nchashtbl[NCHASH2(hash, dvp)];
    716  1.73        ad 
    717  1.73        ad 	/*
    718  1.73        ad 	 * Flush updates before making visible in table.  No need for a
    719  1.73        ad 	 * memory barrier on the other side: to see modifications the
    720  1.73        ad 	 * list must be followed, meaning a dependent pointer load.
    721  1.74        ad 	 * The below is LIST_INSERT_HEAD() inlined, with the memory
    722  1.74        ad 	 * barrier included in the correct place.
    723  1.73        ad 	 */
    724  1.74        ad 	if ((ncp->nc_hash.le_next = ncpp->lh_first) != NULL)
    725  1.74        ad 		ncpp->lh_first->nc_hash.le_prev = &ncp->nc_hash.le_next;
    726  1.74        ad 	ncp->nc_hash.le_prev = &ncpp->lh_first;
    727  1.73        ad 	membar_producer();
    728  1.74        ad 	ncpp->lh_first = ncp;
    729  1.19  sommerfe 
    730  1.34     enami 	ncp->nc_vhash.le_prev = NULL;
    731  1.34     enami 	ncp->nc_vhash.le_next = NULL;
    732  1.34     enami 
    733  1.19  sommerfe 	/*
     734  1.19  sommerfe 	 * Create reverse-cache entries (used in getcwd, and by the
     735  1.66  christos 	 * Linux procfs exe node) for directories.
    736  1.19  sommerfe 	 */
    737  1.33     enami 	if (vp != NULL &&
    738  1.33     enami 	    vp != dvp &&
    739  1.29      fvdl #ifndef NAMECACHE_ENTER_REVERSE
    740  1.33     enami 	    vp->v_type == VDIR &&
    741  1.29      fvdl #endif
    742  1.33     enami 	    (ncp->nc_nlen > 2 ||
    743  1.33     enami 	    (ncp->nc_nlen > 1 && ncp->nc_name[1] != '.') ||
    744  1.33     enami 	    (/* ncp->nc_nlen > 0 && */ ncp->nc_name[0] != '.'))) {
    745  1.30       chs 		nvcpp = &ncvhashtbl[NCVHASH(vp)];
    746  1.19  sommerfe 		LIST_INSERT_HEAD(nvcpp, ncp, nc_vhash);
    747  1.19  sommerfe 	}
    748  1.73        ad 	mutex_exit(&ncp->nc_lock);
    749  1.73        ad 	mutex_exit(namecache_lock);
    750   1.1       cgd }
    751   1.1       cgd 
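                         /*
                          * Example (an illustrative sketch, not part of this file): a file
                          * system's lookup routine typically records its result once the
                          * directory has been scanned:
                          *
                          *	cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
                          *	    cnp->cn_flags);
                          *
                          * Passing vp == NULL creates a negative entry recording that the
                          * name is known not to exist.
                          */
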
    752   1.1       cgd /*
    753   1.1       cgd  * Name cache initialization, from vfs_init() when we are booting
    754   1.1       cgd  */
    755  1.13  christos void
    756  1.34     enami nchinit(void)
    757   1.1       cgd {
    758  1.73        ad 	int error;
    759   1.1       cgd 
    760  1.89     rmind 	TAILQ_INIT(&nclruhead);
    761  1.73        ad 	namecache_cache = pool_cache_init(sizeof(struct namecache),
    762  1.73        ad 	    coherency_unit, 0, 0, "ncache", NULL, IPL_NONE, cache_ctor,
    763  1.73        ad 	    cache_dtor, NULL);
    764  1.71        ad 	KASSERT(namecache_cache != NULL);
    765  1.71        ad 
    766  1.73        ad 	namecache_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
    767  1.73        ad 
    768  1.76        ad 	nchashtbl = hashinit(desiredvnodes, HASH_LIST, true, &nchash);
    769  1.26        ad 	ncvhashtbl =
    770  1.29      fvdl #ifdef NAMECACHE_ENTER_REVERSE
    771  1.76        ad 	    hashinit(desiredvnodes, HASH_LIST, true, &ncvhash);
    772  1.29      fvdl #else
    773  1.76        ad 	    hashinit(desiredvnodes/8, HASH_LIST, true, &ncvhash);
    774  1.29      fvdl #endif
    775  1.73        ad 
    776  1.73        ad 	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, cache_thread,
    777  1.73        ad 	    NULL, NULL, "cachegc");
    778  1.73        ad 	if (error != 0)
    779  1.73        ad 		panic("nchinit %d", error);
    780  1.73        ad 
    781  1.73        ad 	evcnt_attach_dynamic(&cache_ev_scan, EVCNT_TYPE_MISC, NULL,
    782  1.73        ad 	   "namecache", "entries scanned");
    783  1.73        ad 	evcnt_attach_dynamic(&cache_ev_gc, EVCNT_TYPE_MISC, NULL,
    784  1.73        ad 	   "namecache", "entries collected");
    785  1.73        ad 	evcnt_attach_dynamic(&cache_ev_over, EVCNT_TYPE_MISC, NULL,
    786  1.73        ad 	   "namecache", "over scan target");
    787  1.73        ad 	evcnt_attach_dynamic(&cache_ev_under, EVCNT_TYPE_MISC, NULL,
    788  1.73        ad 	   "namecache", "under scan target");
    789  1.73        ad 	evcnt_attach_dynamic(&cache_ev_forced, EVCNT_TYPE_MISC, NULL,
    790  1.73        ad 	   "namecache", "forced reclaims");
    791  1.73        ad }
    792  1.73        ad 
    793  1.73        ad static int
    794  1.73        ad cache_ctor(void *arg, void *obj, int flag)
    795  1.73        ad {
    796  1.73        ad 	struct namecache *ncp;
    797  1.73        ad 
    798  1.73        ad 	ncp = obj;
    799  1.73        ad 	mutex_init(&ncp->nc_lock, MUTEX_DEFAULT, IPL_NONE);
    800  1.73        ad 
    801  1.73        ad 	return 0;
    802  1.73        ad }
    803  1.73        ad 
    804  1.73        ad static void
    805  1.73        ad cache_dtor(void *arg, void *obj)
    806  1.73        ad {
    807  1.73        ad 	struct namecache *ncp;
    808  1.73        ad 
    809  1.73        ad 	ncp = obj;
    810  1.73        ad 	mutex_destroy(&ncp->nc_lock);
    811  1.73        ad }
    812  1.73        ad 
    813  1.73        ad /*
    814  1.73        ad  * Called once for each CPU in the system as attached.
    815  1.73        ad  */
    816  1.73        ad void
    817  1.73        ad cache_cpu_init(struct cpu_info *ci)
    818  1.73        ad {
    819  1.77        ad 	struct nchcpu *cpup;
    820  1.77        ad 	size_t sz;
    821  1.73        ad 
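                         	/*
                         	 * Over-allocate and round the pointer up to a cache line
                         	 * boundary so that the per-CPU structure does not share a
                         	 * line with unrelated data (false sharing).  The block is
                         	 * never freed, so discarding the original pointer is fine.
                         	 */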
    822  1.77        ad 	sz = roundup2(sizeof(*cpup), coherency_unit) + coherency_unit;
    823  1.77        ad 	cpup = kmem_zalloc(sz, KM_SLEEP);
    824  1.77        ad 	cpup = (void *)roundup2((uintptr_t)cpup, coherency_unit);
    825  1.77        ad 	mutex_init(&cpup->cpu_lock, MUTEX_DEFAULT, IPL_NONE);
    826  1.77        ad 	ci->ci_data.cpu_nch = cpup;
    827  1.30       chs }
    828  1.30       chs 
    829  1.30       chs /*
    830  1.30       chs  * Name cache reinitialization, for when the maximum number of vnodes increases.
    831  1.30       chs  */
    832  1.30       chs void
    833  1.34     enami nchreinit(void)
    834  1.30       chs {
    835  1.30       chs 	struct namecache *ncp;
    836  1.30       chs 	struct nchashhead *oldhash1, *hash1;
    837  1.30       chs 	struct ncvhashhead *oldhash2, *hash2;
    838  1.36   thorpej 	u_long i, oldmask1, oldmask2, mask1, mask2;
    839  1.30       chs 
    840  1.76        ad 	hash1 = hashinit(desiredvnodes, HASH_LIST, true, &mask1);
    841  1.30       chs 	hash2 =
    842  1.30       chs #ifdef NAMECACHE_ENTER_REVERSE
    843  1.76        ad 	    hashinit(desiredvnodes, HASH_LIST, true, &mask2);
    844  1.30       chs #else
    845  1.76        ad 	    hashinit(desiredvnodes/8, HASH_LIST, true, &mask2);
    846  1.30       chs #endif
    847  1.73        ad 	mutex_enter(namecache_lock);
    848  1.73        ad 	cache_lock_cpus();
    849  1.30       chs 	oldhash1 = nchashtbl;
    850  1.30       chs 	oldmask1 = nchash;
    851  1.30       chs 	nchashtbl = hash1;
    852  1.30       chs 	nchash = mask1;
    853  1.30       chs 	oldhash2 = ncvhashtbl;
    854  1.30       chs 	oldmask2 = ncvhash;
    855  1.30       chs 	ncvhashtbl = hash2;
    856  1.30       chs 	ncvhash = mask2;
    857  1.30       chs 	for (i = 0; i <= oldmask1; i++) {
    858  1.30       chs 		while ((ncp = LIST_FIRST(&oldhash1[i])) != NULL) {
    859  1.30       chs 			LIST_REMOVE(ncp, nc_hash);
    860  1.30       chs 			ncp->nc_hash.le_prev = NULL;
    861  1.30       chs 		}
    862  1.30       chs 	}
    863  1.30       chs 	for (i = 0; i <= oldmask2; i++) {
    864  1.30       chs 		while ((ncp = LIST_FIRST(&oldhash2[i])) != NULL) {
    865  1.30       chs 			LIST_REMOVE(ncp, nc_vhash);
    866  1.30       chs 			ncp->nc_vhash.le_prev = NULL;
    867  1.30       chs 		}
    868  1.30       chs 	}
    869  1.73        ad 	cache_unlock_cpus();
    870  1.73        ad 	mutex_exit(namecache_lock);
    871  1.76        ad 	hashdone(oldhash1, HASH_LIST, oldmask1);
    872  1.76        ad 	hashdone(oldhash2, HASH_LIST, oldmask2);
    873   1.1       cgd }
    874   1.1       cgd 
    875   1.1       cgd /*
     876   1.1       cgd  * Cache flush, a particular vnode; called when a vnode is renamed
     877   1.1       cgd  * to hide entries that would now be invalid.
    878   1.1       cgd  */
    879  1.13  christos void
    880  1.91  dholland cache_purge1(struct vnode *vp, const char *name, size_t namelen, int flags)
    881   1.1       cgd {
    882  1.46      yamt 	struct namecache *ncp, *ncnext;
    883   1.1       cgd 
    884  1.73        ad 	mutex_enter(namecache_lock);
    885  1.55      yamt 	if (flags & PURGE_PARENTS) {
    886  1.55      yamt 		for (ncp = LIST_FIRST(&vp->v_nclist); ncp != NULL;
    887  1.55      yamt 		    ncp = ncnext) {
    888  1.55      yamt 			ncnext = LIST_NEXT(ncp, nc_vlist);
    889  1.73        ad 			mutex_enter(&ncp->nc_lock);
    890  1.73        ad 			cache_invalidate(ncp);
    891  1.73        ad 			mutex_exit(&ncp->nc_lock);
    892  1.73        ad 			cache_disassociate(ncp);
    893  1.55      yamt 		}
    894  1.55      yamt 	}
    895  1.55      yamt 	if (flags & PURGE_CHILDREN) {
    896  1.55      yamt 		for (ncp = LIST_FIRST(&vp->v_dnclist); ncp != NULL;
    897  1.55      yamt 		    ncp = ncnext) {
    898  1.55      yamt 			ncnext = LIST_NEXT(ncp, nc_dvlist);
    899  1.73        ad 			mutex_enter(&ncp->nc_lock);
    900  1.73        ad 			cache_invalidate(ncp);
    901  1.73        ad 			mutex_exit(&ncp->nc_lock);
    902  1.73        ad 			cache_disassociate(ncp);
    903  1.55      yamt 		}
    904  1.46      yamt 	}
    905  1.91  dholland 	if (name != NULL) {
    906  1.91  dholland 		ncp = cache_lookup_entry(vp, name, namelen);
    907  1.55      yamt 		if (ncp) {
    908  1.73        ad 			cache_invalidate(ncp);
    909  1.83      yamt 			mutex_exit(&ncp->nc_lock);
    910  1.73        ad 			cache_disassociate(ncp);
    911  1.55      yamt 		}
    912  1.46      yamt 	}
    913  1.73        ad 	mutex_exit(namecache_lock);
    914   1.1       cgd }
    915   1.1       cgd 
    916   1.1       cgd /*
     917   1.1       cgd  * Cache flush, a whole file system; called at unmount time to
    918  1.27       chs  * remove entries that would now be invalid.
    919   1.1       cgd  */
    920  1.13  christos void
    921  1.34     enami cache_purgevfs(struct mount *mp)
    922   1.1       cgd {
    923  1.23  augustss 	struct namecache *ncp, *nxtcp;
    924   1.1       cgd 
    925  1.73        ad 	mutex_enter(namecache_lock);
    926  1.73        ad 	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
    927  1.73        ad 		nxtcp = TAILQ_NEXT(ncp, nc_lru);
    928  1.73        ad 		mutex_enter(&ncp->nc_lock);
    929  1.73        ad 		if (ncp->nc_dvp != NULL && ncp->nc_dvp->v_mount == mp) {
    930  1.73        ad 			/* Free the resources we had. */
    931  1.73        ad 			cache_invalidate(ncp);
    932  1.73        ad 			cache_disassociate(ncp);
    933  1.73        ad 		}
    934  1.73        ad 		mutex_exit(&ncp->nc_lock);
    935  1.73        ad 	}
    936  1.73        ad 	cache_reclaim();
    937  1.73        ad 	mutex_exit(namecache_lock);
    938  1.73        ad }
    939  1.73        ad 
    940  1.73        ad /*
    941  1.73        ad  * Scan global list invalidating entries until we meet a preset target.
    942  1.73        ad  * Prefer to invalidate entries that have not scored a hit within
    943  1.73        ad  * cache_hottime seconds.  We sort the LRU list only for this routine's
    944  1.73        ad  * benefit.
    945  1.73        ad  */
    946  1.73        ad static void
    947  1.73        ad cache_prune(int incache, int target)
    948  1.73        ad {
    949  1.73        ad 	struct namecache *ncp, *nxtcp, *sentinel;
    950  1.73        ad 	int items, recent, tryharder;
    951  1.73        ad 
    952  1.73        ad 	KASSERT(mutex_owned(namecache_lock));
    953  1.73        ad 
    954  1.73        ad 	items = 0;
    955  1.73        ad 	tryharder = 0;
    956  1.73        ad 	recent = hardclock_ticks - hz * cache_hottime;
    957  1.73        ad 	sentinel = NULL;
    958  1.27       chs 	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
    959  1.73        ad 		if (incache <= target)
    960  1.73        ad 			break;
    961  1.73        ad 		items++;
    962  1.27       chs 		nxtcp = TAILQ_NEXT(ncp, nc_lru);
    963  1.73        ad 		if (ncp == sentinel) {
    964  1.73        ad 			/*
    965  1.73        ad 			 * If we looped back on ourself, then ignore
    966  1.73        ad 			 * recent entries and purge whatever we find.
    967  1.73        ad 			 */
    968  1.73        ad 			tryharder = 1;
    969   1.5   mycroft 		}
    970  1.93   hannken 		if (ncp->nc_dvp == NULL)
    971  1.93   hannken 			continue;
    972  1.81      yamt 		if (!tryharder && (ncp->nc_hittime - recent) > 0) {
    973  1.73        ad 			if (sentinel == NULL)
    974  1.73        ad 				sentinel = ncp;
    975  1.73        ad 			TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
    976  1.73        ad 			TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
    977  1.73        ad 			continue;
    978  1.73        ad 		}
    979  1.73        ad 		mutex_enter(&ncp->nc_lock);
    980  1.73        ad 		if (ncp->nc_dvp != NULL) {
    981  1.73        ad 			cache_invalidate(ncp);
    982  1.73        ad 			cache_disassociate(ncp);
    983  1.73        ad 			incache--;
    984  1.73        ad 		}
    985  1.73        ad 		mutex_exit(&ncp->nc_lock);
    986  1.73        ad 	}
    987  1.73        ad 	cache_ev_scan.ev_count += items;
    988  1.73        ad }
    989  1.73        ad 
    990  1.73        ad /*
    991  1.73        ad  * Collect dead cache entries from all CPUs and garbage collect.
    992  1.73        ad  */
    993  1.73        ad static void
    994  1.73        ad cache_reclaim(void)
    995  1.73        ad {
    996  1.73        ad 	struct namecache *ncp, *next;
    997  1.73        ad 	int items;
    998  1.73        ad 
    999  1.73        ad 	KASSERT(mutex_owned(namecache_lock));
   1000  1.73        ad 
   1001  1.73        ad 	/*
   1002  1.73        ad 	 * If the number of extant entries not awaiting garbage collection
   1003  1.73        ad 	 * exceeds the high water mark, then reclaim stale entries until we
   1004  1.73        ad 	 * reach our low water mark.
   1005  1.73        ad 	 */
   1006  1.73        ad 	items = numcache - cache_gcpend;
   1007  1.73        ad 	if (items > (uint64_t)desiredvnodes * cache_hiwat / 100) {
   1008  1.73        ad 		cache_prune(items, (int)((uint64_t)desiredvnodes *
   1009  1.73        ad 		    cache_lowat / 100));
   1010  1.73        ad 		cache_ev_over.ev_count++;
   1011  1.73        ad 	} else
   1012  1.73        ad 		cache_ev_under.ev_count++;
   1013  1.73        ad 
   1014  1.73        ad 	/*
   1015  1.73        ad 	 * Stop forward lookup activity on all CPUs and garbage collect dead
   1016  1.73        ad 	 * entries.
   1017  1.73        ad 	 */
   1018  1.73        ad 	cache_lock_cpus();
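                         	/*
                         	 * With namecache_lock and every per-CPU lock held, nothing
                         	 * can be pushing onto the garbage collection queue, so a
                         	 * plain swap of the list head is safe.
                         	 */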
   1019  1.73        ad 	ncp = cache_gcqueue;
   1020  1.73        ad 	cache_gcqueue = NULL;
   1021  1.73        ad 	items = cache_gcpend;
   1022  1.73        ad 	cache_gcpend = 0;
   1023  1.73        ad 	while (ncp != NULL) {
   1024  1.73        ad 		next = ncp->nc_gcqueue;
   1025  1.73        ad 		cache_disassociate(ncp);
   1026  1.73        ad 		KASSERT(ncp->nc_dvp == NULL);
   1027  1.73        ad 		if (ncp->nc_hash.le_prev != NULL) {
   1028  1.73        ad 			LIST_REMOVE(ncp, nc_hash);
   1029  1.73        ad 			ncp->nc_hash.le_prev = NULL;
   1030  1.73        ad 		}
   1031  1.73        ad 		pool_cache_put(namecache_cache, ncp);
   1032  1.73        ad 		ncp = next;
   1033  1.73        ad 	}
   1034  1.73        ad 	cache_unlock_cpus();
   1035  1.73        ad 	numcache -= items;
   1036  1.73        ad 	cache_ev_gc.ev_count += items;
   1037  1.73        ad }
   1038  1.73        ad 
   1039  1.73        ad /*
    1040  1.73        ad  * Cache maintenance thread, awakening once per second to:
   1041  1.73        ad  *
   1042  1.73        ad  * => keep number of entries below the high water mark
   1043  1.73        ad  * => sort pseudo-LRU list
   1044  1.73        ad  * => garbage collect dead entries
   1045  1.73        ad  */
   1046  1.73        ad static void
   1047  1.73        ad cache_thread(void *arg)
   1048  1.73        ad {
   1049  1.73        ad 
   1050  1.73        ad 	mutex_enter(namecache_lock);
   1051  1.73        ad 	for (;;) {
   1052  1.73        ad 		cache_reclaim();
   1053  1.73        ad 		kpause("cachegc", false, hz, namecache_lock);
   1054   1.1       cgd 	}
   1055   1.1       cgd }
   1056  1.19  sommerfe 
   1057  1.28       chs #ifdef DDB
   1058  1.28       chs void
   1059  1.28       chs namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
   1060  1.28       chs {
   1061  1.28       chs 	struct vnode *dvp = NULL;
   1062  1.28       chs 	struct namecache *ncp;
   1063  1.28       chs 
   1064  1.28       chs 	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
   1065  1.73        ad 		if (ncp->nc_vp == vp && ncp->nc_dvp != NULL) {
   1066  1.28       chs 			(*pr)("name %.*s\n", ncp->nc_nlen, ncp->nc_name);
   1067  1.28       chs 			dvp = ncp->nc_dvp;
   1068  1.28       chs 		}
   1069  1.28       chs 	}
   1070  1.28       chs 	if (dvp == NULL) {
   1071  1.28       chs 		(*pr)("name not found\n");
   1072  1.28       chs 		return;
   1073  1.28       chs 	}
   1074  1.28       chs 	vp = dvp;
   1075  1.28       chs 	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
   1076  1.47      yamt 		if (ncp->nc_vp == vp) {
   1077  1.28       chs 			(*pr)("parent %.*s\n", ncp->nc_nlen, ncp->nc_name);
   1078  1.28       chs 		}
   1079  1.28       chs 	}
   1080  1.28       chs }
   1081  1.28       chs #endif
   1082  1.95     joerg 
   1083  1.95     joerg void
   1084  1.95     joerg namecache_count_pass2(void)
   1085  1.95     joerg {
   1086  1.95     joerg 	struct nchcpu *cpup = curcpu()->ci_data.cpu_nch;
   1087  1.95     joerg 
   1088  1.95     joerg 	mutex_enter(&cpup->cpu_lock);
   1089  1.95     joerg 	COUNT(cpup->cpu_stats, ncs_pass2);
   1090  1.95     joerg 	mutex_exit(&cpup->cpu_lock);
   1091  1.95     joerg }
   1092  1.95     joerg 
   1093  1.95     joerg void
   1094  1.95     joerg namecache_count_2passes(void)
   1095  1.95     joerg {
   1096  1.95     joerg 	struct nchcpu *cpup = curcpu()->ci_data.cpu_nch;
   1097  1.95     joerg 
   1098  1.95     joerg 	mutex_enter(&cpup->cpu_lock);
   1099  1.95     joerg 	COUNT(cpup->cpu_stats, ncs_2passes);
   1100  1.95     joerg 	mutex_exit(&cpup->cpu_lock);
   1101  1.95     joerg }
   1102  1.97     joerg 
   1103  1.97     joerg static int
   1104  1.97     joerg cache_stat_sysctl(SYSCTLFN_ARGS)
   1105  1.97     joerg {
   1106  1.97     joerg 	struct nchstats_sysctl stats;
   1107  1.97     joerg 
   1108  1.97     joerg 	if (oldp == NULL) {
   1109  1.97     joerg 		*oldlenp = sizeof(stats);
   1110  1.97     joerg 		return 0;
   1111  1.97     joerg 	}
   1112  1.97     joerg 
   1113  1.97     joerg 	if (*oldlenp < sizeof(stats)) {
   1114  1.97     joerg 		*oldlenp = 0;
   1115  1.97     joerg 		return 0;
   1116  1.97     joerg 	}
   1117  1.97     joerg 
   1118  1.97     joerg 	memset(&stats, 0, sizeof(stats));
   1119  1.97     joerg 
   1120  1.97     joerg 	sysctl_unlock();
   1121  1.97     joerg 	cache_lock_cpus();
   1122  1.97     joerg 	stats.ncs_goodhits = nchstats.ncs_goodhits;
   1123  1.97     joerg 	stats.ncs_neghits = nchstats.ncs_neghits;
   1124  1.97     joerg 	stats.ncs_badhits = nchstats.ncs_badhits;
   1125  1.97     joerg 	stats.ncs_falsehits = nchstats.ncs_falsehits;
   1126  1.97     joerg 	stats.ncs_miss = nchstats.ncs_miss;
   1127  1.97     joerg 	stats.ncs_long = nchstats.ncs_long;
   1128  1.97     joerg 	stats.ncs_pass2 = nchstats.ncs_pass2;
   1129  1.97     joerg 	stats.ncs_2passes = nchstats.ncs_2passes;
   1130  1.97     joerg 	stats.ncs_revhits = nchstats.ncs_revhits;
   1131  1.97     joerg 	stats.ncs_revmiss = nchstats.ncs_revmiss;
   1132  1.97     joerg 	cache_unlock_cpus();
   1133  1.97     joerg 	sysctl_relock();
   1134  1.97     joerg 
   1135  1.97     joerg 	*oldlenp = sizeof(stats);
   1136  1.97     joerg 	return sysctl_copyout(l, &stats, oldp, sizeof(stats));
   1137  1.97     joerg }
   1138  1.97     joerg 
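                         /*
                          * Example (an illustrative userland sketch, not part of this
                          * file): the statistics exported by the node created below can
                          * be read with the usual two-step sysctl protocol:
                          *
                          *	struct nchstats_sysctl stats;
                          *	size_t len = sizeof(stats);
                          *
                          *	if (sysctlbyname("vfs.namecache_stats", &stats, &len,
                          *	    NULL, 0) == 0)
                          *		printf("good hits: %llu\n",
                          *		    (unsigned long long)stats.ncs_goodhits);
                          */
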
   1139  1.97     joerg SYSCTL_SETUP(sysctl_cache_stat_setup, "vfs.namecache_stats subtree setup")
   1140  1.97     joerg {
   1141  1.97     joerg 	sysctl_createv(clog, 0, NULL, NULL,
   1142  1.97     joerg 		       CTLFLAG_PERMANENT,
   1143  1.97     joerg 		       CTLTYPE_STRUCT, "namecache_stats",
   1144  1.97     joerg 		       SYSCTL_DESCR("namecache statistics"),
   1145  1.97     joerg 		       cache_stat_sysctl, 0, NULL, 0,
   1146  1.97     joerg 		       CTL_VFS, CTL_CREATE, CTL_EOL);
   1147  1.97     joerg }