vfs_cache.c revision 1.127
      1  1.127        ad /*	$NetBSD: vfs_cache.c,v 1.127 2020/01/08 12:04:56 ad Exp $	*/
      2   1.73        ad 
      3   1.73        ad /*-
      4  1.125        ad  * Copyright (c) 2008 The NetBSD Foundation, Inc.
      5   1.73        ad  * All rights reserved.
      6   1.73        ad  *
      7   1.73        ad  * Redistribution and use in source and binary forms, with or without
      8   1.73        ad  * modification, are permitted provided that the following conditions
      9   1.73        ad  * are met:
     10   1.73        ad  * 1. Redistributions of source code must retain the above copyright
     11   1.73        ad  *    notice, this list of conditions and the following disclaimer.
     12   1.73        ad  * 2. Redistributions in binary form must reproduce the above copyright
     13   1.73        ad  *    notice, this list of conditions and the following disclaimer in the
     14   1.73        ad  *    documentation and/or other materials provided with the distribution.
     15   1.73        ad  *
     16   1.73        ad  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     17   1.73        ad  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     18   1.73        ad  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     19   1.73        ad  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     20   1.73        ad  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     21   1.73        ad  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     22   1.73        ad  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     23   1.73        ad  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     24   1.73        ad  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     25   1.73        ad  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     26   1.73        ad  * POSSIBILITY OF SUCH DAMAGE.
     27   1.73        ad  */
     28    1.6       cgd 
     29    1.1       cgd /*
     30    1.5   mycroft  * Copyright (c) 1989, 1993
     31    1.5   mycroft  *	The Regents of the University of California.  All rights reserved.
     32    1.1       cgd  *
     33    1.1       cgd  * Redistribution and use in source and binary forms, with or without
     34    1.1       cgd  * modification, are permitted provided that the following conditions
     35    1.1       cgd  * are met:
     36    1.1       cgd  * 1. Redistributions of source code must retain the above copyright
     37    1.1       cgd  *    notice, this list of conditions and the following disclaimer.
     38    1.1       cgd  * 2. Redistributions in binary form must reproduce the above copyright
     39    1.1       cgd  *    notice, this list of conditions and the following disclaimer in the
     40    1.1       cgd  *    documentation and/or other materials provided with the distribution.
     41   1.51       agc  * 3. Neither the name of the University nor the names of its contributors
     42    1.1       cgd  *    may be used to endorse or promote products derived from this software
     43    1.1       cgd  *    without specific prior written permission.
     44    1.1       cgd  *
     45    1.1       cgd  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     46    1.1       cgd  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     47    1.1       cgd  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     48    1.1       cgd  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     49    1.1       cgd  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     50    1.1       cgd  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     51    1.1       cgd  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     52    1.1       cgd  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     53    1.1       cgd  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     54    1.1       cgd  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     55    1.1       cgd  * SUCH DAMAGE.
     56    1.1       cgd  *
     57   1.10   mycroft  *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
     58    1.1       cgd  */
     59   1.32     lukem 
     60   1.32     lukem #include <sys/cdefs.h>
     61  1.127        ad __KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.127 2020/01/08 12:04:56 ad Exp $");
     62    1.1       cgd 
     63  1.121  christos #define __NAMECACHE_PRIVATE
     64  1.107     pooka #ifdef _KERNEL_OPT
     65   1.28       chs #include "opt_ddb.h"
     66  1.115  riastrad #include "opt_dtrace.h"
     67  1.107     pooka #endif
     68   1.28       chs 
     69    1.4   mycroft #include <sys/param.h>
     70  1.115  riastrad #include <sys/atomic.h>
     71  1.115  riastrad #include <sys/cpu.h>
     72  1.115  riastrad #include <sys/errno.h>
     73  1.115  riastrad #include <sys/evcnt.h>
     74  1.115  riastrad #include <sys/kernel.h>
                          #include <sys/kmem.h>
      75  1.115  riastrad #include <sys/kthread.h>
     76    1.4   mycroft #include <sys/mount.h>
     77  1.115  riastrad #include <sys/mutex.h>
     78    1.4   mycroft #include <sys/namei.h>
     79   1.18   thorpej #include <sys/pool.h>
     80  1.108  christos #include <sys/sdt.h>
     81  1.115  riastrad #include <sys/sysctl.h>
     82  1.115  riastrad #include <sys/systm.h>
     83  1.115  riastrad #include <sys/time.h>
     84  1.115  riastrad #include <sys/vnode_impl.h>
     85    1.1       cgd 
     86    1.1       cgd /*
     87    1.1       cgd  * Name caching works as follows:
     88    1.1       cgd  *
     89    1.1       cgd  * Names found by directory scans are retained in a cache
     90    1.1       cgd  * for future reference.  It is managed LRU, so frequently
      91    1.1       cgd  * used names will hang around.  The cache is indexed by a hash value
     92   1.20  jdolecek  * obtained from (dvp, name) where dvp refers to the directory
     93    1.1       cgd  * containing name.
     94    1.1       cgd  *
     95    1.1       cgd  * Upon reaching the last segment of a path, if the reference
     96    1.1       cgd  * is for DELETE, or NOCACHE is set (rewrite), and the
     97    1.1       cgd  * name is located in the cache, it will be dropped.
     98    1.1       cgd  */
     99    1.1       cgd 
    100    1.1       cgd /*
    101  1.120  riastrad  * Cache entry lifetime:
    102  1.120  riastrad  *
    103  1.120  riastrad  *	nonexistent
    104  1.120  riastrad  *	---create---> active
    105  1.120  riastrad  *	---invalidate---> queued
    106  1.120  riastrad  *	---reclaim---> nonexistent.
    107  1.120  riastrad  *
    108  1.120  riastrad  * States:
    109  1.120  riastrad  * - Nonexistent.  Cache entry does not exist.
    110  1.120  riastrad  *
    111  1.120  riastrad  * - Active.  cache_lookup, cache_lookup_raw, cache_revlookup can look
    112  1.120  riastrad  *   up, acquire references, and hand off references to vnodes,
    113  1.120  riastrad  *   e.g. via v_interlock.  Marked by nonnull ncp->nc_dvp.
    114  1.120  riastrad  *
     115  1.120  riastrad  * - Queued.  Pending destruction by cache_reclaim.  Cannot be used by
    116  1.120  riastrad  *   cache_lookup, cache_lookup_raw, or cache_revlookup.  May still be
    117  1.120  riastrad  *   on lists.  Marked by null ncp->nc_dvp.
    118  1.120  riastrad  *
    119  1.120  riastrad  * Transitions:
    120  1.120  riastrad  *
    121  1.120  riastrad  * - Create: nonexistent--->active
    122  1.120  riastrad  *
    123  1.120  riastrad  *   Done by cache_enter(dvp, vp, name, namelen, cnflags), called by
    124  1.120  riastrad  *   VOP_LOOKUP after the answer is found.  Allocates a struct
    125  1.120  riastrad  *   namecache object, initializes it with the above fields, and
    126  1.120  riastrad  *   activates it by inserting it into the forward and reverse tables.
    127  1.120  riastrad  *
    128  1.120  riastrad  * - Invalidate: active--->queued
    129  1.120  riastrad  *
    130  1.120  riastrad  *   Done by cache_invalidate.  If not already invalidated, nullify
     131  1.127        ad  *   ncp->nc_dvp and add to cache_gcqueue.  Called,
    132  1.120  riastrad  *   among various other places, in cache_lookup(dvp, name, namelen,
    133  1.120  riastrad  *   nameiop, cnflags, &iswht, &vp) when MAKEENTRY is missing from
    134  1.120  riastrad  *   cnflags.
    135  1.120  riastrad  *
    136  1.120  riastrad  * - Reclaim: queued--->nonexistent
    137  1.120  riastrad  *
    138  1.120  riastrad  *   Done by cache_reclaim.  Disassociate ncp from any lists it is on
    139  1.120  riastrad  *   and free memory.
    140  1.120  riastrad  */
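
                          /*
                           * Illustrative sketch (not code from this file) mapping the
                           * transitions above to the functions that drive them; the caller
                           * shown is hypothetical:
                           *
                           *	nonexistent ---> active:
                           *		cache_enter(dvp, vp, name, namelen, cnflags);
                           *	active ---> queued (via cache_invalidate):
                           *		cache_purge1(vp, NULL, 0, PURGE_PARENTS);
                           *	queued ---> nonexistent:
                           *		cache_reclaim();	(normally from cache_thread)
                           */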
    141  1.120  riastrad 
    142  1.120  riastrad /*
    143  1.117  riastrad  * Locking.
    144  1.102    dennis  *
    145  1.117  riastrad  * L namecache_lock		Global lock for namecache table and queues.
    146  1.117  riastrad  * C struct nchcpu::cpu_lock	Per-CPU lock to reduce read contention.
    147  1.125        ad  * N struct namecache::nc_lock	Per-entry lock.
    148  1.125        ad  * V struct vnode::v_interlock	Vnode interlock.
    149  1.117  riastrad  *
    150  1.125        ad  * Lock order: L -> C -> N -> V
    151  1.118  riastrad  *
    152  1.118  riastrad  *	Examples:
    153  1.118  riastrad  *	. L->C: cache_reclaim
    154  1.125        ad  *	. C->N->V: cache_lookup
    155  1.125        ad  *	. L->N->V: cache_purge1, cache_revlookup
    156  1.117  riastrad  *
    157  1.117  riastrad  * All use serialized by namecache_lock:
    158  1.117  riastrad  *
    159  1.117  riastrad  *	nclruhead / struct namecache::nc_lru
    160  1.117  riastrad  *	struct vnode_impl::vi_dnclist / struct namecache::nc_dvlist
    161  1.117  riastrad  *	struct vnode_impl::vi_nclist / struct namecache::nc_vlist
    162  1.117  riastrad  *	nchstats
    163  1.117  riastrad  *
    164  1.117  riastrad  * - Insertion serialized by namecache_lock,
    165  1.117  riastrad  * - read protected by per-CPU lock,
    166  1.117  riastrad  * - insert/read ordering guaranteed by memory barriers, and
    167  1.125        ad  * - deletion allowed only under namecache_lock and *all* per-CPU locks
    168  1.125        ad  *   in CPU_INFO_FOREACH order:
    169  1.117  riastrad  *
    170  1.117  riastrad  *	nchashtbl / struct namecache::nc_hash
    171  1.117  riastrad  *
    172  1.117  riastrad  *   The per-CPU locks exist only to reduce the probability of
    173  1.117  riastrad  *   contention between readers.  We do not bind to a CPU, so
    174  1.117  riastrad  *   contention is still possible.
    175  1.117  riastrad  *
    176  1.117  riastrad  * All use serialized by struct namecache::nc_lock:
    177  1.117  riastrad  *
    178  1.117  riastrad  *	struct namecache::nc_dvp
    179  1.117  riastrad  *	struct namecache::nc_vp
    180  1.125        ad  *	struct namecache::nc_gcqueue (*)
    181  1.125        ad  *	struct namecache::nc_hittime (**)
    182  1.117  riastrad  *
     183  1.125        ad  * (*) Once on the queue, only cache_thread uses nc_gcqueue, unlocked.
     184  1.125        ad  * (**) cache_prune reads nc_hittime unlocked, since an approximation is OK.
    185  1.117  riastrad  *
    186  1.117  riastrad  * Unlocked because stable after initialization:
    187  1.117  riastrad  *
    188  1.117  riastrad  *	struct namecache::nc_dvp
    189  1.117  riastrad  *	struct namecache::nc_vp
    190  1.117  riastrad  *	struct namecache::nc_flags
    191  1.117  riastrad  *	struct namecache::nc_nlen
    192  1.117  riastrad  *	struct namecache::nc_name
    193  1.117  riastrad  *
    194  1.117  riastrad  * Unlocked because approximation is OK:
    195  1.117  riastrad  *
    196  1.117  riastrad  *	struct nchcpu::cpu_stats
    197  1.117  riastrad  *	struct nchcpu::cpu_stats_last
    198  1.117  riastrad  *
    199  1.117  riastrad  * Updates under namecache_lock or any per-CPU lock are marked with
    200  1.117  riastrad  * COUNT, while updates outside those locks are marked with COUNT_UNL.
    201  1.117  riastrad  *
    202  1.117  riastrad  * - The theory seems to have been that you could replace COUNT_UNL by
    203  1.117  riastrad  *   atomic operations -- except that doesn't help unless you also
    204  1.117  riastrad  *   replace COUNT by atomic operations, because mixing atomics and
    205  1.117  riastrad  *   nonatomics is a recipe for failure.
    206  1.117  riastrad  * - We use 32-bit per-CPU counters and 64-bit global counters under
    207  1.117  riastrad  *   the theory that 32-bit counters are less likely to be hosed by
    208  1.117  riastrad  *   nonatomic increment.
    209  1.117  riastrad  */
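
                          /*
                           * A minimal sketch of the C -> N -> V lock order as seen by a reader;
                           * this mirrors what cache_lookup() does below ("cpup" is the current
                           * CPU's struct nchcpu, "ncp" a candidate entry):
                           *
                           *	mutex_enter(&cpup->cpu_lock);	C: holds off cache_reclaim()
                           *	mutex_enter(&ncp->nc_lock);	N: stabilizes nc_dvp/nc_vp
                           *	vp = ncp->nc_vp;
                           *	mutex_enter(vp->v_interlock);	V: taken before dropping N and C
                           *	mutex_exit(&ncp->nc_lock);
                           *	mutex_exit(&cpup->cpu_lock);
                           *	error = vcache_tryvget(vp);	only v_interlock held here
                           */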
    210  1.117  riastrad 
    211  1.117  riastrad /*
    212  1.117  riastrad  * The comment below is preserved for posterity in case it is
    213  1.117  riastrad  * important, but it is clear that everywhere the namecache_count_*()
    214  1.117  riastrad  * functions are called, other cache_*() functions that take the same
    215  1.117  riastrad  * locks are also called, so I can't imagine how this could be a
    216  1.117  riastrad  * problem:
    217  1.103    dennis  *
    218  1.103    dennis  * N.B.: Attempting to protect COUNT_UNL() increments by taking
    219  1.103    dennis  * a per-cpu lock in the namecache_count_*() functions causes
    220  1.103    dennis  * a deadlock.  Don't do that, use atomic increments instead if
    221  1.103    dennis  * the imperfections here bug you.
    222  1.117  riastrad  */
    223  1.117  riastrad 
    224  1.117  riastrad /*
    225  1.117  riastrad  * struct nchstats_percpu:
    226  1.103    dennis  *
    227  1.117  riastrad  *	Per-CPU counters.
    228   1.77        ad  */
    229  1.103    dennis struct nchstats_percpu _NAMEI_CACHE_STATS(uint32_t);
    230  1.103    dennis 
    231  1.117  riastrad /*
    232  1.117  riastrad  * struct nchcpu:
    233  1.117  riastrad  *
    234  1.117  riastrad  *	Per-CPU namecache state: lock and per-CPU counters.
    235  1.117  riastrad  */
    236   1.77        ad struct nchcpu {
    237  1.103    dennis 	kmutex_t		cpu_lock;
    238  1.103    dennis 	struct nchstats_percpu	cpu_stats;
    239  1.103    dennis 	/* XXX maybe __cacheline_aligned would improve this? */
    240  1.103    dennis 	struct nchstats_percpu	cpu_stats_last;	/* from last sample */
    241   1.77        ad };
    242   1.77        ad 
    243   1.77        ad /*
    244   1.90  dholland  * The type for the hash code. While the hash function generates a
    245   1.90  dholland  * u32, the hash code has historically been passed around as a u_long,
    246   1.90  dholland  * and the value is modified by xor'ing a uintptr_t, so it's not
    247   1.90  dholland  * entirely clear what the best type is. For now I'll leave it
    248   1.90  dholland  * unchanged as u_long.
    249   1.90  dholland  */
    250   1.90  dholland 
    251   1.90  dholland typedef u_long nchash_t;
    252   1.90  dholland 
    253   1.90  dholland /*
     254    1.1       cgd  * Structures associated with name caching.
    255    1.1       cgd  */
    256   1.89     rmind 
    257  1.125        ad static kmutex_t *namecache_lock __read_mostly;
    258   1.89     rmind static pool_cache_t namecache_cache __read_mostly;
    259   1.89     rmind static TAILQ_HEAD(, namecache) nclruhead __cacheline_aligned;
    260   1.89     rmind 
    261   1.89     rmind static LIST_HEAD(nchashhead, namecache) *nchashtbl __read_mostly;
    262   1.89     rmind static u_long	nchash __read_mostly;
    263   1.89     rmind 
    264   1.90  dholland #define	NCHASH2(hash, dvp)	\
    265   1.90  dholland 	(((hash) ^ ((uintptr_t)(dvp) >> 3)) & nchash)
    266   1.19  sommerfe 
    267   1.89     rmind /* Number of cache entries allocated. */
    268   1.89     rmind static long	numcache __cacheline_aligned;
    269   1.73        ad 
    270   1.89     rmind /* Garbage collection queue and number of entries pending in it. */
    271  1.125        ad static void	*cache_gcqueue;
    272  1.125        ad static u_int	cache_gcpend;
    273   1.89     rmind 
     274  1.103    dennis /* Cache effectiveness statistics.  This holds totals from the per-cpu stats. */
    275   1.89     rmind struct nchstats	nchstats __cacheline_aligned;
    276  1.103    dennis 
    277  1.103    dennis /*
    278  1.103    dennis  * Macros to count an event, update the central stats with per-cpu
    279  1.103    dennis  * values and add current per-cpu increments to the subsystem total
    280  1.103    dennis  * last collected by cache_reclaim().
    281  1.103    dennis  */
    282  1.125        ad #define	CACHE_STATS_CURRENT	/* nothing */
    283  1.125        ad 
    284  1.103    dennis #define	COUNT(cpup, f)	((cpup)->cpu_stats.f++)
    285  1.103    dennis 
    286  1.103    dennis #define	UPDATE(cpup, f) do { \
    287  1.103    dennis 	struct nchcpu *Xcpup = (cpup); \
    288  1.103    dennis 	uint32_t Xcnt = (volatile uint32_t) Xcpup->cpu_stats.f; \
    289  1.103    dennis 	nchstats.f += Xcnt - Xcpup->cpu_stats_last.f; \
    290  1.103    dennis 	Xcpup->cpu_stats_last.f = Xcnt; \
    291  1.103    dennis } while (/* CONSTCOND */ 0)
    292  1.103    dennis 
    293  1.125        ad #define	ADD(stats, cpup, f) do { \
    294  1.125        ad 	struct nchcpu *Xcpup = (cpup); \
    295  1.125        ad 	stats.f += Xcpup->cpu_stats.f - Xcpup->cpu_stats_last.f; \
    296  1.125        ad } while (/* CONSTCOND */ 0)
    297  1.125        ad 
    298  1.103    dennis /* Do unlocked stats the same way. Use a different name to allow mind changes */
    299  1.103    dennis #define	COUNT_UNL(cpup, f)	COUNT((cpup), f)
    300   1.38   thorpej 
    301  1.125        ad static const int cache_lowat = 95;
    302   1.89     rmind static const int cache_hiwat = 98;
    303   1.89     rmind static const int cache_hottime = 5;	/* number of seconds */
    304   1.89     rmind static int doingcache = 1;		/* 1 => enable the cache */
    305    1.1       cgd 
    306   1.73        ad static struct evcnt cache_ev_scan;
    307   1.73        ad static struct evcnt cache_ev_gc;
    308   1.73        ad static struct evcnt cache_ev_over;
    309   1.73        ad static struct evcnt cache_ev_under;
    310   1.73        ad static struct evcnt cache_ev_forced;
    311   1.73        ad 
    312   1.89     rmind static struct namecache *cache_lookup_entry(
    313   1.91  dholland     const struct vnode *, const char *, size_t);
    314   1.73        ad static void cache_thread(void *);
    315   1.73        ad static void cache_invalidate(struct namecache *);
    316   1.73        ad static void cache_disassociate(struct namecache *);
    317   1.73        ad static void cache_reclaim(void);
    318   1.73        ad static int cache_ctor(void *, void *, int);
    319   1.73        ad static void cache_dtor(void *, void *);
    320   1.46      yamt 
    321  1.104     pooka static struct sysctllog *sysctllog;
    322  1.104     pooka static void sysctl_cache_stat_setup(void);
    323  1.104     pooka 
    324  1.108  christos SDT_PROVIDER_DEFINE(vfs);
    325  1.108  christos 
    326  1.108  christos SDT_PROBE_DEFINE1(vfs, namecache, invalidate, done, "struct vnode *");
    327  1.108  christos SDT_PROBE_DEFINE1(vfs, namecache, purge, parents, "struct vnode *");
    328  1.108  christos SDT_PROBE_DEFINE1(vfs, namecache, purge, children, "struct vnode *");
    329  1.108  christos SDT_PROBE_DEFINE2(vfs, namecache, purge, name, "char *", "size_t");
    330  1.108  christos SDT_PROBE_DEFINE1(vfs, namecache, purge, vfs, "struct mount *");
    331  1.108  christos SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *",
    332  1.108  christos     "char *", "size_t");
    333  1.108  christos SDT_PROBE_DEFINE3(vfs, namecache, lookup, miss, "struct vnode *",
    334  1.108  christos     "char *", "size_t");
    335  1.108  christos SDT_PROBE_DEFINE3(vfs, namecache, lookup, toolong, "struct vnode *",
    336  1.108  christos     "char *", "size_t");
    337  1.108  christos SDT_PROBE_DEFINE2(vfs, namecache, revlookup, success, "struct vnode *",
    338  1.108  christos      "struct vnode *");
    339  1.108  christos SDT_PROBE_DEFINE2(vfs, namecache, revlookup, fail, "struct vnode *",
    340  1.108  christos      "int");
    341  1.108  christos SDT_PROBE_DEFINE2(vfs, namecache, prune, done, "int", "int");
    342  1.108  christos SDT_PROBE_DEFINE3(vfs, namecache, enter, toolong, "struct vnode *",
    343  1.108  christos     "char *", "size_t");
    344  1.108  christos SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *",
    345  1.108  christos     "char *", "size_t");
    346  1.108  christos 
    347   1.73        ad /*
    348   1.90  dholland  * Compute the hash for an entry.
    349   1.90  dholland  *
    350   1.90  dholland  * (This is for now a wrapper around namei_hash, whose interface is
    351   1.90  dholland  * for the time being slightly inconvenient.)
    352   1.90  dholland  */
    353   1.90  dholland static nchash_t
    354   1.91  dholland cache_hash(const char *name, size_t namelen)
    355   1.90  dholland {
    356   1.90  dholland 	const char *endptr;
    357   1.90  dholland 
    358   1.91  dholland 	endptr = name + namelen;
    359   1.91  dholland 	return namei_hash(name, &endptr);
    360   1.90  dholland }
    361   1.90  dholland 
    362   1.90  dholland /*
    363   1.73        ad  * Invalidate a cache entry and enqueue it for garbage collection.
    364  1.125        ad  * The caller needs to hold namecache_lock or a per-cpu lock to hold
    365  1.125        ad  * off cache_reclaim().
    366   1.73        ad  */
    367   1.46      yamt static void
    368   1.73        ad cache_invalidate(struct namecache *ncp)
    369   1.46      yamt {
    370  1.125        ad 	void *head;
    371   1.46      yamt 
    372  1.125        ad 	KASSERT(mutex_owned(&ncp->nc_lock));
    373   1.46      yamt 
    374   1.73        ad 	if (ncp->nc_dvp != NULL) {
    375  1.108  christos 		SDT_PROBE(vfs, namecache, invalidate, done, ncp->nc_dvp,
    376  1.108  christos 		    0, 0, 0, 0);
    377  1.108  christos 
    378   1.73        ad 		ncp->nc_dvp = NULL;
    379  1.125        ad 		do {
    380  1.125        ad 			head = cache_gcqueue;
    381  1.125        ad 			ncp->nc_gcqueue = head;
    382  1.125        ad 		} while (atomic_cas_ptr(&cache_gcqueue, head, ncp) != head);
    383  1.125        ad 		atomic_inc_uint(&cache_gcpend);
    384   1.73        ad 	}
    385   1.73        ad }
    386   1.46      yamt 
    387   1.73        ad /*
    388   1.73        ad  * Disassociate a namecache entry from any vnodes it is attached to,
    389   1.73        ad  * and remove from the global LRU list.
    390   1.73        ad  */
    391   1.73        ad static void
    392   1.73        ad cache_disassociate(struct namecache *ncp)
    393   1.73        ad {
    394   1.73        ad 
    395  1.125        ad 	KASSERT(mutex_owned(namecache_lock));
    396   1.73        ad 	KASSERT(ncp->nc_dvp == NULL);
    397   1.73        ad 
    398   1.73        ad 	if (ncp->nc_lru.tqe_prev != NULL) {
    399   1.73        ad 		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
    400   1.73        ad 		ncp->nc_lru.tqe_prev = NULL;
    401   1.46      yamt 	}
    402  1.127        ad 	if (ncp->nc_vlist.tqe_prev != NULL) {
    403  1.127        ad 		KASSERT(ncp->nc_vp != NULL);
    404  1.127        ad 		TAILQ_REMOVE(&VNODE_TO_VIMPL(ncp->nc_vp)->vi_nclist, ncp,
    405  1.127        ad 		    nc_vlist);
    406  1.127        ad 		ncp->nc_vlist.tqe_prev = NULL;
    407   1.46      yamt 	}
    408   1.46      yamt 	if (ncp->nc_dvlist.le_prev != NULL) {
    409   1.46      yamt 		LIST_REMOVE(ncp, nc_dvlist);
    410   1.46      yamt 		ncp->nc_dvlist.le_prev = NULL;
    411   1.46      yamt 	}
    412   1.46      yamt }
    413   1.46      yamt 
    414   1.73        ad /*
    415   1.73        ad  * Lock all CPUs to prevent any cache lookup activity.  Conceptually,
    416  1.125        ad  * this locks out all "readers".
    417   1.73        ad  */
    418   1.46      yamt static void
    419   1.73        ad cache_lock_cpus(void)
    420   1.46      yamt {
    421   1.73        ad 	CPU_INFO_ITERATOR cii;
    422   1.73        ad 	struct cpu_info *ci;
    423   1.77        ad 	struct nchcpu *cpup;
    424   1.46      yamt 
    425  1.103    dennis 	/*
    426  1.103    dennis 	 * Lock out all CPUs first, then harvest per-cpu stats.  This
    427  1.103    dennis 	 * is probably not quite as cache-efficient as doing the lock
    428  1.103    dennis 	 * and harvest at the same time, but allows cache_stat_sysctl()
    429  1.103    dennis 	 * to make do with a per-cpu lock.
    430  1.103    dennis 	 */
    431   1.73        ad 	for (CPU_INFO_FOREACH(cii, ci)) {
    432   1.77        ad 		cpup = ci->ci_data.cpu_nch;
    433   1.77        ad 		mutex_enter(&cpup->cpu_lock);
    434  1.103    dennis 	}
    435  1.103    dennis 	for (CPU_INFO_FOREACH(cii, ci)) {
    436  1.103    dennis 		cpup = ci->ci_data.cpu_nch;
    437  1.103    dennis 		UPDATE(cpup, ncs_goodhits);
    438  1.103    dennis 		UPDATE(cpup, ncs_neghits);
    439  1.103    dennis 		UPDATE(cpup, ncs_badhits);
    440  1.103    dennis 		UPDATE(cpup, ncs_falsehits);
    441  1.103    dennis 		UPDATE(cpup, ncs_miss);
    442  1.103    dennis 		UPDATE(cpup, ncs_long);
    443  1.103    dennis 		UPDATE(cpup, ncs_pass2);
    444  1.103    dennis 		UPDATE(cpup, ncs_2passes);
    445  1.103    dennis 		UPDATE(cpup, ncs_revhits);
    446  1.103    dennis 		UPDATE(cpup, ncs_revmiss);
    447   1.73        ad 	}
    448   1.46      yamt }
    449   1.46      yamt 
    450   1.73        ad /*
    451   1.73        ad  * Release all CPU locks.
    452   1.73        ad  */
    453   1.73        ad static void
    454   1.73        ad cache_unlock_cpus(void)
    455   1.73        ad {
    456   1.73        ad 	CPU_INFO_ITERATOR cii;
    457   1.73        ad 	struct cpu_info *ci;
    458   1.77        ad 	struct nchcpu *cpup;
    459   1.73        ad 
    460   1.73        ad 	for (CPU_INFO_FOREACH(cii, ci)) {
    461   1.77        ad 		cpup = ci->ci_data.cpu_nch;
    462   1.77        ad 		mutex_exit(&cpup->cpu_lock);
    463   1.73        ad 	}
    464   1.73        ad }
    465   1.73        ad 
    466   1.73        ad /*
    467  1.103    dennis  * Find a single cache entry and return it locked.
    468  1.103    dennis  * The caller needs to hold namecache_lock or a per-cpu lock to hold
    469  1.103    dennis  * off cache_reclaim().
    470   1.73        ad  */
    471   1.73        ad static struct namecache *
    472   1.91  dholland cache_lookup_entry(const struct vnode *dvp, const char *name, size_t namelen)
    473   1.55      yamt {
    474   1.55      yamt 	struct nchashhead *ncpp;
    475   1.55      yamt 	struct namecache *ncp;
    476   1.90  dholland 	nchash_t hash;
    477   1.55      yamt 
    478   1.84      yamt 	KASSERT(dvp != NULL);
    479   1.91  dholland 	hash = cache_hash(name, namelen);
    480   1.90  dholland 	ncpp = &nchashtbl[NCHASH2(hash, dvp)];
    481   1.55      yamt 
    482   1.55      yamt 	LIST_FOREACH(ncp, ncpp, nc_hash) {
    483  1.105    dennis 		membar_datadep_consumer();	/* for Alpha... */
    484   1.73        ad 		if (ncp->nc_dvp != dvp ||
    485   1.91  dholland 		    ncp->nc_nlen != namelen ||
    486   1.91  dholland 		    memcmp(ncp->nc_name, name, (u_int)ncp->nc_nlen))
    487   1.73        ad 		    	continue;
    488  1.125        ad 	    	mutex_enter(&ncp->nc_lock);
    489   1.77        ad 		if (__predict_true(ncp->nc_dvp == dvp)) {
    490  1.125        ad 			ncp->nc_hittime = hardclock_ticks;
    491  1.108  christos 			SDT_PROBE(vfs, namecache, lookup, hit, dvp,
    492  1.108  christos 			    name, namelen, 0, 0);
    493   1.73        ad 			return ncp;
    494   1.73        ad 		}
    495   1.73        ad 		/* Raced: entry has been nullified. */
    496  1.125        ad 		mutex_exit(&ncp->nc_lock);
    497   1.55      yamt 	}
    498   1.55      yamt 
    499  1.108  christos 	SDT_PROBE(vfs, namecache, lookup, miss, dvp,
    500  1.108  christos 	    name, namelen, 0, 0);
    501   1.73        ad 	return NULL;
    502   1.55      yamt }
    503   1.55      yamt 
    504    1.1       cgd /*
     505    1.1       cgd  * Look for the name in the cache. We don't do this
    506    1.1       cgd  * if the segment name is long, simply so the cache can avoid
    507    1.1       cgd  * holding long names (which would either waste space, or
    508    1.1       cgd  * add greatly to the complexity).
    509    1.1       cgd  *
    510   1.90  dholland  * Lookup is called with DVP pointing to the directory to search,
    511   1.90  dholland  * and CNP providing the name of the entry being sought: cn_nameptr
    512   1.90  dholland  * is the name, cn_namelen is its length, and cn_flags is the flags
    513   1.90  dholland  * word from the namei operation.
    514   1.90  dholland  *
    515   1.90  dholland  * DVP must be locked.
    516   1.90  dholland  *
    517   1.90  dholland  * There are three possible non-error return states:
    518   1.90  dholland  *    1. Nothing was found in the cache. Nothing is known about
    519   1.90  dholland  *       the requested name.
    520   1.90  dholland  *    2. A negative entry was found in the cache, meaning that the
    521   1.90  dholland  *       requested name definitely does not exist.
    522   1.90  dholland  *    3. A positive entry was found in the cache, meaning that the
    523   1.90  dholland  *       requested name does exist and that we are providing the
    524   1.90  dholland  *       vnode.
    525   1.90  dholland  * In these cases the results are:
    526   1.90  dholland  *    1. 0 returned; VN is set to NULL.
    527   1.90  dholland  *    2. 1 returned; VN is set to NULL.
    528   1.90  dholland  *    3. 1 returned; VN is set to the vnode found.
    529   1.90  dholland  *
    530   1.90  dholland  * The additional result argument ISWHT is set to zero, unless a
    531   1.90  dholland  * negative entry is found that was entered as a whiteout, in which
    532   1.90  dholland  * case ISWHT is set to one.
    533   1.90  dholland  *
    534   1.90  dholland  * The ISWHT_RET argument pointer may be null. In this case an
    535   1.90  dholland  * assertion is made that the whiteout flag is not set. File systems
    536   1.90  dholland  * that do not support whiteouts can/should do this.
    537   1.90  dholland  *
    538   1.90  dholland  * Filesystems that do support whiteouts should add ISWHITEOUT to
    539   1.90  dholland  * cnp->cn_flags if ISWHT comes back nonzero.
    540   1.90  dholland  *
    541   1.90  dholland  * When a vnode is returned, it is locked, as per the vnode lookup
    542   1.90  dholland  * locking protocol.
    543   1.90  dholland  *
    544   1.90  dholland  * There is no way for this function to fail, in the sense of
    545   1.90  dholland  * generating an error that requires aborting the namei operation.
    546   1.90  dholland  *
    547   1.90  dholland  * (Prior to October 2012, this function returned an integer status,
    548   1.90  dholland  * and a vnode, and mucked with the flags word in CNP for whiteouts.
    549   1.90  dholland  * The integer status was -1 for "nothing found", ENOENT for "a
    550   1.90  dholland  * negative entry found", 0 for "a positive entry found", and possibly
    551   1.90  dholland  * other errors, and the value of VN might or might not have been set
    552   1.90  dholland  * depending on what error occurred.)
    553    1.1       cgd  */
    554  1.113  riastrad bool
    555   1.91  dholland cache_lookup(struct vnode *dvp, const char *name, size_t namelen,
    556   1.91  dholland 	     uint32_t nameiop, uint32_t cnflags,
    557   1.90  dholland 	     int *iswht_ret, struct vnode **vn_ret)
    558    1.1       cgd {
    559   1.23  augustss 	struct namecache *ncp;
    560   1.20  jdolecek 	struct vnode *vp;
    561   1.77        ad 	struct nchcpu *cpup;
    562  1.113  riastrad 	int error;
    563  1.113  riastrad 	bool hit;
     564  1.103    dennis 
    566   1.90  dholland 	/* Establish default result values */
    567   1.90  dholland 	if (iswht_ret != NULL) {
    568   1.90  dholland 		*iswht_ret = 0;
    569   1.90  dholland 	}
    570   1.90  dholland 	*vn_ret = NULL;
    571   1.90  dholland 
    572   1.77        ad 	if (__predict_false(!doingcache)) {
    573  1.113  riastrad 		return false;
    574    1.8       cgd 	}
    575   1.39        pk 
    576   1.77        ad 	cpup = curcpu()->ci_data.cpu_nch;
    577  1.102    dennis 	mutex_enter(&cpup->cpu_lock);
    578  1.121  christos 	if (__predict_false(namelen > USHRT_MAX)) {
    579  1.108  christos 		SDT_PROBE(vfs, namecache, lookup, toolong, dvp,
    580  1.108  christos 		    name, namelen, 0, 0);
    581  1.103    dennis 		COUNT(cpup, ncs_long);
    582   1.77        ad 		mutex_exit(&cpup->cpu_lock);
    583   1.90  dholland 		/* found nothing */
    584  1.113  riastrad 		return false;
    585    1.1       cgd 	}
    586  1.103    dennis 
    587   1.91  dholland 	ncp = cache_lookup_entry(dvp, name, namelen);
    588   1.77        ad 	if (__predict_false(ncp == NULL)) {
    589  1.103    dennis 		COUNT(cpup, ncs_miss);
    590   1.77        ad 		mutex_exit(&cpup->cpu_lock);
    591   1.90  dholland 		/* found nothing */
    592  1.113  riastrad 		return false;
    593    1.1       cgd 	}
    594   1.91  dholland 	if ((cnflags & MAKEENTRY) == 0) {
    595  1.103    dennis 		COUNT(cpup, ncs_badhits);
    596   1.77        ad 		/*
    597   1.77        ad 		 * Last component and we are renaming or deleting,
    598   1.77        ad 		 * the cache entry is invalid, or otherwise don't
     599   1.77        ad 		 * want the cache entry to exist.
    600   1.77        ad 		 */
    601   1.77        ad 		cache_invalidate(ncp);
    602  1.125        ad 		mutex_exit(&ncp->nc_lock);
    603  1.102    dennis 		mutex_exit(&cpup->cpu_lock);
    604   1.90  dholland 		/* found nothing */
    605  1.113  riastrad 		return false;
    606   1.90  dholland 	}
    607  1.125        ad 	if (ncp->nc_vp == NULL) {
    608   1.90  dholland 		if (iswht_ret != NULL) {
    609   1.90  dholland 			/*
    610   1.90  dholland 			 * Restore the ISWHITEOUT flag saved earlier.
    611   1.90  dholland 			 */
    612   1.90  dholland 			KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
    613   1.90  dholland 			*iswht_ret = (ncp->nc_flags & ISWHITEOUT) != 0;
    614   1.90  dholland 		} else {
    615   1.90  dholland 			KASSERT(ncp->nc_flags == 0);
    616   1.90  dholland 		}
    617   1.90  dholland 
    618   1.91  dholland 		if (__predict_true(nameiop != CREATE ||
    619   1.91  dholland 		    (cnflags & ISLASTCN) == 0)) {
    620  1.103    dennis 			COUNT(cpup, ncs_neghits);
    621   1.90  dholland 			/* found neg entry; vn is already null from above */
    622  1.113  riastrad 			hit = true;
    623   1.20  jdolecek 		} else {
    624  1.103    dennis 			COUNT(cpup, ncs_badhits);
    625   1.77        ad 			/*
    626  1.109  dholland 			 * Last component and we are preparing to create
    627  1.109  dholland 			 * the named object, so flush the negative cache
    628  1.109  dholland 			 * entry.
    629   1.77        ad 			 */
    630   1.77        ad 			cache_invalidate(ncp);
    631   1.90  dholland 			/* found nothing */
    632  1.113  riastrad 			hit = false;
    633   1.20  jdolecek 		}
    634  1.125        ad 		mutex_exit(&ncp->nc_lock);
    635  1.103    dennis 		mutex_exit(&cpup->cpu_lock);
    636  1.113  riastrad 		return hit;
    637   1.20  jdolecek 	}
    638  1.125        ad 
    639  1.125        ad 	vp = ncp->nc_vp;
    640  1.125        ad 	mutex_enter(vp->v_interlock);
    641  1.125        ad 	mutex_exit(&ncp->nc_lock);
    642  1.102    dennis 	mutex_exit(&cpup->cpu_lock);
    643  1.103    dennis 
    644  1.103    dennis 	/*
    645  1.111   hannken 	 * Unlocked except for the vnode interlock.  Call vcache_tryvget().
    646  1.103    dennis 	 */
    647  1.111   hannken 	error = vcache_tryvget(vp);
    648   1.92   hannken 	if (error) {
    649   1.92   hannken 		KASSERT(error == EBUSY);
    650   1.92   hannken 		/*
    651   1.92   hannken 		 * This vnode is being cleaned out.
    652   1.92   hannken 		 * XXX badhits?
    653   1.92   hannken 		 */
    654  1.103    dennis 		COUNT_UNL(cpup, ncs_falsehits);
    655   1.92   hannken 		/* found nothing */
    656  1.113  riastrad 		return false;
    657   1.77        ad 	}
    658  1.101  christos 
    659  1.103    dennis 	COUNT_UNL(cpup, ncs_goodhits);
    660  1.101  christos 	/* found it */
    661  1.101  christos 	*vn_ret = vp;
    662  1.113  riastrad 	return true;
    663    1.1       cgd }
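
                          /*
                           * A hedged sketch of a typical consumer of cache_lookup() and
                           * cache_enter(), per the interface notes above.  "foofs_lookup" and
                           * "foofs_scan_dir" are hypothetical names and the signature is
                           * simplified (real callers go through VOP_LOOKUP):
                           *
                           *	int
                           *	foofs_lookup(struct vnode *dvp, struct vnode **vpp,
                           *	    struct componentname *cnp)
                           *	{
                           *		int iswht, error;
                           *
                           *		if (cache_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
                           *		    cnp->cn_nameiop, cnp->cn_flags, &iswht, vpp)) {
                           *			// Hit: *vpp referenced, or NULL for a negative
                           *			// entry.
                           *			if (iswht)
                           *				cnp->cn_flags |= ISWHITEOUT;
                           *			return *vpp == NULL ? ENOENT : 0;
                           *		}
                           *		// Miss: do the real work, then prime the cache.
                           *		error = foofs_scan_dir(dvp, cnp, vpp);
                           *		if (error == 0 || error == ENOENT)
                           *			cache_enter(dvp, error ? NULL : *vpp,
                           *			    cnp->cn_nameptr, cnp->cn_namelen,
                           *			    cnp->cn_flags);
                           *		return error;
                           *	}
                           */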
    664    1.1       cgd 
    665  1.103    dennis 
    666  1.103    dennis /*
    667  1.103    dennis  * Cut-'n-pasted version of the above without the nameiop argument.
    668  1.103    dennis  */
    669  1.113  riastrad bool
    670   1.91  dholland cache_lookup_raw(struct vnode *dvp, const char *name, size_t namelen,
    671   1.91  dholland 		 uint32_t cnflags,
    672   1.90  dholland 		 int *iswht_ret, struct vnode **vn_ret)
    673   1.61      yamt {
    674   1.61      yamt 	struct namecache *ncp;
    675   1.61      yamt 	struct vnode *vp;
    676   1.77        ad 	struct nchcpu *cpup;
    677  1.101  christos 	int error;
    678   1.61      yamt 
    679   1.90  dholland 	/* Establish default results. */
    680   1.90  dholland 	if (iswht_ret != NULL) {
    681   1.90  dholland 		*iswht_ret = 0;
    682   1.90  dholland 	}
    683   1.90  dholland 	*vn_ret = NULL;
    684   1.90  dholland 
    685   1.77        ad 	if (__predict_false(!doingcache)) {
    686   1.90  dholland 		/* found nothing */
    687  1.113  riastrad 		return false;
    688   1.61      yamt 	}
    689   1.61      yamt 
    690   1.77        ad 	cpup = curcpu()->ci_data.cpu_nch;
    691  1.102    dennis 	mutex_enter(&cpup->cpu_lock);
    692  1.121  christos 	if (__predict_false(namelen > USHRT_MAX)) {
    693  1.103    dennis 		COUNT(cpup, ncs_long);
    694   1.77        ad 		mutex_exit(&cpup->cpu_lock);
    695   1.90  dholland 		/* found nothing */
    696  1.113  riastrad 		return false;
    697   1.61      yamt 	}
    698   1.91  dholland 	ncp = cache_lookup_entry(dvp, name, namelen);
    699   1.77        ad 	if (__predict_false(ncp == NULL)) {
    700  1.103    dennis 		COUNT(cpup, ncs_miss);
    701   1.77        ad 		mutex_exit(&cpup->cpu_lock);
    702   1.90  dholland 		/* found nothing */
    703  1.113  riastrad 		return false;
    704   1.61      yamt 	}
    705   1.61      yamt 	vp = ncp->nc_vp;
    706   1.61      yamt 	if (vp == NULL) {
    707   1.61      yamt 		/*
    708   1.61      yamt 		 * Restore the ISWHITEOUT flag saved earlier.
    709   1.61      yamt 		 */
    710   1.90  dholland 		if (iswht_ret != NULL) {
    711   1.90  dholland 			KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
    712   1.90  dholland 			/*cnp->cn_flags |= ncp->nc_flags;*/
    713   1.90  dholland 			*iswht_ret = (ncp->nc_flags & ISWHITEOUT) != 0;
    714   1.90  dholland 		}
    715  1.103    dennis 		COUNT(cpup, ncs_neghits);
    716  1.125        ad 		mutex_exit(&ncp->nc_lock);
    717  1.101  christos 		mutex_exit(&cpup->cpu_lock);
    718   1.90  dholland 		/* found negative entry; vn is already null from above */
    719  1.113  riastrad 		return true;
    720   1.61      yamt 	}
    721  1.125        ad 	mutex_enter(vp->v_interlock);
    722  1.125        ad 	mutex_exit(&ncp->nc_lock);
    723  1.102    dennis 	mutex_exit(&cpup->cpu_lock);
    724  1.103    dennis 
    725  1.103    dennis 	/*
    726  1.111   hannken 	 * Unlocked except for the vnode interlock.  Call vcache_tryvget().
    727  1.103    dennis 	 */
    728  1.111   hannken 	error = vcache_tryvget(vp);
    729   1.92   hannken 	if (error) {
    730   1.92   hannken 		KASSERT(error == EBUSY);
    731   1.92   hannken 		/*
    732   1.92   hannken 		 * This vnode is being cleaned out.
    733   1.92   hannken 		 * XXX badhits?
    734   1.92   hannken 		 */
    735  1.103    dennis 		COUNT_UNL(cpup, ncs_falsehits);
    736   1.92   hannken 		/* found nothing */
    737  1.113  riastrad 		return false;
    738   1.61      yamt 	}
    739  1.101  christos 
    740  1.103    dennis 	COUNT_UNL(cpup, ncs_goodhits); /* XXX can be "badhits" */
    741  1.101  christos 	/* found it */
    742  1.101  christos 	*vn_ret = vp;
    743  1.113  riastrad 	return true;
    744   1.61      yamt }
    745   1.61      yamt 
    746    1.1       cgd /*
    747   1.19  sommerfe  * Scan cache looking for name of directory entry pointing at vp.
    748   1.19  sommerfe  *
    749   1.86   hannken  * If the lookup succeeds the vnode is referenced and stored in dvpp.
    750   1.19  sommerfe  *
    751   1.19  sommerfe  * If bufp is non-NULL, also place the name in the buffer which starts
    752   1.19  sommerfe  * at bufp, immediately before *bpp, and move bpp backwards to point
    753   1.19  sommerfe  * at the start of it.  (Yes, this is a little baroque, but it's done
    754   1.19  sommerfe  * this way to cater to the whims of getcwd).
    755   1.19  sommerfe  *
    756   1.19  sommerfe  * Returns 0 on success, -1 on cache miss, positive errno on failure.
    757   1.19  sommerfe  */
    758   1.19  sommerfe int
    759   1.34     enami cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
    760   1.19  sommerfe {
    761   1.19  sommerfe 	struct namecache *ncp;
    762   1.19  sommerfe 	struct vnode *dvp;
    763   1.95     joerg 	struct nchcpu *cpup;
    764   1.34     enami 	char *bp;
    765   1.86   hannken 	int error, nlen;
    766   1.34     enami 
    767  1.126        ad 	KASSERT(vp != NULL);
    768  1.126        ad 
    769   1.19  sommerfe 	if (!doingcache)
    770   1.19  sommerfe 		goto out;
    771   1.19  sommerfe 
    772  1.103    dennis 	/*
    773  1.103    dennis 	 * We increment counters in the local CPU's per-cpu stats.
    774  1.103    dennis 	 * We don't take the per-cpu lock, however, since this function
    775  1.103    dennis 	 * is the only place these counters are incremented so no one
    776  1.103    dennis 	 * will be racing with us to increment them.
    777  1.103    dennis 	 */
    778   1.95     joerg 	cpup = curcpu()->ci_data.cpu_nch;
    779  1.125        ad 	mutex_enter(namecache_lock);
    780  1.127        ad 	TAILQ_FOREACH(ncp, &VNODE_TO_VIMPL(vp)->vi_nclist, nc_vlist) {
    781  1.125        ad 		mutex_enter(&ncp->nc_lock);
    782  1.127        ad 		/* Ignore invalidated entries. */
    783  1.127        ad 		dvp = ncp->nc_dvp;
    784  1.127        ad 		if (dvp == NULL) {
    785  1.127        ad 			mutex_exit(&ncp->nc_lock);
    786  1.127        ad 			continue;
    787  1.127        ad 		}
    788  1.127        ad 
    789  1.127        ad 		/*
    790  1.127        ad 		 * The list is partially sorted.  Once we hit dot or dotdot
    791  1.127        ad 		 * it's only more dots from there on in.
    792  1.127        ad 		 */
    793  1.127        ad 		nlen = ncp->nc_nlen;
    794  1.127        ad 		if (ncp->nc_name[0] == '.') {
    795  1.127        ad 			if (nlen == 1 ||
    796  1.127        ad 			    (nlen == 2 && ncp->nc_name[1] == '.')) {
    797  1.127        ad 				mutex_exit(&ncp->nc_lock);
    798  1.127        ad 				break;
    799   1.19  sommerfe 			}
    800  1.127        ad 		}
    801  1.127        ad 		COUNT(cpup, ncs_revhits);
    802   1.34     enami 
    803  1.127        ad 		if (bufp) {
    804  1.127        ad 			bp = *bpp;
    805  1.127        ad 			bp -= nlen;
    806  1.127        ad 			if (bp <= bufp) {
    807   1.92   hannken 				*dvpp = NULL;
    808  1.127        ad 				mutex_exit(&ncp->nc_lock);
    809  1.127        ad 				mutex_exit(namecache_lock);
    810  1.127        ad 				SDT_PROBE(vfs, namecache, revlookup,
    811  1.127        ad 				    fail, vp, ERANGE, 0, 0, 0);
    812  1.127        ad 				return (ERANGE);
    813   1.86   hannken 			}
    814  1.127        ad 			memcpy(bp, ncp->nc_name, nlen);
    815  1.127        ad 			*bpp = bp;
    816   1.19  sommerfe 		}
    817  1.127        ad 
    818  1.127        ad 		mutex_enter(dvp->v_interlock);
    819  1.125        ad 		mutex_exit(&ncp->nc_lock);
    820  1.127        ad 		mutex_exit(namecache_lock);
    821  1.127        ad 		error = vcache_tryvget(dvp);
    822  1.127        ad 		if (error) {
    823  1.127        ad 			KASSERT(error == EBUSY);
    824  1.127        ad 			if (bufp)
    825  1.127        ad 				(*bpp) += nlen;
    826  1.127        ad 			*dvpp = NULL;
    827  1.127        ad 			SDT_PROBE(vfs, namecache, revlookup, fail, vp,
    828  1.127        ad 			    error, 0, 0, 0);
    829  1.127        ad 			return -1;
    830  1.127        ad 		}
    831  1.127        ad 		*dvpp = dvp;
    832  1.127        ad 		SDT_PROBE(vfs, namecache, revlookup, success, vp, dvp,
    833  1.127        ad 		    0, 0, 0);
    834  1.127        ad 		return (0);
    835   1.19  sommerfe 	}
    836  1.103    dennis 	COUNT(cpup, ncs_revmiss);
    837  1.125        ad 	mutex_exit(namecache_lock);
    838   1.19  sommerfe  out:
    839   1.34     enami 	*dvpp = NULL;
    840   1.34     enami 	return (-1);
    841   1.19  sommerfe }
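
                          /*
                           * A rough sketch of the getcwd-style use described above (termination,
                           * locking and error handling elided; cf. vfs_getcwd.c for the real
                           * consumer):
                           *
                           *	char buf[MAXPATHLEN], *bp = buf + sizeof(buf);
                           *	struct vnode *dvp;
                           *
                           *	while (vp != rootvnode) {
                           *		if (cache_revlookup(vp, &dvp, &bp, buf) != 0)
                           *			break;
                           *		*--bp = '/';	// the copied name now starts at bp + 1
                           *		vrele(vp);
                           *		vp = dvp;	// dvp came back referenced
                           *	}
                           */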
    842   1.19  sommerfe 
    843   1.19  sommerfe /*
    844    1.1       cgd  * Add an entry to the cache
    845    1.1       cgd  */
    846   1.13  christos void
    847   1.91  dholland cache_enter(struct vnode *dvp, struct vnode *vp,
    848   1.91  dholland 	    const char *name, size_t namelen, uint32_t cnflags)
    849    1.1       cgd {
    850   1.23  augustss 	struct namecache *ncp;
    851   1.59      yamt 	struct namecache *oncp;
    852   1.23  augustss 	struct nchashhead *ncpp;
    853   1.90  dholland 	nchash_t hash;
    854    1.1       cgd 
    855   1.89     rmind 	/* First, check whether we can/should add a cache entry. */
    856   1.91  dholland 	if ((cnflags & MAKEENTRY) == 0 ||
    857  1.121  christos 	    __predict_false(namelen > USHRT_MAX || !doingcache)) {
    858  1.108  christos 		SDT_PROBE(vfs, namecache, enter, toolong, vp, name, namelen,
    859  1.108  christos 		    0, 0);
    860    1.1       cgd 		return;
    861   1.89     rmind 	}
    862   1.58      yamt 
    863  1.108  christos 	SDT_PROBE(vfs, namecache, enter, done, vp, name, namelen, 0, 0);
    864   1.73        ad 	if (numcache > desiredvnodes) {
    865  1.125        ad 		mutex_enter(namecache_lock);
    866   1.73        ad 		cache_ev_forced.ev_count++;
    867   1.73        ad 		cache_reclaim();
    868  1.125        ad 		mutex_exit(namecache_lock);
    869   1.39        pk 	}
    870   1.57        pk 
    871  1.121  christos 	if (namelen > NCHNAMLEN) {
    872  1.121  christos 		ncp = kmem_alloc(sizeof(*ncp) + namelen, KM_SLEEP);
    873  1.121  christos 		cache_ctor(NULL, ncp, 0);
    874  1.121  christos 	} else
    875  1.122      maya 		ncp = pool_cache_get(namecache_cache, PR_WAITOK);
    876  1.122      maya 
    877  1.125        ad 	mutex_enter(namecache_lock);
    878   1.73        ad 	numcache++;
    879   1.73        ad 
    880   1.59      yamt 	/*
    881   1.59      yamt 	 * Concurrent lookups in the same directory may race for a
    882   1.59      yamt 	 * cache entry.  if there's a duplicated entry, free it.
    883   1.59      yamt 	 */
    884   1.91  dholland 	oncp = cache_lookup_entry(dvp, name, namelen);
    885   1.59      yamt 	if (oncp) {
    886   1.73        ad 		cache_invalidate(oncp);
    887  1.125        ad 		mutex_exit(&oncp->nc_lock);
    888   1.59      yamt 	}
    889   1.59      yamt 
    890   1.34     enami 	/* Grab the vnode we just found. */
    891  1.125        ad 	mutex_enter(&ncp->nc_lock);
    892    1.5   mycroft 	ncp->nc_vp = vp;
    893   1.73        ad 	ncp->nc_flags = 0;
    894   1.73        ad 	ncp->nc_hittime = 0;
    895  1.125        ad 	ncp->nc_gcqueue = NULL;
    896   1.47      yamt 	if (vp == NULL) {
    897   1.11   mycroft 		/*
    898   1.11   mycroft 		 * For negative hits, save the ISWHITEOUT flag so we can
    899   1.11   mycroft 		 * restore it later when the cache entry is used again.
    900   1.11   mycroft 		 */
    901   1.91  dholland 		ncp->nc_flags = cnflags & ISWHITEOUT;
    902   1.11   mycroft 	}
    903   1.89     rmind 
    904   1.34     enami 	/* Fill in cache info. */
    905    1.5   mycroft 	ncp->nc_dvp = dvp;
    906  1.112   hannken 	LIST_INSERT_HEAD(&VNODE_TO_VIMPL(dvp)->vi_dnclist, ncp, nc_dvlist);
    907  1.127        ad 	if (vp) {
    908  1.127        ad 		/* Partially sort the per-vnode list: dots go to back. */
    909  1.127        ad 		if ((namelen == 1 && name[0] == '.') ||
    910  1.127        ad 		    (namelen == 2 && name[0] == '.' && name[1] == '.')) {
    911  1.127        ad 			TAILQ_INSERT_TAIL(&VNODE_TO_VIMPL(vp)->vi_nclist, ncp,
    912  1.127        ad 			    nc_vlist);
    913  1.127        ad 		} else {
    914  1.127        ad 			TAILQ_INSERT_HEAD(&VNODE_TO_VIMPL(vp)->vi_nclist, ncp,
    915  1.127        ad 			    nc_vlist);
    916  1.127        ad 		}
    917  1.127        ad 	} else {
    918  1.127        ad 		ncp->nc_vlist.tqe_prev = NULL;
    919  1.127        ad 		ncp->nc_vlist.tqe_next = NULL;
    920   1.73        ad 	}
    921  1.121  christos 	KASSERT(namelen <= USHRT_MAX);
    922   1.91  dholland 	ncp->nc_nlen = namelen;
    923   1.91  dholland 	memcpy(ncp->nc_name, name, (unsigned)ncp->nc_nlen);
    924   1.73        ad 	TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
    925   1.91  dholland 	hash = cache_hash(name, namelen);
    926   1.90  dholland 	ncpp = &nchashtbl[NCHASH2(hash, dvp)];
    927   1.73        ad 
    928   1.73        ad 	/*
    929   1.73        ad 	 * Flush updates before making visible in table.  No need for a
    930   1.73        ad 	 * memory barrier on the other side: to see modifications the
    931   1.73        ad 	 * list must be followed, meaning a dependent pointer load.
    932   1.74        ad 	 * The below is LIST_INSERT_HEAD() inlined, with the memory
    933   1.74        ad 	 * barrier included in the correct place.
    934   1.73        ad 	 */
    935   1.74        ad 	if ((ncp->nc_hash.le_next = ncpp->lh_first) != NULL)
    936   1.74        ad 		ncpp->lh_first->nc_hash.le_prev = &ncp->nc_hash.le_next;
    937   1.74        ad 	ncp->nc_hash.le_prev = &ncpp->lh_first;
    938   1.73        ad 	membar_producer();
    939   1.74        ad 	ncpp->lh_first = ncp;
    940  1.125        ad 	mutex_exit(&ncp->nc_lock);
    941  1.125        ad 	mutex_exit(namecache_lock);
    942    1.1       cgd }
    943    1.1       cgd 
    944    1.1       cgd /*
    945    1.1       cgd  * Name cache initialization, from vfs_init() when we are booting
    946    1.1       cgd  */
    947   1.13  christos void
    948   1.34     enami nchinit(void)
    949    1.1       cgd {
    950   1.73        ad 	int error;
    951    1.1       cgd 
    952   1.89     rmind 	TAILQ_INIT(&nclruhead);
    953  1.121  christos 	namecache_cache = pool_cache_init(sizeof(struct namecache) + NCHNAMLEN,
    954   1.73        ad 	    coherency_unit, 0, 0, "ncache", NULL, IPL_NONE, cache_ctor,
    955   1.73        ad 	    cache_dtor, NULL);
    956   1.71        ad 	KASSERT(namecache_cache != NULL);
    957   1.71        ad 
    958  1.125        ad 	namecache_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
    959   1.76        ad 	nchashtbl = hashinit(desiredvnodes, HASH_LIST, true, &nchash);
    960   1.73        ad 
    961   1.73        ad 	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, cache_thread,
    962   1.73        ad 	    NULL, NULL, "cachegc");
    963   1.73        ad 	if (error != 0)
    964   1.73        ad 		panic("nchinit %d", error);
    965   1.73        ad 
    966   1.73        ad 	evcnt_attach_dynamic(&cache_ev_scan, EVCNT_TYPE_MISC, NULL,
    967   1.73        ad 	   "namecache", "entries scanned");
    968   1.73        ad 	evcnt_attach_dynamic(&cache_ev_gc, EVCNT_TYPE_MISC, NULL,
    969   1.73        ad 	   "namecache", "entries collected");
    970   1.73        ad 	evcnt_attach_dynamic(&cache_ev_over, EVCNT_TYPE_MISC, NULL,
    971   1.73        ad 	   "namecache", "over scan target");
    972   1.73        ad 	evcnt_attach_dynamic(&cache_ev_under, EVCNT_TYPE_MISC, NULL,
    973   1.73        ad 	   "namecache", "under scan target");
    974   1.73        ad 	evcnt_attach_dynamic(&cache_ev_forced, EVCNT_TYPE_MISC, NULL,
    975   1.73        ad 	   "namecache", "forced reclaims");
    976  1.104     pooka 
    977  1.104     pooka 	sysctl_cache_stat_setup();
    978   1.73        ad }
    979   1.73        ad 
    980   1.73        ad static int
    981   1.73        ad cache_ctor(void *arg, void *obj, int flag)
    982   1.73        ad {
    983  1.125        ad 	struct namecache *ncp;
    984  1.125        ad 
    985  1.125        ad 	ncp = obj;
    986  1.125        ad 	mutex_init(&ncp->nc_lock, MUTEX_DEFAULT, IPL_NONE);
    987   1.73        ad 
    988   1.73        ad 	return 0;
    989   1.73        ad }
    990   1.73        ad 
    991   1.73        ad static void
    992   1.73        ad cache_dtor(void *arg, void *obj)
    993   1.73        ad {
    994  1.125        ad 	struct namecache *ncp;
    995   1.73        ad 
    996  1.125        ad 	ncp = obj;
    997  1.125        ad 	mutex_destroy(&ncp->nc_lock);
    998   1.73        ad }
    999   1.73        ad 
   1000   1.73        ad /*
   1001   1.73        ad  * Called once for each CPU in the system as attached.
   1002   1.73        ad  */
   1003   1.73        ad void
   1004   1.73        ad cache_cpu_init(struct cpu_info *ci)
   1005   1.73        ad {
   1006   1.77        ad 	struct nchcpu *cpup;
   1007   1.77        ad 	size_t sz;
   1008   1.73        ad 
   1009   1.77        ad 	sz = roundup2(sizeof(*cpup), coherency_unit) + coherency_unit;
   1010   1.77        ad 	cpup = kmem_zalloc(sz, KM_SLEEP);
   1011   1.77        ad 	cpup = (void *)roundup2((uintptr_t)cpup, coherency_unit);
   1012   1.77        ad 	mutex_init(&cpup->cpu_lock, MUTEX_DEFAULT, IPL_NONE);
   1013   1.77        ad 	ci->ci_data.cpu_nch = cpup;
   1014   1.30       chs }
   1015   1.30       chs 
   1016   1.30       chs /*
   1017   1.30       chs  * Name cache reinitialization, for when the maximum number of vnodes increases.
   1018   1.30       chs  */
   1019   1.30       chs void
   1020   1.34     enami nchreinit(void)
   1021   1.30       chs {
   1022   1.30       chs 	struct namecache *ncp;
   1023  1.126        ad 	struct nchashhead *oldhash, *hash;
   1024  1.126        ad 	u_long i, oldmask, mask;
   1025  1.126        ad 
   1026  1.126        ad 	hash = hashinit(desiredvnodes, HASH_LIST, true, &mask);
   1027  1.125        ad 	mutex_enter(namecache_lock);
   1028   1.73        ad 	cache_lock_cpus();
   1029  1.126        ad 	oldhash = nchashtbl;
   1030  1.126        ad 	oldmask = nchash;
   1031  1.126        ad 	nchashtbl = hash;
   1032  1.126        ad 	nchash = mask;
   1033  1.126        ad 	for (i = 0; i <= oldmask; i++) {
   1034  1.126        ad 		while ((ncp = LIST_FIRST(&oldhash[i])) != NULL) {
   1035   1.30       chs 			LIST_REMOVE(ncp, nc_hash);
   1036   1.30       chs 			ncp->nc_hash.le_prev = NULL;
   1037   1.30       chs 		}
   1038   1.30       chs 	}
   1039   1.73        ad 	cache_unlock_cpus();
   1040  1.125        ad 	mutex_exit(namecache_lock);
   1041  1.126        ad 	hashdone(oldhash, HASH_LIST, oldmask);
   1042    1.1       cgd }
   1043    1.1       cgd 
   1044    1.1       cgd /*
   1045    1.1       cgd  * Cache flush, a particular vnode; called when a vnode is renamed to
   1046    1.1       cgd  * hide entries that would now be invalid
   1047    1.1       cgd  */
   1048   1.13  christos void
   1049   1.91  dholland cache_purge1(struct vnode *vp, const char *name, size_t namelen, int flags)
   1050    1.1       cgd {
   1051   1.46      yamt 	struct namecache *ncp, *ncnext;
   1052    1.1       cgd 
   1053  1.125        ad 	mutex_enter(namecache_lock);
   1054   1.55      yamt 	if (flags & PURGE_PARENTS) {
   1055  1.108  christos 		SDT_PROBE(vfs, namecache, purge, parents, vp, 0, 0, 0, 0);
   1056  1.108  christos 
   1057  1.127        ad 		for (ncp = TAILQ_FIRST(&VNODE_TO_VIMPL(vp)->vi_nclist);
   1058  1.112   hannken 		    ncp != NULL; ncp = ncnext) {
   1059  1.127        ad 			ncnext = TAILQ_NEXT(ncp, nc_vlist);
   1060  1.125        ad 			mutex_enter(&ncp->nc_lock);
   1061   1.73        ad 			cache_invalidate(ncp);
   1062  1.125        ad 			mutex_exit(&ncp->nc_lock);
   1063   1.73        ad 			cache_disassociate(ncp);
   1064   1.55      yamt 		}
   1065   1.55      yamt 	}
   1066   1.55      yamt 	if (flags & PURGE_CHILDREN) {
   1067  1.108  christos 		SDT_PROBE(vfs, namecache, purge, children, vp, 0, 0, 0, 0);
   1068  1.112   hannken 		for (ncp = LIST_FIRST(&VNODE_TO_VIMPL(vp)->vi_dnclist);
   1069  1.112   hannken 		    ncp != NULL; ncp = ncnext) {
   1070   1.55      yamt 			ncnext = LIST_NEXT(ncp, nc_dvlist);
   1071  1.125        ad 			mutex_enter(&ncp->nc_lock);
   1072   1.73        ad 			cache_invalidate(ncp);
   1073  1.125        ad 			mutex_exit(&ncp->nc_lock);
   1074   1.73        ad 			cache_disassociate(ncp);
   1075   1.55      yamt 		}
   1076   1.46      yamt 	}
   1077   1.91  dholland 	if (name != NULL) {
   1078  1.108  christos 		SDT_PROBE(vfs, namecache, purge, name, name, namelen, 0, 0, 0);
   1079   1.91  dholland 		ncp = cache_lookup_entry(vp, name, namelen);
   1080   1.55      yamt 		if (ncp) {
   1081   1.73        ad 			cache_invalidate(ncp);
   1082  1.125        ad 			mutex_exit(&ncp->nc_lock);
   1083   1.73        ad 			cache_disassociate(ncp);
   1084   1.55      yamt 		}
   1085   1.46      yamt 	}
   1086  1.125        ad 	mutex_exit(namecache_lock);
   1087    1.1       cgd }
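/*
 * Rough usage sketch (illustrative only; the actual callers live
 * elsewhere in the tree):
 *
 *	cache_purge1(vp, NULL, 0, PURGE_PARENTS | PURGE_CHILDREN);
 *		drop every entry naming vp (vi_nclist, nc_vp == vp) and
 *		every entry for a name inside vp (vi_dnclist,
 *		nc_dvp == vp), e.g. when vp is being reclaimed.
 *
 *	cache_purge1(dvp, name, namelen, 0);
 *		drop only the entry for "name" in directory dvp, e.g.
 *		after a rename or remove has made that one name stale.
 */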
   1088    1.1       cgd 
   1089    1.1       cgd /*
    1090    1.1       cgd  * Cache flush for a whole filesystem; called when a filesystem is unmounted
    1091   1.27       chs  * to remove entries that would now be invalid.
   1092    1.1       cgd  */
   1093   1.13  christos void
   1094   1.34     enami cache_purgevfs(struct mount *mp)
   1095    1.1       cgd {
   1096   1.23  augustss 	struct namecache *ncp, *nxtcp;
   1097    1.1       cgd 
   1098  1.108  christos 	SDT_PROBE(vfs, namecache, purge, vfs, mp, 0, 0, 0, 0);
   1099  1.125        ad 	mutex_enter(namecache_lock);
   1100   1.73        ad 	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
   1101   1.73        ad 		nxtcp = TAILQ_NEXT(ncp, nc_lru);
   1102  1.125        ad 		mutex_enter(&ncp->nc_lock);
   1103   1.73        ad 		if (ncp->nc_dvp != NULL && ncp->nc_dvp->v_mount == mp) {
   1104   1.73        ad 			/* Free the resources we had. */
   1105   1.73        ad 			cache_invalidate(ncp);
   1106   1.73        ad 			cache_disassociate(ncp);
   1107   1.73        ad 		}
   1108  1.125        ad 		mutex_exit(&ncp->nc_lock);
   1109   1.73        ad 	}
   1110   1.73        ad 	cache_reclaim();
   1111  1.125        ad 	mutex_exit(namecache_lock);
   1112   1.73        ad }
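/*
 * Unlike cache_purge1(), which works from a vnode's own lists, the
 * routine above has no per-mount index to consult, so it scans the
 * whole LRU list, invalidates every entry whose parent directory
 * belongs to the departing mount, and then calls cache_reclaim() so
 * those entries are freed before the unmount completes.
 */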
   1113   1.73        ad 
   1114   1.73        ad /*
    1115  1.116  riastrad  * Scan the global LRU list, invalidating entries until we meet a preset target.
   1116   1.73        ad  * Prefer to invalidate entries that have not scored a hit within
   1117   1.73        ad  * cache_hottime seconds.  We sort the LRU list only for this routine's
   1118   1.73        ad  * benefit.
   1119   1.73        ad  */
   1120   1.73        ad static void
   1121   1.73        ad cache_prune(int incache, int target)
   1122   1.73        ad {
   1123   1.73        ad 	struct namecache *ncp, *nxtcp, *sentinel;
   1124   1.73        ad 	int items, recent, tryharder;
   1125   1.73        ad 
   1126  1.125        ad 	KASSERT(mutex_owned(namecache_lock));
   1127   1.73        ad 
   1128  1.108  christos 	SDT_PROBE(vfs, namecache, prune, done, incache, target, 0, 0, 0);
   1129   1.73        ad 	items = 0;
   1130   1.73        ad 	tryharder = 0;
   1131   1.73        ad 	recent = hardclock_ticks - hz * cache_hottime;
   1132   1.73        ad 	sentinel = NULL;
   1133   1.27       chs 	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
   1134   1.73        ad 		if (incache <= target)
   1135   1.73        ad 			break;
   1136   1.73        ad 		items++;
   1137   1.27       chs 		nxtcp = TAILQ_NEXT(ncp, nc_lru);
   1138   1.73        ad 		if (ncp == sentinel) {
   1139   1.73        ad 			/*
    1140   1.73        ad 			 * If the scan has wrapped around to our sentinel,
    1141   1.73        ad 			 * ignore hit times and purge whatever we find.
   1142   1.73        ad 			 */
   1143   1.73        ad 			tryharder = 1;
   1144    1.5   mycroft 		}
   1145   1.93   hannken 		if (ncp->nc_dvp == NULL)
   1146   1.93   hannken 			continue;
   1147   1.81      yamt 		if (!tryharder && (ncp->nc_hittime - recent) > 0) {
   1148   1.73        ad 			if (sentinel == NULL)
   1149   1.73        ad 				sentinel = ncp;
   1150   1.73        ad 			TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
   1151   1.73        ad 			TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
   1152   1.73        ad 			continue;
   1153   1.73        ad 		}
   1154  1.125        ad 		mutex_enter(&ncp->nc_lock);
   1155   1.73        ad 		if (ncp->nc_dvp != NULL) {
   1156   1.73        ad 			cache_invalidate(ncp);
   1157   1.73        ad 			cache_disassociate(ncp);
   1158   1.73        ad 			incache--;
   1159   1.73        ad 		}
   1160  1.125        ad 		mutex_exit(&ncp->nc_lock);
   1161   1.73        ad 	}
   1162   1.73        ad 	cache_ev_scan.ev_count += items;
   1163   1.73        ad }
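/*
 * Step by step, the scan above behaves as follows:
 *
 *	1. Walk the LRU list from the head (coldest end), skipping
 *	   entries that are already dead (nc_dvp == NULL).
 *	2. An entry hit within the last cache_hottime seconds is spared:
 *	   it is moved to the tail, and the first spared entry is
 *	   remembered as the sentinel.
 *	3. Everything else is invalidated, bringing "incache" down
 *	   toward the target.
 *	4. If the walk reaches the sentinel again, every remaining entry
 *	   has been hit recently; tryharder is set and further entries
 *	   are purged regardless of hit time until the target is met.
 *
 * Moving the spared entries to the tail is what keeps the list in
 * rough LRU order for the next pass.
 */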
   1164   1.73        ad 
   1165   1.73        ad /*
   1166   1.73        ad  * Collect dead cache entries from all CPUs and garbage collect.
   1167   1.73        ad  */
   1168   1.73        ad static void
   1169   1.73        ad cache_reclaim(void)
   1170   1.73        ad {
   1171   1.73        ad 	struct namecache *ncp, *next;
   1172   1.73        ad 	int items;
   1173   1.73        ad 
   1174  1.125        ad 	KASSERT(mutex_owned(namecache_lock));
   1175   1.73        ad 
   1176   1.73        ad 	/*
   1177   1.73        ad 	 * If the number of extant entries not awaiting garbage collection
   1178   1.73        ad 	 * exceeds the high water mark, then reclaim stale entries until we
   1179   1.73        ad 	 * reach our low water mark.
   1180   1.73        ad 	 */
   1181  1.125        ad 	items = numcache - cache_gcpend;
   1182   1.73        ad 	if (items > (uint64_t)desiredvnodes * cache_hiwat / 100) {
   1183   1.73        ad 		cache_prune(items, (int)((uint64_t)desiredvnodes *
   1184   1.73        ad 		    cache_lowat / 100));
   1185   1.73        ad 		cache_ev_over.ev_count++;
   1186   1.73        ad 	} else
   1187   1.73        ad 		cache_ev_under.ev_count++;
   1188   1.73        ad 
   1189   1.73        ad 	/*
   1190  1.125        ad 	 * Stop forward lookup activity on all CPUs and garbage collect dead
   1191  1.125        ad 	 * entries.
   1192   1.73        ad 	 */
   1193  1.125        ad 	cache_lock_cpus();
   1194  1.125        ad 	ncp = cache_gcqueue;
   1195  1.125        ad 	cache_gcqueue = NULL;
   1196  1.125        ad 	items = cache_gcpend;
   1197  1.125        ad 	cache_gcpend = 0;
   1198  1.125        ad 	while (ncp != NULL) {
   1199  1.125        ad 		next = ncp->nc_gcqueue;
   1200   1.73        ad 		cache_disassociate(ncp);
   1201   1.73        ad 		KASSERT(ncp->nc_dvp == NULL);
   1202   1.73        ad 		if (ncp->nc_hash.le_prev != NULL) {
   1203   1.73        ad 			LIST_REMOVE(ncp, nc_hash);
   1204   1.73        ad 			ncp->nc_hash.le_prev = NULL;
   1205   1.73        ad 		}
   1206  1.121  christos 		if (ncp->nc_nlen > NCHNAMLEN) {
   1207  1.121  christos 			cache_dtor(NULL, ncp);
   1208  1.121  christos 			kmem_free(ncp, sizeof(*ncp) + ncp->nc_nlen);
   1209  1.121  christos 		} else
   1210  1.123      maya 			pool_cache_put(namecache_cache, ncp);
   1211  1.125        ad 		ncp = next;
   1212   1.73        ad 	}
   1213  1.125        ad 	cache_unlock_cpus();
   1214   1.73        ad 	numcache -= items;
   1215   1.73        ad 	cache_ev_gc.ev_count += items;
   1216   1.73        ad }
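/*
 * Worked example of the water marks above, with assumed values
 * (desiredvnodes = 100000, cache_hiwat = 97, cache_lowat = 95; the
 * real defaults are set elsewhere in this file):
 *
 *	high water = 100000 * 97 / 100 = 97000 entries
 *	low water  = 100000 * 95 / 100 = 95000 entries
 *
 * If the live count (numcache - cache_gcpend) exceeds 97000, then
 * cache_prune() is asked to bring it back down to 95000.  The second
 * half of the routine then drains cache_gcqueue with all per-CPU locks
 * held, so no lookup can touch a dying entry: each queued entry is
 * unhashed and returned to the pool, or kmem_free()'d if its name was
 * too long for the structure's embedded NCHNAMLEN name buffer.
 */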
   1217   1.73        ad 
   1218   1.73        ad /*
    1219   1.73        ad  * Cache maintenance thread, awakening once per second to:
   1220   1.73        ad  *
   1221   1.73        ad  * => keep number of entries below the high water mark
   1222   1.73        ad  * => sort pseudo-LRU list
   1223   1.73        ad  * => garbage collect dead entries
   1224   1.73        ad  */
   1225   1.73        ad static void
   1226   1.73        ad cache_thread(void *arg)
   1227   1.73        ad {
   1228   1.73        ad 
   1229  1.125        ad 	mutex_enter(namecache_lock);
   1230   1.73        ad 	for (;;) {
   1231   1.73        ad 		cache_reclaim();
   1232  1.125        ad 		kpause("cachegc", false, hz, namecache_lock);
   1233    1.1       cgd 	}
   1234    1.1       cgd }
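/*
 * The thread above never exits: kpause() drops namecache_lock while
 * sleeping for a second (hz ticks) and reacquires it before returning,
 * so cache_reclaim() always runs with the lock held, as it asserts.
 */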
   1235   1.19  sommerfe 
   1236   1.28       chs #ifdef DDB
   1237   1.28       chs void
   1238   1.28       chs namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
   1239   1.28       chs {
   1240   1.28       chs 	struct vnode *dvp = NULL;
   1241   1.28       chs 	struct namecache *ncp;
   1242   1.28       chs 
   1243   1.28       chs 	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
   1244   1.73        ad 		if (ncp->nc_vp == vp && ncp->nc_dvp != NULL) {
   1245   1.28       chs 			(*pr)("name %.*s\n", ncp->nc_nlen, ncp->nc_name);
   1246   1.28       chs 			dvp = ncp->nc_dvp;
   1247   1.28       chs 		}
   1248   1.28       chs 	}
   1249   1.28       chs 	if (dvp == NULL) {
   1250   1.28       chs 		(*pr)("name not found\n");
   1251   1.28       chs 		return;
   1252   1.28       chs 	}
   1253   1.28       chs 	vp = dvp;
   1254   1.28       chs 	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
   1255  1.127        ad 		if (ncp->nc_vp == vp && ncp->nc_dvp != NULL) {
   1256   1.28       chs 			(*pr)("parent %.*s\n", ncp->nc_nlen, ncp->nc_name);
   1257   1.28       chs 		}
   1258   1.28       chs 	}
   1259   1.28       chs }
   1260   1.28       chs #endif
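/*
 * The DDB helper above is a best-effort reverse translation: the first
 * pass prints every cached name mapping to vp and notes its parent
 * directory, and the second pass repeats the search for that parent,
 * yielding one more component of the path.  It walks the LRU list
 * without taking any locks, which is tolerable only from the debugger.
 */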
   1261   1.95     joerg 
   1262   1.95     joerg void
   1263   1.95     joerg namecache_count_pass2(void)
   1264   1.95     joerg {
   1265   1.95     joerg 	struct nchcpu *cpup = curcpu()->ci_data.cpu_nch;
   1266   1.95     joerg 
   1267  1.103    dennis 	COUNT_UNL(cpup, ncs_pass2);
   1268   1.95     joerg }
   1269   1.95     joerg 
   1270   1.95     joerg void
   1271   1.95     joerg namecache_count_2passes(void)
   1272   1.95     joerg {
   1273   1.95     joerg 	struct nchcpu *cpup = curcpu()->ci_data.cpu_nch;
   1274   1.95     joerg 
   1275  1.103    dennis 	COUNT_UNL(cpup, ncs_2passes);
   1276   1.95     joerg }
   1277   1.97     joerg 
   1278  1.103    dennis /*
   1279  1.103    dennis  * Fetch the current values of the stats.  We return the most
   1280  1.103    dennis  * recent values harvested into nchstats by cache_reclaim(), which
   1281  1.103    dennis  * will be less than a second old.
   1282  1.103    dennis  */
   1283   1.97     joerg static int
   1284   1.97     joerg cache_stat_sysctl(SYSCTLFN_ARGS)
   1285   1.97     joerg {
   1286  1.125        ad 	struct nchstats stats;
   1287  1.125        ad 	struct nchcpu *my_cpup;
   1288  1.125        ad #ifdef CACHE_STATS_CURRENT
   1289  1.103    dennis 	CPU_INFO_ITERATOR cii;
   1290  1.103    dennis 	struct cpu_info *ci;
   1291  1.125        ad #endif	/* CACHE_STATS_CURRENT */
   1292   1.97     joerg 
   1293   1.97     joerg 	if (oldp == NULL) {
   1294  1.125        ad 		*oldlenp = sizeof(stats);
   1295   1.97     joerg 		return 0;
   1296   1.97     joerg 	}
   1297   1.97     joerg 
   1298  1.125        ad 	if (*oldlenp < sizeof(stats)) {
   1299   1.97     joerg 		*oldlenp = 0;
   1300   1.97     joerg 		return 0;
   1301   1.97     joerg 	}
   1302   1.97     joerg 
   1303  1.125        ad 	/*
    1304  1.125        ad 	 * Take this CPU's per-CPU lock to hold off cache_reclaim()
    1305  1.125        ad 	 * from updating the stats while we copy them out, with
    1306  1.125        ad 	 * minimal impact on concurrent operations.
   1307  1.125        ad 	 */
   1308  1.103    dennis 	sysctl_unlock();
   1309  1.125        ad 	my_cpup = curcpu()->ci_data.cpu_nch;
   1310  1.125        ad 	mutex_enter(&my_cpup->cpu_lock);
   1311  1.125        ad 	stats = nchstats;
   1312  1.125        ad #ifdef CACHE_STATS_CURRENT
   1313  1.103    dennis 	for (CPU_INFO_FOREACH(cii, ci)) {
   1314  1.103    dennis 		struct nchcpu *cpup = ci->ci_data.cpu_nch;
   1315   1.97     joerg 
   1316  1.125        ad 		ADD(stats, cpup, ncs_goodhits);
   1317  1.125        ad 		ADD(stats, cpup, ncs_neghits);
   1318  1.125        ad 		ADD(stats, cpup, ncs_badhits);
   1319  1.125        ad 		ADD(stats, cpup, ncs_falsehits);
   1320  1.125        ad 		ADD(stats, cpup, ncs_miss);
   1321  1.125        ad 		ADD(stats, cpup, ncs_long);
   1322  1.125        ad 		ADD(stats, cpup, ncs_pass2);
   1323  1.125        ad 		ADD(stats, cpup, ncs_2passes);
   1324  1.125        ad 		ADD(stats, cpup, ncs_revhits);
   1325  1.125        ad 		ADD(stats, cpup, ncs_revmiss);
   1326  1.103    dennis 	}
   1327  1.125        ad #endif	/* CACHE_STATS_CURRENT */
   1328  1.125        ad 	mutex_exit(&my_cpup->cpu_lock);
   1329   1.97     joerg 	sysctl_relock();
   1330   1.97     joerg 
   1331  1.125        ad 	*oldlenp = sizeof(stats);
   1332  1.125        ad 	return sysctl_copyout(l, &stats, oldp, sizeof(stats));
   1333   1.97     joerg }
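/*
 * The handler above follows the usual two-step sysctl protocol: a call
 * with oldp == NULL only reports the size required, and a buffer that
 * is too small is answered with *oldlenp = 0 rather than a partial
 * copy.  With CACHE_STATS_CURRENT defined, the per-CPU counters are
 * folded in on the fly via ADD() for fully current numbers; otherwise
 * the snapshot harvested by cache_reclaim(), at most about a second
 * old, is returned as-is.
 */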
   1334   1.97     joerg 
   1335  1.104     pooka static void
   1336  1.104     pooka sysctl_cache_stat_setup(void)
   1337   1.97     joerg {
   1338  1.104     pooka 
   1339  1.104     pooka 	KASSERT(sysctllog == NULL);
   1340  1.104     pooka 	sysctl_createv(&sysctllog, 0, NULL, NULL,
   1341   1.97     joerg 		       CTLFLAG_PERMANENT,
   1342   1.97     joerg 		       CTLTYPE_STRUCT, "namecache_stats",
   1343   1.97     joerg 		       SYSCTL_DESCR("namecache statistics"),
   1344   1.97     joerg 		       cache_stat_sysctl, 0, NULL, 0,
   1345   1.97     joerg 		       CTL_VFS, CTL_CREATE, CTL_EOL);
   1346   1.97     joerg }
   1347