/*	$NetBSD: vfs_cache.c,v 1.113 2017/03/18 19:43:31 riastradh Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.113 2017/03/18 19:43:31 riastradh Exp $");

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_revcache.h"
#include "opt_dtrace.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/mount.h>
#include <sys/vnode_impl.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/kthread.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/sdt.h>

#define NAMECACHE_ENTER_REVERSE

/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by a hash value
 * obtained from (dvp, name), where dvp refers to the directory
 * containing name.
 *
 * For simplicity (and economy of storage), names longer than
 * a maximum length of NCHNAMLEN are not cached; they occur
 * infrequently in any case, and are almost never of interest.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 * The entry is also dropped when it was not possible to lock
 * the cached vnode, either because vcache_tryvget() failed or
 * the generation number changed while waiting for the lock.
 */
107
108 /*
109 * The locking in this subsystem works as follows:
110 *
111 * When an entry is added to the cache, via cache_enter(),
112 * namecache_lock is taken to exclude other writers. The new
113 * entry is added to the hash list in a way which permits
114 * concurrent lookups and invalidations in the cache done on
115 * other CPUs to continue in parallel.
116 *
117 * When a lookup is done in the cache, via cache_lookup() or
118 * cache_lookup_raw(), the per-cpu lock below is taken. This
119 * protects calls to cache_lookup_entry() and cache_invalidate()
120 * against cache_reclaim() but allows lookups to continue in
121 * parallel with cache_enter().
122 *
123 * cache_revlookup() takes namecache_lock to exclude cache_enter()
124 * and cache_reclaim() since the list it operates on is not
125 * maintained to allow concurrent reads.
126 *
127 * When cache_reclaim() is called namecache_lock is held to hold
128 * off calls to cache_enter()/cache_revlookup() and each of the
129 * per-cpu locks is taken to hold off lookups. Holding all these
130 * locks essentially idles the subsystem, ensuring there are no
131 * concurrent references to the cache entries being freed.
132 *
133 * 32 bit per-cpu statistic counters (struct nchstats_percpu) are
134 * incremented when the operations they count are performed while
135 * running on the corresponding CPU. Frequently individual counters
136 * are incremented while holding a lock (either a per-cpu lock or
137 * namecache_lock) sufficient to preclude concurrent increments
138 * being done to the same counter, so non-atomic increments are
139 * done using the COUNT() macro. Counters which are incremented
140 * when one of these locks is not held use the COUNT_UNL() macro
141 * instead. COUNT_UNL() could be defined to do atomic increments
142 * but currently just does what COUNT() does, on the theory that
143 * it is unlikely the non-atomic increment will be interrupted
144 * by something on the same CPU that increments the same counter,
145 * but even if it does happen the consequences aren't serious.
146 *
147 * N.B.: Attempting to protect COUNT_UNL() increments by taking
148 * a per-cpu lock in the namecache_count_*() functions causes
149 * a deadlock. Don't do that, use atomic increments instead if
150 * the imperfections here bug you.
151 *
152 * The 64 bit system-wide statistic counts (struct nchstats) are
153 * maintained by sampling the per-cpu counters periodically, adding
154 * in the deltas since the last samples and recording the current
155 * samples to use to compute the next delta. The sampling is done
156 * as a side effect of cache_reclaim() which is run periodically,
157 * for its own purposes, often enough to avoid overflow of the 32
158 * bit counters. While sampling in this fashion requires no locking
159 * it is never-the-less done only after all locks have been taken by
160 * cache_reclaim() to allow cache_stat_sysctl() to hold off
161 * cache_reclaim() with minimal locking.
162 *
163 * cache_stat_sysctl() takes its CPU's per-cpu lock to hold off
164 * cache_reclaim() so that it can copy the subsystem total stats
165 * without them being concurrently modified. If CACHE_STATS_CURRENT
166 * is defined it also harvests the per-cpu increments into the total,
167 * which again requires cache_reclaim() to be held off.
168 *
169 * The per-cpu data (a lock and the per-cpu stats structures)
170 * are defined next.
171 */
struct nchstats_percpu _NAMEI_CACHE_STATS(uint32_t);

struct nchcpu {
        kmutex_t cpu_lock;
        struct nchstats_percpu cpu_stats;
        /* XXX maybe __cacheline_aligned would improve this? */
        struct nchstats_percpu cpu_stats_last;  /* from last sample */
};
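
/*
 * Illustrative sketch (not compiled): the lock ordering the comment
 * above describes, as taken by cache_reclaim() and its caller.  This
 * only restates the existing protocol; it adds no new functionality.
 */
#if 0
        mutex_enter(namecache_lock);    /* holds off cache_enter()/cache_revlookup() */
        cache_lock_cpus();              /* holds off cache_lookup() on every CPU */
        /* ... reclaim dead entries; nothing else can reference them now ... */
        cache_unlock_cpus();
        mutex_exit(namecache_lock);
#endif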

/*
 * The type for the hash code.  While the hash function generates a
 * u32, the hash code has historically been passed around as a u_long,
 * and the value is modified by xor'ing a uintptr_t, so it's not
 * entirely clear what the best type is.  For now I'll leave it
 * unchanged as u_long.
 */

typedef u_long nchash_t;

/*
 * Structures associated with name caching.
 */

static kmutex_t *namecache_lock __read_mostly;
static pool_cache_t namecache_cache __read_mostly;
static TAILQ_HEAD(, namecache) nclruhead __cacheline_aligned;

static LIST_HEAD(nchashhead, namecache) *nchashtbl __read_mostly;
static u_long nchash __read_mostly;

#define NCHASH2(hash, dvp) \
    (((hash) ^ ((uintptr_t)(dvp) >> 3)) & nchash)

static LIST_HEAD(ncvhashhead, namecache) *ncvhashtbl __read_mostly;
static u_long ncvhash __read_mostly;

#define NCVHASH(vp) (((uintptr_t)(vp) >> 3) & ncvhash)
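
/*
 * Illustrative sketch (not compiled): how the two hash tables are
 * indexed, mirroring the code in cache_lookup_entry() and
 * cache_enter().  The forward table chains on the name hash xor'ed
 * with the directory vnode pointer; the reverse table chains on the
 * child vnode pointer alone.
 */
#if 0
        nchash_t hash = cache_hash(name, namelen);
        struct nchashhead *ncpp = &nchashtbl[NCHASH2(hash, dvp)];
        struct ncvhashhead *nvcpp = &ncvhashtbl[NCVHASH(vp)];
#endif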

/* Number of cache entries allocated. */
static long numcache __cacheline_aligned;

/* Garbage collection queue and number of entries pending in it. */
static void *cache_gcqueue;
static u_int cache_gcpend;

/* Cache effectiveness statistics.  This holds the totals from the per-cpu stats. */
struct nchstats nchstats __cacheline_aligned;

/*
 * Macros to count an event, update the central stats with per-cpu
 * values and add current per-cpu increments to the subsystem total
 * last collected by cache_reclaim().
 */
#define CACHE_STATS_CURRENT     /* nothing */

#define COUNT(cpup, f)  ((cpup)->cpu_stats.f++)

#define UPDATE(cpup, f) do { \
        struct nchcpu *Xcpup = (cpup); \
        uint32_t Xcnt = (volatile uint32_t) Xcpup->cpu_stats.f; \
        nchstats.f += Xcnt - Xcpup->cpu_stats_last.f; \
        Xcpup->cpu_stats_last.f = Xcnt; \
} while (/* CONSTCOND */ 0)

#define ADD(stats, cpup, f) do { \
        struct nchcpu *Xcpup = (cpup); \
        stats.f += Xcpup->cpu_stats.f - Xcpup->cpu_stats_last.f; \
} while (/* CONSTCOND */ 0)

/* Do unlocked stats the same way.  Use a different name to allow mind changes. */
#define COUNT_UNL(cpup, f)      COUNT((cpup), f)
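
/*
 * Illustrative sketch (not compiled), with made-up numbers: if a CPU's
 * cpu_stats.ncs_goodhits is 110 and the value recorded at the previous
 * sample (cpu_stats_last.ncs_goodhits) is 100, then UPDATE() folds the
 * delta of 10 into the 64-bit nchstats total and records 110 as the
 * new sample, while ADD() folds the same delta into a private copy
 * without touching the recorded sample.
 */
#if 0
        UPDATE(cpup, ncs_goodhits);     /* nchstats.ncs_goodhits += 10 */
        ADD(stats, cpup, ncs_goodhits); /* stats.ncs_goodhits += 10; sample unchanged */
#endif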

static const int cache_lowat = 95;
static const int cache_hiwat = 98;
static const int cache_hottime = 5;     /* number of seconds */
static int doingcache = 1;              /* 1 => enable the cache */

static struct evcnt cache_ev_scan;
static struct evcnt cache_ev_gc;
static struct evcnt cache_ev_over;
static struct evcnt cache_ev_under;
static struct evcnt cache_ev_forced;

static void cache_invalidate(struct namecache *);
static struct namecache *cache_lookup_entry(
    const struct vnode *, const char *, size_t);
static void cache_thread(void *);
static void cache_disassociate(struct namecache *);
static void cache_reclaim(void);
static int cache_ctor(void *, void *, int);
static void cache_dtor(void *, void *);

static struct sysctllog *sysctllog;
static void sysctl_cache_stat_setup(void);

SDT_PROVIDER_DEFINE(vfs);

SDT_PROBE_DEFINE1(vfs, namecache, invalidate, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, parents, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, children, "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, purge, name, "char *", "size_t");
SDT_PROBE_DEFINE1(vfs, namecache, purge, vfs, "struct mount *");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *",
    "char *", "size_t");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, miss, "struct vnode *",
    "char *", "size_t");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, toolong, "struct vnode *",
    "char *", "size_t");
SDT_PROBE_DEFINE2(vfs, namecache, revlookup, success, "struct vnode *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, revlookup, fail, "struct vnode *",
    "int");
SDT_PROBE_DEFINE2(vfs, namecache, prune, done, "int", "int");
SDT_PROBE_DEFINE3(vfs, namecache, enter, toolong, "struct vnode *",
    "char *", "size_t");
SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *",
    "char *", "size_t");

/*
 * Compute the hash for an entry.
 *
 * (This is for now a wrapper around namei_hash, whose interface is
 * for the time being slightly inconvenient.)
 */
static nchash_t
cache_hash(const char *name, size_t namelen)
{
        const char *endptr;

        endptr = name + namelen;
        return namei_hash(name, &endptr);
}

/*
 * Invalidate a cache entry and enqueue it for garbage collection.
 * The caller needs to hold namecache_lock or a per-cpu lock to hold
 * off cache_reclaim().
 */
static void
cache_invalidate(struct namecache *ncp)
{
        void *head;

        KASSERT(mutex_owned(&ncp->nc_lock));

        if (ncp->nc_dvp != NULL) {
                SDT_PROBE(vfs, namecache, invalidate, done, ncp->nc_dvp,
                    0, 0, 0, 0);

                ncp->nc_vp = NULL;
                ncp->nc_dvp = NULL;
                do {
                        head = cache_gcqueue;
                        ncp->nc_gcqueue = head;
                } while (atomic_cas_ptr(&cache_gcqueue, head, ncp) != head);
                atomic_inc_uint(&cache_gcpend);
        }
}

/*
 * Disassociate a namecache entry from any vnodes it is attached to,
 * and remove from the global LRU list.
 */
static void
cache_disassociate(struct namecache *ncp)
{

        KASSERT(mutex_owned(namecache_lock));
        KASSERT(ncp->nc_dvp == NULL);

        if (ncp->nc_lru.tqe_prev != NULL) {
                TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
                ncp->nc_lru.tqe_prev = NULL;
        }
        if (ncp->nc_vhash.le_prev != NULL) {
                LIST_REMOVE(ncp, nc_vhash);
                ncp->nc_vhash.le_prev = NULL;
        }
        if (ncp->nc_vlist.le_prev != NULL) {
                LIST_REMOVE(ncp, nc_vlist);
                ncp->nc_vlist.le_prev = NULL;
        }
        if (ncp->nc_dvlist.le_prev != NULL) {
                LIST_REMOVE(ncp, nc_dvlist);
                ncp->nc_dvlist.le_prev = NULL;
        }
}

/*
 * Lock all CPUs to prevent any cache lookup activity.  Conceptually,
 * this locks out all "readers".
 */
static void
cache_lock_cpus(void)
{
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;
        struct nchcpu *cpup;

        /*
         * Lock out all CPUs first, then harvest per-cpu stats.  This
         * is probably not quite as cache-efficient as doing the lock
         * and harvest at the same time, but allows cache_stat_sysctl()
         * to make do with a per-cpu lock.
         */
        for (CPU_INFO_FOREACH(cii, ci)) {
                cpup = ci->ci_data.cpu_nch;
                mutex_enter(&cpup->cpu_lock);
        }
        for (CPU_INFO_FOREACH(cii, ci)) {
                cpup = ci->ci_data.cpu_nch;
                UPDATE(cpup, ncs_goodhits);
                UPDATE(cpup, ncs_neghits);
                UPDATE(cpup, ncs_badhits);
                UPDATE(cpup, ncs_falsehits);
                UPDATE(cpup, ncs_miss);
                UPDATE(cpup, ncs_long);
                UPDATE(cpup, ncs_pass2);
                UPDATE(cpup, ncs_2passes);
                UPDATE(cpup, ncs_revhits);
                UPDATE(cpup, ncs_revmiss);
        }
}

/*
 * Release all CPU locks.
 */
static void
cache_unlock_cpus(void)
{
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;
        struct nchcpu *cpup;

        for (CPU_INFO_FOREACH(cii, ci)) {
                cpup = ci->ci_data.cpu_nch;
                mutex_exit(&cpup->cpu_lock);
        }
}

/*
 * Find a single cache entry and return it locked.
 * The caller needs to hold namecache_lock or a per-cpu lock to hold
 * off cache_reclaim().
 */
static struct namecache *
cache_lookup_entry(const struct vnode *dvp, const char *name, size_t namelen)
{
        struct nchashhead *ncpp;
        struct namecache *ncp;
        nchash_t hash;

        KASSERT(dvp != NULL);
        hash = cache_hash(name, namelen);
        ncpp = &nchashtbl[NCHASH2(hash, dvp)];

        LIST_FOREACH(ncp, ncpp, nc_hash) {
                membar_datadep_consumer();      /* for Alpha... */
                if (ncp->nc_dvp != dvp ||
                    ncp->nc_nlen != namelen ||
                    memcmp(ncp->nc_name, name, (u_int)ncp->nc_nlen))
                        continue;
                mutex_enter(&ncp->nc_lock);
                if (__predict_true(ncp->nc_dvp == dvp)) {
                        ncp->nc_hittime = hardclock_ticks;
                        SDT_PROBE(vfs, namecache, lookup, hit, dvp,
                            name, namelen, 0, 0);
                        return ncp;
                }
                /* Raced: entry has been nullified. */
                mutex_exit(&ncp->nc_lock);
        }

        SDT_PROBE(vfs, namecache, lookup, miss, dvp,
            name, namelen, 0, 0);
        return NULL;
}

/*
 * Look for the name in the cache.  We don't do this
 * if the segment name is long, simply so the cache can avoid
 * holding long names (which would either waste space or
 * add greatly to the complexity).
 *
 * Lookup is called with DVP pointing to the directory to search,
 * and CNP providing the name of the entry being sought: cn_nameptr
 * is the name, cn_namelen is its length, and cn_flags is the flags
 * word from the namei operation.
 *
 * DVP must be locked.
 *
 * There are three possible non-error return states:
 * 1. Nothing was found in the cache.  Nothing is known about
 *    the requested name.
 * 2. A negative entry was found in the cache, meaning that the
 *    requested name definitely does not exist.
 * 3. A positive entry was found in the cache, meaning that the
 *    requested name does exist and that we are providing the
 *    vnode.
 * In these cases the results are:
 * 1. 0 returned; VN is set to NULL.
 * 2. 1 returned; VN is set to NULL.
 * 3. 1 returned; VN is set to the vnode found.
 *
 * The additional result argument ISWHT is set to zero, unless a
 * negative entry is found that was entered as a whiteout, in which
 * case ISWHT is set to one.
 *
 * The ISWHT_RET argument pointer may be null.  In this case an
 * assertion is made that the whiteout flag is not set.  File systems
 * that do not support whiteouts can/should do this.
 *
 * Filesystems that do support whiteouts should add ISWHITEOUT to
 * cnp->cn_flags if ISWHT comes back nonzero.
 *
 * When a vnode is returned, it is locked, as per the vnode lookup
 * locking protocol.
 *
 * There is no way for this function to fail, in the sense of
 * generating an error that requires aborting the namei operation.
 *
 * (Prior to October 2012, this function returned an integer status,
 * and a vnode, and mucked with the flags word in CNP for whiteouts.
 * The integer status was -1 for "nothing found", ENOENT for "a
 * negative entry found", 0 for "a positive entry found", and possibly
 * other errors, and the value of VN might or might not have been set
 * depending on what error occurred.)
 */
bool
cache_lookup(struct vnode *dvp, const char *name, size_t namelen,
    uint32_t nameiop, uint32_t cnflags,
    int *iswht_ret, struct vnode **vn_ret)
{
        struct namecache *ncp;
        struct vnode *vp;
        struct nchcpu *cpup;
        int error;
        bool hit;

        /* Establish default result values. */
        if (iswht_ret != NULL) {
                *iswht_ret = 0;
        }
        *vn_ret = NULL;

        if (__predict_false(!doingcache)) {
                return false;
        }

        cpup = curcpu()->ci_data.cpu_nch;
        mutex_enter(&cpup->cpu_lock);
        if (__predict_false(namelen > NCHNAMLEN)) {
                SDT_PROBE(vfs, namecache, lookup, toolong, dvp,
                    name, namelen, 0, 0);
                COUNT(cpup, ncs_long);
                mutex_exit(&cpup->cpu_lock);
                /* found nothing */
                return false;
        }

        ncp = cache_lookup_entry(dvp, name, namelen);
        if (__predict_false(ncp == NULL)) {
                COUNT(cpup, ncs_miss);
                mutex_exit(&cpup->cpu_lock);
                /* found nothing */
                return false;
        }
        if ((cnflags & MAKEENTRY) == 0) {
                COUNT(cpup, ncs_badhits);
                /*
                 * Last component and we are renaming or deleting,
                 * the cache entry is invalid, or otherwise don't
                 * want cache entry to exist.
                 */
                cache_invalidate(ncp);
                mutex_exit(&ncp->nc_lock);
                mutex_exit(&cpup->cpu_lock);
                /* found nothing */
                return false;
        }
        if (ncp->nc_vp == NULL) {
                if (iswht_ret != NULL) {
                        /*
                         * Restore the ISWHITEOUT flag saved earlier.
                         */
                        KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
                        *iswht_ret = (ncp->nc_flags & ISWHITEOUT) != 0;
                } else {
                        KASSERT(ncp->nc_flags == 0);
                }

                if (__predict_true(nameiop != CREATE ||
                    (cnflags & ISLASTCN) == 0)) {
                        COUNT(cpup, ncs_neghits);
                        /* found neg entry; vn is already null from above */
                        hit = true;
                } else {
                        COUNT(cpup, ncs_badhits);
                        /*
                         * Last component and we are preparing to create
                         * the named object, so flush the negative cache
                         * entry.
                         */
                        cache_invalidate(ncp);
                        /* found nothing */
                        hit = false;
                }
                mutex_exit(&ncp->nc_lock);
                mutex_exit(&cpup->cpu_lock);
                return hit;
        }

        vp = ncp->nc_vp;
        mutex_enter(vp->v_interlock);
        mutex_exit(&ncp->nc_lock);
        mutex_exit(&cpup->cpu_lock);

        /*
         * Unlocked except for the vnode interlock.  Call vcache_tryvget().
         */
        error = vcache_tryvget(vp);
        if (error) {
                KASSERT(error == EBUSY);
                /*
                 * This vnode is being cleaned out.
                 * XXX badhits?
                 */
                COUNT_UNL(cpup, ncs_falsehits);
                /* found nothing */
                return false;
        }

        COUNT_UNL(cpup, ncs_goodhits);
        /* found it */
        *vn_ret = vp;
        return true;
}
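
/*
 * Illustrative sketch (not compiled): how a file system's lookup
 * routine might consume the three cache_lookup() outcomes documented
 * above.  The surrounding variables (cnp, vpp) are hypothetical.
 */
#if 0
        struct vnode *vp;
        int iswht;

        if (cache_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
            cnp->cn_nameiop, cnp->cn_flags, &iswht, &vp)) {
                if (vp == NULL) {
                        /* Negative hit: the name definitely does not exist. */
                        if (iswht)
                                cnp->cn_flags |= ISWHITEOUT;
                        return ENOENT;
                }
                /* Positive hit: vp holds the vnode for the name. */
                *vpp = vp;
                return 0;
        }
        /* Cache miss: fall through to a real directory scan. */
#endif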

/*
 * Cut-'n-pasted version of the above without the nameiop argument.
 */
bool
cache_lookup_raw(struct vnode *dvp, const char *name, size_t namelen,
    uint32_t cnflags,
    int *iswht_ret, struct vnode **vn_ret)
{
        struct namecache *ncp;
        struct vnode *vp;
        struct nchcpu *cpup;
        int error;

        /* Establish default results. */
        if (iswht_ret != NULL) {
                *iswht_ret = 0;
        }
        *vn_ret = NULL;

        if (__predict_false(!doingcache)) {
                /* found nothing */
                return false;
        }

        cpup = curcpu()->ci_data.cpu_nch;
        mutex_enter(&cpup->cpu_lock);
        if (__predict_false(namelen > NCHNAMLEN)) {
                COUNT(cpup, ncs_long);
                mutex_exit(&cpup->cpu_lock);
                /* found nothing */
                return false;
        }
        ncp = cache_lookup_entry(dvp, name, namelen);
        if (__predict_false(ncp == NULL)) {
                COUNT(cpup, ncs_miss);
                mutex_exit(&cpup->cpu_lock);
                /* found nothing */
                return false;
        }
        vp = ncp->nc_vp;
        if (vp == NULL) {
                /*
                 * Restore the ISWHITEOUT flag saved earlier.
                 */
                if (iswht_ret != NULL) {
                        KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
                        /*cnp->cn_flags |= ncp->nc_flags;*/
                        *iswht_ret = (ncp->nc_flags & ISWHITEOUT) != 0;
                }
                COUNT(cpup, ncs_neghits);
                mutex_exit(&ncp->nc_lock);
                mutex_exit(&cpup->cpu_lock);
                /* found negative entry; vn is already null from above */
                return true;
        }
        mutex_enter(vp->v_interlock);
        mutex_exit(&ncp->nc_lock);
        mutex_exit(&cpup->cpu_lock);

        /*
         * Unlocked except for the vnode interlock.  Call vcache_tryvget().
         */
        error = vcache_tryvget(vp);
        if (error) {
                KASSERT(error == EBUSY);
                /*
                 * This vnode is being cleaned out.
                 * XXX badhits?
                 */
                COUNT_UNL(cpup, ncs_falsehits);
                /* found nothing */
                return false;
        }

        COUNT_UNL(cpup, ncs_goodhits);  /* XXX can be "badhits" */
        /* found it */
        *vn_ret = vp;
        return true;
}

/*
 * Scan cache looking for name of directory entry pointing at vp.
 *
 * If the lookup succeeds the vnode is referenced and stored in dvpp.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd.)
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 */
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
{
        struct namecache *ncp;
        struct vnode *dvp;
        struct ncvhashhead *nvcpp;
        struct nchcpu *cpup;
        char *bp;
        int error, nlen;

        if (!doingcache)
                goto out;

        nvcpp = &ncvhashtbl[NCVHASH(vp)];

        /*
         * We increment counters in the local CPU's per-cpu stats.
         * We don't take the per-cpu lock, however, since this function
         * is the only place these counters are incremented so no one
         * will be racing with us to increment them.
         */
        cpup = curcpu()->ci_data.cpu_nch;
        mutex_enter(namecache_lock);
        LIST_FOREACH(ncp, nvcpp, nc_vhash) {
                mutex_enter(&ncp->nc_lock);
                if (ncp->nc_vp == vp &&
                    (dvp = ncp->nc_dvp) != NULL &&
                    dvp != vp) {        /* avoid pesky . entries.. */

#ifdef DIAGNOSTIC
                        if (ncp->nc_nlen == 1 &&
                            ncp->nc_name[0] == '.')
                                panic("cache_revlookup: found entry for .");

                        if (ncp->nc_nlen == 2 &&
                            ncp->nc_name[0] == '.' &&
                            ncp->nc_name[1] == '.')
                                panic("cache_revlookup: found entry for ..");
#endif
                        COUNT(cpup, ncs_revhits);
                        nlen = ncp->nc_nlen;

                        if (bufp) {
                                bp = *bpp;
                                bp -= nlen;
                                if (bp <= bufp) {
                                        *dvpp = NULL;
                                        mutex_exit(&ncp->nc_lock);
                                        mutex_exit(namecache_lock);
                                        SDT_PROBE(vfs, namecache, revlookup,
                                            fail, vp, ERANGE, 0, 0, 0);
                                        return (ERANGE);
                                }
                                memcpy(bp, ncp->nc_name, nlen);
                                *bpp = bp;
                        }

                        mutex_enter(dvp->v_interlock);
                        mutex_exit(&ncp->nc_lock);
                        mutex_exit(namecache_lock);
                        error = vcache_tryvget(dvp);
                        if (error) {
                                KASSERT(error == EBUSY);
                                if (bufp)
                                        (*bpp) += nlen;
                                *dvpp = NULL;
                                SDT_PROBE(vfs, namecache, revlookup, fail, vp,
                                    error, 0, 0, 0);
                                return -1;
                        }
                        *dvpp = dvp;
                        SDT_PROBE(vfs, namecache, revlookup, success, vp, dvp,
                            0, 0, 0);
                        return (0);
                }
                mutex_exit(&ncp->nc_lock);
        }
        COUNT(cpup, ncs_revmiss);
        mutex_exit(namecache_lock);
 out:
        *dvpp = NULL;
        return (-1);
}
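
/*
 * Illustrative sketch (not compiled): the backwards-filled buffer
 * convention described above, as used by getcwd.  The name is copied
 * in immediately before *bpp and *bpp is moved back over it, so
 * successive calls assemble a path from its last component toward
 * bufp.
 */
#if 0
        char buf[MAXPATHLEN];
        char *bp = buf + sizeof(buf);   /* one past the end; filled backwards */
        struct vnode *dvp;

        *--bp = '\0';
        if (cache_revlookup(vp, &dvp, &bp, buf) == 0) {
                /* bp now points at the component name; dvp is referenced. */
                *--bp = '/';
        }
#endif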

/*
 * Add an entry to the cache.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp,
    const char *name, size_t namelen, uint32_t cnflags)
{
        struct namecache *ncp;
        struct namecache *oncp;
        struct nchashhead *ncpp;
        struct ncvhashhead *nvcpp;
        nchash_t hash;

        /* First, check whether we can/should add a cache entry. */
        if ((cnflags & MAKEENTRY) == 0 ||
            __predict_false(namelen > NCHNAMLEN || !doingcache)) {
                SDT_PROBE(vfs, namecache, enter, toolong, vp, name, namelen,
                    0, 0);
                return;
        }

        SDT_PROBE(vfs, namecache, enter, done, vp, name, namelen, 0, 0);
        if (numcache > desiredvnodes) {
                mutex_enter(namecache_lock);
                cache_ev_forced.ev_count++;
                cache_reclaim();
                mutex_exit(namecache_lock);
        }

        ncp = pool_cache_get(namecache_cache, PR_WAITOK);
        mutex_enter(namecache_lock);
        numcache++;

        /*
         * Concurrent lookups in the same directory may race for a
         * cache entry.  If there's a duplicated entry, free it.
         */
        oncp = cache_lookup_entry(dvp, name, namelen);
        if (oncp) {
                cache_invalidate(oncp);
                mutex_exit(&oncp->nc_lock);
        }

        /* Grab the vnode we just found. */
        mutex_enter(&ncp->nc_lock);
        ncp->nc_vp = vp;
        ncp->nc_flags = 0;
        ncp->nc_hittime = 0;
        ncp->nc_gcqueue = NULL;
        if (vp == NULL) {
                /*
                 * For negative hits, save the ISWHITEOUT flag so we can
                 * restore it later when the cache entry is used again.
                 */
                ncp->nc_flags = cnflags & ISWHITEOUT;
        }

        /* Fill in cache info. */
        ncp->nc_dvp = dvp;
        LIST_INSERT_HEAD(&VNODE_TO_VIMPL(dvp)->vi_dnclist, ncp, nc_dvlist);
        if (vp)
                LIST_INSERT_HEAD(&VNODE_TO_VIMPL(vp)->vi_nclist, ncp, nc_vlist);
        else {
                ncp->nc_vlist.le_prev = NULL;
                ncp->nc_vlist.le_next = NULL;
        }
        KASSERT(namelen <= NCHNAMLEN);
        ncp->nc_nlen = namelen;
        memcpy(ncp->nc_name, name, (unsigned)ncp->nc_nlen);
        TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
        hash = cache_hash(name, namelen);
        ncpp = &nchashtbl[NCHASH2(hash, dvp)];

        /*
         * Flush updates before making visible in table.  No need for a
         * memory barrier on the other side: to see modifications the
         * list must be followed, meaning a dependent pointer load.
         * The below is LIST_INSERT_HEAD() inlined, with the memory
         * barrier included in the correct place.
         */
        if ((ncp->nc_hash.le_next = ncpp->lh_first) != NULL)
                ncpp->lh_first->nc_hash.le_prev = &ncp->nc_hash.le_next;
        ncp->nc_hash.le_prev = &ncpp->lh_first;
        membar_producer();
        ncpp->lh_first = ncp;

        ncp->nc_vhash.le_prev = NULL;
        ncp->nc_vhash.le_next = NULL;

        /*
         * Create reverse-cache entries (used in getcwd) for directories.
         * (and in linux procfs exe node)
         */
        if (vp != NULL &&
            vp != dvp &&
#ifndef NAMECACHE_ENTER_REVERSE
            vp->v_type == VDIR &&
#endif
            (ncp->nc_nlen > 2 ||
            (ncp->nc_nlen > 1 && ncp->nc_name[1] != '.') ||
            (/* ncp->nc_nlen > 0 && */ ncp->nc_name[0] != '.'))) {
                nvcpp = &ncvhashtbl[NCVHASH(vp)];
                LIST_INSERT_HEAD(nvcpp, ncp, nc_vhash);
        }
        mutex_exit(&ncp->nc_lock);
        mutex_exit(namecache_lock);
}
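
/*
 * Illustrative sketch (not compiled): how a file system might feed
 * directory scan results back into the cache.  A found vnode makes a
 * positive entry; a scan that proved the name absent makes a negative
 * entry by passing a NULL vnode.  The componentname (cnp) is
 * hypothetical.
 */
#if 0
        /* After a scan that found the name: */
        cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
            cnp->cn_flags);

        /* After a scan that proved the name absent: */
        cache_enter(dvp, NULL, cnp->cn_nameptr, cnp->cn_namelen,
            cnp->cn_flags);
#endif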

/*
 * Name cache initialization, from vfs_init() when we are booting.
 */
void
nchinit(void)
{
        int error;

        TAILQ_INIT(&nclruhead);
        namecache_cache = pool_cache_init(sizeof(struct namecache),
            coherency_unit, 0, 0, "ncache", NULL, IPL_NONE, cache_ctor,
            cache_dtor, NULL);
        KASSERT(namecache_cache != NULL);

        namecache_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

        nchashtbl = hashinit(desiredvnodes, HASH_LIST, true, &nchash);
        ncvhashtbl =
#ifdef NAMECACHE_ENTER_REVERSE
            hashinit(desiredvnodes, HASH_LIST, true, &ncvhash);
#else
            hashinit(desiredvnodes/8, HASH_LIST, true, &ncvhash);
#endif

        error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, cache_thread,
            NULL, NULL, "cachegc");
        if (error != 0)
                panic("nchinit %d", error);

        evcnt_attach_dynamic(&cache_ev_scan, EVCNT_TYPE_MISC, NULL,
            "namecache", "entries scanned");
        evcnt_attach_dynamic(&cache_ev_gc, EVCNT_TYPE_MISC, NULL,
            "namecache", "entries collected");
        evcnt_attach_dynamic(&cache_ev_over, EVCNT_TYPE_MISC, NULL,
            "namecache", "over scan target");
        evcnt_attach_dynamic(&cache_ev_under, EVCNT_TYPE_MISC, NULL,
            "namecache", "under scan target");
        evcnt_attach_dynamic(&cache_ev_forced, EVCNT_TYPE_MISC, NULL,
            "namecache", "forced reclaims");

        sysctl_cache_stat_setup();
}

static int
cache_ctor(void *arg, void *obj, int flag)
{
        struct namecache *ncp;

        ncp = obj;
        mutex_init(&ncp->nc_lock, MUTEX_DEFAULT, IPL_NONE);

        return 0;
}

static void
cache_dtor(void *arg, void *obj)
{
        struct namecache *ncp;

        ncp = obj;
        mutex_destroy(&ncp->nc_lock);
}

/*
 * Called once for each CPU in the system as attached.
 */
void
cache_cpu_init(struct cpu_info *ci)
{
        struct nchcpu *cpup;
        size_t sz;

        sz = roundup2(sizeof(*cpup), coherency_unit) + coherency_unit;
        cpup = kmem_zalloc(sz, KM_SLEEP);
        cpup = (void *)roundup2((uintptr_t)cpup, coherency_unit);
        mutex_init(&cpup->cpu_lock, MUTEX_DEFAULT, IPL_NONE);
        ci->ci_data.cpu_nch = cpup;
}

/*
 * Name cache reinitialization, for when the maximum number of vnodes
 * increases.
 */
void
nchreinit(void)
{
        struct namecache *ncp;
        struct nchashhead *oldhash1, *hash1;
        struct ncvhashhead *oldhash2, *hash2;
        u_long i, oldmask1, oldmask2, mask1, mask2;

        hash1 = hashinit(desiredvnodes, HASH_LIST, true, &mask1);
        hash2 =
#ifdef NAMECACHE_ENTER_REVERSE
            hashinit(desiredvnodes, HASH_LIST, true, &mask2);
#else
            hashinit(desiredvnodes/8, HASH_LIST, true, &mask2);
#endif
        mutex_enter(namecache_lock);
        cache_lock_cpus();
        oldhash1 = nchashtbl;
        oldmask1 = nchash;
        nchashtbl = hash1;
        nchash = mask1;
        oldhash2 = ncvhashtbl;
        oldmask2 = ncvhash;
        ncvhashtbl = hash2;
        ncvhash = mask2;
        for (i = 0; i <= oldmask1; i++) {
                while ((ncp = LIST_FIRST(&oldhash1[i])) != NULL) {
                        LIST_REMOVE(ncp, nc_hash);
                        ncp->nc_hash.le_prev = NULL;
                }
        }
        for (i = 0; i <= oldmask2; i++) {
                while ((ncp = LIST_FIRST(&oldhash2[i])) != NULL) {
                        LIST_REMOVE(ncp, nc_vhash);
                        ncp->nc_vhash.le_prev = NULL;
                }
        }
        cache_unlock_cpus();
        mutex_exit(namecache_lock);
        hashdone(oldhash1, HASH_LIST, oldmask1);
        hashdone(oldhash2, HASH_LIST, oldmask2);
}

/*
 * Cache flush, a particular vnode; called when a vnode is renamed to
 * hide entries that would now be invalid.
 */
void
cache_purge1(struct vnode *vp, const char *name, size_t namelen, int flags)
{
        struct namecache *ncp, *ncnext;

        mutex_enter(namecache_lock);
        if (flags & PURGE_PARENTS) {
                SDT_PROBE(vfs, namecache, purge, parents, vp, 0, 0, 0, 0);

                for (ncp = LIST_FIRST(&VNODE_TO_VIMPL(vp)->vi_nclist);
                    ncp != NULL; ncp = ncnext) {
                        ncnext = LIST_NEXT(ncp, nc_vlist);
                        mutex_enter(&ncp->nc_lock);
                        cache_invalidate(ncp);
                        mutex_exit(&ncp->nc_lock);
                        cache_disassociate(ncp);
                }
        }
        if (flags & PURGE_CHILDREN) {
                SDT_PROBE(vfs, namecache, purge, children, vp, 0, 0, 0, 0);
                for (ncp = LIST_FIRST(&VNODE_TO_VIMPL(vp)->vi_dnclist);
                    ncp != NULL; ncp = ncnext) {
                        ncnext = LIST_NEXT(ncp, nc_dvlist);
                        mutex_enter(&ncp->nc_lock);
                        cache_invalidate(ncp);
                        mutex_exit(&ncp->nc_lock);
                        cache_disassociate(ncp);
                }
        }
        if (name != NULL) {
                SDT_PROBE(vfs, namecache, purge, name, name, namelen, 0, 0, 0);
                ncp = cache_lookup_entry(vp, name, namelen);
                if (ncp) {
                        cache_invalidate(ncp);
                        mutex_exit(&ncp->nc_lock);
                        cache_disassociate(ncp);
                }
        }
        mutex_exit(namecache_lock);
}

/*
 * Cache flush, a whole filesystem; called when filesys is umounted to
 * remove entries that would now be invalid.
 */
void
cache_purgevfs(struct mount *mp)
{
        struct namecache *ncp, *nxtcp;

        SDT_PROBE(vfs, namecache, purge, vfs, mp, 0, 0, 0, 0);
        mutex_enter(namecache_lock);
        for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
                nxtcp = TAILQ_NEXT(ncp, nc_lru);
                mutex_enter(&ncp->nc_lock);
                if (ncp->nc_dvp != NULL && ncp->nc_dvp->v_mount == mp) {
                        /* Free the resources we had. */
                        cache_invalidate(ncp);
                        cache_disassociate(ncp);
                }
                mutex_exit(&ncp->nc_lock);
        }
        cache_reclaim();
        mutex_exit(namecache_lock);
}

/*
 * Scan global list invalidating entries until we meet a preset target.
 * Prefer to invalidate entries that have not scored a hit within
 * cache_hottime seconds.  We sort the LRU list only for this routine's
 * benefit.
 */
static void
cache_prune(int incache, int target)
{
        struct namecache *ncp, *nxtcp, *sentinel;
        int items, recent, tryharder;

        KASSERT(mutex_owned(namecache_lock));

        SDT_PROBE(vfs, namecache, prune, done, incache, target, 0, 0, 0);
        items = 0;
        tryharder = 0;
        recent = hardclock_ticks - hz * cache_hottime;
        sentinel = NULL;
        for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
                if (incache <= target)
                        break;
                items++;
                nxtcp = TAILQ_NEXT(ncp, nc_lru);
                if (ncp == sentinel) {
                        /*
                         * If we looped back on ourself, then ignore
                         * recent entries and purge whatever we find.
                         */
                        tryharder = 1;
                }
                if (ncp->nc_dvp == NULL)
                        continue;
                if (!tryharder && (ncp->nc_hittime - recent) > 0) {
                        if (sentinel == NULL)
                                sentinel = ncp;
                        TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
                        TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
                        continue;
                }
                mutex_enter(&ncp->nc_lock);
                if (ncp->nc_dvp != NULL) {
                        cache_invalidate(ncp);
                        cache_disassociate(ncp);
                        incache--;
                }
                mutex_exit(&ncp->nc_lock);
        }
        cache_ev_scan.ev_count += items;
}

/*
 * Collect dead cache entries from all CPUs and garbage collect.
 */
static void
cache_reclaim(void)
{
        struct namecache *ncp, *next;
        int items;

        KASSERT(mutex_owned(namecache_lock));

        /*
         * If the number of extant entries not awaiting garbage collection
         * exceeds the high water mark, then reclaim stale entries until we
         * reach our low water mark.
         */
        items = numcache - cache_gcpend;
        if (items > (uint64_t)desiredvnodes * cache_hiwat / 100) {
                cache_prune(items, (int)((uint64_t)desiredvnodes *
                    cache_lowat / 100));
                cache_ev_over.ev_count++;
        } else
                cache_ev_under.ev_count++;

        /*
         * Stop forward lookup activity on all CPUs and garbage collect dead
         * entries.
         */
        cache_lock_cpus();
        ncp = cache_gcqueue;
        cache_gcqueue = NULL;
        items = cache_gcpend;
        cache_gcpend = 0;
        while (ncp != NULL) {
                next = ncp->nc_gcqueue;
                cache_disassociate(ncp);
                KASSERT(ncp->nc_dvp == NULL);
                if (ncp->nc_hash.le_prev != NULL) {
                        LIST_REMOVE(ncp, nc_hash);
                        ncp->nc_hash.le_prev = NULL;
                }
                pool_cache_put(namecache_cache, ncp);
                ncp = next;
        }
        cache_unlock_cpus();
        numcache -= items;
        cache_ev_gc.ev_count += items;
}

/*
 * Cache maintenance thread, awakening once per second to:
 *
 * => keep number of entries below the high water mark
 * => sort pseudo-LRU list
 * => garbage collect dead entries
 */
static void
cache_thread(void *arg)
{

        mutex_enter(namecache_lock);
        for (;;) {
                cache_reclaim();
                kpause("cachegc", false, hz, namecache_lock);
        }
}

#ifdef DDB
void
namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
{
        struct vnode *dvp = NULL;
        struct namecache *ncp;

        TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
                if (ncp->nc_vp == vp && ncp->nc_dvp != NULL) {
                        (*pr)("name %.*s\n", ncp->nc_nlen, ncp->nc_name);
                        dvp = ncp->nc_dvp;
                }
        }
        if (dvp == NULL) {
                (*pr)("name not found\n");
                return;
        }
        vp = dvp;
        TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
                if (ncp->nc_vp == vp) {
                        (*pr)("parent %.*s\n", ncp->nc_nlen, ncp->nc_name);
                }
        }
}
#endif

void
namecache_count_pass2(void)
{
        struct nchcpu *cpup = curcpu()->ci_data.cpu_nch;

        COUNT_UNL(cpup, ncs_pass2);
}

void
namecache_count_2passes(void)
{
        struct nchcpu *cpup = curcpu()->ci_data.cpu_nch;

        COUNT_UNL(cpup, ncs_2passes);
}

/*
 * Fetch the current values of the stats.  We return the most
 * recent values harvested into nchstats by cache_reclaim(), which
 * will be less than a second old.
 */
static int
cache_stat_sysctl(SYSCTLFN_ARGS)
{
        struct nchstats stats;
        struct nchcpu *my_cpup;
#ifdef CACHE_STATS_CURRENT
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;
#endif  /* CACHE_STATS_CURRENT */

        if (oldp == NULL) {
                *oldlenp = sizeof(stats);
                return 0;
        }

        if (*oldlenp < sizeof(stats)) {
                *oldlenp = 0;
                return 0;
        }

        /*
         * Take this CPU's per-cpu lock to hold off cache_reclaim()
         * from doing a stats update while doing minimal damage to
         * concurrent operations.
         */
        sysctl_unlock();
        my_cpup = curcpu()->ci_data.cpu_nch;
        mutex_enter(&my_cpup->cpu_lock);
        stats = nchstats;
#ifdef CACHE_STATS_CURRENT
        for (CPU_INFO_FOREACH(cii, ci)) {
                struct nchcpu *cpup = ci->ci_data.cpu_nch;

                ADD(stats, cpup, ncs_goodhits);
                ADD(stats, cpup, ncs_neghits);
                ADD(stats, cpup, ncs_badhits);
                ADD(stats, cpup, ncs_falsehits);
                ADD(stats, cpup, ncs_miss);
                ADD(stats, cpup, ncs_long);
                ADD(stats, cpup, ncs_pass2);
                ADD(stats, cpup, ncs_2passes);
                ADD(stats, cpup, ncs_revhits);
                ADD(stats, cpup, ncs_revmiss);
        }
#endif  /* CACHE_STATS_CURRENT */
        mutex_exit(&my_cpup->cpu_lock);
        sysctl_relock();

        *oldlenp = sizeof(stats);
        return sysctl_copyout(l, &stats, oldp, sizeof(stats));
}
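
/*
 * Illustrative sketch (not compiled): reading these stats from
 * userland through the "namecache_stats" node created below.  This
 * assumes struct nchstats is visible to userland via <sys/namei.h>;
 * verify the header and node name before relying on it.
 */
#if 0
        struct nchstats st;
        size_t len = sizeof(st);

        if (sysctlbyname("vfs.namecache_stats", &st, &len, NULL, 0) == 0)
                printf("good hits: %llu\n", (unsigned long long)st.ncs_goodhits);
#endif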

static void
sysctl_cache_stat_setup(void)
{

        KASSERT(sysctllog == NULL);
        sysctl_createv(&sysctllog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_STRUCT, "namecache_stats",
            SYSCTL_DESCR("namecache statistics"),
            cache_stat_sysctl, 0, NULL, 0,
            CTL_VFS, CTL_CREATE, CTL_EOL);
}