/*	$NetBSD: vfs_cache.c,v 1.77 2008/06/03 15:50:22 ad Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.77 2008/06/03 15:50:22 ad Exp $");

#include "opt_ddb.h"
#include "opt_revcache.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <sys/malloc.h>		/* for MALLOC_DEFINE() below */
#include <sys/kmem.h>		/* for kmem_zalloc() in cache_cpu_init() */
#include <sys/pool.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/kthread.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>

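/*
 * When NAMECACHE_ENTER_REVERSE is defined, reverse-cache (vnode to
 * parent/name) entries are made for all vnodes, not just directories;
 * see the nc_vhash handling in cache_enter() and the hash table sizing
 * in nchinit() and nchreinit().
 */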
#define NAMECACHE_ENTER_REVERSE
/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache for future
 * reference.  The cache is managed LRU, so frequently used names will
 * hang around.  The cache is indexed by a hash value obtained from
 * (dvp, name), where dvp refers to the directory containing name.
 *
 * For simplicity (and economy of storage), names longer than a
 * maximum length of NCHNAMLEN are not cached; they occur infrequently
 * in any case, and are almost never of interest.
 *
 * Upon reaching the last segment of a path, if the reference is for
 * DELETE, or NOCACHE is set (rewrite), and the name is located in the
 * cache, it will be dropped.  The entry is also dropped when it was
 * not possible to lock the cached vnode, either because vget() failed
 * or because the generation number changed while waiting for the lock.
 */
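
/*
 * Locking, as implemented below: lookups ("readers") take only the
 * per-CPU lock for their CPU (struct nchcpu).  Modifications take
 * namecache_lock plus the per-entry nc_lock.  cache_lock_cpus() takes
 * every per-CPU lock when readers must be excluded entirely, e.g.
 * while garbage collecting dead entries or resizing the hash tables.
 */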

/*
 * Per-CPU namecache data.
 */
struct nchcpu {
	kmutex_t	cpu_lock;
	struct nchstats	cpu_stats;
};

/*
 * Structures associated with name caching.
 */
LIST_HEAD(nchashhead, namecache) *nchashtbl;
u_long	nchash;				/* size of hash table - 1 */
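/*
 * A hash chain is picked by mixing the precomputed name hash with the
 * directory vnode address (shifted to discard the always-zero low bits).
 */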
#define	NCHASH(cnp, dvp)	\
	(((cnp)->cn_hash ^ ((uintptr_t)(dvp) >> 3)) & nchash)

LIST_HEAD(ncvhashhead, namecache) *ncvhashtbl;
u_long	ncvhash;			/* size of hash table - 1 */
#define	NCVHASH(vp)		(((uintptr_t)(vp) >> 3) & ncvhash)

long	numcache;		/* number of cache entries allocated */
static u_int	cache_gcpend;	/* number of entries pending GC */
static void	*cache_gcqueue;	/* garbage collection queue */

TAILQ_HEAD(, namecache) nclruhead =	/* LRU chain */
	TAILQ_HEAD_INITIALIZER(nclruhead);
#define	COUNT(c,x)	(c.x++)
struct	nchstats nchstats;	/* cache effectiveness statistics */

static pool_cache_t namecache_cache;

MALLOC_DEFINE(M_CACHE, "namecache", "Dynamically allocated cache entries");

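/*
 * Tunables.  The water marks are percentages of desiredvnodes: see
 * cache_reclaim(), which prunes from the high water mark down to the
 * low water mark.
 */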
int cache_lowat = 95;
int cache_hiwat = 98;
int cache_hottime = 5;		/* number of seconds */
int doingcache = 1;		/* 1 => enable the cache */

static struct evcnt cache_ev_scan;
static struct evcnt cache_ev_gc;
static struct evcnt cache_ev_over;
static struct evcnt cache_ev_under;
static struct evcnt cache_ev_forced;

/* A single lock to serialize modifications. */
static kmutex_t *namecache_lock;

static void	cache_invalidate(struct namecache *);
static inline struct namecache *cache_lookup_entry(
    const struct vnode *, const struct componentname *);
static void	cache_thread(void *);
static void	cache_disassociate(struct namecache *);
static void	cache_reclaim(void);
static int	cache_ctor(void *, void *, int);
static void	cache_dtor(void *, void *);

/*
 * Invalidate a cache entry and enqueue it for garbage collection.
 */
static void
cache_invalidate(struct namecache *ncp)
{
	void *head;

	KASSERT(mutex_owned(&ncp->nc_lock));

	if (ncp->nc_dvp != NULL) {
		ncp->nc_vp = NULL;
		ncp->nc_dvp = NULL;
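		/*
		 * Push the entry onto the garbage collection queue
		 * with a lock-free CAS loop: retry until no other
		 * CPU has changed the list head underneath us.
		 */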
		do {
			head = cache_gcqueue;
			ncp->nc_gcqueue = head;
		} while (atomic_cas_ptr(&cache_gcqueue, head, ncp) != head);
		atomic_inc_uint(&cache_gcpend);
	}
}

/*
 * Disassociate a namecache entry from any vnodes it is attached to,
 * and remove from the global LRU list.
 */
static void
cache_disassociate(struct namecache *ncp)
{

	KASSERT(mutex_owned(namecache_lock));
	KASSERT(ncp->nc_dvp == NULL);

	if (ncp->nc_lru.tqe_prev != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		ncp->nc_lru.tqe_prev = NULL;
	}
	if (ncp->nc_vhash.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vhash);
		ncp->nc_vhash.le_prev = NULL;
	}
	if (ncp->nc_vlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vlist);
		ncp->nc_vlist.le_prev = NULL;
	}
	if (ncp->nc_dvlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_dvlist);
		ncp->nc_dvlist.le_prev = NULL;
	}
}

/*
 * Lock all CPUs to prevent any cache lookup activity.  Conceptually,
 * this locks out all "readers".
 */
static void
cache_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;
	long *s, *d, *m;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_enter(&cpup->cpu_lock);

		/* Collate statistics. */
		d = (long *)&nchstats;
		s = (long *)&cpup->cpu_stats;
		m = s + sizeof(nchstats) / sizeof(long);
		for (; s < m; s++, d++) {
			*d += *s;
			*s = 0;
		}
	}
}

/*
 * Release all CPU locks.
 */
static void
cache_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_exit(&cpup->cpu_lock);
	}
}

/*
 * Find a single cache entry and return it locked.  'namecache_lock' or
 * at least one of the per-CPU locks must be held.
 */
static struct namecache *
cache_lookup_entry(const struct vnode *dvp, const struct componentname *cnp)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;

	ncpp = &nchashtbl[NCHASH(cnp, dvp)];

	LIST_FOREACH(ncp, ncpp, nc_hash) {
		if (ncp->nc_dvp != dvp ||
		    ncp->nc_nlen != cnp->cn_namelen ||
		    memcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen))
			continue;
		mutex_enter(&ncp->nc_lock);
		if (__predict_true(ncp->nc_dvp == dvp)) {
			ncp->nc_hittime = hardclock_ticks;
			return ncp;
		}
		/* Raced: entry has been nullified. */
		mutex_exit(&ncp->nc_lock);
	}

	return NULL;
}

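/*
 * A sketch of how a file system's lookup routine typically drives the
 * cache (the caller's own locking protocol is omitted):
 *
 *	error = cache_lookup(dvp, vpp, cnp);
 *	if (error >= 0)
 *		return error;	(0 = hit, *vpp locked; ENOENT = negative hit)
 *	...scan the directory, then cache_enter(dvp, *vpp, cnp)...
 */
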
/*
 * Look for the name in the cache.  We don't do this if the segment
 * name is long, simply so the cache can avoid holding long names
 * (which would either waste space, or add greatly to the complexity).
 *
 * Lookup is called with dvp pointing to the directory to search,
 * cnp->cn_nameptr pointing to the name of the entry being sought,
 * cnp->cn_namelen giving the length of the name, and cnp->cn_hash
 * containing a hash of the name.  If the lookup succeeds, the vnode
 * is locked, stored in *vpp, and a status of zero is returned.  If
 * the vnode cannot be locked, *vpp is set to NULL and a status of -1
 * is returned, as for a miss.  If the lookup determines that the name
 * does not exist (negative caching), a status of ENOENT is returned.
 * If the lookup fails, a status of -1 is returned.
 */
int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error;

	if (__predict_false(!doingcache)) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return -1;
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(cnp->cn_namelen > NCHNAMLEN)) {
		COUNT(cpup->cpu_stats, ncs_long);
		cnp->cn_flags &= ~MAKEENTRY;
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	ncp = cache_lookup_entry(dvp, cnp);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup->cpu_stats, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		COUNT(cpup->cpu_stats, ncs_badhits);
		/*
		 * Last component and we are renaming or deleting,
		 * the cache entry is invalid, or otherwise don't
		 * want cache entry to exist.
		 */
		cache_invalidate(ncp);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	} else if (ncp->nc_vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		cnp->cn_flags |= ncp->nc_flags;
		if (__predict_true(cnp->cn_nameiop != CREATE ||
		    (cnp->cn_flags & ISLASTCN) == 0)) {
			COUNT(cpup->cpu_stats, ncs_neghits);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(&cpup->cpu_lock);
			return ENOENT;
		} else {
			COUNT(cpup->cpu_stats, ncs_badhits);
			/*
			 * Last component and we are renaming or
			 * deleting, the cache entry is invalid,
			 * or otherwise don't want cache entry to
			 * exist.
			 */
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(&cpup->cpu_lock);
			*vpp = NULL;
			return -1;
		}
	}

	vp = ncp->nc_vp;
	if (vtryget(vp)) {
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
	} else {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		error = vget(vp, LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			KASSERT(error == EBUSY);
			/*
			 * This vnode is being cleaned out.
			 * XXX badhits?
			 */
			COUNT(cpup->cpu_stats, ncs_falsehits);
			*vpp = NULL;
			return -1;
		}
	}

#ifdef DEBUG
	/*
	 * Since we released ncp->nc_lock, we can't use this pointer
	 * any more.
	 */
	ncp = NULL;
#endif /* DEBUG */

	if (vp == dvp) {	/* lookup on "." */
		error = 0;
	} else if (cnp->cn_flags & ISDOTDOT) {
		VOP_UNLOCK(dvp, 0);
		error = vn_lock(vp, LK_EXCLUSIVE);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		error = vn_lock(vp, LK_EXCLUSIVE);
	}

	/*
	 * Check that the lock succeeded.
	 */
	if (error) {
		/* Unlocked, but only for stats. */
		COUNT(cpup->cpu_stats, ncs_badhits);
		*vpp = NULL;
		return -1;
	}

	/* Unlocked, but only for stats. */
	COUNT(cpup->cpu_stats, ncs_goodhits);
	*vpp = vp;
	return 0;
}

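/*
 * Variant of cache_lookup() that ignores MAKEENTRY and does not apply
 * the namei locking protocol to the result: on a hit, the vnode is
 * returned referenced but unlocked.
 */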
int
cache_lookup_raw(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error;

	if (__predict_false(!doingcache)) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return -1;
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(cnp->cn_namelen > NCHNAMLEN)) {
		COUNT(cpup->cpu_stats, ncs_long);
		cnp->cn_flags &= ~MAKEENTRY;
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	ncp = cache_lookup_entry(dvp, cnp);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup->cpu_stats, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	vp = ncp->nc_vp;
	if (vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		cnp->cn_flags |= ncp->nc_flags;
		COUNT(cpup->cpu_stats, ncs_neghits);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		return ENOENT;
	}
	if (vtryget(vp)) {
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
	} else {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		error = vget(vp, LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			KASSERT(error == EBUSY);
			/*
			 * This vnode is being cleaned out.
			 * XXX badhits?
			 */
			COUNT(cpup->cpu_stats, ncs_falsehits);
			*vpp = NULL;
			return -1;
		}
	}

	*vpp = vp;
	return 0;
}

/*
 * Scan the cache looking for the name of a directory entry pointing
 * at vp.
 *
 * On success, fill in *dvpp with the vnode of the parent directory.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 */
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
{
	struct namecache *ncp;
	struct vnode *dvp;
	struct ncvhashhead *nvcpp;
	char *bp;

	if (!doingcache)
		goto out;

	nvcpp = &ncvhashtbl[NCVHASH(vp)];

	mutex_enter(namecache_lock);
	LIST_FOREACH(ncp, nvcpp, nc_vhash) {
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_vp == vp &&
		    (dvp = ncp->nc_dvp) != NULL &&
		    dvp != vp) {		/* avoid pesky "." entries */

#ifdef DIAGNOSTIC
			if (ncp->nc_nlen == 1 &&
			    ncp->nc_name[0] == '.')
				panic("cache_revlookup: found entry for .");

			if (ncp->nc_nlen == 2 &&
			    ncp->nc_name[0] == '.' &&
			    ncp->nc_name[1] == '.')
				panic("cache_revlookup: found entry for ..");
#endif
			COUNT(nchstats, ncs_revhits);

			if (bufp) {
				bp = *bpp;
				bp -= ncp->nc_nlen;
				if (bp <= bufp) {
					*dvpp = NULL;
					mutex_exit(&ncp->nc_lock);
					mutex_exit(namecache_lock);
					return (ERANGE);
				}
				memcpy(bp, ncp->nc_name, ncp->nc_nlen);
				*bpp = bp;
			}

			/* XXX MP: how do we know dvp won't evaporate? */
			*dvpp = dvp;
			mutex_exit(&ncp->nc_lock);
			mutex_exit(namecache_lock);
			return (0);
		}
		mutex_exit(&ncp->nc_lock);
	}
	COUNT(nchstats, ncs_revmiss);
	mutex_exit(namecache_lock);
 out:
	*dvpp = NULL;
	return (-1);
}

/*
 * Add an entry to the cache.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct namecache *oncp;
	struct nchashhead *ncpp;
	struct ncvhashhead *nvcpp;

#ifdef DIAGNOSTIC
	if (cnp->cn_namelen > NCHNAMLEN)
		panic("cache_enter: name too long");
#endif
	if (!doingcache)
		return;

	if (numcache > desiredvnodes) {
		mutex_enter(namecache_lock);
		cache_ev_forced.ev_count++;
		cache_reclaim();
		mutex_exit(namecache_lock);
	}

	ncp = pool_cache_get(namecache_cache, PR_WAITOK);
	mutex_enter(namecache_lock);
	numcache++;

	/*
	 * Concurrent lookups in the same directory may race for a
	 * cache entry.  If there is a duplicate entry, free it.
	 */
	oncp = cache_lookup_entry(dvp, cnp);
	if (oncp) {
		cache_invalidate(oncp);
		mutex_exit(&oncp->nc_lock);
	}

	/* Initialize the new entry under its lock. */
	mutex_enter(&ncp->nc_lock);
	ncp->nc_vp = vp;
	ncp->nc_flags = 0;
	ncp->nc_hittime = 0;
	ncp->nc_gcqueue = NULL;
	if (vp == NULL) {
		/*
		 * For negative hits, save the ISWHITEOUT flag so we can
		 * restore it later when the cache entry is used again.
		 */
		ncp->nc_flags = cnp->cn_flags & ISWHITEOUT;
	}
	/* Fill in cache info. */
	ncp->nc_dvp = dvp;
	LIST_INSERT_HEAD(&dvp->v_dnclist, ncp, nc_dvlist);
	if (vp)
		LIST_INSERT_HEAD(&vp->v_nclist, ncp, nc_vlist);
	else {
		ncp->nc_vlist.le_prev = NULL;
		ncp->nc_vlist.le_next = NULL;
	}
	ncp->nc_nlen = cnp->cn_namelen;
	TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	memcpy(ncp->nc_name, cnp->cn_nameptr, (unsigned)ncp->nc_nlen);
	ncpp = &nchashtbl[NCHASH(cnp, dvp)];

	/*
	 * Flush updates before making visible in table.  No need for a
	 * memory barrier on the other side: to see modifications the
	 * list must be followed, meaning a dependent pointer load.
	 * The below is LIST_INSERT_HEAD() inlined, with the memory
	 * barrier included in the correct place.
	 */
	if ((ncp->nc_hash.le_next = ncpp->lh_first) != NULL)
		ncpp->lh_first->nc_hash.le_prev = &ncp->nc_hash.le_next;
	ncp->nc_hash.le_prev = &ncpp->lh_first;
	membar_producer();
	ncpp->lh_first = ncp;

	ncp->nc_vhash.le_prev = NULL;
	ncp->nc_vhash.le_next = NULL;

	/*
	 * Create reverse-cache entries (used in getcwd, and by the
	 * Linux procfs exe node); normally only for directories, or
	 * for everything if NAMECACHE_ENTER_REVERSE is defined.
	 */
	if (vp != NULL &&
	    vp != dvp &&
#ifndef NAMECACHE_ENTER_REVERSE
	    vp->v_type == VDIR &&
#endif
	    (ncp->nc_nlen > 2 ||
	    (ncp->nc_nlen > 1 && ncp->nc_name[1] != '.') ||
	    (/* ncp->nc_nlen > 0 && */ ncp->nc_name[0] != '.'))) {
		nvcpp = &ncvhashtbl[NCVHASH(vp)];
		LIST_INSERT_HEAD(nvcpp, ncp, nc_vhash);
	}
	mutex_exit(&ncp->nc_lock);
	mutex_exit(namecache_lock);
}

/*
 * Name cache initialization, from vfs_init() when we are booting.
 */
void
nchinit(void)
{
	int error;

	namecache_cache = pool_cache_init(sizeof(struct namecache),
	    coherency_unit, 0, 0, "ncache", NULL, IPL_NONE, cache_ctor,
	    cache_dtor, NULL);
	KASSERT(namecache_cache != NULL);

	namecache_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	nchashtbl = hashinit(desiredvnodes, HASH_LIST, true, &nchash);
	ncvhashtbl =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &ncvhash);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &ncvhash);
#endif

	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, cache_thread,
	    NULL, NULL, "cachegc");
	if (error != 0)
		panic("nchinit %d", error);

	evcnt_attach_dynamic(&cache_ev_scan, EVCNT_TYPE_MISC, NULL,
	    "namecache", "entries scanned");
	evcnt_attach_dynamic(&cache_ev_gc, EVCNT_TYPE_MISC, NULL,
	    "namecache", "entries collected");
	evcnt_attach_dynamic(&cache_ev_over, EVCNT_TYPE_MISC, NULL,
	    "namecache", "over scan target");
	evcnt_attach_dynamic(&cache_ev_under, EVCNT_TYPE_MISC, NULL,
	    "namecache", "under scan target");
	evcnt_attach_dynamic(&cache_ev_forced, EVCNT_TYPE_MISC, NULL,
	    "namecache", "forced reclaims");
}

static int
cache_ctor(void *arg, void *obj, int flag)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_init(&ncp->nc_lock, MUTEX_DEFAULT, IPL_NONE);

	return 0;
}

static void
cache_dtor(void *arg, void *obj)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_destroy(&ncp->nc_lock);
}

/*
 * Called once for each CPU in the system as it is attached.
 */
void
cache_cpu_init(struct cpu_info *ci)
{
	struct nchcpu *cpup;
	size_t sz;

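	/*
	 * Pad the allocation so the structure can be aligned on a
	 * coherency boundary: the per-CPU lock and statistics should
	 * not share a cache line with another CPU's data.
	 */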
	sz = roundup2(sizeof(*cpup), coherency_unit) + coherency_unit;
	cpup = kmem_zalloc(sz, KM_SLEEP);
	cpup = (void *)roundup2((uintptr_t)cpup, coherency_unit);
	mutex_init(&cpup->cpu_lock, MUTEX_DEFAULT, IPL_NONE);
	ci->ci_data.cpu_nch = cpup;
}

/*
 * Name cache reinitialization, for when the maximum number of vnodes
 * increases.
 */
void
nchreinit(void)
{
	struct namecache *ncp;
	struct nchashhead *oldhash1, *hash1;
	struct ncvhashhead *oldhash2, *hash2;
	u_long i, oldmask1, oldmask2, mask1, mask2;

	hash1 = hashinit(desiredvnodes, HASH_LIST, true, &mask1);
	hash2 =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &mask2);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &mask2);
#endif
	mutex_enter(namecache_lock);
	cache_lock_cpus();
	oldhash1 = nchashtbl;
	oldmask1 = nchash;
	nchashtbl = hash1;
	nchash = mask1;
	oldhash2 = ncvhashtbl;
	oldmask2 = ncvhash;
	ncvhashtbl = hash2;
	ncvhash = mask2;
	for (i = 0; i <= oldmask1; i++) {
		while ((ncp = LIST_FIRST(&oldhash1[i])) != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
	}
	for (i = 0; i <= oldmask2; i++) {
		while ((ncp = LIST_FIRST(&oldhash2[i])) != NULL) {
			LIST_REMOVE(ncp, nc_vhash);
			ncp->nc_vhash.le_prev = NULL;
		}
	}
	cache_unlock_cpus();
	mutex_exit(namecache_lock);
	hashdone(oldhash1, HASH_LIST, oldmask1);
	hashdone(oldhash2, HASH_LIST, oldmask2);
}


/*
 * Cache flush, a particular vnode; called when a vnode is renamed to
 * hide entries that would now be invalid.
 */
void
cache_purge1(struct vnode *vp, const struct componentname *cnp, int flags)
{
	struct namecache *ncp, *ncnext;

	mutex_enter(namecache_lock);
	if (flags & PURGE_PARENTS) {
		for (ncp = LIST_FIRST(&vp->v_nclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_vlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (flags & PURGE_CHILDREN) {
		for (ncp = LIST_FIRST(&vp->v_dnclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_dvlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (cnp != NULL) {
		ncp = cache_lookup_entry(vp, cnp);
		if (ncp) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			mutex_exit(&ncp->nc_lock);
		}
	}
	mutex_exit(namecache_lock);
}

/*
 * Cache flush, a whole file system; called when the file system is
 * unmounted to remove entries that would now be invalid.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct namecache *ncp, *nxtcp;

	mutex_enter(namecache_lock);
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL && ncp->nc_dvp->v_mount == mp) {
			/* Free the resources we had. */
			cache_invalidate(ncp);
			cache_disassociate(ncp);
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_reclaim();
	mutex_exit(namecache_lock);
}

/*
 * Scan the global list invalidating entries until we meet a preset
 * target.  Prefer to invalidate entries that have not scored a hit
 * within cache_hottime seconds.  We sort the LRU list only for this
 * routine's benefit.
 */
static void
cache_prune(int incache, int target)
{
	struct namecache *ncp, *nxtcp, *sentinel;
	int items, recent, tryharder;

	KASSERT(mutex_owned(namecache_lock));

	items = 0;
	tryharder = 0;
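	/* Entries hit within the last cache_hottime seconds are "hot". */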
	recent = hardclock_ticks - hz * cache_hottime;
	sentinel = NULL;
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		if (incache <= target)
			break;
		items++;
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		if (ncp->nc_dvp == NULL)
			continue;
		if (ncp == sentinel) {
			/*
			 * If we looped back on ourself, then ignore
			 * recent entries and purge whatever we find.
			 */
			tryharder = 1;
		}
		if (!tryharder && ncp->nc_hittime > recent) {
			if (sentinel == NULL)
				sentinel = ncp;
			TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
			TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
			continue;
		}
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			incache--;
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_ev_scan.ev_count += items;
}

/*
 * Collect dead cache entries from all CPUs and garbage collect.
 */
static void
cache_reclaim(void)
{
	struct namecache *ncp, *next;
	int items;

	KASSERT(mutex_owned(namecache_lock));

	/*
	 * If the number of extant entries not awaiting garbage collection
	 * exceeds the high water mark, then reclaim stale entries until we
	 * reach our low water mark.
	 */
	items = numcache - cache_gcpend;
	if (items > (uint64_t)desiredvnodes * cache_hiwat / 100) {
		cache_prune(items, (int)((uint64_t)desiredvnodes *
		    cache_lowat / 100));
		cache_ev_over.ev_count++;
	} else
		cache_ev_under.ev_count++;

	/*
	 * Stop forward lookup activity on all CPUs and garbage collect dead
	 * entries.
	 */
	cache_lock_cpus();
	ncp = cache_gcqueue;
	cache_gcqueue = NULL;
	items = cache_gcpend;
	cache_gcpend = 0;
	while (ncp != NULL) {
		next = ncp->nc_gcqueue;
		cache_disassociate(ncp);
		KASSERT(ncp->nc_dvp == NULL);
		if (ncp->nc_hash.le_prev != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
		pool_cache_put(namecache_cache, ncp);
		ncp = next;
	}
	cache_unlock_cpus();
	numcache -= items;
	cache_ev_gc.ev_count += items;
}


/*
 * Cache maintenance thread, awakening once per second to:
 *
 *	=> keep number of entries below the high water mark
 *	=> sort pseudo-LRU list
 *	=> garbage collect dead entries
 */
static void
cache_thread(void *arg)
{

	mutex_enter(namecache_lock);
	for (;;) {
		cache_reclaim();
		kpause("cachegc", false, hz, namecache_lock);
	}
}

#ifdef DDB
void
namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
{
	struct vnode *dvp = NULL;
	struct namecache *ncp;

	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp && ncp->nc_dvp != NULL) {
			(*pr)("name %.*s\n", ncp->nc_nlen, ncp->nc_name);
			dvp = ncp->nc_dvp;
		}
	}
	if (dvp == NULL) {
		(*pr)("name not found\n");
		return;
	}
	vp = dvp;
	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp) {
			(*pr)("parent %.*s\n", ncp->nc_nlen, ncp->nc_name);
		}
	}
}
#endif
