/*	$NetBSD: vfs_cache.c,v 1.83 2009/02/18 13:24:18 yamt Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.83 2009/02/18 13:24:18 yamt Exp $");

#include "opt_ddb.h"
#include "opt_revcache.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/kthread.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>

#define NAMECACHE_ENTER_REVERSE
/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  Cache is indexed by hash value
 * obtained from (dvp, name) where dvp refers to the directory
 * containing name.
 *
 * For simplicity (and economy of storage), names longer than
 * a maximum length of NCHNAMLEN are not cached; they occur
 * infrequently in any case, and are almost never of interest.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 * The entry is dropped also when it was not possible to lock
 * the cached vnode, either because vget() failed or the generation
 * number has changed while waiting for the lock.
 */
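
/*
 * Interfaces, in brief: forward lookups go through cache_lookup() and
 * cache_lookup_raw(); reverse lookups (vnode to name, for getcwd and
 * friends) go through cache_revlookup(); file systems feed the cache
 * via cache_enter() and purge it with cache_purge1()/cache_purgevfs().
 * Lookups run under a per-CPU lock plus the entry's nc_lock, while
 * insertion and reclamation are serialized by namecache_lock; a lookup
 * that must drop an entry merely nullifies it and queues it for the
 * garbage collector thread.
 */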

/*
 * Per-cpu namecache data.
 */
struct nchcpu {
	kmutex_t	cpu_lock;
	struct nchstats	cpu_stats;
};

/*
 * Structures associated with name caching.
 */
LIST_HEAD(nchashhead, namecache) *nchashtbl;
u_long	nchash;				/* size of hash table - 1 */
#define	NCHASH(cnp, dvp)	\
	(((cnp)->cn_hash ^ ((uintptr_t)(dvp) >> 3)) & nchash)

LIST_HEAD(ncvhashhead, namecache) *ncvhashtbl;
u_long	ncvhash;			/* size of hash table - 1 */
#define	NCVHASH(vp)		(((uintptr_t)(vp) >> 3) & ncvhash)
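
/*
 * Both hash functions discard the low-order bits of the vnode pointer
 * (always zero or nearly so, since vnodes are dynamically allocated
 * and therefore aligned) before folding it into the hash.
 */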

long	numcache;			/* number of cache entries allocated */
static u_int	cache_gcpend;		/* number of entries pending GC */
static void	*cache_gcqueue;		/* garbage collection queue */

TAILQ_HEAD(, namecache) nclruhead =	/* LRU chain */
	TAILQ_HEAD_INITIALIZER(nclruhead);
#define	COUNT(c,x)	(c.x++)
struct	nchstats nchstats;		/* cache effectiveness statistics */

static pool_cache_t namecache_cache;

int cache_lowat = 95;
int cache_hiwat = 98;
int cache_hottime = 5;			/* number of seconds */
int doingcache = 1;			/* 1 => enable the cache */

static struct evcnt cache_ev_scan;
static struct evcnt cache_ev_gc;
static struct evcnt cache_ev_over;
static struct evcnt cache_ev_under;
static struct evcnt cache_ev_forced;

/* A single lock to serialize modifications. */
static kmutex_t *namecache_lock;

static void	cache_invalidate(struct namecache *);
static inline struct namecache *cache_lookup_entry(
    const struct vnode *, const struct componentname *);
static void	cache_thread(void *);
static void	cache_invalidate(struct namecache *);
static void	cache_disassociate(struct namecache *);
static void	cache_reclaim(void);
static int	cache_ctor(void *, void *, int);
static void	cache_dtor(void *, void *);

/*
 * Invalidate a cache entry and enqueue it for garbage collection.
 */
static void
cache_invalidate(struct namecache *ncp)
{
	void *head;

	KASSERT(mutex_owned(&ncp->nc_lock));

	if (ncp->nc_dvp != NULL) {
		ncp->nc_vp = NULL;
		ncp->nc_dvp = NULL;
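		/*
		 * Queue the entry for garbage collection: push it onto
		 * the lock-free cache_gcqueue list with a compare-and-
		 * swap loop.  Only the entry's nc_lock is held here;
		 * cache_reclaim() drains the queue later with all
		 * lookup activity locked out.
		 */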
		do {
			head = cache_gcqueue;
			ncp->nc_gcqueue = head;
		} while (atomic_cas_ptr(&cache_gcqueue, head, ncp) != head);
		atomic_inc_uint(&cache_gcpend);
	}
}

/*
 * Disassociate a namecache entry from any vnodes it is attached to,
 * and remove from the global LRU list.
 */
static void
cache_disassociate(struct namecache *ncp)
{

	KASSERT(mutex_owned(namecache_lock));
	KASSERT(ncp->nc_dvp == NULL);

	if (ncp->nc_lru.tqe_prev != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		ncp->nc_lru.tqe_prev = NULL;
	}
	if (ncp->nc_vhash.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vhash);
		ncp->nc_vhash.le_prev = NULL;
	}
	if (ncp->nc_vlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vlist);
		ncp->nc_vlist.le_prev = NULL;
	}
	if (ncp->nc_dvlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_dvlist);
		ncp->nc_dvlist.le_prev = NULL;
	}
}

/*
 * Lock all CPUs to prevent any cache lookup activity.  Conceptually,
 * this locks out all "readers".
 */
static void
cache_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;
	long *s, *d, *m;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_enter(&cpup->cpu_lock);

		/* Collate statistics. */
		d = (long *)&nchstats;
		s = (long *)&cpup->cpu_stats;
		m = s + sizeof(nchstats) / sizeof(long);
		for (; s < m; s++, d++) {
			*d += *s;
			*s = 0;
		}
	}
}

/*
 * Release all CPU locks.
 */
static void
cache_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_exit(&cpup->cpu_lock);
	}
}

/*
 * Find a single cache entry and return it locked.  'namecache_lock' or
 * at least one of the per-CPU locks must be held.
 */
static struct namecache *
cache_lookup_entry(const struct vnode *dvp, const struct componentname *cnp)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;

	ncpp = &nchashtbl[NCHASH(cnp, dvp)];

	LIST_FOREACH(ncp, ncpp, nc_hash) {
		if (ncp->nc_dvp != dvp ||
		    ncp->nc_nlen != cnp->cn_namelen ||
		    memcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen))
			continue;
		mutex_enter(&ncp->nc_lock);
		if (__predict_true(ncp->nc_dvp == dvp)) {
			ncp->nc_hittime = hardclock_ticks;
			return ncp;
		}
		/* Raced: entry has been nullified. */
		mutex_exit(&ncp->nc_lock);
	}

	return NULL;
}

/*
 * Look for the name in the cache.  We don't do this
 * if the segment name is long, simply so the cache can avoid
 * holding long names (which would either waste space, or
 * add greatly to the complexity).
 *
 * Lookup is called with dvp pointing to the directory to search,
 * and cnp describing the name: cn_nameptr points to the name of the
 * entry being sought, cn_namelen gives its length, and cn_hash holds
 * a hash of the name.  If the lookup succeeds, the vnode is locked,
 * stored in *vpp and a status of zero is returned.  If the lock on
 * the resolved vnode cannot be obtained, the entry is treated as a
 * miss: the reference is dropped and a status of -1 is returned.
 * If the lookup determines that the name does not exist (negative
 * caching), a status of ENOENT is returned.  If the lookup fails,
 * a status of -1 is returned.
 */
int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error;

	if (__predict_false(!doingcache)) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return -1;
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(cnp->cn_namelen > NCHNAMLEN)) {
		COUNT(cpup->cpu_stats, ncs_long);
		cnp->cn_flags &= ~MAKEENTRY;
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	ncp = cache_lookup_entry(dvp, cnp);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup->cpu_stats, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		COUNT(cpup->cpu_stats, ncs_badhits);
		/*
		 * Last component and we are renaming or deleting,
		 * the cache entry is invalid, or otherwise don't
		 * want cache entry to exist.
		 */
		cache_invalidate(ncp);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	} else if (ncp->nc_vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
		cnp->cn_flags |= ncp->nc_flags;
		if (__predict_true(cnp->cn_nameiop != CREATE ||
		    (cnp->cn_flags & ISLASTCN) == 0)) {
			COUNT(cpup->cpu_stats, ncs_neghits);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(&cpup->cpu_lock);
			return ENOENT;
		} else {
			COUNT(cpup->cpu_stats, ncs_badhits);
			/*
			 * Last component and we are renaming or
			 * deleting, the cache entry is invalid,
			 * or otherwise don't want cache entry to
			 * exist.
			 */
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(&cpup->cpu_lock);
			*vpp = NULL;
			return -1;
		}
	}

	vp = ncp->nc_vp;
	if (vtryget(vp)) {
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
	} else {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		error = vget(vp, LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			KASSERT(error == EBUSY);
			/*
			 * This vnode is being cleaned out.
			 * XXX badhits?
			 */
			COUNT(cpup->cpu_stats, ncs_falsehits);
			*vpp = NULL;
			return -1;
		}
	}

#ifdef DEBUG
	/*
	 * Since we released ncp->nc_lock, we can't use this pointer
	 * any more.
	 */
	ncp = NULL;
#endif /* DEBUG */

	if (vp == dvp) {	/* lookup on "." */
		error = 0;
	} else if (cnp->cn_flags & ISDOTDOT) {
		VOP_UNLOCK(dvp, 0);
		error = vn_lock(vp, LK_EXCLUSIVE);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		error = vn_lock(vp, LK_EXCLUSIVE);
	}

	/*
	 * Check that the lock succeeded.
	 */
	if (error) {
		/* Unlocked, but only for stats. */
		COUNT(cpup->cpu_stats, ncs_badhits);
		vrele(vp);
		*vpp = NULL;
		return -1;
	}

	/* Unlocked, but only for stats. */
	COUNT(cpup->cpu_stats, ncs_goodhits);
	*vpp = vp;
	return 0;
}

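/*
 * Illustrative sketch (not part of this file) of how a file system's
 * VOP_LOOKUP implementation typically consumes the tri-state return
 * value above; dp, vpp and cnp are placeholder names and the
 * surrounding vnode locking is omitted:
 *
 *	error = cache_lookup(dp, vpp, cnp);
 *	if (error >= 0)
 *		return error;
 *
 * A return of 0 means *vpp holds the locked, referenced vnode; ENOENT
 * is a negative cache hit; -1 means a miss, in which case the caller
 * scans the directory itself and normally calls cache_enter() with
 * the result.
 */
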
int
cache_lookup_raw(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error;

	if (__predict_false(!doingcache)) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return (-1);
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(cnp->cn_namelen > NCHNAMLEN)) {
		COUNT(cpup->cpu_stats, ncs_long);
		cnp->cn_flags &= ~MAKEENTRY;
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	ncp = cache_lookup_entry(dvp, cnp);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup->cpu_stats, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	vp = ncp->nc_vp;
	if (vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
		cnp->cn_flags |= ncp->nc_flags;
		COUNT(cpup->cpu_stats, ncs_neghits);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		return ENOENT;
	}
	if (vtryget(vp)) {
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
	} else {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		error = vget(vp, LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			KASSERT(error == EBUSY);
			/*
			 * This vnode is being cleaned out.
			 * XXX badhits?
			 */
			COUNT(cpup->cpu_stats, ncs_falsehits);
			*vpp = NULL;
			return -1;
		}
	}

	/* Unlocked, but only for stats. */
	COUNT(cpup->cpu_stats, ncs_goodhits);	/* XXX can be "badhits" */
	*vpp = vp;
	return 0;
}

/*
 * Scan cache looking for name of directory entry pointing at vp.
 *
 * Fill in dvpp.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 */
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
{
	struct namecache *ncp;
	struct vnode *dvp;
	struct ncvhashhead *nvcpp;
	char *bp;

	if (!doingcache)
		goto out;

	nvcpp = &ncvhashtbl[NCVHASH(vp)];

	mutex_enter(namecache_lock);
	LIST_FOREACH(ncp, nvcpp, nc_vhash) {
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_vp == vp &&
		    (dvp = ncp->nc_dvp) != NULL &&
		    dvp != vp) {		/* avoid pesky . entries.. */

#ifdef DIAGNOSTIC
			if (ncp->nc_nlen == 1 &&
			    ncp->nc_name[0] == '.')
				panic("cache_revlookup: found entry for .");

			if (ncp->nc_nlen == 2 &&
			    ncp->nc_name[0] == '.' &&
			    ncp->nc_name[1] == '.')
				panic("cache_revlookup: found entry for ..");
#endif
			COUNT(nchstats, ncs_revhits);

			if (bufp) {
				bp = *bpp;
				bp -= ncp->nc_nlen;
				if (bp <= bufp) {
					*dvpp = NULL;
					mutex_exit(&ncp->nc_lock);
					mutex_exit(namecache_lock);
					return (ERANGE);
				}
				memcpy(bp, ncp->nc_name, ncp->nc_nlen);
				*bpp = bp;
			}

			/* XXX MP: how do we know dvp won't evaporate? */
			*dvpp = dvp;
			mutex_exit(&ncp->nc_lock);
			mutex_exit(namecache_lock);
			return (0);
		}
		mutex_exit(&ncp->nc_lock);
	}
	COUNT(nchstats, ncs_revmiss);
	mutex_exit(namecache_lock);
 out:
	*dvpp = NULL;
	return (-1);
}

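/*
 * Illustrative sketch (not part of this file) of the backwards buffer
 * convention expected by getcwd-style callers of cache_revlookup();
 * "path", "bp" and "dvp" are placeholder names:
 *
 *	char path[MAXPATHLEN], *bp;
 *
 *	bp = path + sizeof(path);
 *	*--bp = '\0';
 *	error = cache_revlookup(vp, &dvp, &bp, path);
 *
 * On success the component name has been copied in immediately below
 * the old *bp and bp now points at its first character; the caller
 * prepends a '/' and repeats with dvp until it reaches the root.
 */
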
/*
 * Add an entry to the cache
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct namecache *oncp;
	struct nchashhead *ncpp;
	struct ncvhashhead *nvcpp;

#ifdef DIAGNOSTIC
	if (cnp->cn_namelen > NCHNAMLEN)
		panic("cache_enter: name too long");
#endif
	if (!doingcache)
		return;

	if (numcache > desiredvnodes) {
		mutex_enter(namecache_lock);
		cache_ev_forced.ev_count++;
		cache_reclaim();
		mutex_exit(namecache_lock);
	}

	ncp = pool_cache_get(namecache_cache, PR_WAITOK);
	mutex_enter(namecache_lock);
	numcache++;

	/*
	 * Concurrent lookups in the same directory may race for a
	 * cache entry.  If there's a duplicate entry, free it.
	 */
	oncp = cache_lookup_entry(dvp, cnp);
	if (oncp) {
		cache_invalidate(oncp);
		mutex_exit(&oncp->nc_lock);
	}

	/* Grab the vnode we just found. */
	mutex_enter(&ncp->nc_lock);
	ncp->nc_vp = vp;
	ncp->nc_flags = 0;
	ncp->nc_hittime = 0;
	ncp->nc_gcqueue = NULL;
	if (vp == NULL) {
		/*
		 * For negative hits, save the ISWHITEOUT flag so we can
		 * restore it later when the cache entry is used again.
		 */
		ncp->nc_flags = cnp->cn_flags & ISWHITEOUT;
	}
	/* Fill in cache info. */
	ncp->nc_dvp = dvp;
	LIST_INSERT_HEAD(&dvp->v_dnclist, ncp, nc_dvlist);
	if (vp)
		LIST_INSERT_HEAD(&vp->v_nclist, ncp, nc_vlist);
	else {
		ncp->nc_vlist.le_prev = NULL;
		ncp->nc_vlist.le_next = NULL;
	}
	ncp->nc_nlen = cnp->cn_namelen;
	TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	memcpy(ncp->nc_name, cnp->cn_nameptr, (unsigned)ncp->nc_nlen);
	ncpp = &nchashtbl[NCHASH(cnp, dvp)];

	/*
	 * Flush updates before making visible in table.  No need for a
	 * memory barrier on the other side: to see modifications the
	 * list must be followed, meaning a dependent pointer load.
	 * The below is LIST_INSERT_HEAD() inlined, with the memory
	 * barrier included in the correct place.
	 */
	if ((ncp->nc_hash.le_next = ncpp->lh_first) != NULL)
		ncpp->lh_first->nc_hash.le_prev = &ncp->nc_hash.le_next;
	ncp->nc_hash.le_prev = &ncpp->lh_first;
	membar_producer();
	ncpp->lh_first = ncp;

	ncp->nc_vhash.le_prev = NULL;
	ncp->nc_vhash.le_next = NULL;

	/*
	 * Create reverse-cache entries (used in getcwd) for directories.
	 * (and in linux procfs exe node)
	 */
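	/*
	 * The length/character checks below skip "." and ".." so that
	 * they never acquire reverse-cache entries.
	 */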
	if (vp != NULL &&
	    vp != dvp &&
#ifndef NAMECACHE_ENTER_REVERSE
	    vp->v_type == VDIR &&
#endif
	    (ncp->nc_nlen > 2 ||
	    (ncp->nc_nlen > 1 && ncp->nc_name[1] != '.') ||
	    (/* ncp->nc_nlen > 0 && */ ncp->nc_name[0] != '.'))) {
		nvcpp = &ncvhashtbl[NCVHASH(vp)];
		LIST_INSERT_HEAD(nvcpp, ncp, nc_vhash);
	}
	mutex_exit(&ncp->nc_lock);
	mutex_exit(namecache_lock);
}

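/*
 * Illustrative sketch (not part of this file) of the usual pairing in
 * a file system's lookup routine once a directory scan has resolved
 * (or failed to resolve) a component; dp, vp and cnp are placeholder
 * names:
 *
 *	if ((cnp->cn_flags & MAKEENTRY) != 0)
 *		cache_enter(dp, vp, cnp);
 *
 * Passing vp == NULL records a negative entry, which later lets
 * cache_lookup() return ENOENT without scanning the directory again.
 */
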
/*
 * Name cache initialization, from vfs_init() when we are booting
 */
void
nchinit(void)
{
	int error;

	namecache_cache = pool_cache_init(sizeof(struct namecache),
	    coherency_unit, 0, 0, "ncache", NULL, IPL_NONE, cache_ctor,
	    cache_dtor, NULL);
	KASSERT(namecache_cache != NULL);

	namecache_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	nchashtbl = hashinit(desiredvnodes, HASH_LIST, true, &nchash);
	ncvhashtbl =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &ncvhash);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &ncvhash);
#endif

	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, cache_thread,
	    NULL, NULL, "cachegc");
	if (error != 0)
		panic("nchinit %d", error);

	evcnt_attach_dynamic(&cache_ev_scan, EVCNT_TYPE_MISC, NULL,
	    "namecache", "entries scanned");
	evcnt_attach_dynamic(&cache_ev_gc, EVCNT_TYPE_MISC, NULL,
	    "namecache", "entries collected");
	evcnt_attach_dynamic(&cache_ev_over, EVCNT_TYPE_MISC, NULL,
	    "namecache", "over scan target");
	evcnt_attach_dynamic(&cache_ev_under, EVCNT_TYPE_MISC, NULL,
	    "namecache", "under scan target");
	evcnt_attach_dynamic(&cache_ev_forced, EVCNT_TYPE_MISC, NULL,
	    "namecache", "forced reclaims");
}

static int
cache_ctor(void *arg, void *obj, int flag)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_init(&ncp->nc_lock, MUTEX_DEFAULT, IPL_NONE);

	return 0;
}

static void
cache_dtor(void *arg, void *obj)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_destroy(&ncp->nc_lock);
}

/*
 * Called once for each CPU in the system as attached.
 */
void
cache_cpu_init(struct cpu_info *ci)
{
	struct nchcpu *cpup;
	size_t sz;

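	/*
	 * Over-allocate and then round the pointer up so that the
	 * per-CPU structure starts on a cache line (coherency_unit)
	 * boundary, avoiding false sharing with neighbouring
	 * allocations.
	 */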
	sz = roundup2(sizeof(*cpup), coherency_unit) + coherency_unit;
	cpup = kmem_zalloc(sz, KM_SLEEP);
	cpup = (void *)roundup2((uintptr_t)cpup, coherency_unit);
	mutex_init(&cpup->cpu_lock, MUTEX_DEFAULT, IPL_NONE);
	ci->ci_data.cpu_nch = cpup;
}

/*
 * Name cache reinitialization, for when the maximum number of vnodes increases.
 */
void
nchreinit(void)
{
	struct namecache *ncp;
	struct nchashhead *oldhash1, *hash1;
	struct ncvhashhead *oldhash2, *hash2;
	u_long i, oldmask1, oldmask2, mask1, mask2;

	hash1 = hashinit(desiredvnodes, HASH_LIST, true, &mask1);
	hash2 =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &mask2);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &mask2);
#endif
	mutex_enter(namecache_lock);
	cache_lock_cpus();
	oldhash1 = nchashtbl;
	oldmask1 = nchash;
	nchashtbl = hash1;
	nchash = mask1;
	oldhash2 = ncvhashtbl;
	oldmask2 = ncvhash;
	ncvhashtbl = hash2;
	ncvhash = mask2;
	for (i = 0; i <= oldmask1; i++) {
		while ((ncp = LIST_FIRST(&oldhash1[i])) != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
	}
	for (i = 0; i <= oldmask2; i++) {
		while ((ncp = LIST_FIRST(&oldhash2[i])) != NULL) {
			LIST_REMOVE(ncp, nc_vhash);
			ncp->nc_vhash.le_prev = NULL;
		}
	}
	cache_unlock_cpus();
	mutex_exit(namecache_lock);
	hashdone(oldhash1, HASH_LIST, oldmask1);
	hashdone(oldhash2, HASH_LIST, oldmask2);
}

/*
 * Cache flush, a particular vnode; called when a vnode is renamed to
 * hide entries that would now be invalid
 */
void
cache_purge1(struct vnode *vp, const struct componentname *cnp, int flags)
{
	struct namecache *ncp, *ncnext;

	mutex_enter(namecache_lock);
	if (flags & PURGE_PARENTS) {
		for (ncp = LIST_FIRST(&vp->v_nclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_vlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (flags & PURGE_CHILDREN) {
		for (ncp = LIST_FIRST(&vp->v_dnclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_dvlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (cnp != NULL) {
		ncp = cache_lookup_entry(vp, cnp);
		if (ncp) {
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	mutex_exit(namecache_lock);
}

/*
 * Cache flush, a whole filesystem; called when filesys is umounted to
 * remove entries that would now be invalid.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct namecache *ncp, *nxtcp;

	mutex_enter(namecache_lock);
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL && ncp->nc_dvp->v_mount == mp) {
			/* Free the resources we had. */
			cache_invalidate(ncp);
			cache_disassociate(ncp);
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_reclaim();
	mutex_exit(namecache_lock);
}

/*
 * Scan global list invalidating entries until we meet a preset target.
 * Prefer to invalidate entries that have not scored a hit within
 * cache_hottime seconds.  We sort the LRU list only for this routine's
 * benefit.
 */
static void
cache_prune(int incache, int target)
{
	struct namecache *ncp, *nxtcp, *sentinel;
	int items, recent, tryharder;

	KASSERT(mutex_owned(namecache_lock));

	items = 0;
	tryharder = 0;
	recent = hardclock_ticks - hz * cache_hottime;
	sentinel = NULL;
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		if (incache <= target)
			break;
		items++;
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		if (ncp->nc_dvp == NULL)
			continue;
		if (ncp == sentinel) {
			/*
			 * If we looped back on ourself, then ignore
			 * recent entries and purge whatever we find.
			 */
			tryharder = 1;
		}
		if (!tryharder && (ncp->nc_hittime - recent) > 0) {
			if (sentinel == NULL)
				sentinel = ncp;
			TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
			TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
			continue;
		}
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			incache--;
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_ev_scan.ev_count += items;
}

/*
 * Collect dead cache entries from all CPUs and garbage collect.
 */
static void
cache_reclaim(void)
{
	struct namecache *ncp, *next;
	int items;

	KASSERT(mutex_owned(namecache_lock));

	/*
	 * If the number of extant entries not awaiting garbage collection
	 * exceeds the high water mark, then reclaim stale entries until we
	 * reach our low water mark.
	 */
	items = numcache - cache_gcpend;
	if (items > (uint64_t)desiredvnodes * cache_hiwat / 100) {
		cache_prune(items, (int)((uint64_t)desiredvnodes *
		    cache_lowat / 100));
		cache_ev_over.ev_count++;
	} else
		cache_ev_under.ev_count++;

	/*
	 * Stop forward lookup activity on all CPUs and garbage collect dead
	 * entries.
	 */
	cache_lock_cpus();
	ncp = cache_gcqueue;
	cache_gcqueue = NULL;
	items = cache_gcpend;
	cache_gcpend = 0;
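
	/*
	 * With all readers locked out, walk the detached collection
	 * queue built by cache_invalidate(), unhook each entry from
	 * the hash chains and lists, and return it to the pool.
	 */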
	while (ncp != NULL) {
		next = ncp->nc_gcqueue;
		cache_disassociate(ncp);
		KASSERT(ncp->nc_dvp == NULL);
		if (ncp->nc_hash.le_prev != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
		pool_cache_put(namecache_cache, ncp);
		ncp = next;
	}
	cache_unlock_cpus();
	numcache -= items;
	cache_ev_gc.ev_count += items;
}

/*
 * Cache maintenance thread, awakening once per second to:
 *
 *	=> keep number of entries below the high water mark
 *	=> sort pseudo-LRU list
 *	=> garbage collect dead entries
 */
static void
cache_thread(void *arg)
{

	mutex_enter(namecache_lock);
	for (;;) {
		cache_reclaim();
		kpause("cachegc", false, hz, namecache_lock);
	}
}

#ifdef DDB
void
namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
{
	struct vnode *dvp = NULL;
	struct namecache *ncp;

	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp && ncp->nc_dvp != NULL) {
			(*pr)("name %.*s\n", ncp->nc_nlen, ncp->nc_name);
			dvp = ncp->nc_dvp;
		}
	}
	if (dvp == NULL) {
		(*pr)("name not found\n");
		return;
	}
	vp = dvp;
	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp) {
			(*pr)("parent %.*s\n", ncp->nc_nlen, ncp->nc_name);
		}
	}
}
#endif