/*	$NetBSD: vfs_cache.c,v 1.77 2008/06/03 15:50:22 ad Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.77 2008/06/03 15:50:22 ad Exp $");

#include "opt_ddb.h"
#include "opt_revcache.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/kthread.h>
#include <sys/kernel.h>
#include <sys/kmem.h>		/* kmem_zalloc() in cache_cpu_init() */
#include <sys/cpu.h>
#include <sys/evcnt.h>

#define NAMECACHE_ENTER_REVERSE
/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  Cache is indexed by hash value
 * obtained from (dvp, name) where dvp refers to the directory
 * containing name.
 *
 * For simplicity (and economy of storage), names longer than
 * a maximum length of NCHNAMLEN are not cached; they occur
 * infrequently in any case, and are almost never of interest.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 * The entry is also dropped when it was not possible to lock
 * the cached vnode, either because vget() failed or the generation
 * number has changed while waiting for the lock.
 */
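
/*
 * Illustrative sketch, not part of this file: a file system's lookup
 * routine typically consults and maintains the cache along these lines,
 * where fs_scandir() stands in for a hypothetical directory scan:
 *
 *	error = cache_lookup(dvp, vpp, cnp);
 *	if (error >= 0)
 *		return error;			(hit: 0, or ENOENT if negative)
 *	error = fs_scandir(dvp, vpp, cnp);	(miss: do the real work)
 *	if ((cnp->cn_flags & MAKEENTRY) != 0)
 *		cache_enter(dvp, error ? NULL : *vpp, cnp);
 *	return error;
 */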

/*
 * Per-cpu namecache data.
 */
struct nchcpu {
	kmutex_t	cpu_lock;
	struct nchstats	cpu_stats;
};

/*
 * Structures associated with name caching.
 */
LIST_HEAD(nchashhead, namecache) *nchashtbl;
u_long	nchash;				/* size of hash table - 1 */
#define	NCHASH(cnp, dvp)	\
	(((cnp)->cn_hash ^ ((uintptr_t)(dvp) >> 3)) & nchash)

LIST_HEAD(ncvhashhead, namecache) *ncvhashtbl;
u_long	ncvhash;			/* size of hash table - 1 */
#define	NCVHASH(vp)		(((uintptr_t)(vp) >> 3) & ncvhash)

long	numcache;			/* number of cache entries allocated */
static u_int	cache_gcpend;		/* number of entries pending GC */
static void	*cache_gcqueue;		/* garbage collection queue */

TAILQ_HEAD(, namecache) nclruhead =	/* LRU chain */
	TAILQ_HEAD_INITIALIZER(nclruhead);
#define	COUNT(c,x)	(c.x++)
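/*
 * COUNT() is a plain, non-atomic increment: callers normally hold the
 * per-CPU or the global namecache lock, although a few hit counters
 * are bumped unlocked for speed (see cache_lookup()).
 */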
struct	nchstats nchstats;		/* cache effectiveness statistics */

static pool_cache_t namecache_cache;

MALLOC_DEFINE(M_CACHE, "namecache", "Dynamically allocated cache entries");

int cache_lowat = 95;
int cache_hiwat = 98;
int cache_hottime = 5;			/* number of seconds */
int doingcache = 1;			/* 1 => enable the cache */

static struct evcnt cache_ev_scan;
static struct evcnt cache_ev_gc;
static struct evcnt cache_ev_over;
static struct evcnt cache_ev_under;
static struct evcnt cache_ev_forced;

/* A single lock to serialize modifications. */
static kmutex_t *namecache_lock;

static void cache_invalidate(struct namecache *);
static inline struct namecache *cache_lookup_entry(
    const struct vnode *, const struct componentname *);
static void cache_thread(void *);
static void cache_disassociate(struct namecache *);
static void cache_reclaim(void);
static int cache_ctor(void *, void *, int);
static void cache_dtor(void *, void *);

/*
 * Invalidate a cache entry and enqueue it for garbage collection.
 */
static void
cache_invalidate(struct namecache *ncp)
{
	void *head;

	KASSERT(mutex_owned(&ncp->nc_lock));

	if (ncp->nc_dvp != NULL) {
		ncp->nc_vp = NULL;
		ncp->nc_dvp = NULL;
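		/*
		 * Lock-free push onto the pending-GC list: retry the
		 * CAS if another thread pushed an entry concurrently.
		 */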
		do {
			head = cache_gcqueue;
			ncp->nc_gcqueue = head;
		} while (atomic_cas_ptr(&cache_gcqueue, head, ncp) != head);
		atomic_inc_uint(&cache_gcpend);
	}
}

/*
 * Disassociate a namecache entry from any vnodes it is attached to,
 * and remove from the global LRU list.
 */
static void
cache_disassociate(struct namecache *ncp)
{

	KASSERT(mutex_owned(namecache_lock));
	KASSERT(ncp->nc_dvp == NULL);

	if (ncp->nc_lru.tqe_prev != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		ncp->nc_lru.tqe_prev = NULL;
	}
	if (ncp->nc_vhash.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vhash);
		ncp->nc_vhash.le_prev = NULL;
	}
	if (ncp->nc_vlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vlist);
		ncp->nc_vlist.le_prev = NULL;
	}
	if (ncp->nc_dvlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_dvlist);
		ncp->nc_dvlist.le_prev = NULL;
	}
}

/*
 * Lock all CPUs to prevent any cache lookup activity.  Conceptually,
 * this locks out all "readers".
 */
static void
cache_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;
	long *s, *d, *m;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_enter(&cpup->cpu_lock);

		/* Collate statistics. */
		d = (long *)&nchstats;
		s = (long *)&cpup->cpu_stats;
		m = s + sizeof(nchstats) / sizeof(long);
		for (; s < m; s++, d++) {
			*d += *s;
			*s = 0;
		}
	}
}

/*
 * Release all CPU locks.
 */
static void
cache_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_exit(&cpup->cpu_lock);
	}
}

/*
 * Find a single cache entry and return it locked.  'namecache_lock' or
 * at least one of the per-CPU locks must be held.
 */
static struct namecache *
cache_lookup_entry(const struct vnode *dvp, const struct componentname *cnp)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;

	ncpp = &nchashtbl[NCHASH(cnp, dvp)];

	LIST_FOREACH(ncp, ncpp, nc_hash) {
		if (ncp->nc_dvp != dvp ||
		    ncp->nc_nlen != cnp->cn_namelen ||
		    memcmp(ncp->nc_name, cnp->cn_nameptr, (u_int)ncp->nc_nlen))
			continue;
		mutex_enter(&ncp->nc_lock);
		if (__predict_true(ncp->nc_dvp == dvp)) {
			ncp->nc_hittime = hardclock_ticks;
			return ncp;
		}
		/* Raced: entry has been nullified. */
		mutex_exit(&ncp->nc_lock);
	}

	return NULL;
}

/*
 * Look for the name in the cache.  We don't do this
 * if the segment name is long, simply so the cache can avoid
 * holding long names (which would either waste space, or
 * add greatly to the complexity).
 *
 * Lookup is called with ni_dvp pointing to the directory to search,
 * ni_ptr pointing to the name of the entry being sought, ni_namelen
 * tells the length of the name, and ni_hash contains a hash of
 * the name.  If the lookup succeeds, the vnode is locked, stored in ni_vp
 * and a status of zero is returned.  If the locking fails for whatever
 * reason, the vnode is unlocked and the error is returned to caller.
 * If the lookup determines that the name does not exist (negative caching),
 * a status of ENOENT is returned.  If the lookup fails, a status of -1
 * is returned.
 */
int
cache_lookup(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error;

	if (__predict_false(!doingcache)) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return -1;
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(cnp->cn_namelen > NCHNAMLEN)) {
		COUNT(cpup->cpu_stats, ncs_long);
		cnp->cn_flags &= ~MAKEENTRY;
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	ncp = cache_lookup_entry(dvp, cnp);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup->cpu_stats, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	if ((cnp->cn_flags & MAKEENTRY) == 0) {
		COUNT(cpup->cpu_stats, ncs_badhits);
		/*
		 * Last component and we are renaming or deleting,
		 * the cache entry is invalid, or otherwise don't
		 * want cache entry to exist.
		 */
		cache_invalidate(ncp);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	} else if (ncp->nc_vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		cnp->cn_flags |= ncp->nc_flags;
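		/*
		 * Negative hit: the name is known not to exist.  The
		 * entry can be used unless we are about to create the
		 * name as the last component of the path.
		 */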
		if (__predict_true(cnp->cn_nameiop != CREATE ||
		    (cnp->cn_flags & ISLASTCN) == 0)) {
			COUNT(cpup->cpu_stats, ncs_neghits);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(&cpup->cpu_lock);
			return ENOENT;
		} else {
			COUNT(cpup->cpu_stats, ncs_badhits);
			/*
			 * Last component and we are renaming or
			 * deleting, the cache entry is invalid,
			 * or otherwise don't want cache entry to
			 * exist.
			 */
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(&cpup->cpu_lock);
			*vpp = NULL;
			return -1;
		}
	}

	vp = ncp->nc_vp;
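	/*
	 * Take a hold on the vnode before the namecache locks are
	 * dropped.  If the cheap vtryget() fails, fall back to vget()
	 * with the vnode's interlock held.
	 */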
	if (vtryget(vp)) {
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
	} else {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		error = vget(vp, LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			KASSERT(error == EBUSY);
			/*
			 * This vnode is being cleaned out.
			 * XXX badhits?
			 */
			COUNT(cpup->cpu_stats, ncs_falsehits);
			*vpp = NULL;
			return -1;
		}
	}

#ifdef DEBUG
	/*
	 * Since we released ncp->nc_lock,
	 * we can't use this pointer any more.
	 */
	ncp = NULL;
#endif /* DEBUG */

	if (vp == dvp) {	/* lookup on "." */
		error = 0;
	} else if (cnp->cn_flags & ISDOTDOT) {
		VOP_UNLOCK(dvp, 0);
		error = vn_lock(vp, LK_EXCLUSIVE);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		error = vn_lock(vp, LK_EXCLUSIVE);
	}

	/*
	 * Check that the lock succeeded.
	 */
	if (error) {
		/* Unlocked, but only for stats. */
		COUNT(cpup->cpu_stats, ncs_badhits);
		*vpp = NULL;
		return -1;
	}

	/* Unlocked, but only for stats. */
	COUNT(cpup->cpu_stats, ncs_goodhits);
	*vpp = vp;
	return 0;
}

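/*
 * Variant of cache_lookup() that does not lock the vnode it returns:
 * the vnode is only referenced.  The MAKEENTRY and CREATE special
 * cases handled by cache_lookup() are not applied here.
 */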
int
cache_lookup_raw(struct vnode *dvp, struct vnode **vpp,
    struct componentname *cnp)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error;

	if (__predict_false(!doingcache)) {
		cnp->cn_flags &= ~MAKEENTRY;
		*vpp = NULL;
		return (-1);
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(cnp->cn_namelen > NCHNAMLEN)) {
		COUNT(cpup->cpu_stats, ncs_long);
		cnp->cn_flags &= ~MAKEENTRY;
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	ncp = cache_lookup_entry(dvp, cnp);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup->cpu_stats, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		*vpp = NULL;
		return -1;
	}
	vp = ncp->nc_vp;
	if (vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		cnp->cn_flags |= ncp->nc_flags;
		COUNT(cpup->cpu_stats, ncs_neghits);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		return ENOENT;
	}
	if (vtryget(vp)) {
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
	} else {
		mutex_enter(&vp->v_interlock);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		error = vget(vp, LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			KASSERT(error == EBUSY);
			/*
			 * This vnode is being cleaned out.
			 * XXX badhits?
			 */
			COUNT(cpup->cpu_stats, ncs_falsehits);
			*vpp = NULL;
			return -1;
		}
	}

	*vpp = vp;
	return 0;
}

/*
 * Scan cache looking for name of directory entry pointing at vp.
 *
 * Fill in dvpp.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 */
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
{
	struct namecache *ncp;
	struct vnode *dvp;
	struct ncvhashhead *nvcpp;
	char *bp;

	if (!doingcache)
		goto out;

	nvcpp = &ncvhashtbl[NCVHASH(vp)];

	mutex_enter(namecache_lock);
	LIST_FOREACH(ncp, nvcpp, nc_vhash) {
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_vp == vp &&
		    (dvp = ncp->nc_dvp) != NULL &&
		    dvp != vp) {		/* avoid pesky . entries.. */

#ifdef DIAGNOSTIC
			if (ncp->nc_nlen == 1 &&
			    ncp->nc_name[0] == '.')
				panic("cache_revlookup: found entry for .");

			if (ncp->nc_nlen == 2 &&
			    ncp->nc_name[0] == '.' &&
			    ncp->nc_name[1] == '.')
				panic("cache_revlookup: found entry for ..");
#endif
			COUNT(nchstats, ncs_revhits);

			if (bufp) {
				bp = *bpp;
				bp -= ncp->nc_nlen;
				if (bp <= bufp) {
					*dvpp = NULL;
					mutex_exit(&ncp->nc_lock);
					mutex_exit(namecache_lock);
					return (ERANGE);
				}
				memcpy(bp, ncp->nc_name, ncp->nc_nlen);
				*bpp = bp;
			}

			/* XXX MP: how do we know dvp won't evaporate? */
			*dvpp = dvp;
			mutex_exit(&ncp->nc_lock);
			mutex_exit(namecache_lock);
			return (0);
		}
		mutex_exit(&ncp->nc_lock);
	}
	COUNT(nchstats, ncs_revmiss);
	mutex_exit(namecache_lock);
 out:
	*dvpp = NULL;
	return (-1);
}

/*
 * Add an entry to the cache.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp, struct componentname *cnp)
{
	struct namecache *ncp;
	struct namecache *oncp;
	struct nchashhead *ncpp;
	struct ncvhashhead *nvcpp;

#ifdef DIAGNOSTIC
	if (cnp->cn_namelen > NCHNAMLEN)
		panic("cache_enter: name too long");
#endif
	if (!doingcache)
		return;

	if (numcache > desiredvnodes) {
		mutex_enter(namecache_lock);
		cache_ev_forced.ev_count++;
		cache_reclaim();
		mutex_exit(namecache_lock);
	}

	ncp = pool_cache_get(namecache_cache, PR_WAITOK);
	mutex_enter(namecache_lock);
	numcache++;

	/*
	 * Concurrent lookups in the same directory may race for a
	 * cache entry.  If there's a duplicated entry, free it.
	 */
	oncp = cache_lookup_entry(dvp, cnp);
	if (oncp) {
		cache_invalidate(oncp);
		mutex_exit(&oncp->nc_lock);
	}

	/* Grab the vnode we just found. */
	mutex_enter(&ncp->nc_lock);
	ncp->nc_vp = vp;
	ncp->nc_flags = 0;
	ncp->nc_hittime = 0;
	ncp->nc_gcqueue = NULL;
	if (vp == NULL) {
		/*
		 * For negative hits, save the ISWHITEOUT flag so we can
		 * restore it later when the cache entry is used again.
		 */
		ncp->nc_flags = cnp->cn_flags & ISWHITEOUT;
	}
	/* Fill in cache info. */
	ncp->nc_dvp = dvp;
	LIST_INSERT_HEAD(&dvp->v_dnclist, ncp, nc_dvlist);
	if (vp)
		LIST_INSERT_HEAD(&vp->v_nclist, ncp, nc_vlist);
	else {
		ncp->nc_vlist.le_prev = NULL;
		ncp->nc_vlist.le_next = NULL;
	}
	ncp->nc_nlen = cnp->cn_namelen;
	TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	memcpy(ncp->nc_name, cnp->cn_nameptr, (unsigned)ncp->nc_nlen);
	ncpp = &nchashtbl[NCHASH(cnp, dvp)];

	/*
	 * Flush updates before making visible in table.  No need for a
	 * memory barrier on the other side: to see modifications the
	 * list must be followed, meaning a dependent pointer load.
	 * The below is LIST_INSERT_HEAD() inlined, with the memory
	 * barrier included in the correct place.
	 */
	if ((ncp->nc_hash.le_next = ncpp->lh_first) != NULL)
		ncpp->lh_first->nc_hash.le_prev = &ncp->nc_hash.le_next;
	ncp->nc_hash.le_prev = &ncpp->lh_first;
	membar_producer();
	ncpp->lh_first = ncp;

	ncp->nc_vhash.le_prev = NULL;
	ncp->nc_vhash.le_next = NULL;

	/*
	 * Create reverse-cache entries (used in getcwd) for directories
	 * (and by the Linux procfs exe node).
	 */
	if (vp != NULL &&
	    vp != dvp &&
#ifndef NAMECACHE_ENTER_REVERSE
	    vp->v_type == VDIR &&
#endif
	    (ncp->nc_nlen > 2 ||
	    (ncp->nc_nlen > 1 && ncp->nc_name[1] != '.') ||
	    (/* ncp->nc_nlen > 0 && */ ncp->nc_name[0] != '.'))) {
		nvcpp = &ncvhashtbl[NCVHASH(vp)];
		LIST_INSERT_HEAD(nvcpp, ncp, nc_vhash);
	}
	mutex_exit(&ncp->nc_lock);
	mutex_exit(namecache_lock);
}

/*
 * Name cache initialization, from vfs_init() when we are booting
 */
void
nchinit(void)
{
	int error;

	namecache_cache = pool_cache_init(sizeof(struct namecache),
	    coherency_unit, 0, 0, "ncache", NULL, IPL_NONE, cache_ctor,
	    cache_dtor, NULL);
	KASSERT(namecache_cache != NULL);

	namecache_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	nchashtbl = hashinit(desiredvnodes, HASH_LIST, true, &nchash);
	ncvhashtbl =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &ncvhash);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &ncvhash);
#endif

	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, cache_thread,
	    NULL, NULL, "cachegc");
	if (error != 0)
		panic("nchinit %d", error);

	evcnt_attach_dynamic(&cache_ev_scan, EVCNT_TYPE_MISC, NULL,
	    "namecache", "entries scanned");
	evcnt_attach_dynamic(&cache_ev_gc, EVCNT_TYPE_MISC, NULL,
	    "namecache", "entries collected");
	evcnt_attach_dynamic(&cache_ev_over, EVCNT_TYPE_MISC, NULL,
	    "namecache", "over scan target");
	evcnt_attach_dynamic(&cache_ev_under, EVCNT_TYPE_MISC, NULL,
	    "namecache", "under scan target");
	evcnt_attach_dynamic(&cache_ev_forced, EVCNT_TYPE_MISC, NULL,
	    "namecache", "forced reclaims");
}

static int
cache_ctor(void *arg, void *obj, int flag)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_init(&ncp->nc_lock, MUTEX_DEFAULT, IPL_NONE);

	return 0;
}

static void
cache_dtor(void *arg, void *obj)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_destroy(&ncp->nc_lock);
}

/*
 * Called once for each CPU in the system as attached.
 */
void
cache_cpu_init(struct cpu_info *ci)
{
	struct nchcpu *cpup;
	size_t sz;

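	/* Pad and align to coherency_unit so CPUs do not share cache lines. */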
	sz = roundup2(sizeof(*cpup), coherency_unit) + coherency_unit;
	cpup = kmem_zalloc(sz, KM_SLEEP);
	cpup = (void *)roundup2((uintptr_t)cpup, coherency_unit);
	mutex_init(&cpup->cpu_lock, MUTEX_DEFAULT, IPL_NONE);
	ci->ci_data.cpu_nch = cpup;
}

/*
 * Name cache reinitialization, for when the maximum number of vnodes increases.
 */
void
nchreinit(void)
{
	struct namecache *ncp;
	struct nchashhead *oldhash1, *hash1;
	struct ncvhashhead *oldhash2, *hash2;
	u_long i, oldmask1, oldmask2, mask1, mask2;

	hash1 = hashinit(desiredvnodes, HASH_LIST, true, &mask1);
	hash2 =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &mask2);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &mask2);
#endif
	mutex_enter(namecache_lock);
	cache_lock_cpus();
	oldhash1 = nchashtbl;
	oldmask1 = nchash;
	nchashtbl = hash1;
	nchash = mask1;
	oldhash2 = ncvhashtbl;
	oldmask2 = ncvhash;
	ncvhashtbl = hash2;
	ncvhash = mask2;
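	/*
	 * Existing entries are not moved into the new tables: they are
	 * unhashed here, so lookups no longer find them, and they are
	 * eventually recycled via the LRU list.
	 */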
	for (i = 0; i <= oldmask1; i++) {
		while ((ncp = LIST_FIRST(&oldhash1[i])) != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
	}
	for (i = 0; i <= oldmask2; i++) {
		while ((ncp = LIST_FIRST(&oldhash2[i])) != NULL) {
			LIST_REMOVE(ncp, nc_vhash);
			ncp->nc_vhash.le_prev = NULL;
		}
	}
	cache_unlock_cpus();
	mutex_exit(namecache_lock);
	hashdone(oldhash1, HASH_LIST, oldmask1);
	hashdone(oldhash2, HASH_LIST, oldmask2);
}

/*
 * Cache flush, a particular vnode; called when a vnode is renamed to
 * hide entries that would now be invalid
 */
void
cache_purge1(struct vnode *vp, const struct componentname *cnp, int flags)
{
	struct namecache *ncp, *ncnext;

	mutex_enter(namecache_lock);
	if (flags & PURGE_PARENTS) {
		for (ncp = LIST_FIRST(&vp->v_nclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_vlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (flags & PURGE_CHILDREN) {
		for (ncp = LIST_FIRST(&vp->v_dnclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_dvlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (cnp != NULL) {
		ncp = cache_lookup_entry(vp, cnp);
		if (ncp) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			mutex_exit(&ncp->nc_lock);
		}
	}
	mutex_exit(namecache_lock);
}

/*
 * Cache flush, a whole filesystem; called when filesys is umounted to
 * remove entries that would now be invalid.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct namecache *ncp, *nxtcp;

	mutex_enter(namecache_lock);
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL && ncp->nc_dvp->v_mount == mp) {
			/* Free the resources we had. */
			cache_invalidate(ncp);
			cache_disassociate(ncp);
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_reclaim();
	mutex_exit(namecache_lock);
}

/*
 * Scan global list invalidating entries until we meet a preset target.
 * Prefer to invalidate entries that have not scored a hit within
 * cache_hottime seconds.  We sort the LRU list only for this routine's
 * benefit.
 */
static void
cache_prune(int incache, int target)
{
	struct namecache *ncp, *nxtcp, *sentinel;
	int items, recent, tryharder;

	KASSERT(mutex_owned(namecache_lock));

	items = 0;
	tryharder = 0;
	recent = hardclock_ticks - hz * cache_hottime;
	sentinel = NULL;
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		if (incache <= target)
			break;
		items++;
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		if (ncp->nc_dvp == NULL)
			continue;
		if (ncp == sentinel) {
			/*
			 * If we looped back on ourself, then ignore
			 * recent entries and purge whatever we find.
			 */
			tryharder = 1;
		}
		if (!tryharder && ncp->nc_hittime > recent) {
			if (sentinel == NULL)
				sentinel = ncp;
			TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
			TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
			continue;
		}
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			incache--;
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_ev_scan.ev_count += items;
}

/*
 * Collect dead cache entries from all CPUs and garbage collect.
 */
static void
cache_reclaim(void)
{
	struct namecache *ncp, *next;
	int items;

	KASSERT(mutex_owned(namecache_lock));

	/*
	 * If the number of extant entries not awaiting garbage collection
	 * exceeds the high water mark, then reclaim stale entries until we
	 * reach our low water mark.
	 */
	items = numcache - cache_gcpend;
	if (items > (uint64_t)desiredvnodes * cache_hiwat / 100) {
		cache_prune(items, (int)((uint64_t)desiredvnodes *
		    cache_lowat / 100));
		cache_ev_over.ev_count++;
	} else
		cache_ev_under.ev_count++;

	/*
	 * Stop forward lookup activity on all CPUs and garbage collect dead
	 * entries.
	 */
	cache_lock_cpus();
	ncp = cache_gcqueue;
	cache_gcqueue = NULL;
	items = cache_gcpend;
	cache_gcpend = 0;
	while (ncp != NULL) {
		next = ncp->nc_gcqueue;
		cache_disassociate(ncp);
		KASSERT(ncp->nc_dvp == NULL);
		if (ncp->nc_hash.le_prev != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
		pool_cache_put(namecache_cache, ncp);
		ncp = next;
	}
	cache_unlock_cpus();
	numcache -= items;
	cache_ev_gc.ev_count += items;
}

/*
 * Cache maintenance thread, awakening once per second to:
 *
 * => keep number of entries below the high water mark
 * => sort pseudo-LRU list
 * => garbage collect dead entries
 */
static void
cache_thread(void *arg)
{

	mutex_enter(namecache_lock);
	for (;;) {
		cache_reclaim();
		kpause("cachegc", false, hz, namecache_lock);
	}
}

#ifdef DDB
void
namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
{
	struct vnode *dvp = NULL;
	struct namecache *ncp;

	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp && ncp->nc_dvp != NULL) {
			(*pr)("name %.*s\n", ncp->nc_nlen, ncp->nc_name);
			dvp = ncp->nc_dvp;
		}
	}
	if (dvp == NULL) {
		(*pr)("name not found\n");
		return;
	}
	vp = dvp;
	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp) {
			(*pr)("parent %.*s\n", ncp->nc_nlen, ncp->nc_name);
		}
	}
}
#endif