/*	$NetBSD: vfs_cache.c,v 1.93 2014/01/20 07:47:22 hannken Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.93 2014/01/20 07:47:22 hannken Exp $");

#include "opt_ddb.h"
#include "opt_revcache.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/kthread.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#include <sys/kmem.h>	/* kmem_zalloc() used by cache_cpu_init() */

#define NAMECACHE_ENTER_REVERSE
/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  Cache is indexed by hash value
 * obtained from (dvp, name) where dvp refers to the directory
 * containing name.
 *
 * For simplicity (and economy of storage), names longer than
 * a maximum length of NCHNAMLEN are not cached; they occur
 * infrequently in any case, and are almost never of interest.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 * The entry is dropped also when it was not possible to lock
 * the cached vnode, either because vget() failed or the generation
 * number has changed while waiting for the lock.
 */
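
/*
 * Rough usage sketch (not from this file; dvp, vpp and cnp stand in
 * for a file system's own locals): a VOP_LOOKUP() implementation
 * typically consults the cache first and, after a real directory
 * scan, records the result so the next lookup can be answered here:
 *
 *	if (cache_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
 *	    cnp->cn_nameiop, cnp->cn_flags, NULL, vpp))
 *		return *vpp == NULL ? ENOENT : 0;
 *	...scan the directory...
 *	cache_enter(dvp, *vpp, cnp->cn_nameptr, cnp->cn_namelen,
 *	    cnp->cn_flags);
 */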

/*
 * Per-cpu namecache data.
 */
struct nchcpu {
	kmutex_t	cpu_lock;
	struct nchstats	cpu_stats;
};

/*
 * The type for the hash code. While the hash function generates a
 * u32, the hash code has historically been passed around as a u_long,
 * and the value is modified by xor'ing a uintptr_t, so it's not
 * entirely clear what the best type is. For now I'll leave it
 * unchanged as u_long.
 */

typedef u_long nchash_t;

/*
 * Structures associated with name caching.
 */

static kmutex_t *namecache_lock __read_mostly;
static pool_cache_t namecache_cache __read_mostly;
static TAILQ_HEAD(, namecache) nclruhead __cacheline_aligned;

static LIST_HEAD(nchashhead, namecache) *nchashtbl __read_mostly;
static u_long	nchash __read_mostly;

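/*
 * Hash chain selection: the name hash is mixed with the directory
 * vnode pointer (shifted right to drop the always-zero alignment
 * bits), so identical names under different directories land on
 * different chains.
 */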
#define	NCHASH2(hash, dvp)	\
	(((hash) ^ ((uintptr_t)(dvp) >> 3)) & nchash)

static LIST_HEAD(ncvhashhead, namecache) *ncvhashtbl __read_mostly;
static u_long	ncvhash __read_mostly;

#define	NCVHASH(vp)		(((uintptr_t)(vp) >> 3) & ncvhash)

/* Number of cache entries allocated. */
static long	numcache __cacheline_aligned;

/* Garbage collection queue and number of entries pending in it. */
static void	*cache_gcqueue;
static u_int	cache_gcpend;

/* Cache effectiveness statistics. */
struct nchstats	nchstats __cacheline_aligned;
#define	COUNT(c,x)	(c.x++)

static const int cache_lowat = 95;
static const int cache_hiwat = 98;
static const int cache_hottime = 5;	/* number of seconds */
static int doingcache = 1;		/* 1 => enable the cache */

static struct evcnt cache_ev_scan;
static struct evcnt cache_ev_gc;
static struct evcnt cache_ev_over;
static struct evcnt cache_ev_under;
static struct evcnt cache_ev_forced;

static void	cache_invalidate(struct namecache *);
static struct namecache *cache_lookup_entry(
    const struct vnode *, const char *, size_t);
static void	cache_thread(void *);
static void	cache_disassociate(struct namecache *);
static void	cache_reclaim(void);
static int	cache_ctor(void *, void *, int);
static void	cache_dtor(void *, void *);

/*
 * Compute the hash for an entry.
 *
 * (This is for now a wrapper around namei_hash, whose interface is
 * for the time being slightly inconvenient.)
 */
static nchash_t
cache_hash(const char *name, size_t namelen)
{
	const char *endptr;

	endptr = name + namelen;
	return namei_hash(name, &endptr);
}

/*
 * Invalidate a cache entry and enqueue it for garbage collection.
 */
static void
cache_invalidate(struct namecache *ncp)
{
	void *head;

	KASSERT(mutex_owned(&ncp->nc_lock));

	if (ncp->nc_dvp != NULL) {
		ncp->nc_vp = NULL;
		ncp->nc_dvp = NULL;
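		/*
		 * Push the entry onto the lock-free garbage collection
		 * queue with a compare-and-swap loop; cache_reclaim()
		 * later detaches the whole queue at once.
		 */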
		do {
			head = cache_gcqueue;
			ncp->nc_gcqueue = head;
		} while (atomic_cas_ptr(&cache_gcqueue, head, ncp) != head);
		atomic_inc_uint(&cache_gcpend);
	}
}

/*
 * Disassociate a namecache entry from any vnodes it is attached to,
 * and remove from the global LRU list.
 */
static void
cache_disassociate(struct namecache *ncp)
{

	KASSERT(mutex_owned(namecache_lock));
	KASSERT(ncp->nc_dvp == NULL);

	if (ncp->nc_lru.tqe_prev != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		ncp->nc_lru.tqe_prev = NULL;
	}
	if (ncp->nc_vhash.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vhash);
		ncp->nc_vhash.le_prev = NULL;
	}
	if (ncp->nc_vlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vlist);
		ncp->nc_vlist.le_prev = NULL;
	}
	if (ncp->nc_dvlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_dvlist);
		ncp->nc_dvlist.le_prev = NULL;
	}
}

/*
 * Lock all CPUs to prevent any cache lookup activity.  Conceptually,
 * this locks out all "readers".
 */
static void
cache_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;
	long *s, *d, *m;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_enter(&cpup->cpu_lock);

		/* Collate statistics. */
		d = (long *)&nchstats;
		s = (long *)&cpup->cpu_stats;
		m = s + sizeof(nchstats) / sizeof(long);
		for (; s < m; s++, d++) {
			*d += *s;
			*s = 0;
		}
	}
}

/*
 * Release all CPU locks.
 */
static void
cache_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_exit(&cpup->cpu_lock);
	}
}

/*
 * Find a single cache entry and return it locked.  'namecache_lock' or
 * at least one of the per-CPU locks must be held.
 */
static struct namecache *
cache_lookup_entry(const struct vnode *dvp, const char *name, size_t namelen)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;
	nchash_t hash;

	KASSERT(dvp != NULL);
	hash = cache_hash(name, namelen);
	ncpp = &nchashtbl[NCHASH2(hash, dvp)];

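	/*
	 * The hash chain is walked without the per-entry lock; each
	 * candidate is re-checked against dvp after nc_lock has been
	 * taken, in case it was invalidated in the meantime.
	 */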
	LIST_FOREACH(ncp, ncpp, nc_hash) {
		if (ncp->nc_dvp != dvp ||
		    ncp->nc_nlen != namelen ||
		    memcmp(ncp->nc_name, name, (u_int)ncp->nc_nlen))
			continue;
		mutex_enter(&ncp->nc_lock);
		if (__predict_true(ncp->nc_dvp == dvp)) {
			ncp->nc_hittime = hardclock_ticks;
			return ncp;
		}
		/* Raced: entry has been nullified. */
		mutex_exit(&ncp->nc_lock);
	}

	return NULL;
}

/*
 * Look for the name in the cache.  We don't do this
 * if the segment name is long, simply so the cache can avoid
 * holding long names (which would either waste space, or
 * add greatly to the complexity).
 *
 * Lookup is called with DVP pointing to the directory to search,
 * NAME and NAMELEN giving the name of the entry being sought,
 * NAMEIOP giving the namei operation, and CNFLAGS giving the flags
 * word from the namei operation.
 *
 * DVP must be locked.
 *
 * There are three possible non-error return states:
 *    1. Nothing was found in the cache. Nothing is known about
 *       the requested name.
 *    2. A negative entry was found in the cache, meaning that the
 *       requested name definitely does not exist.
 *    3. A positive entry was found in the cache, meaning that the
 *       requested name does exist and that we are providing the
 *       vnode.
 * In these cases the results are:
 *    1. 0 returned; VN is set to NULL.
 *    2. 1 returned; VN is set to NULL.
 *    3. 1 returned; VN is set to the vnode found.
 *
 * The additional result argument ISWHT is set to zero, unless a
 * negative entry is found that was entered as a whiteout, in which
 * case ISWHT is set to one.
 *
 * The ISWHT_RET argument pointer may be null. In this case an
 * assertion is made that the whiteout flag is not set. File systems
 * that do not support whiteouts can/should do this.
 *
 * Filesystems that do support whiteouts should add ISWHITEOUT to
 * cnp->cn_flags if ISWHT comes back nonzero.
 *
 * When a vnode is returned, it is locked, as per the vnode lookup
 * locking protocol.
 *
 * There is no way for this function to fail, in the sense of
 * generating an error that requires aborting the namei operation.
 *
 * (Prior to October 2012, this function returned an integer status,
 * and a vnode, and mucked with the flags word in CNP for whiteouts.
 * The integer status was -1 for "nothing found", ENOENT for "a
 * negative entry found", 0 for "a positive entry found", and possibly
 * other errors, and the value of VN might or might not have been set
 * depending on what error occurred.)
 */
int
cache_lookup(struct vnode *dvp, const char *name, size_t namelen,
	     uint32_t nameiop, uint32_t cnflags,
	     int *iswht_ret, struct vnode **vn_ret)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error;

	/* Establish default result values */
	if (iswht_ret != NULL) {
		*iswht_ret = 0;
	}
	*vn_ret = NULL;

	if (__predict_false(!doingcache)) {
		return 0;
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(namelen > NCHNAMLEN)) {
		COUNT(cpup->cpu_stats, ncs_long);
		mutex_exit(&cpup->cpu_lock);
		/* found nothing */
		return 0;
	}
	ncp = cache_lookup_entry(dvp, name, namelen);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup->cpu_stats, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		/* found nothing */
		return 0;
	}
	if ((cnflags & MAKEENTRY) == 0) {
		COUNT(cpup->cpu_stats, ncs_badhits);
		/*
		 * Last component and we are renaming or deleting,
		 * the cache entry is invalid, or otherwise don't
		 * want cache entry to exist.
		 */
		cache_invalidate(ncp);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		/* found nothing */
		return 0;
	}
	if (ncp->nc_vp == NULL) {
		if (iswht_ret != NULL) {
			/*
			 * Restore the ISWHITEOUT flag saved earlier.
			 */
			KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
			*iswht_ret = (ncp->nc_flags & ISWHITEOUT) != 0;
		} else {
			KASSERT(ncp->nc_flags == 0);
		}

		if (__predict_true(nameiop != CREATE ||
		    (cnflags & ISLASTCN) == 0)) {
			COUNT(cpup->cpu_stats, ncs_neghits);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(&cpup->cpu_lock);
			/* found neg entry; vn is already null from above */
			return 1;
		} else {
			COUNT(cpup->cpu_stats, ncs_badhits);
			/*
			 * Last component and we are renaming or
			 * deleting, the cache entry is invalid,
			 * or otherwise don't want cache entry to
			 * exist.
			 */
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(&cpup->cpu_lock);
			/* found nothing */
			return 0;
		}
	}

	vp = ncp->nc_vp;
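	/*
	 * Take the vnode's interlock before dropping the cache locks so
	 * that vp cannot be recycled underneath us; vget() below is
	 * called with the interlock held and consumes it.
	 */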
	mutex_enter(vp->v_interlock);
	mutex_exit(&ncp->nc_lock);
	mutex_exit(&cpup->cpu_lock);
	error = vget(vp, LK_NOWAIT);
	if (error) {
		KASSERT(error == EBUSY);
		/*
		 * This vnode is being cleaned out.
		 * XXX badhits?
		 */
		COUNT(cpup->cpu_stats, ncs_falsehits);
		/* found nothing */
		return 0;
	}

#ifdef DEBUG
	/*
	 * Since we released ncp->nc_lock,
	 * we can't use this pointer any more.
	 */
	ncp = NULL;
#endif /* DEBUG */

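	/*
	 * Lock the vnode in the order each case requires: "." is dvp
	 * itself and is already locked; ".." is above dvp in the tree,
	 * so dvp is unlocked first to keep the parent-before-child lock
	 * order and relocked afterwards; anything else can be locked
	 * directly.
	 */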
	if (vp == dvp) {	/* lookup on "." */
		error = 0;
	} else if (cnflags & ISDOTDOT) {
		VOP_UNLOCK(dvp);
		error = vn_lock(vp, LK_EXCLUSIVE);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
	} else {
		error = vn_lock(vp, LK_EXCLUSIVE);
	}

	/*
	 * Check that the lock succeeded.
	 */
	if (error) {
		/* We don't have the right lock, but this is only for stats. */
		COUNT(cpup->cpu_stats, ncs_badhits);

		vrele(vp);
		/* found nothing */
		return 0;
	}

	/* We don't have the right lock, but this is only for stats. */
	COUNT(cpup->cpu_stats, ncs_goodhits);

	/* found it */
	*vn_ret = vp;
	return 1;
}

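/*
 * Leaner variant of cache_lookup(): the nameiop/ISLASTCN special
 * cases above are skipped, and a vnode that is found is returned
 * referenced (via vget()) but not locked.
 */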
int
cache_lookup_raw(struct vnode *dvp, const char *name, size_t namelen,
		 uint32_t cnflags,
		 int *iswht_ret, struct vnode **vn_ret)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error;

	/* Establish default results. */
	if (iswht_ret != NULL) {
		*iswht_ret = 0;
	}
	*vn_ret = NULL;

	if (__predict_false(!doingcache)) {
		/* found nothing */
		return 0;
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(namelen > NCHNAMLEN)) {
		COUNT(cpup->cpu_stats, ncs_long);
		mutex_exit(&cpup->cpu_lock);
		/* found nothing */
		return 0;
	}
	ncp = cache_lookup_entry(dvp, name, namelen);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup->cpu_stats, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		/* found nothing */
		return 0;
	}
	vp = ncp->nc_vp;
	if (vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		if (iswht_ret != NULL) {
			KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
			/*cnp->cn_flags |= ncp->nc_flags;*/
			*iswht_ret = (ncp->nc_flags & ISWHITEOUT) != 0;
		}
		COUNT(cpup->cpu_stats, ncs_neghits);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		/* found negative entry; vn is already null from above */
		return 1;
	}
	mutex_enter(vp->v_interlock);
	mutex_exit(&ncp->nc_lock);
	mutex_exit(&cpup->cpu_lock);
	error = vget(vp, LK_NOWAIT);
	if (error) {
		KASSERT(error == EBUSY);
		/*
		 * This vnode is being cleaned out.
		 * XXX badhits?
		 */
		COUNT(cpup->cpu_stats, ncs_falsehits);
		/* found nothing */
		return 0;
	}

	/* Unlocked, but only for stats. */
	COUNT(cpup->cpu_stats, ncs_goodhits); /* XXX can be "badhits" */

	/* found it */
	*vn_ret = vp;
	return 1;
}

/*
 * Scan cache looking for name of directory entry pointing at vp.
 *
 * If the lookup succeeds the vnode is referenced and stored in dvpp.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 */
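/*
 * (Illustration: getcwd builds the path right to left with this; each
 * successful call copies one component immediately below *bpp and
 * moves *bpp back by its length, so repeated calls while walking ".."
 * links assemble the full path inside bufp.)
 */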
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
{
	struct namecache *ncp;
	struct vnode *dvp;
	struct ncvhashhead *nvcpp;
	char *bp;
	int error, nlen;

	if (!doingcache)
		goto out;

	nvcpp = &ncvhashtbl[NCVHASH(vp)];

	mutex_enter(namecache_lock);
	LIST_FOREACH(ncp, nvcpp, nc_vhash) {
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_vp == vp &&
		    (dvp = ncp->nc_dvp) != NULL &&
		    dvp != vp) { 		/* avoid pesky . entries.. */

#ifdef DIAGNOSTIC
			if (ncp->nc_nlen == 1 &&
			    ncp->nc_name[0] == '.')
				panic("cache_revlookup: found entry for .");

			if (ncp->nc_nlen == 2 &&
			    ncp->nc_name[0] == '.' &&
			    ncp->nc_name[1] == '.')
				panic("cache_revlookup: found entry for ..");
#endif
			COUNT(nchstats, ncs_revhits);
			nlen = ncp->nc_nlen;

			if (bufp) {
				bp = *bpp;
				bp -= nlen;
				if (bp <= bufp) {
					*dvpp = NULL;
					mutex_exit(&ncp->nc_lock);
					mutex_exit(namecache_lock);
					return (ERANGE);
				}
				memcpy(bp, ncp->nc_name, nlen);
				*bpp = bp;
			}

			mutex_enter(dvp->v_interlock);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(namecache_lock);
			error = vget(dvp, LK_NOWAIT);
			if (error) {
				KASSERT(error == EBUSY);
				if (bufp)
					(*bpp) += nlen;
				*dvpp = NULL;
				return -1;
			}
			*dvpp = dvp;
			return (0);
		}
		mutex_exit(&ncp->nc_lock);
	}
	COUNT(nchstats, ncs_revmiss);
	mutex_exit(namecache_lock);
 out:
	*dvpp = NULL;
	return (-1);
}

/*
 * Add an entry to the cache
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp,
	    const char *name, size_t namelen, uint32_t cnflags)
{
	struct namecache *ncp;
	struct namecache *oncp;
	struct nchashhead *ncpp;
	struct ncvhashhead *nvcpp;
	nchash_t hash;

	/* First, check whether we can/should add a cache entry. */
	if ((cnflags & MAKEENTRY) == 0 ||
	    __predict_false(namelen > NCHNAMLEN || !doingcache)) {
		return;
	}

	if (numcache > desiredvnodes) {
		mutex_enter(namecache_lock);
		cache_ev_forced.ev_count++;
		cache_reclaim();
		mutex_exit(namecache_lock);
	}

	ncp = pool_cache_get(namecache_cache, PR_WAITOK);
	mutex_enter(namecache_lock);
	numcache++;

	/*
	 * Concurrent lookups in the same directory may race for a
	 * cache entry.  If there's a duplicated entry, free it.
	 */
	oncp = cache_lookup_entry(dvp, name, namelen);
	if (oncp) {
		cache_invalidate(oncp);
		mutex_exit(&oncp->nc_lock);
	}

	/* Grab the vnode we just found. */
	mutex_enter(&ncp->nc_lock);
	ncp->nc_vp = vp;
	ncp->nc_flags = 0;
	ncp->nc_hittime = 0;
	ncp->nc_gcqueue = NULL;
	if (vp == NULL) {
		/*
		 * For negative hits, save the ISWHITEOUT flag so we can
		 * restore it later when the cache entry is used again.
		 */
		ncp->nc_flags = cnflags & ISWHITEOUT;
	}

	/* Fill in cache info. */
	ncp->nc_dvp = dvp;
	LIST_INSERT_HEAD(&dvp->v_dnclist, ncp, nc_dvlist);
	if (vp)
		LIST_INSERT_HEAD(&vp->v_nclist, ncp, nc_vlist);
	else {
		ncp->nc_vlist.le_prev = NULL;
		ncp->nc_vlist.le_next = NULL;
	}
	KASSERT(namelen <= NCHNAMLEN);
	ncp->nc_nlen = namelen;
	memcpy(ncp->nc_name, name, (unsigned)ncp->nc_nlen);
	TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	hash = cache_hash(name, namelen);
	ncpp = &nchashtbl[NCHASH2(hash, dvp)];

	/*
	 * Flush updates before making visible in table.  No need for a
	 * memory barrier on the other side: to see modifications the
	 * list must be followed, meaning a dependent pointer load.
	 * The below is LIST_INSERT_HEAD() inlined, with the memory
	 * barrier included in the correct place.
	 */
	if ((ncp->nc_hash.le_next = ncpp->lh_first) != NULL)
		ncpp->lh_first->nc_hash.le_prev = &ncp->nc_hash.le_next;
	ncp->nc_hash.le_prev = &ncpp->lh_first;
	membar_producer();
	ncpp->lh_first = ncp;

	ncp->nc_vhash.le_prev = NULL;
	ncp->nc_vhash.le_next = NULL;

	/*
	 * Create reverse-cache entries (used in getcwd) for directories.
	 * (and in linux procfs exe node)
	 */
	if (vp != NULL &&
	    vp != dvp &&
#ifndef NAMECACHE_ENTER_REVERSE
	    vp->v_type == VDIR &&
#endif
	    (ncp->nc_nlen > 2 ||
	    (ncp->nc_nlen > 1 && ncp->nc_name[1] != '.') ||
	    (/* ncp->nc_nlen > 0 && */ ncp->nc_name[0] != '.'))) {
		nvcpp = &ncvhashtbl[NCVHASH(vp)];
		LIST_INSERT_HEAD(nvcpp, ncp, nc_vhash);
	}
	mutex_exit(&ncp->nc_lock);
	mutex_exit(namecache_lock);
}

/*
 * Name cache initialization, from vfs_init() when we are booting
 */
void
nchinit(void)
{
	int error;

	TAILQ_INIT(&nclruhead);
	namecache_cache = pool_cache_init(sizeof(struct namecache),
	    coherency_unit, 0, 0, "ncache", NULL, IPL_NONE, cache_ctor,
	    cache_dtor, NULL);
	KASSERT(namecache_cache != NULL);

	namecache_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	nchashtbl = hashinit(desiredvnodes, HASH_LIST, true, &nchash);
	ncvhashtbl =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &ncvhash);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &ncvhash);
#endif

	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, cache_thread,
	    NULL, NULL, "cachegc");
	if (error != 0)
		panic("nchinit %d", error);

	evcnt_attach_dynamic(&cache_ev_scan, EVCNT_TYPE_MISC, NULL,
	   "namecache", "entries scanned");
	evcnt_attach_dynamic(&cache_ev_gc, EVCNT_TYPE_MISC, NULL,
	   "namecache", "entries collected");
	evcnt_attach_dynamic(&cache_ev_over, EVCNT_TYPE_MISC, NULL,
	   "namecache", "over scan target");
	evcnt_attach_dynamic(&cache_ev_under, EVCNT_TYPE_MISC, NULL,
	   "namecache", "under scan target");
	evcnt_attach_dynamic(&cache_ev_forced, EVCNT_TYPE_MISC, NULL,
	   "namecache", "forced reclaims");
}

static int
cache_ctor(void *arg, void *obj, int flag)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_init(&ncp->nc_lock, MUTEX_DEFAULT, IPL_NONE);

	return 0;
}

static void
cache_dtor(void *arg, void *obj)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_destroy(&ncp->nc_lock);
}

/*
 * Called once for each CPU in the system as attached.
 */
void
cache_cpu_init(struct cpu_info *ci)
{
	struct nchcpu *cpup;
	size_t sz;

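	/*
	 * Pad and round the allocation so the per-CPU structure starts
	 * on a coherency_unit (cache line) boundary; this avoids false
	 * sharing of the lock and counters between CPUs.
	 */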
	sz = roundup2(sizeof(*cpup), coherency_unit) + coherency_unit;
	cpup = kmem_zalloc(sz, KM_SLEEP);
	cpup = (void *)roundup2((uintptr_t)cpup, coherency_unit);
	mutex_init(&cpup->cpu_lock, MUTEX_DEFAULT, IPL_NONE);
	ci->ci_data.cpu_nch = cpup;
}

/*
 * Name cache reinitialization, for when the maximum number of vnodes increases.
 */
void
nchreinit(void)
{
	struct namecache *ncp;
	struct nchashhead *oldhash1, *hash1;
	struct ncvhashhead *oldhash2, *hash2;
	u_long i, oldmask1, oldmask2, mask1, mask2;

	hash1 = hashinit(desiredvnodes, HASH_LIST, true, &mask1);
	hash2 =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &mask2);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &mask2);
#endif
	mutex_enter(namecache_lock);
	cache_lock_cpus();
	oldhash1 = nchashtbl;
	oldmask1 = nchash;
	nchashtbl = hash1;
	nchash = mask1;
	oldhash2 = ncvhashtbl;
	oldmask2 = ncvhash;
	ncvhashtbl = hash2;
	ncvhash = mask2;
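	/*
	 * Existing entries are not rehashed into the new tables: they
	 * are simply unhashed here, remain on the LRU and per-vnode
	 * lists, and become unreachable by name until purged or pruned.
	 */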
	for (i = 0; i <= oldmask1; i++) {
		while ((ncp = LIST_FIRST(&oldhash1[i])) != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
	}
	for (i = 0; i <= oldmask2; i++) {
		while ((ncp = LIST_FIRST(&oldhash2[i])) != NULL) {
			LIST_REMOVE(ncp, nc_vhash);
			ncp->nc_vhash.le_prev = NULL;
		}
	}
	cache_unlock_cpus();
	mutex_exit(namecache_lock);
	hashdone(oldhash1, HASH_LIST, oldmask1);
	hashdone(oldhash2, HASH_LIST, oldmask2);
}

/*
 * Cache flush, a particular vnode; called when a vnode is renamed to
 * hide entries that would now be invalid
 */
void
cache_purge1(struct vnode *vp, const char *name, size_t namelen, int flags)
{
	struct namecache *ncp, *ncnext;

	mutex_enter(namecache_lock);
	if (flags & PURGE_PARENTS) {
		for (ncp = LIST_FIRST(&vp->v_nclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_vlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (flags & PURGE_CHILDREN) {
		for (ncp = LIST_FIRST(&vp->v_dnclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_dvlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (name != NULL) {
		ncp = cache_lookup_entry(vp, name, namelen);
		if (ncp) {
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	mutex_exit(namecache_lock);
}

/*
 * Cache flush, a whole filesystem; called when filesys is umounted to
 * remove entries that would now be invalid.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct namecache *ncp, *nxtcp;

	mutex_enter(namecache_lock);
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL && ncp->nc_dvp->v_mount == mp) {
			/* Free the resources we had. */
			cache_invalidate(ncp);
			cache_disassociate(ncp);
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_reclaim();
	mutex_exit(namecache_lock);
}

/*
 * Scan global list invalidating entries until we meet a preset target.
 * Prefer to invalidate entries that have not scored a hit within
 * cache_hottime seconds.  We sort the LRU list only for this routine's
 * benefit.
 */
static void
cache_prune(int incache, int target)
{
	struct namecache *ncp, *nxtcp, *sentinel;
	int items, recent, tryharder;

	KASSERT(mutex_owned(namecache_lock));

	items = 0;
	tryharder = 0;
	recent = hardclock_ticks - hz * cache_hottime;
	sentinel = NULL;
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		if (incache <= target)
			break;
		items++;
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		if (ncp == sentinel) {
			/*
			 * If we looped back on ourself, then ignore
			 * recent entries and purge whatever we find.
			 */
			tryharder = 1;
		}
		if (ncp->nc_dvp == NULL)
			continue;
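		/*
		 * Entries that scored a hit within the last cache_hottime
		 * seconds are moved to the tail and kept; the subtraction
		 * keeps the comparison meaningful when hardclock_ticks
		 * wraps around.
		 */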
		if (!tryharder && (ncp->nc_hittime - recent) > 0) {
			if (sentinel == NULL)
				sentinel = ncp;
			TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
			TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
			continue;
		}
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			incache--;
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_ev_scan.ev_count += items;
}

/*
 * Collect dead cache entries from all CPUs and garbage collect.
 */
static void
cache_reclaim(void)
{
	struct namecache *ncp, *next;
	int items;

	KASSERT(mutex_owned(namecache_lock));

	/*
	 * If the number of extant entries not awaiting garbage collection
	 * exceeds the high water mark, then reclaim stale entries until we
	 * reach our low water mark.
	 */
	items = numcache - cache_gcpend;
	if (items > (uint64_t)desiredvnodes * cache_hiwat / 100) {
		cache_prune(items, (int)((uint64_t)desiredvnodes *
		    cache_lowat / 100));
		cache_ev_over.ev_count++;
	} else
		cache_ev_under.ev_count++;

	/*
	 * Stop forward lookup activity on all CPUs and garbage collect dead
	 * entries.
	 */
	cache_lock_cpus();
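	/*
	 * With namecache_lock and every per-CPU lock held, nothing can
	 * push new entries onto the queue, so it can be detached with
	 * plain loads and stores.
	 */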
	ncp = cache_gcqueue;
	cache_gcqueue = NULL;
	items = cache_gcpend;
	cache_gcpend = 0;
	while (ncp != NULL) {
		next = ncp->nc_gcqueue;
		cache_disassociate(ncp);
		KASSERT(ncp->nc_dvp == NULL);
		if (ncp->nc_hash.le_prev != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
		pool_cache_put(namecache_cache, ncp);
		ncp = next;
	}
	cache_unlock_cpus();
	numcache -= items;
	cache_ev_gc.ev_count += items;
}

/*
 * Cache maintenance thread, awakening once per second to:
 *
 * => keep number of entries below the high water mark
 * => sort pseudo-LRU list
 * => garbage collect dead entries
 */
static void
cache_thread(void *arg)
{

	mutex_enter(namecache_lock);
	for (;;) {
		cache_reclaim();
		kpause("cachegc", false, hz, namecache_lock);
	}
}

#ifdef DDB
void
namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
{
	struct vnode *dvp = NULL;
	struct namecache *ncp;

	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp && ncp->nc_dvp != NULL) {
			(*pr)("name %.*s\n", ncp->nc_nlen, ncp->nc_name);
			dvp = ncp->nc_dvp;
		}
	}
	if (dvp == NULL) {
		(*pr)("name not found\n");
		return;
	}
	vp = dvp;
	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp) {
			(*pr)("parent %.*s\n", ncp->nc_nlen, ncp->nc_name);
		}
	}
}
#endif