/*	$NetBSD: vfs_cache.c,v 1.103 2014/12/24 20:01:21 dennis Exp $	*/

/*-
 * Copyright (c) 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.103 2014/12/24 20:01:21 dennis Exp $");

#include "opt_ddb.h"
#include "opt_revcache.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/namei.h>
#include <sys/errno.h>
#include <sys/pool.h>
#include <sys/mutex.h>
#include <sys/atomic.h>
#include <sys/kthread.h>
#include <sys/kernel.h>
#include <sys/cpu.h>
#include <sys/evcnt.h>
#define NAMECACHE_ENTER_REVERSE
/*
 * Name caching works as follows:
 *
 * Names found by directory scans are retained in a cache
 * for future reference.  It is managed LRU, so frequently
 * used names will hang around.  The cache is indexed by a hash value
 * obtained from (dvp, name) where dvp refers to the directory
 * containing name.
 *
 * For simplicity (and economy of storage), names longer than
 * a maximum length of NCHNAMLEN are not cached; they occur
 * infrequently in any case, and are almost never of interest.
 *
 * Upon reaching the last segment of a path, if the reference
 * is for DELETE, or NOCACHE is set (rewrite), and the
 * name is located in the cache, it will be dropped.
 * The entry is also dropped when it was not possible to lock
 * the cached vnode, either because vget() failed or the generation
 * number has changed while waiting for the lock.
 */

/*
 * The locking in this subsystem works as follows:
 *
 * When an entry is added to the cache, via cache_enter(),
 * namecache_lock is taken to exclude other writers.  The new
 * entry is added to the hash list in a way which permits
 * concurrent lookups and invalidations in the cache done on
 * other CPUs to continue in parallel.
 *
 * When a lookup is done in the cache, via cache_lookup() or
 * cache_lookup_raw(), the per-cpu lock below is taken.  This
 * protects calls to cache_lookup_entry() and cache_invalidate()
 * against cache_reclaim() but allows lookups to continue in
 * parallel with cache_enter().
 *
 * cache_revlookup() takes namecache_lock to exclude cache_enter()
 * and cache_reclaim() since the list it operates on is not
 * maintained to allow concurrent reads.
 *
 * When cache_reclaim() is called namecache_lock is held to hold
 * off calls to cache_enter()/cache_revlookup() and each of the
 * per-cpu locks is taken to hold off lookups.  Holding all these
 * locks essentially idles the subsystem, ensuring there are no
 * concurrent references to the cache entries being freed.
 *
 * 32 bit per-cpu statistic counters (struct nchstats_percpu) are
 * incremented when the operations they count are performed while
 * running on the corresponding CPU.  Frequently individual counters
 * are incremented while holding a lock (either a per-cpu lock or
 * namecache_lock) sufficient to preclude concurrent increments
 * being done to the same counter, so non-atomic increments are
 * done using the COUNT() macro.  Counters which are incremented
 * when one of these locks is not held use the COUNT_UNL() macro
 * instead.  COUNT_UNL() could be defined to do atomic increments
 * but currently just does what COUNT() does, on the theory that
 * it is unlikely the non-atomic increment will be interrupted
 * by something on the same CPU that increments the same counter,
 * but even if it does happen the consequences aren't serious.
 *
 * N.B.: Attempting to protect COUNT_UNL() increments by taking
 * a per-cpu lock in the namecache_count_*() functions causes
 * a deadlock.  Don't do that, use atomic increments instead if
 * the imperfections here bug you.
 *
 * The 64 bit system-wide statistic counts (struct nchstats) are
 * maintained by sampling the per-cpu counters periodically, adding
 * in the deltas since the last samples and recording the current
 * samples to use to compute the next delta.  The sampling is done
 * as a side effect of cache_reclaim() which is run periodically,
 * for its own purposes, often enough to avoid overflow of the 32
 * bit counters.  While sampling in this fashion requires no locking
 * it is nevertheless done only after all locks have been taken by
 * cache_reclaim() to allow cache_stat_sysctl() to hold off
 * cache_reclaim() with minimal locking.
 *
 * cache_stat_sysctl() takes its CPU's per-cpu lock to hold off
 * cache_reclaim() so that it can copy the subsystem total stats
 * without them being concurrently modified.  If CACHE_STATS_CURRENT
 * is defined it also harvests the per-cpu increments into the total,
 * which again requires cache_reclaim() to be held off.
 *
 * The per-cpu data (a lock and the per-cpu stats structures)
 * are defined next.
 */
struct nchstats_percpu _NAMEI_CACHE_STATS(uint32_t);

struct nchcpu {
	kmutex_t	cpu_lock;
	struct nchstats_percpu	cpu_stats;
	/* XXX maybe __cacheline_aligned would improve this? */
	struct nchstats_percpu	cpu_stats_last;	/* from last sample */
};

/*
 * The type for the hash code.  While the hash function generates a
 * u32, the hash code has historically been passed around as a u_long,
 * and the value is modified by xor'ing a uintptr_t, so it's not
 * entirely clear what the best type is.  For now I'll leave it
 * unchanged as u_long.
 */

typedef u_long nchash_t;

/*
 * Structures associated with name caching.
 */

static kmutex_t *namecache_lock __read_mostly;
static pool_cache_t namecache_cache __read_mostly;
static TAILQ_HEAD(, namecache) nclruhead __cacheline_aligned;

static LIST_HEAD(nchashhead, namecache) *nchashtbl __read_mostly;
static u_long	nchash __read_mostly;

#define	NCHASH2(hash, dvp)	\
	(((hash) ^ ((uintptr_t)(dvp) >> 3)) & nchash)

static LIST_HEAD(ncvhashhead, namecache) *ncvhashtbl __read_mostly;
static u_long	ncvhash __read_mostly;

#define	NCVHASH(vp)	(((uintptr_t)(vp) >> 3) & ncvhash)
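
/*
 * Illustrative sketch (not compiled): how a hash bucket is chosen.
 * NCHASH2() folds the directory vnode pointer into the name hash, so
 * the same name under two different directories usually lands in two
 * different buckets.  The pattern below mirrors what is done in
 * cache_lookup_entry() and cache_enter(); the variable names here are
 * hypothetical.
 */
#if 0
	struct nchashhead *ncpp;
	nchash_t hash;

	hash = cache_hash(name, namelen);	/* hash of the name alone */
	ncpp = &nchashtbl[NCHASH2(hash, dvp)];	/* bucket for (dvp, name) */
#endif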

/* Number of cache entries allocated. */
static long	numcache __cacheline_aligned;

/* Garbage collection queue and number of entries pending in it. */
static void	*cache_gcqueue;
static u_int	cache_gcpend;

/* Cache effectiveness statistics.  This holds the total from per-cpu stats */
struct nchstats	nchstats __cacheline_aligned;

/*
 * Macros to count an event, update the central stats with per-cpu
 * values and add current per-cpu increments to the subsystem total
 * last collected by cache_reclaim().
 */
#define	CACHE_STATS_CURRENT	/* nothing */

#define	COUNT(cpup, f)	((cpup)->cpu_stats.f++)

#define	UPDATE(cpup, f) do { \
	struct nchcpu *Xcpup = (cpup); \
	uint32_t Xcnt = (volatile uint32_t) Xcpup->cpu_stats.f; \
	nchstats.f += Xcnt - Xcpup->cpu_stats_last.f; \
	Xcpup->cpu_stats_last.f = Xcnt; \
} while (/* CONSTCOND */ 0)

#define	ADD(stats, cpup, f) do { \
	struct nchcpu *Xcpup = (cpup); \
	stats.f += Xcpup->cpu_stats.f - Xcpup->cpu_stats_last.f; \
} while (/* CONSTCOND */ 0)

/* Do unlocked stats the same way.  Use a different name to allow mind changes. */
#define	COUNT_UNL(cpup, f)	COUNT((cpup), f)
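
/*
 * Illustrative sketch (not compiled): the delta arithmetic in UPDATE()
 * and ADD() stays correct even when a 32 bit per-cpu counter wraps,
 * because the subtraction is performed in uint32_t (modulo 2^32).
 * The values below are made up for the example.
 */
#if 0
	uint32_t last = 0xfffffff0;	/* sample recorded previously */
	uint32_t now = 0x00000010;	/* counter has since wrapped */
	uint64_t total = 0;

	total += (uint32_t)(now - last);  /* adds 0x20 (32), as desired */
#endif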

static const int cache_lowat = 95;
static const int cache_hiwat = 98;
static const int cache_hottime = 5;	/* number of seconds */
static int doingcache = 1;		/* 1 => enable the cache */

static struct evcnt cache_ev_scan;
static struct evcnt cache_ev_gc;
static struct evcnt cache_ev_over;
static struct evcnt cache_ev_under;
static struct evcnt cache_ev_forced;

static void	cache_invalidate(struct namecache *);
static struct namecache *cache_lookup_entry(
    const struct vnode *, const char *, size_t);
static void	cache_thread(void *);
static void	cache_disassociate(struct namecache *);
static void	cache_reclaim(void);
static int	cache_ctor(void *, void *, int);
static void	cache_dtor(void *, void *);

/*
 * Compute the hash for an entry.
 *
 * (This is for now a wrapper around namei_hash, whose interface is
 * for the time being slightly inconvenient.)
 */
static nchash_t
cache_hash(const char *name, size_t namelen)
{
	const char *endptr;

	endptr = name + namelen;
	return namei_hash(name, &endptr);
}

/*
 * Invalidate a cache entry and enqueue it for garbage collection.
 * The caller needs to hold namecache_lock or a per-cpu lock to hold
 * off cache_reclaim().
 */
static void
cache_invalidate(struct namecache *ncp)
{
	void *head;

	KASSERT(mutex_owned(&ncp->nc_lock));

	if (ncp->nc_dvp != NULL) {
		ncp->nc_vp = NULL;
		ncp->nc_dvp = NULL;
		do {
			head = cache_gcqueue;
			ncp->nc_gcqueue = head;
		} while (atomic_cas_ptr(&cache_gcqueue, head, ncp) != head);
		atomic_inc_uint(&cache_gcpend);
	}
}

/*
 * Disassociate a namecache entry from any vnodes it is attached to,
 * and remove it from the global LRU list.
 */
static void
cache_disassociate(struct namecache *ncp)
{

	KASSERT(mutex_owned(namecache_lock));
	KASSERT(ncp->nc_dvp == NULL);

	if (ncp->nc_lru.tqe_prev != NULL) {
		TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
		ncp->nc_lru.tqe_prev = NULL;
	}
	if (ncp->nc_vhash.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vhash);
		ncp->nc_vhash.le_prev = NULL;
	}
	if (ncp->nc_vlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_vlist);
		ncp->nc_vlist.le_prev = NULL;
	}
	if (ncp->nc_dvlist.le_prev != NULL) {
		LIST_REMOVE(ncp, nc_dvlist);
		ncp->nc_dvlist.le_prev = NULL;
	}
}

/*
 * Lock all CPUs to prevent any cache lookup activity.  Conceptually,
 * this locks out all "readers".
 */
static void
cache_lock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;

	/*
	 * Lock out all CPUs first, then harvest per-cpu stats.  This
	 * is probably not quite as cache-efficient as doing the lock
	 * and harvest at the same time, but allows cache_stat_sysctl()
	 * to make do with a per-cpu lock.
	 */
	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_enter(&cpup->cpu_lock);
	}
	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		UPDATE(cpup, ncs_goodhits);
		UPDATE(cpup, ncs_neghits);
		UPDATE(cpup, ncs_badhits);
		UPDATE(cpup, ncs_falsehits);
		UPDATE(cpup, ncs_miss);
		UPDATE(cpup, ncs_long);
		UPDATE(cpup, ncs_pass2);
		UPDATE(cpup, ncs_2passes);
		UPDATE(cpup, ncs_revhits);
		UPDATE(cpup, ncs_revmiss);
	}
}

/*
 * Release all CPU locks.
 */
static void
cache_unlock_cpus(void)
{
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct nchcpu *cpup;

	for (CPU_INFO_FOREACH(cii, ci)) {
		cpup = ci->ci_data.cpu_nch;
		mutex_exit(&cpup->cpu_lock);
	}
}

/*
 * Find a single cache entry and return it locked.
 * The caller needs to hold namecache_lock or a per-cpu lock to hold
 * off cache_reclaim().
 */
static struct namecache *
cache_lookup_entry(const struct vnode *dvp, const char *name, size_t namelen)
{
	struct nchashhead *ncpp;
	struct namecache *ncp;
	nchash_t hash;

	KASSERT(dvp != NULL);
	hash = cache_hash(name, namelen);
	ncpp = &nchashtbl[NCHASH2(hash, dvp)];

	LIST_FOREACH(ncp, ncpp, nc_hash) {
		/* XXX Needs barrier for Alpha here */
		if (ncp->nc_dvp != dvp ||
		    ncp->nc_nlen != namelen ||
		    memcmp(ncp->nc_name, name, (u_int)ncp->nc_nlen))
			continue;
		mutex_enter(&ncp->nc_lock);
		if (__predict_true(ncp->nc_dvp == dvp)) {
			ncp->nc_hittime = hardclock_ticks;
			return ncp;
		}
		/* Raced: entry has been nullified. */
		mutex_exit(&ncp->nc_lock);
	}

	return NULL;
}

/*
 * Look up the name in the cache.  We don't do this
 * if the segment name is long, simply so the cache can avoid
 * holding long names (which would either waste space, or
 * add greatly to the complexity).
 *
 * Lookup is called with DVP pointing to the directory to search,
 * and CNP providing the name of the entry being sought: cn_nameptr
 * is the name, cn_namelen is its length, and cn_flags is the flags
 * word from the namei operation.
 *
 * DVP must be locked.
 *
 * There are three possible non-error return states:
 *    1. Nothing was found in the cache.  Nothing is known about
 *       the requested name.
 *    2. A negative entry was found in the cache, meaning that the
 *       requested name definitely does not exist.
 *    3. A positive entry was found in the cache, meaning that the
 *       requested name does exist and that we are providing the
 *       vnode.
 * In these cases the results are:
 *    1. 0 returned; VN is set to NULL.
 *    2. 1 returned; VN is set to NULL.
 *    3. 1 returned; VN is set to the vnode found.
 *
 * The additional result argument ISWHT is set to zero, unless a
 * negative entry is found that was entered as a whiteout, in which
 * case ISWHT is set to one.
 *
 * The ISWHT_RET argument pointer may be null.  In this case an
 * assertion is made that the whiteout flag is not set.  File systems
 * that do not support whiteouts can/should do this.
 *
 * Filesystems that do support whiteouts should add ISWHITEOUT to
 * cnp->cn_flags if ISWHT comes back nonzero.
 *
 * When a vnode is returned, it is locked, as per the vnode lookup
 * locking protocol.
 *
 * There is no way for this function to fail, in the sense of
 * generating an error that requires aborting the namei operation.
 *
 * (Prior to October 2012, this function returned an integer status,
 * and a vnode, and mucked with the flags word in CNP for whiteouts.
 * The integer status was -1 for "nothing found", ENOENT for "a
 * negative entry found", 0 for "a positive entry found", and possibly
 * other errors, and the value of VN might or might not have been set
 * depending on what error occurred.)
 */
int
cache_lookup(struct vnode *dvp, const char *name, size_t namelen,
	     uint32_t nameiop, uint32_t cnflags,
	     int *iswht_ret, struct vnode **vn_ret)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error, ret_value;

	/* Establish default result values */
	if (iswht_ret != NULL) {
		*iswht_ret = 0;
	}
	*vn_ret = NULL;

	if (__predict_false(!doingcache)) {
		return 0;
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(namelen > NCHNAMLEN)) {
		COUNT(cpup, ncs_long);
		mutex_exit(&cpup->cpu_lock);
		/* found nothing */
		return 0;
	}

	ncp = cache_lookup_entry(dvp, name, namelen);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		/* found nothing */
		return 0;
	}
	if ((cnflags & MAKEENTRY) == 0) {
		COUNT(cpup, ncs_badhits);
		/*
		 * Last component and we are renaming or deleting,
		 * the cache entry is invalid, or otherwise don't
		 * want cache entry to exist.
		 */
		cache_invalidate(ncp);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		/* found nothing */
		return 0;
	}
	if (ncp->nc_vp == NULL) {
		if (iswht_ret != NULL) {
			/*
			 * Restore the ISWHITEOUT flag saved earlier.
			 */
			KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
			*iswht_ret = (ncp->nc_flags & ISWHITEOUT) != 0;
		} else {
			KASSERT(ncp->nc_flags == 0);
		}

		if (__predict_true(nameiop != CREATE ||
		    (cnflags & ISLASTCN) == 0)) {
			COUNT(cpup, ncs_neghits);
			/* found neg entry; vn is already null from above */
			ret_value = 1;
		} else {
			COUNT(cpup, ncs_badhits);
			/*
			 * Last component and we are renaming or
			 * deleting, the cache entry is invalid,
			 * or otherwise don't want cache entry to
			 * exist.
			 */
			cache_invalidate(ncp);
			/* found nothing */
			ret_value = 0;
		}
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		return ret_value;
	}

	vp = ncp->nc_vp;
	mutex_enter(vp->v_interlock);
	mutex_exit(&ncp->nc_lock);
	mutex_exit(&cpup->cpu_lock);

	/*
	 * Unlocked except for the vnode interlock.  Call vget().
	 */
	error = vget(vp, LK_NOWAIT);
	if (error) {
		KASSERT(error == EBUSY);
		/*
		 * This vnode is being cleaned out.
		 * XXX badhits?
		 */
		COUNT_UNL(cpup, ncs_falsehits);
		/* found nothing */
		return 0;
	}

	COUNT_UNL(cpup, ncs_goodhits);
	/* found it */
	*vn_ret = vp;
	return 1;
}
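
/*
 * Illustrative sketch (not compiled): how a file system's lookup
 * routine might consume the three return states documented above.
 * The surrounding context, the vpp result pointer, and the
 * scan_directory() helper are hypothetical; only cache_lookup() and
 * cache_enter() are real.
 */
#if 0
	struct vnode *vp;
	int iswht, error;

	if (cache_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
	    cnp->cn_nameiop, cnp->cn_flags, &iswht, &vp)) {
		if (vp == NULL) {
			/* State 2: cached negative entry. */
			if (iswht)
				cnp->cn_flags |= ISWHITEOUT;
			return ENOENT;
		}
		/* State 3: cache hit; vp is referenced and locked. */
		*vpp = vp;
		return 0;
	}
	/* State 1: cache miss; scan the directory, then cache the result. */
	error = scan_directory(dvp, cnp, &vp);
	cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_flags);
#endif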

/*
 * Cut-'n-pasted version of the above without the nameiop argument.
 */
int
cache_lookup_raw(struct vnode *dvp, const char *name, size_t namelen,
		 uint32_t cnflags,
		 int *iswht_ret, struct vnode **vn_ret)
{
	struct namecache *ncp;
	struct vnode *vp;
	struct nchcpu *cpup;
	int error;

	/* Establish default results. */
	if (iswht_ret != NULL) {
		*iswht_ret = 0;
	}
	*vn_ret = NULL;

	if (__predict_false(!doingcache)) {
		/* found nothing */
		return 0;
	}

	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&cpup->cpu_lock);
	if (__predict_false(namelen > NCHNAMLEN)) {
		COUNT(cpup, ncs_long);
		mutex_exit(&cpup->cpu_lock);
		/* found nothing */
		return 0;
	}
	ncp = cache_lookup_entry(dvp, name, namelen);
	if (__predict_false(ncp == NULL)) {
		COUNT(cpup, ncs_miss);
		mutex_exit(&cpup->cpu_lock);
		/* found nothing */
		return 0;
	}
	vp = ncp->nc_vp;
	if (vp == NULL) {
		/*
		 * Restore the ISWHITEOUT flag saved earlier.
		 */
		if (iswht_ret != NULL) {
			KASSERT((ncp->nc_flags & ~ISWHITEOUT) == 0);
			/*cnp->cn_flags |= ncp->nc_flags;*/
			*iswht_ret = (ncp->nc_flags & ISWHITEOUT) != 0;
		}
		COUNT(cpup, ncs_neghits);
		mutex_exit(&ncp->nc_lock);
		mutex_exit(&cpup->cpu_lock);
		/* found negative entry; vn is already null from above */
		return 1;
	}
	mutex_enter(vp->v_interlock);
	mutex_exit(&ncp->nc_lock);
	mutex_exit(&cpup->cpu_lock);

	/*
	 * Unlocked except for the vnode interlock.  Call vget().
	 */
	error = vget(vp, LK_NOWAIT);
	if (error) {
		KASSERT(error == EBUSY);
		/*
		 * This vnode is being cleaned out.
		 * XXX badhits?
		 */
		COUNT_UNL(cpup, ncs_falsehits);
		/* found nothing */
		return 0;
	}

	COUNT_UNL(cpup, ncs_goodhits);	/* XXX can be "badhits" */
	/* found it */
	*vn_ret = vp;
	return 1;
}

/*
 * Scan cache looking for name of directory entry pointing at vp.
 *
 * If the lookup succeeds the vnode is referenced and stored in dvpp.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 */
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp)
{
	struct namecache *ncp;
	struct vnode *dvp;
	struct ncvhashhead *nvcpp;
	struct nchcpu *cpup;
	char *bp;
	int error, nlen;

	if (!doingcache)
		goto out;

	nvcpp = &ncvhashtbl[NCVHASH(vp)];

	/*
	 * We increment counters in the local CPU's per-cpu stats.
	 * We don't take the per-cpu lock, however, since this function
	 * is the only place these counters are incremented so no one
	 * will be racing with us to increment them.
	 */
	cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(namecache_lock);
	LIST_FOREACH(ncp, nvcpp, nc_vhash) {
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_vp == vp &&
		    (dvp = ncp->nc_dvp) != NULL &&
		    dvp != vp) {		/* avoid pesky . entries.. */

#ifdef DIAGNOSTIC
			if (ncp->nc_nlen == 1 &&
			    ncp->nc_name[0] == '.')
				panic("cache_revlookup: found entry for .");

			if (ncp->nc_nlen == 2 &&
			    ncp->nc_name[0] == '.' &&
			    ncp->nc_name[1] == '.')
				panic("cache_revlookup: found entry for ..");
#endif
			COUNT(cpup, ncs_revhits);
			nlen = ncp->nc_nlen;

			if (bufp) {
				bp = *bpp;
				bp -= nlen;
				if (bp <= bufp) {
					*dvpp = NULL;
					mutex_exit(&ncp->nc_lock);
					mutex_exit(namecache_lock);
					return (ERANGE);
				}
				memcpy(bp, ncp->nc_name, nlen);
				*bpp = bp;
			}

			mutex_enter(dvp->v_interlock);
			mutex_exit(&ncp->nc_lock);
			mutex_exit(namecache_lock);
			error = vget(dvp, LK_NOWAIT);
			if (error) {
				KASSERT(error == EBUSY);
				if (bufp)
					(*bpp) += nlen;
				*dvpp = NULL;
				return -1;
			}
			*dvpp = dvp;
			return (0);
		}
		mutex_exit(&ncp->nc_lock);
	}
	COUNT(cpup, ncs_revmiss);
	mutex_exit(namecache_lock);
 out:
	*dvpp = NULL;
	return (-1);
}
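
/*
 * Illustrative sketch (not compiled): the backwards buffer convention
 * used by cache_revlookup(), as in a getcwd()-style walk.  The name is
 * written immediately before *bpp and bpp is moved back over it, so
 * repeated calls build the path from its last component to its first.
 * The buffer handling and loop below are hypothetical, and reference
 * counting on the vnodes is omitted.
 */
#if 0
	char buf[MAXPATHLEN];
	char *bp = buf + sizeof(buf);	/* start at the end of the buffer */
	struct vnode *dvp;

	*--bp = '\0';
	while (vp != rootvnode) {
		if (cache_revlookup(vp, &dvp, &bp, buf) != 0)
			break;		/* miss or ERANGE: fall back to a ".." scan */
		*--bp = '/';		/* separator before the new component */
		vp = dvp;		/* continue from the parent directory */
	}
	/* bp now points at the reconstructed path suffix. */
#endif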

/*
 * Add an entry to the cache.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp,
	    const char *name, size_t namelen, uint32_t cnflags)
{
	struct namecache *ncp;
	struct namecache *oncp;
	struct nchashhead *ncpp;
	struct ncvhashhead *nvcpp;
	nchash_t hash;

	/* First, check whether we can/should add a cache entry. */
	if ((cnflags & MAKEENTRY) == 0 ||
	    __predict_false(namelen > NCHNAMLEN || !doingcache)) {
		return;
	}

	if (numcache > desiredvnodes) {
		mutex_enter(namecache_lock);
		cache_ev_forced.ev_count++;
		cache_reclaim();
		mutex_exit(namecache_lock);
	}

	ncp = pool_cache_get(namecache_cache, PR_WAITOK);
	mutex_enter(namecache_lock);
	numcache++;

	/*
	 * Concurrent lookups in the same directory may race for a
	 * cache entry.  If there's a duplicated entry, free it.
	 */
	oncp = cache_lookup_entry(dvp, name, namelen);
	if (oncp) {
		cache_invalidate(oncp);
		mutex_exit(&oncp->nc_lock);
	}

	/* Grab the vnode we just found. */
	mutex_enter(&ncp->nc_lock);
	ncp->nc_vp = vp;
	ncp->nc_flags = 0;
	ncp->nc_hittime = 0;
	ncp->nc_gcqueue = NULL;
	if (vp == NULL) {
		/*
		 * For negative hits, save the ISWHITEOUT flag so we can
		 * restore it later when the cache entry is used again.
		 */
		ncp->nc_flags = cnflags & ISWHITEOUT;
	}

	/* Fill in cache info. */
	ncp->nc_dvp = dvp;
	LIST_INSERT_HEAD(&dvp->v_dnclist, ncp, nc_dvlist);
	if (vp)
		LIST_INSERT_HEAD(&vp->v_nclist, ncp, nc_vlist);
	else {
		ncp->nc_vlist.le_prev = NULL;
		ncp->nc_vlist.le_next = NULL;
	}
	KASSERT(namelen <= NCHNAMLEN);
	ncp->nc_nlen = namelen;
	memcpy(ncp->nc_name, name, (unsigned)ncp->nc_nlen);
	TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
	hash = cache_hash(name, namelen);
	ncpp = &nchashtbl[NCHASH2(hash, dvp)];

	/*
	 * Flush updates before making visible in table.  No need for a
	 * memory barrier on the other side: to see modifications the
	 * list must be followed, meaning a dependent pointer load.
	 * The below is LIST_INSERT_HEAD() inlined, with the memory
	 * barrier included in the correct place.
	 */
	if ((ncp->nc_hash.le_next = ncpp->lh_first) != NULL)
		ncpp->lh_first->nc_hash.le_prev = &ncp->nc_hash.le_next;
	ncp->nc_hash.le_prev = &ncpp->lh_first;
	membar_producer();
	ncpp->lh_first = ncp;

	ncp->nc_vhash.le_prev = NULL;
	ncp->nc_vhash.le_next = NULL;

	/*
	 * Create reverse-cache entries (used in getcwd) for directories.
	 * (and in linux procfs exe node)
	 */
	if (vp != NULL &&
	    vp != dvp &&
#ifndef NAMECACHE_ENTER_REVERSE
	    vp->v_type == VDIR &&
#endif
	    (ncp->nc_nlen > 2 ||
	    (ncp->nc_nlen > 1 && ncp->nc_name[1] != '.') ||
	    (/* ncp->nc_nlen > 0 && */ ncp->nc_name[0] != '.'))) {
		nvcpp = &ncvhashtbl[NCVHASH(vp)];
		LIST_INSERT_HEAD(nvcpp, ncp, nc_vhash);
	}
	mutex_exit(&ncp->nc_lock);
	mutex_exit(namecache_lock);
}
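
/*
 * Illustrative sketch (not compiled): entering both result kinds.
 * A positive entry records that the name maps to vp; a negative entry
 * (vp == NULL) records that the name does not exist, optionally
 * flagged as a whiteout via ISWHITEOUT in cnflags.  The cnp variable
 * here is hypothetical.
 */
#if 0
	/* Positive: the name was found during a directory scan. */
	cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen, cnp->cn_flags);

	/* Negative: the name was not found; NULL vnode makes a neg entry. */
	cache_enter(dvp, NULL, cnp->cn_nameptr, cnp->cn_namelen,
	    cnp->cn_flags);
#endif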

/*
 * Name cache initialization, from vfs_init() when we are booting
 */
void
nchinit(void)
{
	int error;

	TAILQ_INIT(&nclruhead);
	namecache_cache = pool_cache_init(sizeof(struct namecache),
	    coherency_unit, 0, 0, "ncache", NULL, IPL_NONE, cache_ctor,
	    cache_dtor, NULL);
	KASSERT(namecache_cache != NULL);

	namecache_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	nchashtbl = hashinit(desiredvnodes, HASH_LIST, true, &nchash);
	ncvhashtbl =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &ncvhash);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &ncvhash);
#endif

	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, cache_thread,
	    NULL, NULL, "cachegc");
	if (error != 0)
		panic("nchinit %d", error);

	evcnt_attach_dynamic(&cache_ev_scan, EVCNT_TYPE_MISC, NULL,
	    "namecache", "entries scanned");
	evcnt_attach_dynamic(&cache_ev_gc, EVCNT_TYPE_MISC, NULL,
	    "namecache", "entries collected");
	evcnt_attach_dynamic(&cache_ev_over, EVCNT_TYPE_MISC, NULL,
	    "namecache", "over scan target");
	evcnt_attach_dynamic(&cache_ev_under, EVCNT_TYPE_MISC, NULL,
	    "namecache", "under scan target");
	evcnt_attach_dynamic(&cache_ev_forced, EVCNT_TYPE_MISC, NULL,
	    "namecache", "forced reclaims");
}

static int
cache_ctor(void *arg, void *obj, int flag)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_init(&ncp->nc_lock, MUTEX_DEFAULT, IPL_NONE);

	return 0;
}

static void
cache_dtor(void *arg, void *obj)
{
	struct namecache *ncp;

	ncp = obj;
	mutex_destroy(&ncp->nc_lock);
}

/*
 * Called once for each CPU in the system as attached.
 */
void
cache_cpu_init(struct cpu_info *ci)
{
	struct nchcpu *cpup;
	size_t sz;

	sz = roundup2(sizeof(*cpup), coherency_unit) + coherency_unit;
	cpup = kmem_zalloc(sz, KM_SLEEP);
	cpup = (void *)roundup2((uintptr_t)cpup, coherency_unit);
	mutex_init(&cpup->cpu_lock, MUTEX_DEFAULT, IPL_NONE);
	ci->ci_data.cpu_nch = cpup;
}
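
/*
 * Illustrative note on the allocation above: it over-allocates by one
 * coherency unit so the pointer can be rounded up to a cache line
 * boundary, keeping each CPU's lock and stats on their own line.
 * For example, with coherency_unit == 64, roundup2() advances a raw
 * pointer ending in 0x...28 to the next 0x...40 boundary; the extra
 * padding guarantees the rounded pointer still has sz bytes of usable
 * space behind it.  (The concrete numbers are just an example.)
 */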

/*
 * Name cache reinitialization, for when the maximum number of vnodes increases.
 */
void
nchreinit(void)
{
	struct namecache *ncp;
	struct nchashhead *oldhash1, *hash1;
	struct ncvhashhead *oldhash2, *hash2;
	u_long i, oldmask1, oldmask2, mask1, mask2;

	hash1 = hashinit(desiredvnodes, HASH_LIST, true, &mask1);
	hash2 =
#ifdef NAMECACHE_ENTER_REVERSE
	    hashinit(desiredvnodes, HASH_LIST, true, &mask2);
#else
	    hashinit(desiredvnodes/8, HASH_LIST, true, &mask2);
#endif
	mutex_enter(namecache_lock);
	cache_lock_cpus();
	oldhash1 = nchashtbl;
	oldmask1 = nchash;
	nchashtbl = hash1;
	nchash = mask1;
	oldhash2 = ncvhashtbl;
	oldmask2 = ncvhash;
	ncvhashtbl = hash2;
	ncvhash = mask2;
	for (i = 0; i <= oldmask1; i++) {
		while ((ncp = LIST_FIRST(&oldhash1[i])) != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
	}
	for (i = 0; i <= oldmask2; i++) {
		while ((ncp = LIST_FIRST(&oldhash2[i])) != NULL) {
			LIST_REMOVE(ncp, nc_vhash);
			ncp->nc_vhash.le_prev = NULL;
		}
	}
	cache_unlock_cpus();
	mutex_exit(namecache_lock);
	hashdone(oldhash1, HASH_LIST, oldmask1);
	hashdone(oldhash2, HASH_LIST, oldmask2);
}

/*
 * Cache flush, a particular vnode; called when a vnode is renamed to
 * hide entries that would now be invalid.
 */
void
cache_purge1(struct vnode *vp, const char *name, size_t namelen, int flags)
{
	struct namecache *ncp, *ncnext;

	mutex_enter(namecache_lock);
	if (flags & PURGE_PARENTS) {
		for (ncp = LIST_FIRST(&vp->v_nclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_vlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (flags & PURGE_CHILDREN) {
		for (ncp = LIST_FIRST(&vp->v_dnclist); ncp != NULL;
		    ncp = ncnext) {
			ncnext = LIST_NEXT(ncp, nc_dvlist);
			mutex_enter(&ncp->nc_lock);
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	if (name != NULL) {
		ncp = cache_lookup_entry(vp, name, namelen);
		if (ncp) {
			cache_invalidate(ncp);
			mutex_exit(&ncp->nc_lock);
			cache_disassociate(ncp);
		}
	}
	mutex_exit(namecache_lock);
}

/*
 * Cache flush, a whole filesystem; called when a file system is unmounted
 * to remove entries that would now be invalid.
 */
void
cache_purgevfs(struct mount *mp)
{
	struct namecache *ncp, *nxtcp;

	mutex_enter(namecache_lock);
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL && ncp->nc_dvp->v_mount == mp) {
			/* Free the resources we had. */
			cache_invalidate(ncp);
			cache_disassociate(ncp);
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_reclaim();
	mutex_exit(namecache_lock);
}

/*
 * Scan global list invalidating entries until we meet a preset target.
 * Prefer to invalidate entries that have not scored a hit within
 * cache_hottime seconds.  We sort the LRU list only for this routine's
 * benefit.
 */
static void
cache_prune(int incache, int target)
{
	struct namecache *ncp, *nxtcp, *sentinel;
	int items, recent, tryharder;

	KASSERT(mutex_owned(namecache_lock));

	items = 0;
	tryharder = 0;
	recent = hardclock_ticks - hz * cache_hottime;
	sentinel = NULL;
	for (ncp = TAILQ_FIRST(&nclruhead); ncp != NULL; ncp = nxtcp) {
		if (incache <= target)
			break;
		items++;
		nxtcp = TAILQ_NEXT(ncp, nc_lru);
		if (ncp == sentinel) {
			/*
			 * If we looped back on ourself, then ignore
			 * recent entries and purge whatever we find.
			 */
			tryharder = 1;
		}
		if (ncp->nc_dvp == NULL)
			continue;
		if (!tryharder && (ncp->nc_hittime - recent) > 0) {
			if (sentinel == NULL)
				sentinel = ncp;
			TAILQ_REMOVE(&nclruhead, ncp, nc_lru);
			TAILQ_INSERT_TAIL(&nclruhead, ncp, nc_lru);
			continue;
		}
		mutex_enter(&ncp->nc_lock);
		if (ncp->nc_dvp != NULL) {
			cache_invalidate(ncp);
			cache_disassociate(ncp);
			incache--;
		}
		mutex_exit(&ncp->nc_lock);
	}
	cache_ev_scan.ev_count += items;
}

/*
 * Collect dead cache entries from all CPUs and garbage collect.
 */
static void
cache_reclaim(void)
{
	struct namecache *ncp, *next;
	int items;

	KASSERT(mutex_owned(namecache_lock));

	/*
	 * If the number of extant entries not awaiting garbage collection
	 * exceeds the high water mark, then reclaim stale entries until we
	 * reach our low water mark.
	 */
	items = numcache - cache_gcpend;
	if (items > (uint64_t)desiredvnodes * cache_hiwat / 100) {
		cache_prune(items, (int)((uint64_t)desiredvnodes *
		    cache_lowat / 100));
		cache_ev_over.ev_count++;
	} else
		cache_ev_under.ev_count++;

	/*
	 * Stop forward lookup activity on all CPUs and garbage collect dead
	 * entries.
	 */
	cache_lock_cpus();
	ncp = cache_gcqueue;
	cache_gcqueue = NULL;
	items = cache_gcpend;
	cache_gcpend = 0;
	while (ncp != NULL) {
		next = ncp->nc_gcqueue;
		cache_disassociate(ncp);
		KASSERT(ncp->nc_dvp == NULL);
		if (ncp->nc_hash.le_prev != NULL) {
			LIST_REMOVE(ncp, nc_hash);
			ncp->nc_hash.le_prev = NULL;
		}
		pool_cache_put(namecache_cache, ncp);
		ncp = next;
	}
	cache_unlock_cpus();
	numcache -= items;
	cache_ev_gc.ev_count += items;
}
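
/*
 * Illustrative note on the water marks above: with the defaults
 * cache_hiwat = 98 and cache_lowat = 95, a system with
 * desiredvnodes = 100000 starts pruning only once more than 98000
 * entries (not counting those already queued for collection) exist,
 * and prunes down to 95000.  The intermediate arithmetic is done in
 * uint64_t so the multiplication cannot overflow an int.
 * (desiredvnodes = 100000 is just an example value.)
 */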

/*
 * Cache maintenance thread, awakening once per second to:
 *
 * => keep number of entries below the high water mark
 * => sort pseudo-LRU list
 * => garbage collect dead entries
 */
static void
cache_thread(void *arg)
{

	mutex_enter(namecache_lock);
	for (;;) {
		cache_reclaim();
		kpause("cachegc", false, hz, namecache_lock);
	}
}

#ifdef DDB
void
namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
{
	struct vnode *dvp = NULL;
	struct namecache *ncp;

	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp && ncp->nc_dvp != NULL) {
			(*pr)("name %.*s\n", ncp->nc_nlen, ncp->nc_name);
			dvp = ncp->nc_dvp;
		}
	}
	if (dvp == NULL) {
		(*pr)("name not found\n");
		return;
	}
	vp = dvp;
	TAILQ_FOREACH(ncp, &nclruhead, nc_lru) {
		if (ncp->nc_vp == vp) {
			(*pr)("parent %.*s\n", ncp->nc_nlen, ncp->nc_name);
		}
	}
}
#endif

void
namecache_count_pass2(void)
{
	struct nchcpu *cpup = curcpu()->ci_data.cpu_nch;

	COUNT_UNL(cpup, ncs_pass2);
}

void
namecache_count_2passes(void)
{
	struct nchcpu *cpup = curcpu()->ci_data.cpu_nch;

	COUNT_UNL(cpup, ncs_2passes);
}

/*
 * Fetch the current values of the stats.  We return the most
 * recent values harvested into nchstats by cache_reclaim(), which
 * will be less than a second old.
 */
static int
cache_stat_sysctl(SYSCTLFN_ARGS)
{
	struct nchstats stats;
	struct nchcpu *my_cpup;
#ifdef CACHE_STATS_CURRENT
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
#endif	/* CACHE_STATS_CURRENT */

	if (oldp == NULL) {
		*oldlenp = sizeof(stats);
		return 0;
	}

	if (*oldlenp < sizeof(stats)) {
		*oldlenp = 0;
		return 0;
	}

	/*
	 * Take this CPU's per-cpu lock to hold off cache_reclaim()
	 * from doing a stats update while doing minimal damage to
	 * concurrent operations.
	 */
	sysctl_unlock();
	my_cpup = curcpu()->ci_data.cpu_nch;
	mutex_enter(&my_cpup->cpu_lock);
	stats = nchstats;
#ifdef CACHE_STATS_CURRENT
	for (CPU_INFO_FOREACH(cii, ci)) {
		struct nchcpu *cpup = ci->ci_data.cpu_nch;

		ADD(stats, cpup, ncs_goodhits);
		ADD(stats, cpup, ncs_neghits);
		ADD(stats, cpup, ncs_badhits);
		ADD(stats, cpup, ncs_falsehits);
		ADD(stats, cpup, ncs_miss);
		ADD(stats, cpup, ncs_long);
		ADD(stats, cpup, ncs_pass2);
		ADD(stats, cpup, ncs_2passes);
		ADD(stats, cpup, ncs_revhits);
		ADD(stats, cpup, ncs_revmiss);
	}
#endif	/* CACHE_STATS_CURRENT */
	mutex_exit(&my_cpup->cpu_lock);
	sysctl_relock();

	*oldlenp = sizeof(stats);
	return sysctl_copyout(l, &stats, oldp, sizeof(stats));
}

SYSCTL_SETUP(sysctl_cache_stat_setup, "vfs.namecache_stats subtree setup")
{
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_STRUCT, "namecache_stats",
		       SYSCTL_DESCR("namecache statistics"),
		       cache_stat_sysctl, 0, NULL, 0,
		       CTL_VFS, CTL_CREATE, CTL_EOL);
}
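
/*
 * Illustrative sketch (userland, not part of this file): reading the
 * statistics exported above.  The node is created under CTL_VFS with a
 * dynamic id, so it is most easily reached by name.  This assumes the
 * userland-visible struct nchstats definition matches the kernel's.
 */
#if 0
#include <sys/param.h>
#include <sys/sysctl.h>
#include <stdio.h>

int
main(void)
{
	struct nchstats st;
	size_t len = sizeof(st);

	if (sysctlbyname("vfs.namecache_stats", &st, &len, NULL, 0) == -1)
		return 1;
	printf("goodhits %llu neghits %llu miss %llu\n",
	    (unsigned long long)st.ncs_goodhits,
	    (unsigned long long)st.ncs_neghits,
	    (unsigned long long)st.ncs_miss);
	return 0;
}
#endif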
1262