/*	$NetBSD: vfs_cache.c,v 1.155 2023/09/09 18:27:59 ad Exp $	*/

/*-
 * Copyright (c) 2008, 2019, 2020, 2023 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_cache.c	8.3 (Berkeley) 8/22/94
 */

/*
 * Name caching:
 *
 *      Names found by directory scans are retained in a cache for future
 *      reference.  It is managed LRU, so frequently used names will hang
 *      around.  The cache is indexed by hash value obtained from the name.
 *
 *      The name cache is the brainchild of Robert Elz and was introduced in
 *      4.3BSD.  See "Using gprof to Tune the 4.2BSD Kernel", Marshall Kirk
 *      McKusick, May 21 1984.
 *
 * Data structures:
 *
 *      Most Unix namecaches very sensibly use a global hash table to index
 *      names.  The global hash table works well, but can cause concurrency
 *      headaches for the kernel hacker.  In the NetBSD 10.0 implementation
 *      we are not sensible, and use a per-directory data structure to index
 *      names, but the cache otherwise functions the same.
 *
 *      The index is a red-black tree.  It should not be difficult to
 *      experiment with other types of index, however note that a tree
 *      can trivially be made to support lockless lookup.
 *
 *      Each cached name is stored in a struct namecache, along with a
 *      pointer to the associated vnode (nc_vp).  Names longer than a
 *      maximum length of NCHNAMLEN are allocated with kmem_alloc(); they
 *      occur infrequently, and names shorter than this are stored directly
 *      in struct namecache.  If it is a "negative" entry (i.e. for a name
 *      that is known NOT to exist), the vnode pointer will be NULL.
 *
 *      In practice this implementation is not any slower than the hash
 *      table that preceded it and in some cases it significantly
 *      outperforms the hash table.  Some reasons why this might be:
 *
 *      - natural partitioning provided by the file system structure, which
 *        the prior implementation discarded (global hash table).
 *      - worst case tree traversal of O(log n); the hash table could have
 *        many collisions.
 *      - minimized cache misses & total L2/L3 CPU cache footprint; struct
 *        namecache and vnode_impl_t are laid out to keep cache footprint
 *        minimal in the lookup path; no hash table buckets to cache.
 *      - minimized number of conditionals & string comparisons.
 *
 *      For a directory with 3 cached names for 3 distinct vnodes, the
 *      various vnodes and namecache structs would be connected like this
 *      (the root is at the bottom of the diagram):
 *
 *            ...
 *             ^
 *             |- vi_nc_tree
 *             |
 *        +----o----+               +---------+               +---------+
 *        |  VDIR   |               |  VCHR   |               |  VREG   |
 *        |  vnode  o-----+         |  vnode  o-----+         |  vnode  o------+
 *        +---------+     |         +---------+     |         +---------+      |
 *             ^          |              ^          |              ^           |
 *             |- nc_vp   |- vi_nc_list  |- nc_vp   |- vi_nc_list  |- nc_vp    |
 *             |          |              |          |              |           |
 *        +----o----+     |         +----o----+     |         +----o----+      |
 *    +---onamecache|<----+     +---onamecache|<----+     +---onamecache|<-----+
 *    |   +---------+           |   +---------+           |   +---------+
 *    |        ^                |        ^                |        ^
 *    |        |                |        |                |        |
 *    |        | +-----------------------+                |        |
 *    |-nc_dvp | +-------------------------------------------------+
 *    |        |/- vi_nc_tree   |                         |
 *    |        |                |- nc_dvp                 |- nc_dvp
 *    |   +----o----+           |                         |
 *    +-->|  VDIR   |<----------+                         |
 *        |  vnode  |<------------------------------------+
 *        +---------+
 *
 *        START HERE
 *
 * Replacement:
 *
 *      As the cache becomes full, old and unused entries are purged as new
 *      entries are added.  The synchronization overhead in maintaining a
 *      strict ordering would be prohibitive, so the VM system's "clock" or
 *      "second chance" page replacement algorithm is aped here.  New
 *      entries go to the tail of the active list.  After they age out and
 *      reach the head of the list, they are moved to the tail of the
 *      inactive list.  Any use of the deactivated cache entry reactivates
 *      it, saving it from impending doom; if not reactivated, the entry
 *      eventually reaches the head of the inactive list and is purged.
 *
 * Concurrency:
 *
 *      From a performance perspective, cache_lookup(nameiop == LOOKUP) is
 *      what really matters; insertion of new entries with cache_enter() is
 *      comparatively infrequent, and overshadowed by the cost of expensive
 *      file system metadata operations (which may involve disk I/O).  We
 *      therefore want to keep everything as simple as possible in the
 *      lookup path.
 *
 *      struct namecache is mostly stable except for list and tree related
 *      entries, changes to which don't affect the cached name or vnode.
 *      For changes to name+vnode, entries are purged in preference to
 *      modifying them.
 *
 *      Read access to namecache entries is made via tree, list, or LRU
 *      list.  A lock corresponding to the direction of access should be
 *      held.  See definition of "struct namecache" in src/sys/namei.src,
 *      and the definition of "struct vnode" for the particulars.
 *
 *      Per-CPU statistics and LRU list totals are read unlocked, since
 *      an approximate value is OK.  We maintain 32-bit sized per-CPU
 *      counters and 64-bit global counters under the theory that 32-bit
 *      sized counters are less likely to be hosed by nonatomic increment
 *      (on 32-bit platforms).
 *
 *      The lock order is:
 *
 *      1) vi->vi_nc_lock       (tree or parent -> child direction,
 *                               used during forward lookup)
 *
 *      2) vi->vi_nc_listlock   (list or child -> parent direction,
 *                               used during reverse lookup)
 *
 *      3) cache_lru_lock       (LRU list direction, used during reclaim)
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_cache.c,v 1.155 2023/09/09 18:27:59 ad Exp $");

#define __NAMECACHE_PRIVATE
#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#include "opt_dtrace.h"
#endif

#include <sys/param.h>
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/callout.h>
#include <sys/cpu.h>
#include <sys/errno.h>
#include <sys/evcnt.h>
#include <sys/hash.h>
#include <sys/kernel.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/pool.h>
#include <sys/sdt.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/time.h>
#include <sys/vnode_impl.h>

#include <miscfs/genfs/genfs.h>

/*
 * Assert that data structure layout hasn't changed unintentionally.
 */
#ifdef _LP64
CTASSERT(sizeof(struct namecache) == 128);
#else
CTASSERT(sizeof(struct namecache) == 64);
#endif
CTASSERT(NC_NLEN_MASK >= MAXPATHLEN);

static void cache_activate(struct namecache *);
static void cache_update_stats(void *);
static int cache_compare_nodes(void *, const void *, const void *);
static void cache_deactivate(void);
static void cache_reclaim(void);
static int cache_stat_sysctl(SYSCTLFN_ARGS);

/*
 * Global pool cache.
 */
static pool_cache_t cache_pool __read_mostly;

/*
 * LRU replacement.
 */
enum cache_lru_id {
        LRU_ACTIVE,
        LRU_INACTIVE,
        LRU_COUNT
};

static struct {
        TAILQ_HEAD(, namecache) list[LRU_COUNT];
        u_int count[LRU_COUNT];
} cache_lru __cacheline_aligned;

static kmutex_t cache_lru_lock __cacheline_aligned;

/*
 * Cache effectiveness statistics.  nchstats holds system-wide total.
 */
struct nchstats nchstats;
struct nchstats_percpu _NAMEI_CACHE_STATS(uint32_t);
struct nchcpu {
        struct nchstats_percpu cur;
        struct nchstats_percpu last;
};
static callout_t cache_stat_callout;
static kmutex_t cache_stat_lock __cacheline_aligned;

#define COUNT(f) do { \
        lwp_t *l = curlwp; \
        KPREEMPT_DISABLE(l); \
        struct nchcpu *nchcpu = curcpu()->ci_data.cpu_nch; \
        nchcpu->cur.f++; \
        KPREEMPT_ENABLE(l); \
} while (/* CONSTCOND */ 0)

#define UPDATE(nchcpu, f) do { \
        uint32_t cur = atomic_load_relaxed(&nchcpu->cur.f); \
        nchstats.f += (uint32_t)(cur - nchcpu->last.f); \
        nchcpu->last.f = cur; \
} while (/* CONSTCOND */ 0)
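
/*
 * Illustrative sketch only (not compiled): how the COUNT()/UPDATE() pair
 * is intended to combine.  Lookups bump the 32-bit per-CPU counters with
 * COUNT(); a periodic callout is then assumed to fold the deltas into the
 * 64-bit system-wide nchstats with UPDATE(), roughly like this:
 */
#if 0
        struct nchcpu *nchcpu;
        CPU_INFO_ITERATOR cii;
        struct cpu_info *ci;

        mutex_enter(&cache_stat_lock);
        for (CPU_INFO_FOREACH(cii, ci)) {
                nchcpu = ci->ci_data.cpu_nch;
                UPDATE(nchcpu, ncs_goodhits);   /* ...and so on, per field */
        }
        mutex_exit(&cache_stat_lock);
#endif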

/*
 * Tunables.  cache_maxlen replaces the historical doingcache:
 * set it zero to disable caching for debugging purposes.
 */
int cache_lru_maxdeact __read_mostly = 2;       /* max # to deactivate */
int cache_lru_maxscan __read_mostly = 64;       /* max # to scan/reclaim */
int cache_maxlen __read_mostly = NC_NLEN_MASK;  /* max name length to cache */
int cache_stat_interval __read_mostly = 300;    /* in seconds */

/*
 * sysctl stuff.
 */
static struct sysctllog *cache_sysctllog;

/*
 * This is a dummy name that cannot usually occur anywhere in the cache nor
 * file system.  It's used when caching the root vnode of mounted file
 * systems.  The name is attached to the directory that the file system is
 * mounted on.
 */
static const char cache_mp_name[] = "";
static const int cache_mp_nlen = sizeof(cache_mp_name) - 1;

/*
 * Red-black tree stuff.
 */
static const rb_tree_ops_t cache_rbtree_ops = {
        .rbto_compare_nodes = cache_compare_nodes,
        .rbto_compare_key = cache_compare_nodes,
        .rbto_node_offset = offsetof(struct namecache, nc_tree),
        .rbto_context = NULL
};

/*
 * dtrace probes.
 */
SDT_PROBE_DEFINE1(vfs, namecache, invalidate, done, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, parents, "struct vnode *");
SDT_PROBE_DEFINE1(vfs, namecache, purge, children, "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, purge, name, "char *", "size_t");
SDT_PROBE_DEFINE1(vfs, namecache, purge, vfs, "struct mount *");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, hit, "struct vnode *",
    "char *", "size_t");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, miss, "struct vnode *",
    "char *", "size_t");
SDT_PROBE_DEFINE3(vfs, namecache, lookup, toolong, "struct vnode *",
    "char *", "size_t");
SDT_PROBE_DEFINE2(vfs, namecache, revlookup, success, "struct vnode *",
    "struct vnode *");
SDT_PROBE_DEFINE2(vfs, namecache, revlookup, fail, "struct vnode *",
    "int");
SDT_PROBE_DEFINE2(vfs, namecache, prune, done, "int", "int");
SDT_PROBE_DEFINE3(vfs, namecache, enter, toolong, "struct vnode *",
    "char *", "size_t");
SDT_PROBE_DEFINE3(vfs, namecache, enter, done, "struct vnode *",
    "char *", "size_t");

/*
 * rbtree: compare two nodes.
 */
static int
cache_compare_nodes(void *context, const void *n1, const void *n2)
{
        const struct namecache *nc1 = n1;
        const struct namecache *nc2 = n2;

        if (nc1->nc_key < nc2->nc_key) {
                return -1;
        }
        if (nc1->nc_key > nc2->nc_key) {
                return 1;
        }
        KASSERT(NC_NLEN(nc1) == NC_NLEN(nc2));
        return memcmp(nc1->nc_name, nc2->nc_name, NC_NLEN(nc1));
}

/*
 * Compute a key value for the given name.  The name length is encoded in
 * the key value to try and improve uniqueness, and so that length doesn't
 * need to be compared separately for string comparisons.
 */
static uintptr_t
cache_key(const char *name, size_t nlen)
{
        uintptr_t key;

        KASSERT((nlen & ~NC_NLEN_MASK) == 0);

        key = hash32_buf(name, nlen, HASH32_STR_INIT);
        return (key << NC_NLEN_BITS) | (uintptr_t)nlen;
}
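
/*
 * For illustration (not compiled): the length lives in the low bits of
 * the key, so two names of different lengths can never produce equal
 * keys, and the length can be recovered without touching the string.
 * This assumes NC_NLEN_MASK == (1 << NC_NLEN_BITS) - 1:
 */
#if 0
        uintptr_t key = cache_key("usr", 3);
        KASSERT((key & NC_NLEN_MASK) == 3);
#endif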

/*
 * Remove an entry from the cache.  vi_nc_lock must be held, and if dir2node
 * is true, then we're locking in the conventional direction and the list
 * lock will be acquired when removing the entry from the vnode list.
 */
static void
cache_remove(struct namecache *ncp, const bool dir2node)
{
        struct vnode *vp, *dvp = ncp->nc_dvp;
        vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
        size_t namelen = NC_NLEN(ncp);

        KASSERT(rw_write_held(&dvi->vi_nc_lock));
        KASSERT(cache_key(ncp->nc_name, namelen) == ncp->nc_key);
        KASSERT(rb_tree_find_node(&dvi->vi_nc_tree, ncp) == ncp);

        SDT_PROBE(vfs, namecache, invalidate, done, ncp, 0, 0, 0, 0);

        /*
         * Remove from the vnode's list.  This excludes cache_revlookup(),
         * and then it's safe to remove from the LRU lists.
         */
        if ((vp = ncp->nc_vp) != NULL) {
                vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
                if (__predict_true(dir2node)) {
                        rw_enter(&vi->vi_nc_listlock, RW_WRITER);
                        TAILQ_REMOVE(&vi->vi_nc_list, ncp, nc_list);
                        rw_exit(&vi->vi_nc_listlock);
                } else {
                        TAILQ_REMOVE(&vi->vi_nc_list, ncp, nc_list);
                }
        }

        /* Remove from the directory's rbtree. */
        rb_tree_remove_node(&dvi->vi_nc_tree, ncp);

        /* Remove from the LRU lists. */
        mutex_enter(&cache_lru_lock);
        TAILQ_REMOVE(&cache_lru.list[ncp->nc_lrulist], ncp, nc_lru);
        cache_lru.count[ncp->nc_lrulist]--;
        mutex_exit(&cache_lru_lock);

        /* Finally, free it. */
        if (namelen > NCHNAMLEN) {
                size_t sz = offsetof(struct namecache, nc_name[namelen]);
                kmem_free(ncp, sz);
        } else {
                pool_cache_put(cache_pool, ncp);
        }
}

/*
 * Find a single cache entry and return it.  vi_nc_lock must be held.
 */
static struct namecache * __noinline
cache_lookup_entry(struct vnode *dvp, const char *name, size_t namelen,
    uintptr_t key)
{
        vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
        struct rb_node *node = dvi->vi_nc_tree.rbt_root;
        struct namecache *ncp;
        enum cache_lru_id lrulist;
        int diff;

        KASSERT(namelen <= MAXPATHLEN);
        KASSERT(rw_lock_held(&dvi->vi_nc_lock));

        /*
         * Search the RB tree for the key.  This is an inlined lookup
         * tailored for exactly what's needed here (64-bit key and so on)
         * that is quite a bit faster than using rb_tree_find_node().
         *
         * For a matching key memcmp() needs to be called once to confirm
         * that the correct name has been found.  Very rarely there will be
         * a key value collision and the search will continue.
         */
        for (;;) {
                if (__predict_false(RB_SENTINEL_P(node))) {
                        return NULL;
                }
                ncp = (struct namecache *)node;
                KASSERT((void *)&ncp->nc_tree == (void *)ncp);
                KASSERT(ncp->nc_dvp == dvp);
                if (ncp->nc_key == key) {
                        KASSERT(NC_NLEN(ncp) == namelen);
                        diff = memcmp(ncp->nc_name, name, namelen);
                        if (__predict_true(diff == 0)) {
                                break;
                        }
                        node = node->rb_nodes[diff < 0];
                } else {
                        node = node->rb_nodes[ncp->nc_key < key];
                }
        }

        /*
         * If the entry is on the wrong LRU list, requeue it.  This is an
         * unlocked check, but it will rarely be wrong and even then there
         * will be no harm caused.
         */
        lrulist = atomic_load_relaxed(&ncp->nc_lrulist);
        if (__predict_false(lrulist != LRU_ACTIVE)) {
                cache_activate(ncp);
        }
        return ncp;
}

/*
 * Look for the name in the cache.  We don't do this
 * if the segment name is long, simply so the cache can avoid
 * holding long names (which would either waste space, or
 * add greatly to the complexity).
 *
 * Lookup is called with DVP pointing to the directory to search,
 * and CNP providing the name of the entry being sought: cn_nameptr
 * is the name, cn_namelen is its length, and cn_flags is the flags
 * word from the namei operation.
 *
 * DVP must be locked.
 *
 * There are three possible non-error return states:
 *    1. Nothing was found in the cache.  Nothing is known about
 *       the requested name.
 *    2. A negative entry was found in the cache, meaning that the
 *       requested name definitely does not exist.
 *    3. A positive entry was found in the cache, meaning that the
 *       requested name does exist and that we are providing the
 *       vnode.
 * In these cases the results are:
 *    1. 0 returned; VN is set to NULL.
 *    2. 1 returned; VN is set to NULL.
 *    3. 1 returned; VN is set to the vnode found.
 *
 * The additional result argument ISWHT is set to zero, unless a
 * negative entry is found that was entered as a whiteout, in which
 * case ISWHT is set to one.
 *
 * The ISWHT_RET argument pointer may be null.  In this case an
 * assertion is made that the whiteout flag is not set.  File systems
 * that do not support whiteouts can/should do this.
 *
 * Filesystems that do support whiteouts should add ISWHITEOUT to
 * cnp->cn_flags if ISWHT comes back nonzero.
 *
 * When a vnode is returned, it is locked, as per the vnode lookup
 * locking protocol.
 *
 * There is no way for this function to fail, in the sense of
 * generating an error that requires aborting the namei operation.
 *
 * (Prior to October 2012, this function returned an integer status,
 * and a vnode, and mucked with the flags word in CNP for whiteouts.
 * The integer status was -1 for "nothing found", ENOENT for "a
 * negative entry found", 0 for "a positive entry found", and possibly
 * other errors, and the value of VN might or might not have been set
 * depending on what error occurred.)
 */
bool
cache_lookup(struct vnode *dvp, const char *name, size_t namelen,
    uint32_t nameiop, uint32_t cnflags,
    int *iswht_ret, struct vnode **vn_ret)
{
        vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
        struct namecache *ncp;
        struct vnode *vp;
        uintptr_t key;
        int error;
        bool hit;
        krw_t op;

        KASSERT(namelen != cache_mp_nlen || name == cache_mp_name);

        /* Establish default result values */
        if (iswht_ret != NULL) {
                *iswht_ret = 0;
        }
        *vn_ret = NULL;

        if (__predict_false(namelen > cache_maxlen)) {
                SDT_PROBE(vfs, namecache, lookup, toolong, dvp,
                    name, namelen, 0, 0);
                COUNT(ncs_long);
                return false;
        }

        /* Compute the key up front - don't need the lock. */
        key = cache_key(name, namelen);

        /* Could the entry be purged below? */
        if ((cnflags & ISLASTCN) != 0 &&
            ((cnflags & MAKEENTRY) == 0 || nameiop == CREATE)) {
                op = RW_WRITER;
        } else {
                op = RW_READER;
        }

        /* Now look for the name. */
        rw_enter(&dvi->vi_nc_lock, op);
        ncp = cache_lookup_entry(dvp, name, namelen, key);
        if (__predict_false(ncp == NULL)) {
                rw_exit(&dvi->vi_nc_lock);
                COUNT(ncs_miss);
                SDT_PROBE(vfs, namecache, lookup, miss, dvp,
                    name, namelen, 0, 0);
                return false;
        }
        if (__predict_false((cnflags & MAKEENTRY) == 0)) {
                /*
                 * Last component and we are renaming or deleting,
                 * the cache entry is invalid, or otherwise don't
                 * want cache entry to exist.
                 */
                KASSERT((cnflags & ISLASTCN) != 0);
                cache_remove(ncp, true);
                rw_exit(&dvi->vi_nc_lock);
                COUNT(ncs_badhits);
                return false;
        }
        if ((vp = ncp->nc_vp) == NULL) {
                if (iswht_ret != NULL) {
                        /*
                         * Restore the ISWHITEOUT flag saved earlier.
                         */
                        *iswht_ret = ncp->nc_whiteout;
                } else {
                        KASSERT(!ncp->nc_whiteout);
                }
                if (nameiop == CREATE && (cnflags & ISLASTCN) != 0) {
                        /*
                         * Last component and we are preparing to create
                         * the named object, so flush the negative cache
                         * entry.
                         */
                        COUNT(ncs_badhits);
                        cache_remove(ncp, true);
                        hit = false;
                } else {
                        COUNT(ncs_neghits);
                        SDT_PROBE(vfs, namecache, lookup, hit, dvp, name,
                            namelen, 0, 0);
                        /* found neg entry; vn is already null from above */
                        hit = true;
                }
                rw_exit(&dvi->vi_nc_lock);
                return hit;
        }
        error = vcache_tryvget(vp);
        rw_exit(&dvi->vi_nc_lock);
        if (error) {
                KASSERT(error == EBUSY);
                /*
                 * This vnode is being cleaned out.
                 * XXX badhits?
                 */
                COUNT(ncs_falsehits);
                return false;
        }

        COUNT(ncs_goodhits);
        SDT_PROBE(vfs, namecache, lookup, hit, dvp, name, namelen, 0, 0);
        /* found it */
        *vn_ret = vp;
        return true;
}
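
/*
 * Illustrative sketch only (not compiled): the shape of a typical
 * cache_lookup() consumer in a file system's lookup VOP, following the
 * contract documented above.  Variable names are assumptions made for
 * the example, not part of this API.
 */
#if 0
        int iswht = 0;

        if (cache_lookup(dvp, cnp->cn_nameptr, cnp->cn_namelen,
            cnp->cn_nameiop, cnp->cn_flags, &iswht, vpp)) {
                if (*vpp == NULL) {
                        /* Negative hit: name is known not to exist. */
                        if (iswht)
                                cnp->cn_flags |= ISWHITEOUT;
                        return ENOENT;
                }
                return 0;       /* positive hit, *vpp returned */
        }
        /* Miss: scan the directory, then cache_enter() the result. */
#endif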

/*
 * Version of the above without the nameiop argument, for NFS.
 */
bool
cache_lookup_raw(struct vnode *dvp, const char *name, size_t namelen,
    uint32_t cnflags,
    int *iswht_ret, struct vnode **vn_ret)
{

        return cache_lookup(dvp, name, namelen, LOOKUP, cnflags | MAKEENTRY,
            iswht_ret, vn_ret);
}

/*
 * Used by namei() to walk down a path, component by component, by looking
 * up names in the cache.  The node locks are chained along the way: a
 * parent's lock is not dropped until the child's is acquired.
 */
bool
cache_lookup_linked(struct vnode *dvp, const char *name, size_t namelen,
    struct vnode **vn_ret, krwlock_t **plock,
    kauth_cred_t cred)
{
        vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
        struct namecache *ncp;
        krwlock_t *oldlock, *newlock;
        struct vnode *vp;
        uintptr_t key;
        int error;

        KASSERT(namelen != cache_mp_nlen || name == cache_mp_name);

        /* If disabled, or file system doesn't support this, bail out. */
        if (__predict_false((dvp->v_mount->mnt_iflag & IMNT_NCLOOKUP) == 0)) {
                return false;
        }

        if (__predict_false(namelen > cache_maxlen)) {
                COUNT(ncs_long);
                return false;
        }

        /* Compute the key up front - don't need the lock. */
        key = cache_key(name, namelen);

        /*
         * Acquire the directory lock.  Once we have that, we can drop the
         * previous one (if any).
         *
         * The two lock holds mean that the directory can't go away while
         * here: the directory must be purged with cache_purge() before
         * being freed, and both parent & child's vi_nc_lock must be taken
         * before that point is passed.
         *
         * However if there's no previous lock, like at the root of the
         * chain, then "dvp" must be referenced to prevent dvp going away
         * before we get its lock.
         *
         * Note that the two locks can be the same if looking up a dot, for
         * example: /usr/bin/.  If looking up the parent (..) we can't wait
         * on the lock as child -> parent is the wrong direction.
         */
        if (*plock != &dvi->vi_nc_lock) {
                oldlock = *plock;
                newlock = &dvi->vi_nc_lock;
                if (!rw_tryenter(&dvi->vi_nc_lock, RW_READER)) {
                        return false;
                }
        } else {
                oldlock = NULL;
                newlock = NULL;
                if (*plock == NULL) {
                        KASSERT(vrefcnt(dvp) > 0);
                }
        }

        /*
         * First up check if the user is allowed to look up files in this
         * directory.
         */
        if (cred != FSCRED) {
                if (dvi->vi_nc_mode == VNOVAL) {
                        if (newlock != NULL) {
                                rw_exit(newlock);
                        }
                        return false;
                }
                KASSERT(dvi->vi_nc_uid != VNOVAL);
                KASSERT(dvi->vi_nc_gid != VNOVAL);
                error = kauth_authorize_vnode(cred,
                    KAUTH_ACCESS_ACTION(VEXEC,
                    dvp->v_type, dvi->vi_nc_mode & ALLPERMS), dvp, NULL,
                    genfs_can_access(dvp, cred, dvi->vi_nc_uid,
                    dvi->vi_nc_gid, dvi->vi_nc_mode & ALLPERMS, NULL, VEXEC));
                if (error != 0) {
                        if (newlock != NULL) {
                                rw_exit(newlock);
                        }
                        COUNT(ncs_denied);
                        return false;
                }
        }

        /*
         * Now look for a matching cache entry.
         */
        ncp = cache_lookup_entry(dvp, name, namelen, key);
        if (__predict_false(ncp == NULL)) {
                if (newlock != NULL) {
                        rw_exit(newlock);
                }
                COUNT(ncs_miss);
                SDT_PROBE(vfs, namecache, lookup, miss, dvp,
                    name, namelen, 0, 0);
                return false;
        }
        if ((vp = ncp->nc_vp) == NULL) {
                /* found negative entry; vn is already null from above */
                KASSERT(namelen != cache_mp_nlen);
                KASSERT(name != cache_mp_name);
                COUNT(ncs_neghits);
        } else {
                COUNT(ncs_goodhits);    /* XXX can be "badhits" */
        }
        SDT_PROBE(vfs, namecache, lookup, hit, dvp, name, namelen, 0, 0);

        /*
         * Return with the directory lock still held.  It will either be
         * returned to us with another call to cache_lookup_linked() when
         * looking up the next component, or the caller will release it
         * manually when finished.
         */
        if (oldlock) {
                rw_exit(oldlock);
        }
        if (newlock) {
                *plock = newlock;
        }
        *vn_ret = vp;
        return true;
}

/*
 * Scan cache looking for name of directory entry pointing at vp.
 * Will not search for "." or "..".
 *
 * If the lookup succeeds the vnode is referenced and stored in dvpp.
 *
 * If bufp is non-NULL, also place the name in the buffer which starts
 * at bufp, immediately before *bpp, and move bpp backwards to point
 * at the start of it.  (Yes, this is a little baroque, but it's done
 * this way to cater to the whims of getcwd).
 *
 * Returns 0 on success, -1 on cache miss, positive errno on failure.
 */
int
cache_revlookup(struct vnode *vp, struct vnode **dvpp, char **bpp, char *bufp,
    bool checkaccess, accmode_t accmode)
{
        vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
        struct namecache *ncp;
        enum cache_lru_id lrulist;
        struct vnode *dvp;
        int error, nlen;
        char *bp;

        KASSERT(vp != NULL);

        if (cache_maxlen == 0)
                goto out;

        rw_enter(&vi->vi_nc_listlock, RW_READER);
        if (checkaccess) {
                /*
                 * Check if the user is allowed to see.  NOTE: this is
                 * checking for access on the "wrong" directory.  getcwd()
                 * wants to see that there is access on every component
                 * along the way, not that there is access to any individual
                 * component.  Don't use this to check you can look in vp.
                 *
                 * I don't like it, I didn't come up with it, don't blame me!
                 */
                if (vi->vi_nc_mode == VNOVAL) {
                        rw_exit(&vi->vi_nc_listlock);
                        return -1;
                }
                KASSERT(vi->vi_nc_uid != VNOVAL);
                KASSERT(vi->vi_nc_gid != VNOVAL);
                error = kauth_authorize_vnode(kauth_cred_get(),
                    KAUTH_ACCESS_ACTION(VEXEC, vp->v_type, vi->vi_nc_mode &
                    ALLPERMS), vp, NULL, genfs_can_access(vp, curlwp->l_cred,
                    vi->vi_nc_uid, vi->vi_nc_gid, vi->vi_nc_mode & ALLPERMS,
                    NULL, accmode));
                if (error != 0) {
                        rw_exit(&vi->vi_nc_listlock);
                        COUNT(ncs_denied);
                        return EACCES;
                }
        }
        TAILQ_FOREACH(ncp, &vi->vi_nc_list, nc_list) {
                KASSERT(ncp->nc_vp == vp);
                KASSERT(ncp->nc_dvp != NULL);
                nlen = NC_NLEN(ncp);

                /*
                 * Ignore mountpoint entries.
                 */
                if (nlen == cache_mp_nlen) {
                        continue;
                }

                /*
                 * The queue is partially sorted.  Once we hit dots, nothing
                 * else remains but dots and dotdots, so bail out.
                 */
                if (ncp->nc_name[0] == '.') {
                        if (nlen == 1 ||
                            (nlen == 2 && ncp->nc_name[1] == '.')) {
                                break;
                        }
                }

                /*
                 * Record a hit on the entry.  This is an unlocked read but
                 * even if wrong it doesn't matter too much.
                 */
                lrulist = atomic_load_relaxed(&ncp->nc_lrulist);
                if (lrulist != LRU_ACTIVE) {
                        cache_activate(ncp);
                }

                if (bufp) {
                        bp = *bpp;
                        bp -= nlen;
                        if (bp <= bufp) {
                                *dvpp = NULL;
                                rw_exit(&vi->vi_nc_listlock);
                                SDT_PROBE(vfs, namecache, revlookup,
                                    fail, vp, ERANGE, 0, 0, 0);
                                return (ERANGE);
                        }
                        memcpy(bp, ncp->nc_name, nlen);
                        *bpp = bp;
                }

                dvp = ncp->nc_dvp;
                error = vcache_tryvget(dvp);
                rw_exit(&vi->vi_nc_listlock);
                if (error) {
                        KASSERT(error == EBUSY);
                        if (bufp)
                                (*bpp) += nlen;
                        *dvpp = NULL;
                        SDT_PROBE(vfs, namecache, revlookup, fail, vp,
                            error, 0, 0, 0);
                        return -1;
                }
                *dvpp = dvp;
                SDT_PROBE(vfs, namecache, revlookup, success, vp, dvp,
                    0, 0, 0);
                COUNT(ncs_revhits);
                return (0);
        }
        rw_exit(&vi->vi_nc_listlock);
        COUNT(ncs_revmiss);
 out:
        *dvpp = NULL;
        return (-1);
}
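
/*
 * Illustrative sketch only (not compiled): the getcwd-style calling
 * convention described above, walking towards the root and building the
 * path backwards in a caller-supplied buffer.  All names here are
 * assumptions for illustration; error handling and reference management
 * are elided.
 */
#if 0
        char *bp = bufp + bufsize;      /* names grow down from the end */

        *--bp = '\0';
        while (vp != rootvnode) {
                if (cache_revlookup(vp, &dvp, &bp, bufp, false, 0) != 0)
                        break;          /* miss (-1) or hard error */
                *--bp = '/';            /* separator before the name */
                vp = dvp;               /* dvp was referenced for us */
        }
#endif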

/*
 * Add an entry to the cache.
 */
void
cache_enter(struct vnode *dvp, struct vnode *vp,
    const char *name, size_t namelen, uint32_t cnflags)
{
        vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
        struct namecache *ncp, *oncp;
        int total;

        KASSERT(namelen != cache_mp_nlen || name == cache_mp_name);

        /* First, check whether we can/should add a cache entry. */
        if ((cnflags & MAKEENTRY) == 0 ||
            __predict_false(namelen > cache_maxlen)) {
                SDT_PROBE(vfs, namecache, enter, toolong, vp, name, namelen,
                    0, 0);
                return;
        }

        SDT_PROBE(vfs, namecache, enter, done, vp, name, namelen, 0, 0);

        /*
         * Reclaim some entries if over budget.  This is an unlocked check,
         * but it doesn't matter: we just need to catch up with things
         * eventually, and going over temporarily is harmless.
         */
        total = atomic_load_relaxed(&cache_lru.count[LRU_ACTIVE]);
        total += atomic_load_relaxed(&cache_lru.count[LRU_INACTIVE]);
        if (__predict_false(total > desiredvnodes)) {
                cache_reclaim();
        }

        /* Now allocate a fresh entry. */
        if (__predict_true(namelen <= NCHNAMLEN)) {
                ncp = pool_cache_get(cache_pool, PR_WAITOK);
        } else {
                size_t sz = offsetof(struct namecache, nc_name[namelen]);
                ncp = kmem_alloc(sz, KM_SLEEP);
        }

        /*
         * Fill in cache info.  For negative hits, save the ISWHITEOUT flag
         * so we can restore it later when the cache entry is used again.
         */
        ncp->nc_vp = vp;
        ncp->nc_dvp = dvp;
        ncp->nc_key = cache_key(name, namelen);
        ncp->nc_whiteout = ((cnflags & ISWHITEOUT) != 0);
        memcpy(ncp->nc_name, name, namelen);

        /*
         * Insert to the directory.  Concurrent lookups may race for a cache
         * entry.  If there's an entry there already, purge it.
         */
        rw_enter(&dvi->vi_nc_lock, RW_WRITER);
        oncp = rb_tree_insert_node(&dvi->vi_nc_tree, ncp);
        if (oncp != ncp) {
                KASSERT(oncp->nc_key == ncp->nc_key);
                KASSERT(NC_NLEN(oncp) == NC_NLEN(ncp));
                KASSERT(memcmp(oncp->nc_name, name, namelen) == 0);
                cache_remove(oncp, true);
                oncp = rb_tree_insert_node(&dvi->vi_nc_tree, ncp);
                KASSERT(oncp == ncp);
        }

        /*
         * With the directory lock still held, insert to the tail of the
         * ACTIVE LRU list (new) and take the opportunity to incrementally
         * balance the lists.
         */
        mutex_enter(&cache_lru_lock);
        ncp->nc_lrulist = LRU_ACTIVE;
        cache_lru.count[LRU_ACTIVE]++;
        TAILQ_INSERT_TAIL(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
        cache_deactivate();
        mutex_exit(&cache_lru_lock);

        /*
         * Finally, insert to the vnode and unlock.  With everything set up
         * it's safe to let cache_revlookup() see the entry.  Partially sort
         * the per-vnode list: dots go to back so cache_revlookup() doesn't
         * have to consider them.
         */
        if (vp != NULL) {
                vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
                rw_enter(&vi->vi_nc_listlock, RW_WRITER);
                if ((namelen == 1 && name[0] == '.') ||
                    (namelen == 2 && name[0] == '.' && name[1] == '.')) {
                        TAILQ_INSERT_TAIL(&vi->vi_nc_list, ncp, nc_list);
                } else {
                        TAILQ_INSERT_HEAD(&vi->vi_nc_list, ncp, nc_list);
                }
                rw_exit(&vi->vi_nc_listlock);
        }
        rw_exit(&dvi->vi_nc_lock);
}
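
/*
 * Illustrative sketch only (not compiled): how a file system's lookup
 * routine is assumed to feed the cache after a directory scan, covering
 * both the positive and the negative case.  scan_directory() is a
 * hypothetical helper, not part of this file.
 */
#if 0
        error = scan_directory(dvp, cnp, &vp);  /* hypothetical helper */
        if (error == 0) {
                /* Positive entry: name -> vp. */
                cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
                    cnp->cn_flags);
        } else if (error == ENOENT) {
                /* Negative entry: name known not to exist. */
                cache_enter(dvp, NULL, cnp->cn_nameptr, cnp->cn_namelen,
                    cnp->cn_flags);
        }
#endif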

/*
 * Set identity info in cache for a vnode.  We only care about directories
 * so ignore other updates.  The cached info may be marked invalid if the
 * inode has an ACL.
 */
void
cache_enter_id(struct vnode *vp, mode_t mode, uid_t uid, gid_t gid, bool valid)
{
        vnode_impl_t *vi = VNODE_TO_VIMPL(vp);

        if (vp->v_type == VDIR) {
                /* Grab both locks, for forward & reverse lookup. */
                rw_enter(&vi->vi_nc_lock, RW_WRITER);
                rw_enter(&vi->vi_nc_listlock, RW_WRITER);
                if (valid) {
                        vi->vi_nc_mode = mode;
                        vi->vi_nc_uid = uid;
                        vi->vi_nc_gid = gid;
                } else {
                        vi->vi_nc_mode = VNOVAL;
                        vi->vi_nc_uid = VNOVAL;
                        vi->vi_nc_gid = VNOVAL;
                }
                rw_exit(&vi->vi_nc_listlock);
                rw_exit(&vi->vi_nc_lock);
        }
}

/*
 * Return true if we have identity for the given vnode, and use it as an
 * opportunity to confirm that everything squares up.
 *
 * Because of shared code, some file systems could provide partial
 * information, missing some updates, so check the mount flag too.
 */
bool
cache_have_id(struct vnode *vp)
{

        if (vp->v_type == VDIR &&
            (vp->v_mount->mnt_iflag & IMNT_NCLOOKUP) != 0 &&
            atomic_load_relaxed(&VNODE_TO_VIMPL(vp)->vi_nc_mode) != VNOVAL) {
                return true;
        } else {
                return false;
        }
}

/*
 * Enter a mount point.  cvp is the covered vnode, and rvp is the root of
 * the mounted file system.
 */
void
cache_enter_mount(struct vnode *cvp, struct vnode *rvp)
{

        KASSERT(vrefcnt(cvp) > 0);
        KASSERT(vrefcnt(rvp) > 0);
        KASSERT(cvp->v_type == VDIR);
        KASSERT((rvp->v_vflag & VV_ROOT) != 0);

        if (rvp->v_type == VDIR) {
                cache_enter(cvp, rvp, cache_mp_name, cache_mp_nlen, MAKEENTRY);
        }
}

/*
 * Look up a cached mount point.  Used in the strongly locked path.
 */
bool
cache_lookup_mount(struct vnode *dvp, struct vnode **vn_ret)
{
        bool ret;

        ret = cache_lookup(dvp, cache_mp_name, cache_mp_nlen, LOOKUP,
            MAKEENTRY, NULL, vn_ret);
        KASSERT((*vn_ret != NULL) == ret);
        return ret;
}

/*
 * Try to cross a mount point.  For use with cache_lookup_linked().
 */
bool
cache_cross_mount(struct vnode **dvp, krwlock_t **plock)
{

        return cache_lookup_linked(*dvp, cache_mp_name, cache_mp_nlen,
            dvp, plock, FSCRED);
}

/*
 * Name cache initialization, from vfs_init() when the system is booting.
 */
void
nchinit(void)
{

        cache_pool = pool_cache_init(sizeof(struct namecache),
            coherency_unit, 0, 0, "namecache", NULL, IPL_NONE, NULL,
            NULL, NULL);
        KASSERT(cache_pool != NULL);

        mutex_init(&cache_lru_lock, MUTEX_DEFAULT, IPL_NONE);
        TAILQ_INIT(&cache_lru.list[LRU_ACTIVE]);
        TAILQ_INIT(&cache_lru.list[LRU_INACTIVE]);

        mutex_init(&cache_stat_lock, MUTEX_DEFAULT, IPL_NONE);
        callout_init(&cache_stat_callout, CALLOUT_MPSAFE);
        callout_setfunc(&cache_stat_callout, cache_update_stats, NULL);
        callout_schedule(&cache_stat_callout, cache_stat_interval * hz);

        KASSERT(cache_sysctllog == NULL);
        sysctl_createv(&cache_sysctllog, 0, NULL, NULL,
            CTLFLAG_PERMANENT,
            CTLTYPE_STRUCT, "namecache_stats",
            SYSCTL_DESCR("namecache statistics"),
            cache_stat_sysctl, 0, NULL, 0,
            CTL_VFS, CTL_CREATE, CTL_EOL);
}

/*
 * Called once for each CPU in the system as it is attached.
 */
void
cache_cpu_init(struct cpu_info *ci)
{
        size_t sz;

        sz = roundup2(sizeof(struct nchcpu), coherency_unit);
        ci->ci_data.cpu_nch = kmem_zalloc(sz, KM_SLEEP);
        KASSERT(((uintptr_t)ci->ci_data.cpu_nch & (coherency_unit - 1)) == 0);
}

/*
 * A vnode is being allocated: set up cache structures.
 */
void
cache_vnode_init(struct vnode *vp)
{
        vnode_impl_t *vi = VNODE_TO_VIMPL(vp);

        rw_init(&vi->vi_nc_lock);
        rw_init(&vi->vi_nc_listlock);
        rb_tree_init(&vi->vi_nc_tree, &cache_rbtree_ops);
        TAILQ_INIT(&vi->vi_nc_list);
        vi->vi_nc_mode = VNOVAL;
        vi->vi_nc_uid = VNOVAL;
        vi->vi_nc_gid = VNOVAL;
}

/*
 * A vnode is being freed: finish cache structures.
 */
void
cache_vnode_fini(struct vnode *vp)
{
        vnode_impl_t *vi = VNODE_TO_VIMPL(vp);

        KASSERT(RB_TREE_MIN(&vi->vi_nc_tree) == NULL);
        KASSERT(TAILQ_EMPTY(&vi->vi_nc_list));
        rw_destroy(&vi->vi_nc_lock);
        rw_destroy(&vi->vi_nc_listlock);
}

/*
 * Helper for cache_purge1(): purge cache entries for the given vnode from
 * all directories that the vnode is cached in.
 */
static void
cache_purge_parents(struct vnode *vp)
{
        vnode_impl_t *dvi, *vi = VNODE_TO_VIMPL(vp);
        struct vnode *dvp, *blocked;
        struct namecache *ncp;

        SDT_PROBE(vfs, namecache, purge, parents, vp, 0, 0, 0, 0);

        blocked = NULL;

        rw_enter(&vi->vi_nc_listlock, RW_WRITER);
        while ((ncp = TAILQ_FIRST(&vi->vi_nc_list)) != NULL) {
                /*
                 * Locking in the wrong direction.  Try for a hold on the
                 * directory node's lock, and if we get it then all good,
                 * nuke the entry and move on to the next.
                 */
                dvp = ncp->nc_dvp;
                dvi = VNODE_TO_VIMPL(dvp);
                if (rw_tryenter(&dvi->vi_nc_lock, RW_WRITER)) {
                        cache_remove(ncp, false);
                        rw_exit(&dvi->vi_nc_lock);
                        blocked = NULL;
                        continue;
                }

                /*
                 * We can't wait on the directory node's lock with our list
                 * lock held or the system could deadlock.
                 *
                 * Take a hold on the directory vnode to prevent it from
                 * being freed (taking the vnode & lock with it).  Then
                 * wait for the lock to become available with no other locks
                 * held, and retry.
                 *
                 * If this happens twice in a row, give the other side a
                 * breather; we can do nothing until it lets go.
                 */
                vhold(dvp);
                rw_exit(&vi->vi_nc_listlock);
                rw_enter(&dvi->vi_nc_lock, RW_WRITER);
                /* Do nothing. */
                rw_exit(&dvi->vi_nc_lock);
                holdrele(dvp);
                if (blocked == dvp) {
                        kpause("ncpurge", false, 1, NULL);
                }
                rw_enter(&vi->vi_nc_listlock, RW_WRITER);
                blocked = dvp;
        }
        rw_exit(&vi->vi_nc_listlock);
}

/*
 * Helper for cache_purge1(): purge all cache entries hanging off the given
 * directory vnode.
 */
static void
cache_purge_children(struct vnode *dvp)
{
        vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
        struct namecache *ncp;

        SDT_PROBE(vfs, namecache, purge, children, dvp, 0, 0, 0, 0);

        rw_enter(&dvi->vi_nc_lock, RW_WRITER);
        while ((ncp = RB_TREE_MIN(&dvi->vi_nc_tree)) != NULL) {
                cache_remove(ncp, true);
        }
        rw_exit(&dvi->vi_nc_lock);
}

/*
 * Helper for cache_purge1(): purge a cache entry from the given directory
 * vnode, finding it by name.
 */
static void
cache_purge_name(struct vnode *dvp, const char *name, size_t namelen)
{
        vnode_impl_t *dvi = VNODE_TO_VIMPL(dvp);
        struct namecache *ncp;
        uintptr_t key;

        SDT_PROBE(vfs, namecache, purge, name, name, namelen, 0, 0, 0);

        key = cache_key(name, namelen);
        rw_enter(&dvi->vi_nc_lock, RW_WRITER);
        ncp = cache_lookup_entry(dvp, name, namelen, key);
        if (ncp) {
                cache_remove(ncp, true);
        }
        rw_exit(&dvi->vi_nc_lock);
}

/*
 * Cache flush, a particular vnode; called when a vnode is renamed to
 * hide entries that would now be invalid.
 */
void
cache_purge1(struct vnode *vp, const char *name, size_t namelen, int flags)
{

        if (flags & PURGE_PARENTS) {
                cache_purge_parents(vp);
        }
        if (flags & PURGE_CHILDREN) {
                cache_purge_children(vp);
        }
        if (name != NULL) {
                cache_purge_name(vp, name, namelen);
        }
}

/*
 * vnode filter for cache_purgevfs().
 */
static bool
cache_vdir_filter(void *cookie, vnode_t *vp)
{

        return vp->v_type == VDIR;
}

/*
 * Cache flush, a whole file system; called when a file system is unmounted
 * to remove entries that would now be invalid.
 */
void
cache_purgevfs(struct mount *mp)
{
        struct vnode_iterator *iter;
        vnode_t *dvp;

        vfs_vnode_iterator_init(mp, &iter);
        for (;;) {
                dvp = vfs_vnode_iterator_next(iter, cache_vdir_filter, NULL);
                if (dvp == NULL) {
                        break;
                }
                cache_purge_children(dvp);
                vrele(dvp);
        }
        vfs_vnode_iterator_destroy(iter);
}
1319 1.73 ad
1320 1.73 ad /*
1321 1.135 ad * Re-queue an entry onto the tail of the active LRU list, after it has
1322 1.135 ad * scored a hit.
1323 1.73 ad */
1324 1.73 ad static void
1325 1.128 ad cache_activate(struct namecache *ncp)
1326 1.73 ad {
1327 1.73 ad
1328 1.128 ad mutex_enter(&cache_lru_lock);
1329 1.128 ad TAILQ_REMOVE(&cache_lru.list[ncp->nc_lrulist], ncp, nc_lru);
1330 1.128 ad TAILQ_INSERT_TAIL(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
1331 1.128 ad cache_lru.count[ncp->nc_lrulist]--;
1332 1.128 ad cache_lru.count[LRU_ACTIVE]++;
1333 1.128 ad ncp->nc_lrulist = LRU_ACTIVE;
1334 1.128 ad mutex_exit(&cache_lru_lock);
1335 1.73 ad }
1336 1.73 ad
1337 1.73 ad /*
1338 1.128 ad * Try to balance the LRU lists. Pick some victim entries, and re-queue
1339 1.147 riastrad * them from the head of the active list to the tail of the inactive list.
1340 1.73 ad */
1341 1.73 ad static void
1342 1.128 ad cache_deactivate(void)
1343 1.73 ad {
1344 1.128 ad struct namecache *ncp;
1345 1.128 ad int total, i;
1346 1.128 ad
1347 1.128 ad KASSERT(mutex_owned(&cache_lru_lock));
1348 1.73 ad
1349 1.128 ad /* If we're nowhere near budget yet, don't bother. */
1350 1.128 ad total = cache_lru.count[LRU_ACTIVE] + cache_lru.count[LRU_INACTIVE];
1351 1.128 ad if (total < (desiredvnodes >> 1)) {
1352 1.128 ad return;
1353 1.128 ad }
1354 1.73 ad
1355 1.73 ad /*
1356 1.128 ad * Aim for a 1:1 ratio of active to inactive. This is to allow each
1357 1.128 ad * potential victim a reasonable amount of time to cycle through the
1358 1.128 ad * inactive list in order to score a hit and be reactivated, while
1359 1.128 ad * trying not to cause reactivations too frequently.
1360 1.73 ad */
1361 1.128 ad if (cache_lru.count[LRU_ACTIVE] < cache_lru.count[LRU_INACTIVE]) {
1362 1.128 ad return;
1363 1.128 ad }
1364 1.73 ad
1365 1.128 ad /* Move only a few at a time; will catch up eventually. */
1366 1.128 ad for (i = 0; i < cache_lru_maxdeact; i++) {
1367 1.128 ad ncp = TAILQ_FIRST(&cache_lru.list[LRU_ACTIVE]);
1368 1.128 ad if (ncp == NULL) {
1369 1.128 ad break;
1370 1.128 ad }
1371 1.128 ad KASSERT(ncp->nc_lrulist == LRU_ACTIVE);
1372 1.128 ad ncp->nc_lrulist = LRU_INACTIVE;
1373 1.128 ad TAILQ_REMOVE(&cache_lru.list[LRU_ACTIVE], ncp, nc_lru);
1374 1.128 ad TAILQ_INSERT_TAIL(&cache_lru.list[LRU_INACTIVE], ncp, nc_lru);
1375 1.128 ad cache_lru.count[LRU_ACTIVE]--;
1376 1.128 ad cache_lru.count[LRU_INACTIVE]++;
1377 1.128 ad }
1378 1.73 ad }
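/*
 * To make the thresholds above concrete: on a hypothetical system with
 * desiredvnodes = 8192, balancing is a no-op until the two lists hold
 * 4096 entries (desiredvnodes >> 1) between them, and even then entries
 * migrate only while the active list is at least as long as the
 * inactive list, at most cache_lru_maxdeact of them per call.
 */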
1379 1.73 ad
1380 1.73 ad /*
1381 1.128 ad * Free some entries from the cache, when we have gone over budget.
1382 1.128 ad *
1383 1.128 ad * We don't want to cause too much work for any individual caller, and it
1384 1.128 ad * doesn't matter if we temporarily go over budget. This is also "just a
1385 1.128 ad * cache" so it's not a big deal if we screw up and throw out something we
1386 1.128 ad * shouldn't. So we take a relaxed attitude to this process to reduce its
1387 1.128 ad * impact.
1388 1.73 ad */
1389 1.73 ad static void
1390 1.128 ad cache_reclaim(void)
1391 1.28 chs {
1392 1.28 chs struct namecache *ncp;
1393 1.128 ad vnode_impl_t *dvi;
1394 1.128 ad int toscan;
1395 1.28 chs
1396 1.128 ad /*
1397 1.152 andvar 	 * Scan up to a preset maximum number of entries, but no more than 0.8%
1398 1.128 ad 	 * of the total at once; always at least one, for very small systems.
1399 1.128 ad *
1400 1.128 ad * On bigger systems, do a larger chunk of work to reduce the number
1401 1.128 ad * of times that cache_lru_lock is held for any length of time.
1402 1.128 ad */
1403 1.128 ad mutex_enter(&cache_lru_lock);
1404 1.128 ad toscan = MIN(cache_lru_maxscan, desiredvnodes >> 7);
1405 1.128 ad toscan = MAX(toscan, 1);
1406 1.128 ad SDT_PROBE(vfs, namecache, prune, done, cache_lru.count[LRU_ACTIVE] +
1407 1.128 ad cache_lru.count[LRU_INACTIVE], toscan, 0, 0, 0);
1408 1.128 ad while (toscan-- != 0) {
1409 1.128 ad /* First try to balance the lists. */
1410 1.128 ad cache_deactivate();
1411 1.128 ad
1412 1.128 ad 		/* Now look for a victim at the head of the inactive (old) list. */
1413 1.128 ad ncp = TAILQ_FIRST(&cache_lru.list[LRU_INACTIVE]);
1414 1.128 ad if (ncp == NULL) {
1415 1.128 ad break;
1416 1.28 chs }
1417 1.128 ad dvi = VNODE_TO_VIMPL(ncp->nc_dvp);
1418 1.128 ad KASSERT(ncp->nc_lrulist == LRU_INACTIVE);
1419 1.128 ad KASSERT(dvi != NULL);
1420 1.128 ad
1421 1.128 ad /*
1422 1.128 ad 		 * Locking in the wrong direction: we hold cache_lru_lock but
1423 1.128 ad 		 * need the directory's vi_nc_lock, so only try-lock.  If that
1424 1.128 ad 		 * fails, the directory is actively busy and could hold up the
1425 1.128 ad 		 * next thread in here too, so send the entry to the back of the list.
1426 1.128 ad */
1427 1.128 ad if (!rw_tryenter(&dvi->vi_nc_lock, RW_WRITER)) {
1428 1.128 ad TAILQ_REMOVE(&cache_lru.list[LRU_INACTIVE],
1429 1.128 ad ncp, nc_lru);
1430 1.128 ad TAILQ_INSERT_TAIL(&cache_lru.list[LRU_INACTIVE],
1431 1.128 ad ncp, nc_lru);
1432 1.128 ad continue;
1433 1.28 chs }
1434 1.128 ad
1435 1.128 ad /*
1436 1.128 ad * Now have the victim entry locked. Drop the LRU list
1437 1.128 ad * lock, purge the entry, and start over. The hold on
1438 1.128 ad * vi_nc_lock will prevent the vnode from vanishing until
1439 1.128 ad * finished (cache_purge() will be called on dvp before it
1440 1.128 ad * disappears, and that will wait on vi_nc_lock).
1441 1.128 ad */
1442 1.128 ad mutex_exit(&cache_lru_lock);
1443 1.128 ad cache_remove(ncp, true);
1444 1.128 ad rw_exit(&dvi->vi_nc_lock);
1445 1.128 ad mutex_enter(&cache_lru_lock);
1446 1.28 chs }
1447 1.128 ad mutex_exit(&cache_lru_lock);
1448 1.28 chs }
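/*
 * Concretely, the scan budget works out as follows: with a hypothetical
 * desiredvnodes of 8192, desiredvnodes >> 7 = 64, so one call visits at
 * most MIN(cache_lru_maxscan, 64) entries and never fewer than one.
 * Each inactive-list victim visited is either rotated to the tail of
 * the list (directory lock unavailable) or removed outright.
 */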
1449 1.95 joerg
1450 1.128 ad /*
1451 1.128 ad * For file system code: count a lookup that required a full re-scan of
1452 1.128 ad * directory metadata.
1453 1.128 ad */
1454 1.95 joerg void
1455 1.95 joerg namecache_count_pass2(void)
1456 1.95 joerg {
1457 1.95 joerg
1458 1.128 ad COUNT(ncs_pass2);
1459 1.95 joerg }
1460 1.95 joerg
1461 1.128 ad /*
1462 1.128 ad * For file system code: count a lookup that scored a hit in the directory
1463 1.128 ad * metadata near the location of the last lookup.
1464 1.128 ad */
1465 1.95 joerg void
1466 1.95 joerg namecache_count_2passes(void)
1467 1.95 joerg {
1468 1.95 joerg
1469 1.128 ad COUNT(ncs_2passes);
1470 1.128 ad }
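/*
 * A sketch of how a file system's lookup routine might drive the two
 * counters above (the condition names are hypothetical; the real
 * callers live in the individual file systems):
 *
 *	if (hit_near_last_lookup)
 *		namecache_count_2passes();
 *	else if (rescanned_whole_directory)
 *		namecache_count_pass2();
 */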
1471 1.128 ad
1472 1.128 ad /*
1473 1.128 ad * Sum the stats from all CPUs into nchstats. This needs to run at least
1474 1.128 ad  * once within every window where a 32-bit counter could roll over.  It's
1475 1.128 ad  * called regularly from a callout timer to ensure this.
1476 1.128 ad */
1477 1.128 ad static void
1478 1.128 ad cache_update_stats(void *cookie)
1479 1.128 ad {
1480 1.128 ad CPU_INFO_ITERATOR cii;
1481 1.128 ad struct cpu_info *ci;
1482 1.128 ad
1483 1.128 ad mutex_enter(&cache_stat_lock);
1484 1.128 ad for (CPU_INFO_FOREACH(cii, ci)) {
1485 1.128 ad struct nchcpu *nchcpu = ci->ci_data.cpu_nch;
1486 1.128 ad UPDATE(nchcpu, ncs_goodhits);
1487 1.128 ad UPDATE(nchcpu, ncs_neghits);
1488 1.128 ad UPDATE(nchcpu, ncs_badhits);
1489 1.128 ad UPDATE(nchcpu, ncs_falsehits);
1490 1.128 ad UPDATE(nchcpu, ncs_miss);
1491 1.128 ad UPDATE(nchcpu, ncs_long);
1492 1.128 ad UPDATE(nchcpu, ncs_pass2);
1493 1.128 ad UPDATE(nchcpu, ncs_2passes);
1494 1.128 ad UPDATE(nchcpu, ncs_revhits);
1495 1.128 ad UPDATE(nchcpu, ncs_revmiss);
1496 1.128 ad UPDATE(nchcpu, ncs_denied);
1497 1.128 ad }
1498 1.128 ad if (cookie != NULL) {
1499 1.128 ad memcpy(cookie, &nchstats, sizeof(nchstats));
1500 1.128 ad }
1501 1.128 ad 	/* Reset the timer; arrive back here in N minutes at the latest. */
1502 1.128 ad callout_schedule(&cache_stat_callout, cache_stat_interval * hz);
1503 1.128 ad mutex_exit(&cache_stat_lock);
1504 1.95 joerg }
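/*
 * The rollover window above is easy to bound: even at an assumed
 * extreme rate of one million increments per second, a 32-bit counter
 * wraps only after 2^32 / 10^6 ~= 4295 seconds, a little over 71
 * minutes, so any sampling interval comfortably below that keeps the
 * wider global totals exact.
 */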
1505 1.97 joerg
1506 1.103 dennis /*
1507 1.131 ad * Fetch the current values of the stats for sysctl.
1508 1.103 dennis */
1509 1.97 joerg static int
1510 1.97 joerg cache_stat_sysctl(SYSCTLFN_ARGS)
1511 1.97 joerg {
1512 1.125 ad struct nchstats stats;
1513 1.97 joerg
1514 1.97 joerg if (oldp == NULL) {
1515 1.128 ad *oldlenp = sizeof(nchstats);
1516 1.97 joerg return 0;
1517 1.97 joerg }
1518 1.97 joerg
1519 1.128 ad 	if (*oldlenp == 0) {
1520 1.97 joerg *oldlenp = 0;
1521 1.97 joerg return 0;
1522 1.97 joerg }
1523 1.97 joerg
1524 1.128 ad /* Refresh the global stats. */
1525 1.103 dennis sysctl_unlock();
1526 1.128 ad cache_update_stats(&stats);
1527 1.97 joerg sysctl_relock();
1528 1.97 joerg
1529 1.128 ad *oldlenp = MIN(sizeof(stats), *oldlenp);
1530 1.128 ad return sysctl_copyout(l, &stats, oldp, *oldlenp);
1531 1.97 joerg }
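/*
 * Userland consumer sketch, assuming this handler is registered under a
 * sysctl node named "vfs.namecache_stats" (the real name is assigned
 * where the sysctl tree is set up, so treat it as illustrative); needs
 * <sys/sysctl.h>, <sys/namei.h>, <err.h>, <stdio.h> and <stdlib.h>:
 *
 *	struct nchstats st;
 *	size_t len = sizeof(st);
 *
 *	if (sysctlbyname("vfs.namecache_stats", &st, &len, NULL, 0) == -1)
 *		err(EXIT_FAILURE, "sysctlbyname");
 *	printf("%llu good hits\n", (unsigned long long)st.ncs_goodhits);
 */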
1532 1.97 joerg
1533 1.128 ad /*
1534 1.128 ad * For the debugger, given the address of a vnode, print all associated
1535 1.128 ad * names in the cache.
1536 1.128 ad */
1537 1.128 ad #ifdef DDB
1538 1.128 ad void
1539 1.128 ad namecache_print(struct vnode *vp, void (*pr)(const char *, ...))
1540 1.97 joerg {
1541 1.128 ad struct vnode *dvp = NULL;
1542 1.128 ad struct namecache *ncp;
1543 1.128 ad enum cache_lru_id id;
1544 1.104 pooka
1545 1.128 ad for (id = 0; id < LRU_COUNT; id++) {
1546 1.128 ad TAILQ_FOREACH(ncp, &cache_lru.list[id], nc_lru) {
1547 1.128 ad if (ncp->nc_vp == vp) {
1548 1.155 ad (*pr)("name %.*s\n", NC_NLEN(ncp),
1549 1.128 ad ncp->nc_name);
1550 1.128 ad dvp = ncp->nc_dvp;
1551 1.128 ad }
1552 1.128 ad }
1553 1.128 ad }
1554 1.128 ad if (dvp == NULL) {
1555 1.128 ad (*pr)("name not found\n");
1556 1.128 ad return;
1557 1.128 ad }
1558 1.128 ad for (id = 0; id < LRU_COUNT; id++) {
1559 1.128 ad TAILQ_FOREACH(ncp, &cache_lru.list[id], nc_lru) {
1560 1.128 ad if (ncp->nc_vp == dvp) {
1561 1.155 ad (*pr)("parent %.*s\n", NC_NLEN(ncp),
1562 1.128 ad ncp->nc_name);
1563 1.128 ad }
1564 1.128 ad }
1565 1.128 ad }
1566 1.97 joerg }
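/*
 * This is meant to be driven from the in-kernel debugger; assuming it
 * is wired up to a ddb "show" command, a session might look like:
 *
 *	db> show ncache <address of vnode>
 *	name foo.c
 *	parent src
 */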
1567 1.128 ad #endif
1568