1 /*	$NetBSD: vfs_vnode.c,v 1.136 2022/03/12 15:32:32 riastradh Exp $	*/
2
3 /*-
4 * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Copyright (c) 1989, 1993
35 * The Regents of the University of California. All rights reserved.
36 * (c) UNIX System Laboratories, Inc.
37 * All or some portions of this file are derived from material licensed
38 * to the University of California by American Telephone and Telegraph
39 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
40 * the permission of UNIX System Laboratories, Inc.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
67 */
68
69 /*
70 * The vnode cache subsystem.
71 *
72 * Life-cycle
73 *
74 * Normally, there are two points where new vnodes are created:
75 * VOP_CREATE(9) and VOP_LOOKUP(9). The life-cycle of a vnode
76 * starts in one of the following ways:
77 *
78 * - Allocation, via vcache_get(9) or vcache_new(9).
79 * - Reclamation of an inactive vnode, via vcache_vget(9).
80 *
81 * Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
82 * was another, traditional way. Currently, only the draining thread
83 * recycles vnodes. This behaviour might be revisited.
84 *
85 * The life-cycle ends when the last reference is dropped, usually
86 * in VOP_REMOVE(9). In that case, VOP_INACTIVE(9) is called to inform the
87 * file system that the vnode is inactive. Via this call, the file system
88 * indicates whether the vnode can be recycled (usually, it checks its own
89 * references, e.g. the link count, or whether the file was removed).
90 *
91 * Depending on that indication, the vnode can be put onto a free list
92 * (cache), or cleaned via vcache_reclaim, which calls VOP_RECLAIM(9) to
93 * disassociate the underlying file system from the vnode, and finally
94 * destroyed.
95 *
96 * Vnode state
97 *
98 * A vnode is always in one of six states:
99 * - MARKER This is a marker vnode to help list traversal. It
100 * will never change its state.
101 * - LOADING Vnode is associating with the underlying file system
102 * and is not yet ready to use.
103 * - LOADED Vnode has an associated underlying file system and is
104 * ready to use.
105 * - BLOCKED Vnode is active but cannot get new references.
106 * - RECLAIMING Vnode is disassociating from the underlying file
107 * system.
108 * - RECLAIMED Vnode has disassociated from the underlying file
109 * system and is dead.
110 *
111 * Valid state changes are:
112 * LOADING -> LOADED
113 * Vnode has been initialised in vcache_get() or
114 * vcache_new() and is ready to use.
115 * BLOCKED -> RECLAIMING
116 * Vnode starts disassociation from underlying file
117 * system in vcache_reclaim().
118 * RECLAIMING -> RECLAIMED
119 * Vnode finished disassociation from underlying file
120 * system in vcache_reclaim().
121 * LOADED -> BLOCKED
122 * Either vcache_rekey*() is changing the vnode key or
123 * vrelel() is about to call VOP_INACTIVE().
124 * BLOCKED -> LOADED
125 * The block condition is over.
126 * LOADING -> RECLAIMED
127 * Either vcache_get() or vcache_new() failed to
128 * associate the underlying file system or vcache_rekey*()
129 * drops a vnode used as placeholder.
130 *
131 * Of these states, LOADING, BLOCKED and RECLAIMING are intermediate,
132 * and it is possible to wait for a state change.
133 *
134 * State is protected with v_interlock, with one exception:
135 * to change from LOADING both v_interlock and vcache_lock must be held,
136 * so it is possible to check "state == LOADING" while holding only
137 * vcache_lock and not v_interlock. See vcache_get() for details.
138 *
139 * Reference counting
140 *
141 * A vnode is considered active if its reference count
142 * (vnode_t::v_usecount) is non-zero. The count is maintained using the
143 * vref(9), vrele(9) and vput(9) routines. Common holders of references
144 * are e.g. open files, current working directories and mount points.
145 *
146 * v_usecount is adjusted with atomic operations; however, to change it
147 * from a non-zero value to zero the interlock must also be held.
148 */
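
/*
 * Illustrative only: a minimal consumer-side sketch of the life-cycle
 * described above, assuming a hypothetical file system private key
 * "key" and a mount point "mp", and omitting everything but the basic
 * calls.  See vcache_get(), vn_lock(9) and vput(9) for the real
 * contracts.
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = vcache_get(mp, &key, sizeof(key), &vp);
 *	if (error == 0) {
 *		vn_lock(vp, LK_SHARED | LK_RETRY);
 *		...use vp through VOP_*() calls...
 *		vput(vp);
 *	}
 *
 * vcache_get() hands back the vnode referenced but unlocked; vput()
 * unlocks it and drops the reference, ending this consumer's part of
 * the life-cycle.
 */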
149
150 #include <sys/cdefs.h>
151 __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.136 2022/03/12 15:32:32 riastradh Exp $");
152
153 #ifdef _KERNEL_OPT
154 #include "opt_pax.h"
155 #endif
156
157 #include <sys/param.h>
158 #include <sys/kernel.h>
159
160 #include <sys/atomic.h>
161 #include <sys/buf.h>
162 #include <sys/conf.h>
163 #include <sys/device.h>
164 #include <sys/hash.h>
165 #include <sys/kauth.h>
166 #include <sys/kmem.h>
167 #include <sys/kthread.h>
168 #include <sys/module.h>
169 #include <sys/mount.h>
170 #include <sys/namei.h>
171 #include <sys/pax.h>
172 #include <sys/syscallargs.h>
173 #include <sys/sysctl.h>
174 #include <sys/systm.h>
175 #include <sys/vnode_impl.h>
176 #include <sys/wapbl.h>
177 #include <sys/fstrans.h>
178
179 #include <uvm/uvm.h>
180 #include <uvm/uvm_readahead.h>
181 #include <uvm/uvm_stat.h>
182
183 /* Flags to vrelel. */
184 #define VRELEL_ASYNC 0x0001 /* Always defer to vrele thread. */
185
186 #define LRU_VRELE 0
187 #define LRU_FREE 1
188 #define LRU_HOLD 2
189 #define LRU_COUNT 3
190
191 /*
192 * There are three lru lists: one holds vnodes waiting for async release,
193 * one is for vnodes which have no buffer/page references and one for those
194 * which do (i.e. v_holdcnt is non-zero). We put the lists into a single,
195 * private cache line as vnodes migrate between them while under the same
196 * lock (vdrain_lock).
197 */
198 u_int numvnodes __cacheline_aligned;
199 static vnodelst_t lru_list[LRU_COUNT] __cacheline_aligned;
200 static kmutex_t vdrain_lock __cacheline_aligned;
201 static kcondvar_t vdrain_cv;
202 static int vdrain_gen;
203 static kcondvar_t vdrain_gen_cv;
204 static bool vdrain_retry;
205 static lwp_t * vdrain_lwp;
206 SLIST_HEAD(hashhead, vnode_impl);
207 static kmutex_t vcache_lock __cacheline_aligned;
208 static kcondvar_t vcache_cv;
209 static u_int vcache_hashsize;
210 static u_long vcache_hashmask;
211 static struct hashhead *vcache_hashtab;
212 static pool_cache_t vcache_pool;
213 static void lru_requeue(vnode_t *, vnodelst_t *);
214 static vnodelst_t * lru_which(vnode_t *);
215 static vnode_impl_t * vcache_alloc(void);
216 static void vcache_dealloc(vnode_impl_t *);
217 static void vcache_free(vnode_impl_t *);
218 static void vcache_init(void);
219 static void vcache_reinit(void);
220 static void vcache_reclaim(vnode_t *);
221 static void vrelel(vnode_t *, int, int);
222 static void vdrain_thread(void *);
223 static void vnpanic(vnode_t *, const char *, ...)
224 __printflike(2, 3);
225
226 /* Routines having to do with the management of the vnode table. */
227 extern struct mount *dead_rootmount;
228 extern int (**dead_vnodeop_p)(void *);
229 extern int (**spec_vnodeop_p)(void *);
230 extern struct vfsops dead_vfsops;
231
232 /*
233 * The high bit of v_usecount is a gate for vcache_tryvget(). It's set
234 * only when the vnode state is LOADED.
235 * The next bit of v_usecount is a flag for vrelel(). It's set
236 * from vcache_vget() and vcache_tryvget() whenever the operation succeeds.
237 */
238 #define VUSECOUNT_MASK 0x3fffffff
239 #define VUSECOUNT_GATE 0x80000000
240 #define VUSECOUNT_VGET 0x40000000
241
242 /*
243 * Return the current usecount of a vnode.
244 */
245 inline int
246 vrefcnt(struct vnode *vp)
247 {
248
249 return atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_MASK;
250 }
251
252 /* Vnode state operations and diagnostics. */
253
254 #if defined(DIAGNOSTIC)
255
256 #define VSTATE_VALID(state) \
257 ((state) != VS_ACTIVE && (state) != VS_MARKER)
258 #define VSTATE_GET(vp) \
259 vstate_assert_get((vp), __func__, __LINE__)
260 #define VSTATE_CHANGE(vp, from, to) \
261 vstate_assert_change((vp), (from), (to), __func__, __LINE__)
262 #define VSTATE_WAIT_STABLE(vp) \
263 vstate_assert_wait_stable((vp), __func__, __LINE__)
264
265 void
266 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
267 bool has_lock)
268 {
269 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
270 int refcnt = vrefcnt(vp);
271
272 if (!has_lock) {
273 /*
274 * Prevent predictive loads from the CPU, but check the state
275 * without locking first.
276 */
277 membar_enter();
278 if (state == VS_ACTIVE && refcnt > 0 &&
279 (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
280 return;
281 if (vip->vi_state == state)
282 return;
283 mutex_enter((vp)->v_interlock);
284 }
285
286 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
287
288 if ((state == VS_ACTIVE && refcnt > 0 &&
289 (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
290 vip->vi_state == state) {
291 if (!has_lock)
292 mutex_exit((vp)->v_interlock);
293 return;
294 }
295 vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
296 vstate_name(vip->vi_state), refcnt,
297 vstate_name(state), func, line);
298 }
299
300 static enum vnode_state
301 vstate_assert_get(vnode_t *vp, const char *func, int line)
302 {
303 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
304
305 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
306 if (! VSTATE_VALID(vip->vi_state))
307 vnpanic(vp, "state is %s at %s:%d",
308 vstate_name(vip->vi_state), func, line);
309
310 return vip->vi_state;
311 }
312
313 static void
314 vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
315 {
316 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
317
318 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
319 if (! VSTATE_VALID(vip->vi_state))
320 vnpanic(vp, "state is %s at %s:%d",
321 vstate_name(vip->vi_state), func, line);
322
323 while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
324 cv_wait(&vp->v_cv, vp->v_interlock);
325
326 if (! VSTATE_VALID(vip->vi_state))
327 vnpanic(vp, "state is %s at %s:%d",
328 vstate_name(vip->vi_state), func, line);
329 }
330
331 static void
332 vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
333 const char *func, int line)
334 {
335 bool gated = (atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_GATE);
336 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
337
338 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
339 if (from == VS_LOADING)
340 KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);
341
342 if (! VSTATE_VALID(from))
343 vnpanic(vp, "from is %s at %s:%d",
344 vstate_name(from), func, line);
345 if (! VSTATE_VALID(to))
346 vnpanic(vp, "to is %s at %s:%d",
347 vstate_name(to), func, line);
348 if (vip->vi_state != from)
349 vnpanic(vp, "from is %s, expected %s at %s:%d\n",
350 vstate_name(vip->vi_state), vstate_name(from), func, line);
351 if ((from == VS_LOADED) != gated)
352 vnpanic(vp, "state is %s, gate %d does not match at %s:%d\n",
353 vstate_name(vip->vi_state), gated, func, line);
354
355 /* Open/close the gate for vcache_tryvget(). */
356 if (to == VS_LOADED) {
357 #ifndef __HAVE_ATOMIC_AS_MEMBAR
358 membar_exit();
359 #endif
360 atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
361 } else {
362 atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
363 }
364
365 vip->vi_state = to;
366 if (from == VS_LOADING)
367 cv_broadcast(&vcache_cv);
368 if (to == VS_LOADED || to == VS_RECLAIMED)
369 cv_broadcast(&vp->v_cv);
370 }
371
372 #else /* defined(DIAGNOSTIC) */
373
374 #define VSTATE_GET(vp) \
375 (VNODE_TO_VIMPL((vp))->vi_state)
376 #define VSTATE_CHANGE(vp, from, to) \
377 vstate_change((vp), (from), (to))
378 #define VSTATE_WAIT_STABLE(vp) \
379 vstate_wait_stable((vp))
380 void
381 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
382 bool has_lock)
383 {
384
385 }
386
387 static void
388 vstate_wait_stable(vnode_t *vp)
389 {
390 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
391
392 while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
393 cv_wait(&vp->v_cv, vp->v_interlock);
394 }
395
396 static void
397 vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
398 {
399 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
400
401 /* Open/close the gate for vcache_tryvget(). */
402 if (to == VS_LOADED) {
403 #ifndef __HAVE_ATOMIC_AS_MEMBAR
404 membar_exit();
405 #endif
406 atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
407 } else {
408 atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
409 }
410
411 vip->vi_state = to;
412 if (from == VS_LOADING)
413 cv_broadcast(&vcache_cv);
414 if (to == VS_LOADED || to == VS_RECLAIMED)
415 cv_broadcast(&vp->v_cv);
416 }
417
418 #endif /* defined(DIAGNOSTIC) */
419
420 void
421 vfs_vnode_sysinit(void)
422 {
423 int error __diagused, i;
424
425 dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
426 KASSERT(dead_rootmount != NULL);
427 dead_rootmount->mnt_iflag |= IMNT_MPSAFE;
428
429 mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
430 for (i = 0; i < LRU_COUNT; i++) {
431 TAILQ_INIT(&lru_list[i]);
432 }
433 vcache_init();
434
435 cv_init(&vdrain_cv, "vdrain");
436 cv_init(&vdrain_gen_cv, "vdrainwt");
437 error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
438 NULL, &vdrain_lwp, "vdrain");
439 KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
440 }
441
442 /*
443 * Allocate a new marker vnode.
444 */
445 vnode_t *
446 vnalloc_marker(struct mount *mp)
447 {
448 vnode_impl_t *vip;
449 vnode_t *vp;
450
451 vip = pool_cache_get(vcache_pool, PR_WAITOK);
452 memset(vip, 0, sizeof(*vip));
453 vp = VIMPL_TO_VNODE(vip);
454 uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
455 vp->v_mount = mp;
456 vp->v_type = VBAD;
457 vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
458 klist_init(&vp->v_klist);
459 vip->vi_state = VS_MARKER;
460
461 return vp;
462 }
463
464 /*
465 * Free a marker vnode.
466 */
467 void
468 vnfree_marker(vnode_t *vp)
469 {
470 vnode_impl_t *vip;
471
472 vip = VNODE_TO_VIMPL(vp);
473 KASSERT(vip->vi_state == VS_MARKER);
474 mutex_obj_free(vp->v_interlock);
475 uvm_obj_destroy(&vp->v_uobj, true);
476 klist_fini(&vp->v_klist);
477 pool_cache_put(vcache_pool, vip);
478 }
479
480 /*
481 * Test a vnode for being a marker vnode.
482 */
483 bool
484 vnis_marker(vnode_t *vp)
485 {
486
487 return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
488 }
489
490 /*
491 * Return the lru list this node should be on.
492 */
493 static vnodelst_t *
494 lru_which(vnode_t *vp)
495 {
496
497 KASSERT(mutex_owned(vp->v_interlock));
498
499 if (vp->v_holdcnt > 0)
500 return &lru_list[LRU_HOLD];
501 else
502 return &lru_list[LRU_FREE];
503 }
504
505 /*
506 * Put vnode to end of given list.
507 * Both the current and the new list may be NULL, used on vnode alloc/free.
508 * Adjust numvnodes and signal vdrain thread if there is work.
509 */
510 static void
511 lru_requeue(vnode_t *vp, vnodelst_t *listhd)
512 {
513 vnode_impl_t *vip;
514 int d;
515
516 /*
517 * If the vnode is on the correct list, and was put there recently,
518 * then leave it be, thus avoiding huge cache and lock contention.
519 */
520 vip = VNODE_TO_VIMPL(vp);
521 if (listhd == vip->vi_lrulisthd &&
522 (getticks() - vip->vi_lrulisttm) < hz) {
523 return;
524 }
525
526 mutex_enter(&vdrain_lock);
527 d = 0;
528 if (vip->vi_lrulisthd != NULL)
529 TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
530 else
531 d++;
532 vip->vi_lrulisthd = listhd;
533 vip->vi_lrulisttm = getticks();
534 if (vip->vi_lrulisthd != NULL)
535 TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
536 else
537 d--;
538 if (d != 0) {
539 /*
540 * Looks strange? This is not a bug. Don't store
541 * numvnodes unless there is a change - avoid false
542 * sharing on MP.
543 */
544 numvnodes += d;
545 }
546 if ((d > 0 && numvnodes > desiredvnodes) ||
547 listhd == &lru_list[LRU_VRELE])
548 cv_signal(&vdrain_cv);
549 mutex_exit(&vdrain_lock);
550 }
551
552 /*
553 * Release deferred vrele vnodes for this mount.
554 * Called with file system suspended.
555 */
556 void
557 vrele_flush(struct mount *mp)
558 {
559 vnode_impl_t *vip, *marker;
560 vnode_t *vp;
561 int when = 0;
562
563 KASSERT(fstrans_is_owner(mp));
564
565 marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
566
567 mutex_enter(&vdrain_lock);
568 TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);
569
570 while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
571 TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
572 TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
573 vi_lrulist);
574 vp = VIMPL_TO_VNODE(vip);
575 if (vnis_marker(vp))
576 continue;
577
578 KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
579 TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
580 vip->vi_lrulisthd = &lru_list[LRU_HOLD];
581 vip->vi_lrulisttm = getticks();
582 TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
583 mutex_exit(&vdrain_lock);
584
585 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
586 mutex_enter(vp->v_interlock);
587 vrelel(vp, 0, LK_EXCLUSIVE);
588
589 if (getticks() > when) {
590 yield();
591 when = getticks() + hz / 10;
592 }
593
594 mutex_enter(&vdrain_lock);
595 }
596
597 TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
598 mutex_exit(&vdrain_lock);
599
600 vnfree_marker(VIMPL_TO_VNODE(marker));
601 }
602
603 /*
604 * Reclaim a cached vnode. Used from vdrain_thread only.
605 */
606 static __inline void
607 vdrain_remove(vnode_t *vp)
608 {
609 struct mount *mp;
610
611 KASSERT(mutex_owned(&vdrain_lock));
612
613 /* Probe usecount (unlocked). */
614 if (vrefcnt(vp) > 0)
615 return;
616 /* Try v_interlock -- we lock the wrong direction! */
617 if (!mutex_tryenter(vp->v_interlock))
618 return;
619 /* Probe usecount and state. */
620 if (vrefcnt(vp) > 0 || VSTATE_GET(vp) != VS_LOADED) {
621 mutex_exit(vp->v_interlock);
622 return;
623 }
624 mp = vp->v_mount;
625 if (fstrans_start_nowait(mp) != 0) {
626 mutex_exit(vp->v_interlock);
627 return;
628 }
629 vdrain_retry = true;
630 mutex_exit(&vdrain_lock);
631
632 if (vcache_vget(vp) == 0) {
633 if (!vrecycle(vp)) {
634 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
635 mutex_enter(vp->v_interlock);
636 vrelel(vp, 0, LK_EXCLUSIVE);
637 }
638 }
639 fstrans_done(mp);
640
641 mutex_enter(&vdrain_lock);
642 }
643
644 /*
645 * Release a cached vnode. Used from vdrain_thread only.
646 */
647 static __inline void
648 vdrain_vrele(vnode_t *vp)
649 {
650 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
651 struct mount *mp;
652
653 KASSERT(mutex_owned(&vdrain_lock));
654
655 mp = vp->v_mount;
656 if (fstrans_start_nowait(mp) != 0)
657 return;
658
659 /*
660 * First remove the vnode from the vrele list.
661 * Put it on the LRU_HOLD list; the last vrele() will
662 * put it back onto the right list before its
663 * usecount reaches zero.
664 */
665 KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
666 TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
667 vip->vi_lrulisthd = &lru_list[LRU_HOLD];
668 vip->vi_lrulisttm = getticks();
669 TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
670
671 vdrain_retry = true;
672 mutex_exit(&vdrain_lock);
673
674 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
675 mutex_enter(vp->v_interlock);
676 vrelel(vp, 0, LK_EXCLUSIVE);
677 fstrans_done(mp);
678
679 mutex_enter(&vdrain_lock);
680 }
681
682 /*
683 * Helper thread to keep the number of vnodes below desiredvnodes
684 * and release vnodes from asynchronous vrele.
685 */
686 static void
687 vdrain_thread(void *cookie)
688 {
689 int i;
690 u_int target;
691 vnode_impl_t *vip, *marker;
692
693 marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
694
695 mutex_enter(&vdrain_lock);
696
697 for (;;) {
698 vdrain_retry = false;
699 target = desiredvnodes - desiredvnodes/10;
700
701 for (i = 0; i < LRU_COUNT; i++) {
702 TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
703 while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
704 TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
705 TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
706 vi_lrulist);
707 if (vnis_marker(VIMPL_TO_VNODE(vip)))
708 continue;
709 if (i == LRU_VRELE)
710 vdrain_vrele(VIMPL_TO_VNODE(vip));
711 else if (numvnodes < target)
712 break;
713 else
714 vdrain_remove(VIMPL_TO_VNODE(vip));
715 }
716 TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
717 }
718
719 if (vdrain_retry) {
720 kpause("vdrainrt", false, 1, &vdrain_lock);
721 } else {
722 vdrain_gen++;
723 cv_broadcast(&vdrain_gen_cv);
724 cv_wait(&vdrain_cv, &vdrain_lock);
725 }
726 }
727 }
728
729 /*
730 * Try to drop reference on a vnode. Abort if we are releasing the
731 * last reference. Note: this _must_ succeed if not the last reference.
732 */
733 static bool
734 vtryrele(vnode_t *vp)
735 {
736 u_int use, next;
737
738 #ifndef __HAVE_ATOMIC_AS_MEMBAR
739 membar_exit();
740 #endif
741 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
742 if (__predict_false((use & VUSECOUNT_MASK) == 1)) {
743 return false;
744 }
745 KASSERT((use & VUSECOUNT_MASK) > 1);
746 next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
747 if (__predict_true(next == use)) {
748 return true;
749 }
750 }
751 }
752
753 /*
754 * vput: unlock and release the reference.
755 */
756 void
757 vput(vnode_t *vp)
758 {
759 int lktype;
760
761 /*
762 * Do an unlocked check of the usecount. If it looks like we're not
763 * about to drop the last reference, then unlock the vnode and try
764 * to drop the reference. If it ends up being the last reference
765 * after all, vrelel() can fix it all up. Most of the time this
766 * will all go to plan.
767 */
768 if (vrefcnt(vp) > 1) {
769 VOP_UNLOCK(vp);
770 if (vtryrele(vp)) {
771 return;
772 }
773 lktype = LK_NONE;
774 } else if ((vp->v_vflag & VV_LOCKSWORK) == 0) {
775 VOP_UNLOCK(vp);
776 lktype = LK_NONE;
777 } else {
778 lktype = VOP_ISLOCKED(vp);
779 KASSERT(lktype != LK_NONE);
780 }
781 mutex_enter(vp->v_interlock);
782 vrelel(vp, 0, lktype);
783 }
784
785 /*
786 * Vnode release. If reference count drops to zero, call inactive
787 * routine and either return to freelist or free to the pool.
788 */
789 static void
790 vrelel(vnode_t *vp, int flags, int lktype)
791 {
792 const bool async = ((flags & VRELEL_ASYNC) != 0);
793 bool recycle, defer, objlock_held;
794 u_int use, next;
795 int error;
796
797 objlock_held = false;
798
799 retry:
800 KASSERT(mutex_owned(vp->v_interlock));
801
802 if (__predict_false(vp->v_op == dead_vnodeop_p &&
803 VSTATE_GET(vp) != VS_RECLAIMED)) {
804 vnpanic(vp, "dead but not clean");
805 }
806
807 /*
808 * If not the last reference, just unlock and drop the reference count.
809 *
810 * Otherwise make sure we pass a point in time where we hold the
811 * last reference with VGET flag unset.
812 */
813 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
814 if (__predict_false((use & VUSECOUNT_MASK) > 1)) {
815 if (objlock_held) {
816 objlock_held = false;
817 rw_exit(vp->v_uobj.vmobjlock);
818 }
819 if (lktype != LK_NONE) {
820 mutex_exit(vp->v_interlock);
821 lktype = LK_NONE;
822 VOP_UNLOCK(vp);
823 mutex_enter(vp->v_interlock);
824 }
825 if (vtryrele(vp)) {
826 mutex_exit(vp->v_interlock);
827 return;
828 }
829 next = atomic_load_relaxed(&vp->v_usecount);
830 continue;
831 }
832 KASSERT((use & VUSECOUNT_MASK) == 1);
833 next = use & ~VUSECOUNT_VGET;
834 if (next != use) {
835 next = atomic_cas_uint(&vp->v_usecount, use, next);
836 }
837 if (__predict_true(next == use)) {
838 break;
839 }
840 }
841 #ifndef __HAVE_ATOMIC_AS_MEMBAR
842 membar_enter();
843 #endif
844 if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) {
845 vnpanic(vp, "%s: bad ref count", __func__);
846 }
847
848 #ifdef DIAGNOSTIC
849 if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
850 vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
851 vprint("vrelel: missing VOP_CLOSE()", vp);
852 }
853 #endif
854
855 /*
856 * If already clean there is no need to lock, defer or
857 * deactivate this node.
858 */
859 if (VSTATE_GET(vp) == VS_RECLAIMED) {
860 if (objlock_held) {
861 objlock_held = false;
862 rw_exit(vp->v_uobj.vmobjlock);
863 }
864 if (lktype != LK_NONE) {
865 mutex_exit(vp->v_interlock);
866 lktype = LK_NONE;
867 VOP_UNLOCK(vp);
868 mutex_enter(vp->v_interlock);
869 }
870 goto out;
871 }
872
873 /*
874 * First try to get the vnode locked for VOP_INACTIVE().
875 * Defer vnode release to vdrain_thread if the caller requests
876 * it explicitly, is the pagedaemon, or if the lock attempt failed.
877 */
878 defer = false;
879 if ((curlwp == uvm.pagedaemon_lwp) || async) {
880 defer = true;
881 } else if (lktype == LK_SHARED) {
882 /* Excellent chance of getting the lock, if this is the last ref. */
883 error = vn_lock(vp, LK_UPGRADE | LK_RETRY | LK_NOWAIT);
884 if (error != 0) {
885 defer = true;
886 } else {
887 lktype = LK_EXCLUSIVE;
888 }
889 } else if (lktype == LK_NONE) {
890 /* Excellent chance of getting the lock, if this is the last ref. */
891 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
892 if (error != 0) {
893 defer = true;
894 } else {
895 lktype = LK_EXCLUSIVE;
896 }
897 }
898 KASSERT(mutex_owned(vp->v_interlock));
899 if (defer) {
900 /*
901 * Defer reclaim to the kthread; it's not safe to
902 * clean it here. We donate it our last reference.
903 */
904 if (lktype != LK_NONE) {
905 mutex_exit(vp->v_interlock);
906 VOP_UNLOCK(vp);
907 mutex_enter(vp->v_interlock);
908 }
909 lru_requeue(vp, &lru_list[LRU_VRELE]);
910 mutex_exit(vp->v_interlock);
911 return;
912 }
913 KASSERT(lktype == LK_EXCLUSIVE);
914
915 /* If the node gained another reference, retry. */
916 use = atomic_load_relaxed(&vp->v_usecount);
917 if ((use & VUSECOUNT_VGET) != 0 || (use & VUSECOUNT_MASK) != 1) {
918 goto retry;
919 }
920
921 if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP|VI_WRMAP)) != 0 ||
922 (vp->v_vflag & VV_MAPPED) != 0) {
923 /* Take care of space accounting. */
924 if (!objlock_held) {
925 objlock_held = true;
926 if (!rw_tryenter(vp->v_uobj.vmobjlock, RW_WRITER)) {
927 mutex_exit(vp->v_interlock);
928 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
929 mutex_enter(vp->v_interlock);
930 goto retry;
931 }
932 }
933 if ((vp->v_iflag & VI_EXECMAP) != 0) {
934 cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
935 }
936 vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
937 vp->v_vflag &= ~VV_MAPPED;
938 }
939 if (objlock_held) {
940 objlock_held = false;
941 rw_exit(vp->v_uobj.vmobjlock);
942 }
943
944 /*
945 * Deactivate the vnode, but preserve our reference across
946 * the call to VOP_INACTIVE().
947 *
948 * If VOP_INACTIVE() indicates that the file has been
949 * deleted, then recycle the vnode.
950 *
951 * Note that VOP_INACTIVE() will not drop the vnode lock.
952 */
953 mutex_exit(vp->v_interlock);
954 recycle = false;
955 VOP_INACTIVE(vp, &recycle);
956 if (!recycle) {
957 lktype = LK_NONE;
958 VOP_UNLOCK(vp);
959 }
960 mutex_enter(vp->v_interlock);
961
962 /*
963 * Block new references then check again to see if a
964 * new reference was acquired in the meantime. If
965 * it was, restore the vnode state and try again.
966 */
967 if (recycle) {
968 VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
969 use = atomic_load_relaxed(&vp->v_usecount);
970 if ((use & VUSECOUNT_VGET) != 0 ||
971 (use & VUSECOUNT_MASK) != 1) {
972 VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
973 goto retry;
974 }
975 }
976
977 /*
978 * Recycle the vnode if the file is now unused (unlinked).
979 */
980 if (recycle) {
981 VSTATE_ASSERT(vp, VS_BLOCKED);
982 KASSERT(lktype == LK_EXCLUSIVE);
983 /* vcache_reclaim drops the lock. */
984 lktype = LK_NONE;
985 vcache_reclaim(vp);
986 }
987 KASSERT(vrefcnt(vp) > 0);
988 KASSERT(lktype == LK_NONE);
989
990 out:
991 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
992 if (__predict_false((use & VUSECOUNT_VGET) != 0 &&
993 (use & VUSECOUNT_MASK) == 1)) {
994 /* Gained and released another reference, retry. */
995 goto retry;
996 }
997 next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
998 if (__predict_true(next == use)) {
999 if (__predict_false((use & VUSECOUNT_MASK) != 1)) {
1000 /* Gained another reference. */
1001 mutex_exit(vp->v_interlock);
1002 return;
1003 }
1004 break;
1005 }
1006 }
1007 #ifndef __HAVE_ATOMIC_AS_MEMBAR
1008 membar_enter();
1009 #endif
1010
1011 if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
1012 /*
1013 * It's clean so destroy it. It isn't referenced
1014 * anywhere since it has been reclaimed.
1015 */
1016 vcache_free(VNODE_TO_VIMPL(vp));
1017 } else {
1018 /*
1019 * Otherwise, put it back onto the freelist. It
1020 * can't be destroyed while still associated with
1021 * a file system.
1022 */
1023 lru_requeue(vp, lru_which(vp));
1024 mutex_exit(vp->v_interlock);
1025 }
1026 }
1027
1028 void
1029 vrele(vnode_t *vp)
1030 {
1031
1032 if (vtryrele(vp)) {
1033 return;
1034 }
1035 mutex_enter(vp->v_interlock);
1036 vrelel(vp, 0, LK_NONE);
1037 }
1038
1039 /*
1040 * Asynchronous vnode release: the vnode is released in a different context.
1041 */
1042 void
1043 vrele_async(vnode_t *vp)
1044 {
1045
1046 if (vtryrele(vp)) {
1047 return;
1048 }
1049 mutex_enter(vp->v_interlock);
1050 vrelel(vp, VRELEL_ASYNC, LK_NONE);
1051 }
1052
1053 /*
1054 * Vnode reference, where a reference is already held by some other
1055 * object (for example, a file structure).
1056 *
1057 * NB: lockless code sequences may rely on this not blocking.
1058 */
1059 void
1060 vref(vnode_t *vp)
1061 {
1062
1063 KASSERT(vrefcnt(vp) > 0);
1064
1065 atomic_inc_uint(&vp->v_usecount);
1066 }
1067
1068 /*
1069 * Page or buffer structure gets a reference.
1070 * Called with v_interlock held.
1071 */
1072 void
1073 vholdl(vnode_t *vp)
1074 {
1075
1076 KASSERT(mutex_owned(vp->v_interlock));
1077
1078 if (vp->v_holdcnt++ == 0 && vrefcnt(vp) == 0)
1079 lru_requeue(vp, lru_which(vp));
1080 }
1081
1082 /*
1083 * Page or buffer structure gets a reference.
1084 */
1085 void
1086 vhold(vnode_t *vp)
1087 {
1088
1089 mutex_enter(vp->v_interlock);
1090 vholdl(vp);
1091 mutex_exit(vp->v_interlock);
1092 }
1093
1094 /*
1095 * Page or buffer structure frees a reference.
1096 * Called with v_interlock held.
1097 */
1098 void
1099 holdrelel(vnode_t *vp)
1100 {
1101
1102 KASSERT(mutex_owned(vp->v_interlock));
1103
1104 if (vp->v_holdcnt <= 0) {
1105 vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
1106 }
1107
1108 vp->v_holdcnt--;
1109 if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
1110 lru_requeue(vp, lru_which(vp));
1111 }
1112
1113 /*
1114 * Page or buffer structure frees a reference.
1115 */
1116 void
1117 holdrele(vnode_t *vp)
1118 {
1119
1120 mutex_enter(vp->v_interlock);
1121 holdrelel(vp);
1122 mutex_exit(vp->v_interlock);
1123 }
1124
1125 /*
1126 * Recycle an unused vnode if caller holds the last reference.
1127 */
1128 bool
1129 vrecycle(vnode_t *vp)
1130 {
1131 int error __diagused;
1132
1133 mutex_enter(vp->v_interlock);
1134
1135 /* If the vnode is already clean we're done. */
1136 VSTATE_WAIT_STABLE(vp);
1137 if (VSTATE_GET(vp) != VS_LOADED) {
1138 VSTATE_ASSERT(vp, VS_RECLAIMED);
1139 vrelel(vp, 0, LK_NONE);
1140 return true;
1141 }
1142
1143 /* Prevent further references until the vnode is locked. */
1144 VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1145
1146 /* Make sure we hold the last reference. */
1147 if (vrefcnt(vp) != 1) {
1148 VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1149 mutex_exit(vp->v_interlock);
1150 return false;
1151 }
1152
1153 mutex_exit(vp->v_interlock);
1154
1155 /*
1156 * On a leaf file system this lock will always succeed as we hold
1157 * the last reference and prevent further references.
1158 * On layered file systems waiting for the lock would open a can of
1159 * deadlocks as the lower vnodes may have other active references.
1160 */
1161 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
1162
1163 mutex_enter(vp->v_interlock);
1164 if (error) {
1165 VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1166 mutex_exit(vp->v_interlock);
1167 return false;
1168 }
1169
1170 KASSERT(vrefcnt(vp) == 1);
1171 vcache_reclaim(vp);
1172 vrelel(vp, 0, LK_NONE);
1173
1174 return true;
1175 }
1176
1177 /*
1178 * Helper for vrevoke() to propagate suspension from lastmp
1179 * to thismp. Both args may be NULL.
1180 * Returns the currently suspended file system or NULL.
1181 */
1182 static struct mount *
1183 vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
1184 {
1185 int error;
1186
1187 if (lastmp == thismp)
1188 return thismp;
1189
1190 if (lastmp != NULL)
1191 vfs_resume(lastmp);
1192
1193 if (thismp == NULL)
1194 return NULL;
1195
1196 do {
1197 error = vfs_suspend(thismp, 0);
1198 } while (error == EINTR || error == ERESTART);
1199
1200 if (error == 0)
1201 return thismp;
1202
1203 KASSERT(error == EOPNOTSUPP || error == ENOENT);
1204 return NULL;
1205 }
1206
1207 /*
1208 * Eliminate all activity associated with the requested vnode
1209 * and with all vnodes aliased to the requested vnode.
1210 */
1211 void
1212 vrevoke(vnode_t *vp)
1213 {
1214 struct mount *mp;
1215 vnode_t *vq;
1216 enum vtype type;
1217 dev_t dev;
1218
1219 KASSERT(vrefcnt(vp) > 0);
1220
1221 mp = vrevoke_suspend_next(NULL, vp->v_mount);
1222
1223 mutex_enter(vp->v_interlock);
1224 VSTATE_WAIT_STABLE(vp);
1225 if (VSTATE_GET(vp) == VS_RECLAIMED) {
1226 mutex_exit(vp->v_interlock);
1227 } else if (vp->v_type != VBLK && vp->v_type != VCHR) {
1228 atomic_inc_uint(&vp->v_usecount);
1229 mutex_exit(vp->v_interlock);
1230 vgone(vp);
1231 } else {
1232 dev = vp->v_rdev;
1233 type = vp->v_type;
1234 mutex_exit(vp->v_interlock);
1235
1236 while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
1237 mp = vrevoke_suspend_next(mp, vq->v_mount);
1238 vgone(vq);
1239 }
1240 }
1241 vrevoke_suspend_next(mp, NULL);
1242 }
1243
1244 /*
1245 * Eliminate all activity associated with a vnode in preparation for
1246 * reuse. Drops a reference from the vnode.
1247 */
1248 void
1249 vgone(vnode_t *vp)
1250 {
1251 int lktype;
1252
1253 KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
1254
1255 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1256 lktype = LK_EXCLUSIVE;
1257 mutex_enter(vp->v_interlock);
1258 VSTATE_WAIT_STABLE(vp);
1259 if (VSTATE_GET(vp) == VS_LOADED) {
1260 VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1261 vcache_reclaim(vp);
1262 lktype = LK_NONE;
1263 }
1264 VSTATE_ASSERT(vp, VS_RECLAIMED);
1265 vrelel(vp, 0, lktype);
1266 }
1267
1268 static inline uint32_t
1269 vcache_hash(const struct vcache_key *key)
1270 {
1271 uint32_t hash = HASH32_BUF_INIT;
1272
1273 KASSERT(key->vk_key_len > 0);
1274
1275 hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
1276 hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
1277 return hash;
1278 }
1279
1280 static int
1281 vcache_stats(struct hashstat_sysctl *hs, bool fill)
1282 {
1283 vnode_impl_t *vip;
1284 uint64_t chain;
1285
1286 strlcpy(hs->hash_name, "vcache", sizeof(hs->hash_name));
1287 strlcpy(hs->hash_desc, "vnode cache hash", sizeof(hs->hash_desc));
1288 if (!fill)
1289 return 0;
1290
1291 hs->hash_size = vcache_hashmask + 1;
1292
1293 for (size_t i = 0; i < hs->hash_size; i++) {
1294 chain = 0;
1295 mutex_enter(&vcache_lock);
1296 SLIST_FOREACH(vip, &vcache_hashtab[i], vi_hash) {
1297 chain++;
1298 }
1299 mutex_exit(&vcache_lock);
1300 if (chain > 0) {
1301 hs->hash_used++;
1302 hs->hash_items += chain;
1303 if (chain > hs->hash_maxchain)
1304 hs->hash_maxchain = chain;
1305 }
1306 preempt_point();
1307 }
1308
1309 return 0;
1310 }
1311
1312 static void
1313 vcache_init(void)
1314 {
1315
1316 vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
1317 0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
1318 KASSERT(vcache_pool != NULL);
1319 mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
1320 cv_init(&vcache_cv, "vcache");
1321 vcache_hashsize = desiredvnodes;
1322 vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
1323 &vcache_hashmask);
1324 hashstat_register("vcache", vcache_stats);
1325 }
1326
1327 static void
1328 vcache_reinit(void)
1329 {
1330 int i;
1331 uint32_t hash;
1332 u_long oldmask, newmask;
1333 struct hashhead *oldtab, *newtab;
1334 vnode_impl_t *vip;
1335
1336 newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
1337 mutex_enter(&vcache_lock);
1338 oldtab = vcache_hashtab;
1339 oldmask = vcache_hashmask;
1340 vcache_hashsize = desiredvnodes;
1341 vcache_hashtab = newtab;
1342 vcache_hashmask = newmask;
1343 for (i = 0; i <= oldmask; i++) {
1344 while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
1345 SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
1346 hash = vcache_hash(&vip->vi_key);
1347 SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
1348 vip, vi_hash);
1349 }
1350 }
1351 mutex_exit(&vcache_lock);
1352 hashdone(oldtab, HASH_SLIST, oldmask);
1353 }
1354
1355 static inline vnode_impl_t *
1356 vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
1357 {
1358 struct hashhead *hashp;
1359 vnode_impl_t *vip;
1360
1361 KASSERT(mutex_owned(&vcache_lock));
1362
1363 hashp = &vcache_hashtab[hash & vcache_hashmask];
1364 SLIST_FOREACH(vip, hashp, vi_hash) {
1365 if (key->vk_mount != vip->vi_key.vk_mount)
1366 continue;
1367 if (key->vk_key_len != vip->vi_key.vk_key_len)
1368 continue;
1369 if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
1370 continue;
1371 return vip;
1372 }
1373 return NULL;
1374 }
1375
1376 /*
1377 * Allocate a new, uninitialized vcache node.
1378 */
1379 static vnode_impl_t *
1380 vcache_alloc(void)
1381 {
1382 vnode_impl_t *vip;
1383 vnode_t *vp;
1384
1385 vip = pool_cache_get(vcache_pool, PR_WAITOK);
1386 vp = VIMPL_TO_VNODE(vip);
1387 memset(vip, 0, sizeof(*vip));
1388
1389 rw_init(&vip->vi_lock);
1390 vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
1391
1392 uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
1393 klist_init(&vp->v_klist);
1394 cv_init(&vp->v_cv, "vnode");
1395 cache_vnode_init(vp);
1396
1397 vp->v_usecount = 1;
1398 vp->v_type = VNON;
1399 vp->v_size = vp->v_writesize = VSIZENOTSET;
1400
1401 vip->vi_state = VS_LOADING;
1402
1403 lru_requeue(vp, &lru_list[LRU_FREE]);
1404
1405 return vip;
1406 }
1407
1408 /*
1409 * Deallocate a vcache node in state VS_LOADING.
1410 *
1411 * vcache_lock held on entry and released on return.
1412 */
1413 static void
1414 vcache_dealloc(vnode_impl_t *vip)
1415 {
1416 vnode_t *vp;
1417
1418 KASSERT(mutex_owned(&vcache_lock));
1419
1420 vp = VIMPL_TO_VNODE(vip);
1421 vfs_ref(dead_rootmount);
1422 vfs_insmntque(vp, dead_rootmount);
1423 mutex_enter(vp->v_interlock);
1424 vp->v_op = dead_vnodeop_p;
1425 VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
1426 mutex_exit(&vcache_lock);
1427 vrelel(vp, 0, LK_NONE);
1428 }
1429
1430 /*
1431 * Free an unused, unreferenced vcache node.
1432 * v_interlock locked on entry.
1433 */
1434 static void
1435 vcache_free(vnode_impl_t *vip)
1436 {
1437 vnode_t *vp;
1438
1439 vp = VIMPL_TO_VNODE(vip);
1440 KASSERT(mutex_owned(vp->v_interlock));
1441
1442 KASSERT(vrefcnt(vp) == 0);
1443 KASSERT(vp->v_holdcnt == 0);
1444 KASSERT(vp->v_writecount == 0);
1445 lru_requeue(vp, NULL);
1446 mutex_exit(vp->v_interlock);
1447
1448 vfs_insmntque(vp, NULL);
1449 if (vp->v_type == VBLK || vp->v_type == VCHR)
1450 spec_node_destroy(vp);
1451
1452 mutex_obj_free(vp->v_interlock);
1453 rw_destroy(&vip->vi_lock);
1454 uvm_obj_destroy(&vp->v_uobj, true);
1455 klist_fini(&vp->v_klist);
1456 cv_destroy(&vp->v_cv);
1457 cache_vnode_fini(vp);
1458 pool_cache_put(vcache_pool, vip);
1459 }
1460
1461 /*
1462 * Try to get an initial reference on this cached vnode.
1463 * Returns zero on success or EBUSY if the vnode state is not LOADED.
1464 *
1465 * NB: lockless code sequences may rely on this not blocking.
1466 */
1467 int
1468 vcache_tryvget(vnode_t *vp)
1469 {
1470 u_int use, next;
1471
1472 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
1473 if (__predict_false((use & VUSECOUNT_GATE) == 0)) {
1474 return EBUSY;
1475 }
1476 next = atomic_cas_uint(&vp->v_usecount,
1477 use, (use + 1) | VUSECOUNT_VGET);
1478 if (__predict_true(next == use)) {
1479 #ifndef __HAVE_ATOMIC_AS_MEMBAR
1480 membar_enter();
1481 #endif
1482 return 0;
1483 }
1484 }
1485 }
1486
1487 /*
1488 * Try to get an initial reference on this cached vnode.
1489 * Returns zero on success and ENOENT if the vnode has been reclaimed.
1490 * Will wait for the vnode state to be stable.
1491 *
1492 * v_interlock locked on entry and unlocked on exit.
1493 */
1494 int
1495 vcache_vget(vnode_t *vp)
1496 {
1497 int error;
1498
1499 KASSERT(mutex_owned(vp->v_interlock));
1500
1501 /* Increment hold count to prevent vnode from disappearing. */
1502 vp->v_holdcnt++;
1503 VSTATE_WAIT_STABLE(vp);
1504 vp->v_holdcnt--;
1505
1506 /* If this was the last reference to a reclaimed vnode free it now. */
1507 if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
1508 if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
1509 vcache_free(VNODE_TO_VIMPL(vp));
1510 else
1511 mutex_exit(vp->v_interlock);
1512 return ENOENT;
1513 }
1514 VSTATE_ASSERT(vp, VS_LOADED);
1515 error = vcache_tryvget(vp);
1516 KASSERT(error == 0);
1517 mutex_exit(vp->v_interlock);
1518
1519 return 0;
1520 }
1521
1522 /*
1523 * Get a vnode / fs node pair by key and return it referenced through vpp.
1524 */
1525 int
1526 vcache_get(struct mount *mp, const void *key, size_t key_len,
1527 struct vnode **vpp)
1528 {
1529 int error;
1530 uint32_t hash;
1531 const void *new_key;
1532 struct vnode *vp;
1533 struct vcache_key vcache_key;
1534 vnode_impl_t *vip, *new_vip;
1535
1536 new_key = NULL;
1537 *vpp = NULL;
1538
1539 vcache_key.vk_mount = mp;
1540 vcache_key.vk_key = key;
1541 vcache_key.vk_key_len = key_len;
1542 hash = vcache_hash(&vcache_key);
1543
1544 again:
1545 mutex_enter(&vcache_lock);
1546 vip = vcache_hash_lookup(&vcache_key, hash);
1547
1548 /* If found, take a reference or retry. */
1549 if (__predict_true(vip != NULL)) {
1550 /*
1551 * If the vnode is loading we cannot take the v_interlock
1552 * here as it might change during load (see uvm_obj_setlock()).
1553 * As changing state from VS_LOADING requires both vcache_lock
1554 * and v_interlock it is safe to test with vcache_lock held.
1555 *
1556 * Wait for vnodes changing state from VS_LOADING and retry.
1557 */
1558 if (__predict_false(vip->vi_state == VS_LOADING)) {
1559 cv_wait(&vcache_cv, &vcache_lock);
1560 mutex_exit(&vcache_lock);
1561 goto again;
1562 }
1563 vp = VIMPL_TO_VNODE(vip);
1564 mutex_enter(vp->v_interlock);
1565 mutex_exit(&vcache_lock);
1566 error = vcache_vget(vp);
1567 if (error == ENOENT)
1568 goto again;
1569 if (error == 0)
1570 *vpp = vp;
1571 KASSERT((error != 0) == (*vpp == NULL));
1572 return error;
1573 }
1574 mutex_exit(&vcache_lock);
1575
1576 /* Allocate and initialize a new vcache / vnode pair. */
1577 error = vfs_busy(mp);
1578 if (error)
1579 return error;
1580 new_vip = vcache_alloc();
1581 new_vip->vi_key = vcache_key;
1582 vp = VIMPL_TO_VNODE(new_vip);
1583 mutex_enter(&vcache_lock);
1584 vip = vcache_hash_lookup(&vcache_key, hash);
1585 if (vip == NULL) {
1586 SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
1587 new_vip, vi_hash);
1588 vip = new_vip;
1589 }
1590
1591 /* If another thread beat us inserting this node, retry. */
1592 if (vip != new_vip) {
1593 vcache_dealloc(new_vip);
1594 vfs_unbusy(mp);
1595 goto again;
1596 }
1597 mutex_exit(&vcache_lock);
1598
1599 /* Load the fs node. Exclusive as new_vip is VS_LOADING. */
1600 error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
1601 if (error) {
1602 mutex_enter(&vcache_lock);
1603 SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1604 new_vip, vnode_impl, vi_hash);
1605 vcache_dealloc(new_vip);
1606 vfs_unbusy(mp);
1607 KASSERT(*vpp == NULL);
1608 return error;
1609 }
1610 KASSERT(new_key != NULL);
1611 KASSERT(memcmp(key, new_key, key_len) == 0);
1612 KASSERT(vp->v_op != NULL);
1613 vfs_insmntque(vp, mp);
1614 if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1615 vp->v_vflag |= VV_MPSAFE;
1616 vfs_ref(mp);
1617 vfs_unbusy(mp);
1618
1619 /* Finished loading, finalize node. */
1620 mutex_enter(&vcache_lock);
1621 new_vip->vi_key.vk_key = new_key;
1622 mutex_enter(vp->v_interlock);
1623 VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1624 mutex_exit(vp->v_interlock);
1625 mutex_exit(&vcache_lock);
1626 *vpp = vp;
1627 return 0;
1628 }
1629
1630 /*
1631 * Create a new vnode / fs node pair and return it referenced through vpp.
1632 */
1633 int
1634 vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
1635 kauth_cred_t cred, void *extra, struct vnode **vpp)
1636 {
1637 int error;
1638 uint32_t hash;
1639 struct vnode *vp, *ovp;
1640 vnode_impl_t *vip, *ovip;
1641
1642 *vpp = NULL;
1643
1644 /* Allocate and initialize a new vcache / vnode pair. */
1645 error = vfs_busy(mp);
1646 if (error)
1647 return error;
1648 vip = vcache_alloc();
1649 vip->vi_key.vk_mount = mp;
1650 vp = VIMPL_TO_VNODE(vip);
1651
1652 /* Create and load the fs node. */
1653 error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
1654 &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
1655 if (error) {
1656 mutex_enter(&vcache_lock);
1657 vcache_dealloc(vip);
1658 vfs_unbusy(mp);
1659 KASSERT(*vpp == NULL);
1660 return error;
1661 }
1662 KASSERT(vp->v_op != NULL);
1663 KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
1664 if (vip->vi_key.vk_key_len > 0) {
1665 KASSERT(vip->vi_key.vk_key != NULL);
1666 hash = vcache_hash(&vip->vi_key);
1667
1668 /*
1669 * Wait for previous instance to be reclaimed,
1670 * then insert new node.
1671 */
1672 mutex_enter(&vcache_lock);
1673 while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
1674 ovp = VIMPL_TO_VNODE(ovip);
1675 mutex_enter(ovp->v_interlock);
1676 mutex_exit(&vcache_lock);
1677 error = vcache_vget(ovp);
1678 KASSERT(error == ENOENT);
1679 mutex_enter(&vcache_lock);
1680 }
1681 SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
1682 vip, vi_hash);
1683 mutex_exit(&vcache_lock);
1684 }
1685 vfs_insmntque(vp, mp);
1686 if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1687 vp->v_vflag |= VV_MPSAFE;
1688 vfs_ref(mp);
1689 vfs_unbusy(mp);
1690
1691 /* Finished loading, finalize node. */
1692 mutex_enter(&vcache_lock);
1693 mutex_enter(vp->v_interlock);
1694 VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1695 mutex_exit(&vcache_lock);
1696 mutex_exit(vp->v_interlock);
1697 *vpp = vp;
1698 return 0;
1699 }
1700
1701 /*
1702 * Prepare key change: update the old cache node's key and lock the new cache node.
1703 * Return an error if the new node already exists.
1704 */
1705 int
1706 vcache_rekey_enter(struct mount *mp, struct vnode *vp,
1707 const void *old_key, size_t old_key_len,
1708 const void *new_key, size_t new_key_len)
1709 {
1710 uint32_t old_hash, new_hash;
1711 struct vcache_key old_vcache_key, new_vcache_key;
1712 vnode_impl_t *vip, *new_vip;
1713
1714 old_vcache_key.vk_mount = mp;
1715 old_vcache_key.vk_key = old_key;
1716 old_vcache_key.vk_key_len = old_key_len;
1717 old_hash = vcache_hash(&old_vcache_key);
1718
1719 new_vcache_key.vk_mount = mp;
1720 new_vcache_key.vk_key = new_key;
1721 new_vcache_key.vk_key_len = new_key_len;
1722 new_hash = vcache_hash(&new_vcache_key);
1723
1724 new_vip = vcache_alloc();
1725 new_vip->vi_key = new_vcache_key;
1726
1727 /* Insert locked new node used as placeholder. */
1728 mutex_enter(&vcache_lock);
1729 vip = vcache_hash_lookup(&new_vcache_key, new_hash);
1730 if (vip != NULL) {
1731 vcache_dealloc(new_vip);
1732 return EEXIST;
1733 }
1734 SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
1735 new_vip, vi_hash);
1736
1737 /* Replace the old node's key with the temporary copy. */
1738 vip = vcache_hash_lookup(&old_vcache_key, old_hash);
1739 KASSERT(vip != NULL);
1740 KASSERT(VIMPL_TO_VNODE(vip) == vp);
1741 KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
1742 vip->vi_key = old_vcache_key;
1743 mutex_exit(&vcache_lock);
1744 return 0;
1745 }
1746
1747 /*
1748 * Key change complete: update old node and remove placeholder.
1749 */
1750 void
1751 vcache_rekey_exit(struct mount *mp, struct vnode *vp,
1752 const void *old_key, size_t old_key_len,
1753 const void *new_key, size_t new_key_len)
1754 {
1755 uint32_t old_hash, new_hash;
1756 struct vcache_key old_vcache_key, new_vcache_key;
1757 vnode_impl_t *vip, *new_vip;
1758 struct vnode *new_vp;
1759
1760 old_vcache_key.vk_mount = mp;
1761 old_vcache_key.vk_key = old_key;
1762 old_vcache_key.vk_key_len = old_key_len;
1763 old_hash = vcache_hash(&old_vcache_key);
1764
1765 new_vcache_key.vk_mount = mp;
1766 new_vcache_key.vk_key = new_key;
1767 new_vcache_key.vk_key_len = new_key_len;
1768 new_hash = vcache_hash(&new_vcache_key);
1769
1770 mutex_enter(&vcache_lock);
1771
1772 /* Lookup old and new node. */
1773 vip = vcache_hash_lookup(&old_vcache_key, old_hash);
1774 KASSERT(vip != NULL);
1775 KASSERT(VIMPL_TO_VNODE(vip) == vp);
1776
1777 new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
1778 KASSERT(new_vip != NULL);
1779 KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
1780 new_vp = VIMPL_TO_VNODE(new_vip);
1781 mutex_enter(new_vp->v_interlock);
1782 VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
1783 mutex_exit(new_vp->v_interlock);
1784
1785 /* Rekey old node and put it onto its new hashlist. */
1786 vip->vi_key = new_vcache_key;
1787 if (old_hash != new_hash) {
1788 SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
1789 vip, vnode_impl, vi_hash);
1790 SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
1791 vip, vi_hash);
1792 }
1793
1794 /* Remove new node used as placeholder. */
1795 SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
1796 new_vip, vnode_impl, vi_hash);
1797 vcache_dealloc(new_vip);
1798 }
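
/*
 * Sketch of how the two rekey calls above pair up in a hypothetical
 * file system caller; the fs-specific work in between is whatever
 * makes the node answer to new_key:
 *
 *	error = vcache_rekey_enter(mp, vp, &old_key, sizeof(old_key),
 *	    &new_key, sizeof(new_key));
 *	if (error == 0) {
 *		...update the fs node so it is found under new_key...
 *		vcache_rekey_exit(mp, vp, &old_key, sizeof(old_key),
 *		    &new_key, sizeof(new_key));
 *	}
 */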
1799
1800 /*
1801 * Disassociate the underlying file system from a vnode.
1802 *
1803 * Must be called with vnode locked and will return unlocked.
1804 * Must be called with the interlock held, and will return with it held.
1805 */
1806 static void
1807 vcache_reclaim(vnode_t *vp)
1808 {
1809 lwp_t *l = curlwp;
1810 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
1811 struct mount *mp = vp->v_mount;
1812 uint32_t hash;
1813 uint8_t temp_buf[64], *temp_key;
1814 size_t temp_key_len;
1815 bool recycle, active;
1816 int error;
1817
1818 KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
1819 VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1820 KASSERT(mutex_owned(vp->v_interlock));
1821 KASSERT(vrefcnt(vp) != 0);
1822
1823 active = (vrefcnt(vp) > 1);
1824 temp_key_len = vip->vi_key.vk_key_len;
1825 /*
1826 * Prevent the vnode from being recycled or brought into use
1827 * while we clean it out.
1828 */
1829 VSTATE_CHANGE(vp, VS_BLOCKED, VS_RECLAIMING);
1830 mutex_exit(vp->v_interlock);
1831
1832 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
1833 mutex_enter(vp->v_interlock);
1834 if ((vp->v_iflag & VI_EXECMAP) != 0) {
1835 cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
1836 }
1837 vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
1838 vp->v_iflag |= VI_DEADCHECK; /* for genfs_getpages() */
1839 mutex_exit(vp->v_interlock);
1840 rw_exit(vp->v_uobj.vmobjlock);
1841
1842 /*
1843 * With vnode state set to reclaiming, purge name cache immediately
1844 * to prevent new handles on vnode, and wait for existing threads
1845 * trying to get a handle to notice VS_RECLAIMED status and abort.
1846 */
1847 cache_purge(vp);
1848
1849 /* Replace the vnode key with a temporary copy. */
1850 if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
1851 temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
1852 } else {
1853 temp_key = temp_buf;
1854 }
1855 if (vip->vi_key.vk_key_len > 0) {
1856 mutex_enter(&vcache_lock);
1857 memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
1858 vip->vi_key.vk_key = temp_key;
1859 mutex_exit(&vcache_lock);
1860 }
1861
1862 fstrans_start(mp);
1863
1864 /*
1865 * Clean out any cached data associated with the vnode.
1866 * If purging an active vnode, it must be closed and
1867 * deactivated before being reclaimed.
1868 */
1869 error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
1870 if (error != 0) {
1871 if (wapbl_vphaswapbl(vp))
1872 WAPBL_DISCARD(wapbl_vptomp(vp));
1873 error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
1874 }
1875 KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
1876 KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1877 if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
1878 spec_node_revoke(vp);
1879 }
1880
1881 /*
1882 * Disassociate the underlying file system from the vnode.
1883 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
1884 * the vnode, and may destroy the vnode so that VOP_UNLOCK
1885 * would no longer function.
1886 */
1887 VOP_INACTIVE(vp, &recycle);
1888 KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
1889 VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1890 if (VOP_RECLAIM(vp)) {
1891 vnpanic(vp, "%s: cannot reclaim", __func__);
1892 }
1893
1894 KASSERT(vp->v_data == NULL);
1895 KASSERT((vp->v_iflag & VI_PAGES) == 0);
1896
1897 if (vp->v_type == VREG && vp->v_ractx != NULL) {
1898 uvm_ra_freectx(vp->v_ractx);
1899 vp->v_ractx = NULL;
1900 }
1901
1902 if (vip->vi_key.vk_key_len > 0) {
1903 /* Remove from vnode cache. */
1904 hash = vcache_hash(&vip->vi_key);
1905 mutex_enter(&vcache_lock);
1906 KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
1907 SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1908 vip, vnode_impl, vi_hash);
1909 mutex_exit(&vcache_lock);
1910 }
1911 if (temp_key != temp_buf)
1912 kmem_free(temp_key, temp_key_len);
1913
1914 /* Done with purge, notify sleepers of the grim news. */
1915 mutex_enter(vp->v_interlock);
1916 vp->v_op = dead_vnodeop_p;
1917 vp->v_vflag |= VV_LOCKSWORK;
1918 VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
1919 vp->v_tag = VT_NON;
1920 /*
1921 * Don't check for interest in NOTE_REVOKE; it's always posted
1922 * because it sets EV_EOF.
1923 */
1924 KNOTE(&vp->v_klist, NOTE_REVOKE);
1925 mutex_exit(vp->v_interlock);
1926
1927 /*
1928 * Move to dead mount. Must be after changing the operations
1929 * vector as vnode operations enter the mount before using the
1930 * operations vector. See sys/kern/vnode_if.c.
1931 */
1932 vp->v_vflag &= ~VV_ROOT;
1933 vfs_ref(dead_rootmount);
1934 vfs_insmntque(vp, dead_rootmount);
1935
1936 #ifdef PAX_SEGVGUARD
1937 pax_segvguard_cleanup(vp);
1938 #endif /* PAX_SEGVGUARD */
1939
1940 mutex_enter(vp->v_interlock);
1941 fstrans_done(mp);
1942 KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1943 }
1944
1945 /*
1946 * Disassociate the underlying file system from an open device vnode
1947 * and make it anonymous.
1948 *
1949 * Vnode unlocked on entry, drops a reference to the vnode.
1950 */
1951 void
1952 vcache_make_anon(vnode_t *vp)
1953 {
1954 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
1955 uint32_t hash;
1956 bool recycle;
1957
1958 KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
1959 KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
1960 VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);
1961
1962 /* Remove from vnode cache. */
1963 hash = vcache_hash(&vip->vi_key);
1964 mutex_enter(&vcache_lock);
1965 KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
1966 SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1967 vip, vnode_impl, vi_hash);
1968 vip->vi_key.vk_mount = dead_rootmount;
1969 vip->vi_key.vk_key_len = 0;
1970 vip->vi_key.vk_key = NULL;
1971 mutex_exit(&vcache_lock);
1972
1973 /*
1974 * Disassociate the underlying file system from the vnode.
1975 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
1976 * the vnode, and may destroy the vnode so that VOP_UNLOCK
1977 * would no longer function.
1978 */
1979 if (vn_lock(vp, LK_EXCLUSIVE)) {
1980 vnpanic(vp, "%s: cannot lock", __func__);
1981 }
1982 VOP_INACTIVE(vp, &recycle);
1983 KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
1984 VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1985 if (VOP_RECLAIM(vp)) {
1986 vnpanic(vp, "%s: cannot reclaim", __func__);
1987 }
1988
1989 /* Purge name cache. */
1990 cache_purge(vp);
1991
1992 /* Done with purge, change operations vector. */
1993 mutex_enter(vp->v_interlock);
1994 vp->v_op = spec_vnodeop_p;
1995 vp->v_vflag |= VV_MPSAFE;
1996 vp->v_vflag &= ~VV_LOCKSWORK;
1997 mutex_exit(vp->v_interlock);
1998
1999 /*
2000 * Move to dead mount. Must be after changing the operations
2001 * vector as vnode operations enter the mount before using the
2002 * operations vector. See sys/kern/vnode_if.c.
2003 */
2004 vfs_ref(dead_rootmount);
2005 vfs_insmntque(vp, dead_rootmount);
2006
2007 vrele(vp);
2008 }
2009
2010 /*
2011 * Update outstanding I/O count and do wakeup if requested.
2012 */
2013 void
2014 vwakeup(struct buf *bp)
2015 {
2016 vnode_t *vp;
2017
2018 if ((vp = bp->b_vp) == NULL)
2019 return;
2020
2021 KASSERT(bp->b_objlock == vp->v_interlock);
2022 KASSERT(mutex_owned(bp->b_objlock));
2023
2024 if (--vp->v_numoutput < 0)
2025 vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
2026 if (vp->v_numoutput == 0)
2027 cv_broadcast(&vp->v_cv);
2028 }
2029
2030 /*
2031 * Test a vnode for being or becoming dead. Returns one of:
2032 * EBUSY: vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
2033 * ENOENT: vnode is dead.
2034 * 0: otherwise.
2035 *
2036 * Whenever this function returns a non-zero value all future
2037 * calls will also return a non-zero value.
2038 */
2039 int
2040 vdead_check(struct vnode *vp, int flags)
2041 {
2042
2043 KASSERT(mutex_owned(vp->v_interlock));
2044
2045 if (! ISSET(flags, VDEAD_NOWAIT))
2046 VSTATE_WAIT_STABLE(vp);
2047
2048 if (VSTATE_GET(vp) == VS_RECLAIMING) {
2049 KASSERT(ISSET(flags, VDEAD_NOWAIT));
2050 return EBUSY;
2051 } else if (VSTATE_GET(vp) == VS_RECLAIMED) {
2052 return ENOENT;
2053 }
2054
2055 return 0;
2056 }
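
/*
 * A sketch-only example of the common non-blocking pattern for callers
 * that must bail out on a dying vnode (the caller is assumed to hold a
 * reference on vp already):
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		return error;
 */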
2057
2058 int
2059 vfs_drainvnodes(void)
2060 {
2061 int i, gen;
2062
2063 mutex_enter(&vdrain_lock);
2064 for (i = 0; i < 2; i++) {
2065 gen = vdrain_gen;
2066 while (gen == vdrain_gen) {
2067 cv_broadcast(&vdrain_cv);
2068 cv_wait(&vdrain_gen_cv, &vdrain_lock);
2069 }
2070 }
2071 mutex_exit(&vdrain_lock);
2072
2073 if (numvnodes >= desiredvnodes)
2074 return EBUSY;
2075
2076 if (vcache_hashsize != desiredvnodes)
2077 vcache_reinit();
2078
2079 return 0;
2080 }
2081
2082 void
2083 vnpanic(vnode_t *vp, const char *fmt, ...)
2084 {
2085 va_list ap;
2086
2087 #ifdef DIAGNOSTIC
2088 vprint(NULL, vp);
2089 #endif
2090 va_start(ap, fmt);
2091 vpanic(fmt, ap);
2092 va_end(ap);
2093 }
2094
2095 void
2096 vshareilock(vnode_t *tvp, vnode_t *fvp)
2097 {
2098 kmutex_t *oldlock;
2099
2100 oldlock = tvp->v_interlock;
2101 mutex_obj_hold(fvp->v_interlock);
2102 tvp->v_interlock = fvp->v_interlock;
2103 mutex_obj_free(oldlock);
2104 }
2105