/*	$NetBSD: vfs_vnode.c,v 1.138 2022/03/19 13:52:11 hannken Exp $	*/

/*-
 * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */
/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vcache_vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was another, traditional way.  Currently, only the draining thread
 *	recycles the vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In such a case, VOP_INACTIVE(9) is called to
 *	inform the file system that the vnode is inactive.  Via this call,
 *	the file system indicates whether the vnode can be recycled
 *	(usually, it checks its own references, e.g. count of links,
 *	whether the file was removed).
 *
 *	Depending on the indication, the vnode can be put onto a free list
 *	(cache), or cleaned via vcache_reclaim, which calls VOP_RECLAIM(9)
 *	to disassociate the underlying file system from the vnode, and
 *	finally destroyed.
 *
 * Vnode state
 *
 *	A vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating with the underlying file
 *			system and is not yet ready to use.
 *	- LOADED	Vnode has an associated underlying file system and
 *			is ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from the underlying file
 *			system and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> LOADED
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	BLOCKED -> RECLAIMING
 *			Vnode starts disassociation from the underlying file
 *			system in vcache_reclaim().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from the underlying
 *			file system in vcache_reclaim().
 *	LOADED -> BLOCKED
 *			Either vcache_rekey*() is changing the vnode key or
 *			vrelel() is about to call VOP_INACTIVE().
 *	BLOCKED -> LOADED
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system, or
 *			vcache_rekey*() drops a vnode used as a placeholder.
 *
 *	Of these states LOADING, BLOCKED and RECLAIMING are intermediate
 *	and it is possible to wait for a state change.
 *
 *	State is protected with v_interlock with one exception:
 *	to change from LOADING both v_interlock and vcache_lock must be
 *	held, so it is possible to check "state == LOADING" without holding
 *	v_interlock.  See vcache_get() for details.
 *
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained by the
 *	vref(9), vrele(9) and vput(9) routines.  Common points holding
 *	references are e.g. file openings, the current working directory,
 *	mount points, etc.
 *
 *	v_usecount is adjusted with atomic operations; however, to change
 *	from a non-zero value to zero the interlock must also be held.
 */
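
/*
 * Usage sketch (illustrative only, not part of the implementation):
 * a file system lookup routine typically drives the life-cycle above,
 * assuming here that the inode number serves as the cache key:
 *
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *	if (error != 0)
 *		return error;
 *	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	...
 *	vput(vp);			(unlock and drop the reference)
 */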

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.138 2022/03/19 13:52:11 hannken Exp $");

#ifdef _KERNEL_OPT
#include "opt_pax.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pax.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode_impl.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_stat.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC	0x0001	/* Always defer to vrele thread. */

#define	LRU_VRELE	0
#define	LRU_FREE	1
#define	LRU_HOLD	2
#define	LRU_COUNT	3

/*
 * There are three lru lists: one holds vnodes waiting for async release,
 * one is for vnodes which have no buffer/page references and one for those
 * which do (i.e. v_holdcnt is non-zero).  We put the lists into a single,
 * private cache line as vnodes migrate between them while under the same
 * lock (vdrain_lock).
 */
u_int			numvnodes		__cacheline_aligned;
static vnodelst_t	lru_list[LRU_COUNT]	__cacheline_aligned;
static kmutex_t		vdrain_lock		__cacheline_aligned;
static kcondvar_t	vdrain_cv;
static int		vdrain_gen;
static kcondvar_t	vdrain_gen_cv;
static bool		vdrain_retry;
static lwp_t *		vdrain_lwp;
SLIST_HEAD(hashhead, vnode_impl);
static kmutex_t		vcache_lock		__cacheline_aligned;
static kcondvar_t	vcache_cv;
static u_int		vcache_hashsize;
static u_long		vcache_hashmask;
static struct hashhead	*vcache_hashtab;
static pool_cache_t	vcache_pool;
static void		lru_requeue(vnode_t *, vnodelst_t *);
static vnodelst_t *	lru_which(vnode_t *);
static vnode_impl_t *	vcache_alloc(void);
static void		vcache_dealloc(vnode_impl_t *);
static void		vcache_free(vnode_impl_t *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vcache_reclaim(vnode_t *);
static void		vrelel(vnode_t *, int, int);
static void		vdrain_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount	*dead_rootmount;
extern int		(**dead_vnodeop_p)(void *);
extern int		(**spec_vnodeop_p)(void *);
extern struct vfsops	dead_vfsops;

/*
 * The high bit of v_usecount is a gate for vcache_tryvget().  It's set
 * only when the vnode state is LOADED.
 * The next bit of v_usecount is a flag for vrelel().  It's set
 * from vcache_vget() and vcache_tryvget() whenever the operation succeeds.
 */
#define	VUSECOUNT_MASK	0x3fffffff
#define	VUSECOUNT_GATE	0x80000000
#define	VUSECOUNT_VGET	0x40000000
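
/*
 * A sketch of the resulting v_usecount layout:
 *
 *	 31     30    29 ............................... 0
 *	+------+------+---------------------------------+
 *	| GATE | VGET | reference count (VUSECOUNT_MASK) |
 *	+------+------+---------------------------------+
 */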

/*
 * Return the current usecount of a vnode.
 */
inline int
vrefcnt(struct vnode *vp)
{

	return atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_MASK;
}

/* Vnode state operations and diagnostics. */

#if defined(DIAGNOSTIC)

#define VSTATE_VALID(state) \
	((state) != VS_ACTIVE && (state) != VS_MARKER)
#define VSTATE_GET(vp) \
	vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
	vstate_assert_wait_stable((vp), __func__, __LINE__)

void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	int refcnt = vrefcnt(vp);

	if (!has_lock) {
		/*
		 * Prevent predictive loads from the CPU, but check the
		 * state without locking first.
		 */
		membar_enter();
		if (state == VS_ACTIVE && refcnt > 0 &&
		    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
			return;
		if (vip->vi_state == state)
			return;
		mutex_enter((vp)->v_interlock);
	}

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if ((state == VS_ACTIVE && refcnt > 0 &&
	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
	    vip->vi_state == state) {
		if (!has_lock)
			mutex_exit((vp)->v_interlock);
		return;
	}
	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
	    vstate_name(vip->vi_state), refcnt,
	    vstate_name(state), func, line);
}

static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	return vip->vi_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);
}

static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	bool gated = (atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_GATE);
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);

	if (! VSTATE_VALID(from))
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (! VSTATE_VALID(to))
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (vip->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(vip->vi_state), vstate_name(from), func, line);
	if ((from == VS_LOADED) != gated)
		vnpanic(vp, "state is %s, gate %d does not match at %s:%d\n",
		    vstate_name(vip->vi_state), gated, func, line);

	/* Open/close the gate for vcache_tryvget(). */
	if (to == VS_LOADED) {
#ifndef __HAVE_ATOMIC_AS_MEMBAR
		membar_exit();
#endif
		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
	} else {
		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
	}

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
	(VNODE_TO_VIMPL((vp))->vi_state)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
	vstate_wait_stable((vp))
void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{

}

static void
vstate_wait_stable(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	/* Open/close the gate for vcache_tryvget(). */
	if (to == VS_LOADED) {
#ifndef __HAVE_ATOMIC_AS_MEMBAR
		membar_exit();
#endif
		atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
	} else {
		atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
	}

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */

void
vfs_vnode_sysinit(void)
{
	int error __diagused, i;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag |= IMNT_MPSAFE;

	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
	for (i = 0; i < LRU_COUNT; i++) {
		TAILQ_INIT(&lru_list[i]);
	}
	vcache_init();

	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vdrain_gen_cv, "vdrainwt");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, &vdrain_lwp, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	memset(vip, 0, sizeof(*vip));
	vp = VIMPL_TO_VNODE(vip);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	klist_init(&vp->v_klist);
	vip->vi_state = VS_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	vnode_impl_t *vip;

	vip = VNODE_TO_VIMPL(vp);
	KASSERT(vip->vi_state == VS_MARKER);
	mutex_obj_free(vp->v_interlock);
	uvm_obj_destroy(&vp->v_uobj, true);
	klist_fini(&vp->v_klist);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
}

/*
 * Return the lru list this node should be on.
 */
static vnodelst_t *
lru_which(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt > 0)
		return &lru_list[LRU_HOLD];
	else
		return &lru_list[LRU_FREE];
}

/*
 * Put the vnode at the end of the given list.
 * Both the current and the new list may be NULL, used on vnode alloc/free.
 * Adjust numvnodes and signal the vdrain thread if there is work.
 */
static void
lru_requeue(vnode_t *vp, vnodelst_t *listhd)
{
	vnode_impl_t *vip;
	int d;

	/*
	 * If the vnode is on the correct list, and was put there recently,
	 * then leave it be, thus avoiding huge cache and lock contention.
	 */
	vip = VNODE_TO_VIMPL(vp);
	if (listhd == vip->vi_lrulisthd &&
	    (getticks() - vip->vi_lrulisttm) < hz) {
		return;
	}

	mutex_enter(&vdrain_lock);
	d = 0;
	if (vip->vi_lrulisthd != NULL)
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d++;
	vip->vi_lrulisthd = listhd;
	vip->vi_lrulisttm = getticks();
	if (vip->vi_lrulisthd != NULL)
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d--;
	if (d != 0) {
		/*
		 * Looks strange?  This is not a bug.  Don't store
		 * numvnodes unless there is a change - avoid false
		 * sharing on MP.
		 */
		numvnodes += d;
	}
	if ((d > 0 && numvnodes > desiredvnodes) ||
	    listhd == &lru_list[LRU_VRELE])
		cv_signal(&vdrain_cv);
	mutex_exit(&vdrain_lock);
}

/*
 * Release deferred vrele vnodes for this mount.
 * Called with file system suspended.
 */
void
vrele_flush(struct mount *mp)
{
	vnode_impl_t *vip, *marker;
	vnode_t *vp;
	int when = 0;

	KASSERT(fstrans_is_owner(mp));

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);
	TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);

	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
		TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
		TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
		    vi_lrulist);
		vp = VIMPL_TO_VNODE(vip);
		if (vnis_marker(vp))
			continue;

		KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
		vip->vi_lrulisthd = &lru_list[LRU_HOLD];
		vip->vi_lrulisttm = getticks();
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
		mutex_exit(&vdrain_lock);

		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		mutex_enter(vp->v_interlock);
		vrelel(vp, 0, LK_EXCLUSIVE);

		if (getticks() > when) {
			yield();
			when = getticks() + hz / 10;
		}

		mutex_enter(&vdrain_lock);
	}

	TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
	mutex_exit(&vdrain_lock);

	vnfree_marker(VIMPL_TO_VNODE(marker));
}

/*
 * Reclaim a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_remove(vnode_t *vp)
{
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	/* Probe usecount (unlocked). */
	if (vrefcnt(vp) > 0)
		return;
	/* Try v_interlock -- we lock the wrong direction! */
	if (!mutex_tryenter(vp->v_interlock))
		return;
	/* Probe usecount and state. */
	if (vrefcnt(vp) > 0 || VSTATE_GET(vp) != VS_LOADED) {
		mutex_exit(vp->v_interlock);
		return;
	}
	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0) {
		mutex_exit(vp->v_interlock);
		return;
	}
	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	if (vcache_vget(vp) == 0) {
		if (!vrecycle(vp)) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			mutex_enter(vp->v_interlock);
			vrelel(vp, 0, LK_EXCLUSIVE);
		}
	}
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Release a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_vrele(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0)
		return;

	/*
	 * First remove the vnode from the vrele list.
	 * Put it onto the last lru list; the last vrele()
	 * will put it back onto the right list before
	 * its usecount reaches zero.
	 */
	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	vip->vi_lrulisthd = &lru_list[LRU_HOLD];
	vip->vi_lrulisttm = getticks();
	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);

	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, LK_EXCLUSIVE);
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes
 * and release vnodes from asynchronous vrele.
 */
static void
vdrain_thread(void *cookie)
{
	int i;
	u_int target;
	vnode_impl_t *vip, *marker;

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);

	for (;;) {
		vdrain_retry = false;
		target = desiredvnodes - desiredvnodes/10;

		for (i = 0; i < LRU_COUNT; i++) {
			TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
				TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
				TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
				    vi_lrulist);
				if (vnis_marker(VIMPL_TO_VNODE(vip)))
					continue;
				if (i == LRU_VRELE)
					vdrain_vrele(VIMPL_TO_VNODE(vip));
				else if (numvnodes < target)
					break;
				else
					vdrain_remove(VIMPL_TO_VNODE(vip));
			}
			TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
		}

		if (vdrain_retry) {
			kpause("vdrainrt", false, 1, &vdrain_lock);
		} else {
			vdrain_gen++;
			cv_broadcast(&vdrain_gen_cv);
			cv_wait(&vdrain_cv, &vdrain_lock);
		}
	}
}

/*
 * Try to drop reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_exit();
#endif
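	/*
	 * The fence above is a release barrier on platforms whose
	 * atomic operations do not imply one: it orders our prior
	 * accesses to the vnode before the decrement below, pairing
	 * with the acquire membar_enter() in vrelel().
	 */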
	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
		if (__predict_false((use & VUSECOUNT_MASK) == 1)) {
			return false;
		}
		KASSERT((use & VUSECOUNT_MASK) > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{
	int lktype;

	/*
	 * Do an unlocked check of the usecount.  If it looks like we're not
	 * about to drop the last reference, then unlock the vnode and try
	 * to drop the reference.  If it ends up being the last reference
	 * after all, vrelel() can fix it all up.  Most of the time this
	 * will all go to plan.
	 */
	if (vrefcnt(vp) > 1) {
		VOP_UNLOCK(vp);
		if (vtryrele(vp)) {
			return;
		}
		lktype = LK_NONE;
	} else if ((vp->v_vflag & VV_LOCKSWORK) == 0) {
		VOP_UNLOCK(vp);
		lktype = LK_NONE;
	} else {
		lktype = VOP_ISLOCKED(vp);
		KASSERT(lktype != LK_NONE);
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, lktype);
}

/*
 * Vnode release.  If the reference count drops to zero, call the
 * inactive routine and either return the vnode to the freelist or
 * free it to the pool.
 */
static void
vrelel(vnode_t *vp, int flags, int lktype)
{
	const bool async = ((flags & VRELEL_ASYNC) != 0);
	bool recycle, defer, objlock_held;
	u_int use, next;
	int error;

	objlock_held = false;

retry:
	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VS_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just unlock and drop the reference count.
	 *
	 * Otherwise make sure we pass a point in time where we hold the
	 * last reference with VGET flag unset.
	 */
	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
		if (__predict_false((use & VUSECOUNT_MASK) > 1)) {
			if (objlock_held) {
				objlock_held = false;
				rw_exit(vp->v_uobj.vmobjlock);
			}
			if (lktype != LK_NONE) {
				mutex_exit(vp->v_interlock);
				lktype = LK_NONE;
				VOP_UNLOCK(vp);
				mutex_enter(vp->v_interlock);
			}
			if (vtryrele(vp)) {
				mutex_exit(vp->v_interlock);
				return;
			}
			next = atomic_load_relaxed(&vp->v_usecount);
			continue;
		}
		KASSERT((use & VUSECOUNT_MASK) == 1);
		next = use & ~VUSECOUNT_VGET;
		if (next != use) {
			next = atomic_cas_uint(&vp->v_usecount, use, next);
		}
		if (__predict_true(next == use)) {
			break;
		}
	}
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif
	if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * If already clean there is no need to lock, defer or
	 * deactivate this node.
	 */
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		if (objlock_held) {
			objlock_held = false;
			rw_exit(vp->v_uobj.vmobjlock);
		}
		if (lktype != LK_NONE) {
			mutex_exit(vp->v_interlock);
			lktype = LK_NONE;
			VOP_UNLOCK(vp);
			mutex_enter(vp->v_interlock);
		}
		goto out;
	}

	/*
	 * First try to get the vnode locked for VOP_INACTIVE().
	 * Defer the release to the vdrain kthread if the caller requests
	 * it explicitly, is the pagedaemon, or if the lock attempt failed.
	 */
	defer = false;
	if ((curlwp == uvm.pagedaemon_lwp) || async) {
		defer = true;
	} else if (lktype == LK_SHARED) {
		/* Excellent chance of getting, if the last ref. */
		error = vn_lock(vp, LK_UPGRADE | LK_RETRY | LK_NOWAIT);
		if (error != 0) {
			defer = true;
		} else {
			lktype = LK_EXCLUSIVE;
		}
	} else if (lktype == LK_NONE) {
		/* Excellent chance of getting, if the last ref. */
		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
		if (error != 0) {
			defer = true;
		} else {
			lktype = LK_EXCLUSIVE;
		}
	}
	KASSERT(mutex_owned(vp->v_interlock));
	if (defer) {
		/*
		 * Defer reclaim to the kthread; it's not safe to
		 * clean it here.  We donate it our last reference.
		 */
		if (lktype != LK_NONE) {
			mutex_exit(vp->v_interlock);
			VOP_UNLOCK(vp);
			mutex_enter(vp->v_interlock);
		}
		lru_requeue(vp, &lru_list[LRU_VRELE]);
		mutex_exit(vp->v_interlock);
		return;
	}
	KASSERT(lktype == LK_EXCLUSIVE);

	/* If the node gained another reference, retry. */
	use = atomic_load_relaxed(&vp->v_usecount);
	if ((use & VUSECOUNT_VGET) != 0) {
		goto retry;
	}
	KASSERT((use & VUSECOUNT_MASK) == 1);

	if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP|VI_WRMAP)) != 0 ||
	    (vp->v_vflag & VV_MAPPED) != 0) {
		/* Take care of space accounting. */
		if (!objlock_held) {
			objlock_held = true;
			if (!rw_tryenter(vp->v_uobj.vmobjlock, RW_WRITER)) {
				mutex_exit(vp->v_interlock);
				rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
				mutex_enter(vp->v_interlock);
				goto retry;
			}
		}
		if ((vp->v_iflag & VI_EXECMAP) != 0) {
			cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;
	}
	if (objlock_held) {
		objlock_held = false;
		rw_exit(vp->v_uobj.vmobjlock);
	}

	/*
	 * Deactivate the vnode, but preserve our reference across
	 * the call to VOP_INACTIVE().
	 *
	 * If VOP_INACTIVE() indicates that the file has been
	 * deleted, then recycle the vnode.
	 *
	 * Note that VOP_INACTIVE() will not drop the vnode lock.
	 */
	mutex_exit(vp->v_interlock);
	recycle = false;
	VOP_INACTIVE(vp, &recycle);
	if (!recycle) {
		lktype = LK_NONE;
		VOP_UNLOCK(vp);
	}
	mutex_enter(vp->v_interlock);

	/*
	 * Block new references then check again to see if a
	 * new reference was acquired in the meantime.  If
	 * it was, restore the vnode state and try again.
	 */
	if (recycle) {
		VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
		use = atomic_load_relaxed(&vp->v_usecount);
		if ((use & VUSECOUNT_VGET) != 0) {
			VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
			goto retry;
		}
		KASSERT((use & VUSECOUNT_MASK) == 1);
	}

	/*
	 * Recycle the vnode if the file is now unused (unlinked).
	 */
	if (recycle) {
		VSTATE_ASSERT(vp, VS_BLOCKED);
		KASSERT(lktype == LK_EXCLUSIVE);
		/* vcache_reclaim drops the lock. */
		lktype = LK_NONE;
		vcache_reclaim(vp);
	}
	KASSERT(vrefcnt(vp) > 0);
	KASSERT(lktype == LK_NONE);

out:
	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
		if (__predict_false((use & VUSECOUNT_VGET) != 0 &&
		    (use & VUSECOUNT_MASK) == 1)) {
			/* Gained and released another reference, retry. */
			goto retry;
		}
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			if (__predict_false((use & VUSECOUNT_MASK) != 1)) {
				/* Gained another reference. */
				mutex_exit(vp->v_interlock);
				return;
			}
			break;
		}
	}
#ifndef __HAVE_ATOMIC_AS_MEMBAR
	membar_enter();
#endif

	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		vcache_free(VNODE_TO_VIMPL(vp));
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		lru_requeue(vp, lru_which(vp));
		mutex_exit(vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, LK_NONE);
}

/*
 * Asynchronous vnode release: the vnode is released in a different context.
 */
void
vrele_async(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC, LK_NONE);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 *
 * NB: lockless code sequences may rely on this not blocking.
 */
void
vref(vnode_t *vp)
{

	KASSERT(vrefcnt(vp) > 0);

	atomic_inc_uint(&vp->v_usecount);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt++ == 0 && vrefcnt(vp) == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vnode_t *vp)
{

	mutex_enter(vp->v_interlock);
	vholdl(vp);
	mutex_exit(vp->v_interlock);
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vnode_t *vp)
{

	mutex_enter(vp->v_interlock);
	holdrelel(vp);
	mutex_exit(vp->v_interlock);
}

/*
 * Recycle an unused vnode if caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{
	int error __diagused;

	mutex_enter(vp->v_interlock);

	/* If the vnode is already clean we're done. */
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) != VS_LOADED) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrelel(vp, 0, LK_NONE);
		return true;
	}

	/* Prevent further references until the vnode is locked. */
	VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);

	/* Make sure we hold the last reference. */
	if (vrefcnt(vp) != 1) {
		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
		mutex_exit(vp->v_interlock);
		return false;
	}

	mutex_exit(vp->v_interlock);

	/*
	 * On a leaf file system this lock will always succeed as we hold
	 * the last reference and prevent further references.
	 * On layered file systems waiting for the lock would open a can of
	 * deadlocks as the lower vnodes may have other active references.
	 */
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);

	mutex_enter(vp->v_interlock);
	if (error) {
		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
		mutex_exit(vp->v_interlock);
		return false;
	}

	KASSERT(vrefcnt(vp) == 1);
	vcache_reclaim(vp);
	vrelel(vp, 0, LK_NONE);

	return true;
}
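
/*
 * Example caller from this file: vdrain_remove() above takes its own
 * reference with vcache_vget(), tries vrecycle(), and falls back to
 * vrelel() with the vnode locked if vrecycle() returns false.
 */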

/*
 * Helper for vrevoke() to propagate suspension from lastmp
 * to thismp.  Both args may be NULL.
 * Returns the currently suspended file system or NULL.
 */
static struct mount *
vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
{
	int error;

	if (lastmp == thismp)
		return thismp;

	if (lastmp != NULL)
		vfs_resume(lastmp);

	if (thismp == NULL)
		return NULL;

	do {
		error = vfs_suspend(thismp, 0);
	} while (error == EINTR || error == ERESTART);

	if (error == 0)
		return thismp;

	KASSERT(error == EOPNOTSUPP || error == ENOENT);
	return NULL;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	struct mount *mp;
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vrefcnt(vp) > 0);

	mp = vrevoke_suspend_next(NULL, vp->v_mount);

	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		mutex_exit(vp->v_interlock);
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		mutex_exit(vp->v_interlock);
		vgone(vp);
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);

		while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
			mp = vrevoke_suspend_next(mp, vq->v_mount);
			vgone(vq);
		}
	}
	vrevoke_suspend_next(mp, NULL);
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{
	int lktype;

	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	lktype = LK_EXCLUSIVE;
	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_LOADED) {
		VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
		vcache_reclaim(vp);
		lktype = LK_NONE;
	}
	VSTATE_ASSERT(vp, VS_RECLAIMED);
	vrelel(vp, 0, lktype);
}

static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
	uint32_t hash = HASH32_BUF_INIT;

	KASSERT(key->vk_key_len > 0);

	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
	return hash;
}

static int
vcache_stats(struct hashstat_sysctl *hs, bool fill)
{
	vnode_impl_t *vip;
	uint64_t chain;

	strlcpy(hs->hash_name, "vcache", sizeof(hs->hash_name));
	strlcpy(hs->hash_desc, "vnode cache hash", sizeof(hs->hash_desc));
	if (!fill)
		return 0;

	hs->hash_size = vcache_hashmask + 1;

	for (size_t i = 0; i < hs->hash_size; i++) {
		chain = 0;
		mutex_enter(&vcache_lock);
		SLIST_FOREACH(vip, &vcache_hashtab[i], vi_hash) {
			chain++;
		}
		mutex_exit(&vcache_lock);
		if (chain > 0) {
			hs->hash_used++;
			hs->hash_items += chain;
			if (chain > hs->hash_maxchain)
				hs->hash_maxchain = chain;
		}
		preempt_point();
	}

	return 0;
}

static void
vcache_init(void)
{

	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
	    0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vcache_pool != NULL);
	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vcache_cv, "vcache");
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
	    &vcache_hashmask);
	hashstat_register("vcache", vcache_stats);
}

static void
vcache_reinit(void)
{
	int i;
	uint32_t hash;
	u_long oldmask, newmask;
	struct hashhead *oldtab, *newtab;
	vnode_impl_t *vip;

	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
	mutex_enter(&vcache_lock);
	oldtab = vcache_hashtab;
	oldmask = vcache_hashmask;
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = newtab;
	vcache_hashmask = newmask;
	for (i = 0; i <= oldmask; i++) {
		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
			hash = vcache_hash(&vip->vi_key);
			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
			    vip, vi_hash);
		}
	}
	mutex_exit(&vcache_lock);
	hashdone(oldtab, HASH_SLIST, oldmask);
}

static inline vnode_impl_t *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
	struct hashhead *hashp;
	vnode_impl_t *vip;

	KASSERT(mutex_owned(&vcache_lock));

	hashp = &vcache_hashtab[hash & vcache_hashmask];
	SLIST_FOREACH(vip, hashp, vi_hash) {
		if (key->vk_mount != vip->vi_key.vk_mount)
			continue;
		if (key->vk_key_len != vip->vi_key.vk_key_len)
			continue;
		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
			continue;
		return vip;
	}
	return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static vnode_impl_t *
vcache_alloc(void)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	vp = VIMPL_TO_VNODE(vip);
	memset(vip, 0, sizeof(*vip));

	rw_init(&vip->vi_lock);
	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
	klist_init(&vp->v_klist);
	cv_init(&vp->v_cv, "vnode");
	cache_vnode_init(vp);

	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	vip->vi_state = VS_LOADING;

	lru_requeue(vp, &lru_list[LRU_FREE]);

	return vip;
}

/*
 * Deallocate a vcache node in state VS_LOADING.
 *
 * vcache_lock held on entry and released on return.
 */
static void
vcache_dealloc(vnode_impl_t *vip)
{
	vnode_t *vp;

	KASSERT(mutex_owned(&vcache_lock));

	vp = VIMPL_TO_VNODE(vip);
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
	mutex_exit(&vcache_lock);
	vrelel(vp, 0, LK_NONE);
}

/*
 * Free an unused, unreferenced vcache node.
 * v_interlock locked on entry.
 */
static void
vcache_free(vnode_impl_t *vip)
{
	vnode_t *vp;

	vp = VIMPL_TO_VNODE(vip);
	KASSERT(mutex_owned(vp->v_interlock));

	KASSERT(vrefcnt(vp) == 0);
	KASSERT(vp->v_holdcnt == 0);
	KASSERT(vp->v_writecount == 0);
	lru_requeue(vp, NULL);
	mutex_exit(vp->v_interlock);

	vfs_insmntque(vp, NULL);
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		spec_node_destroy(vp);

	mutex_obj_free(vp->v_interlock);
	rw_destroy(&vip->vi_lock);
	uvm_obj_destroy(&vp->v_uobj, true);
	klist_fini(&vp->v_klist);
	cv_destroy(&vp->v_cv);
	cache_vnode_fini(vp);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success or EBUSY if the vnode state is not LOADED.
 *
 * NB: lockless code sequences may rely on this not blocking.
 */
int
vcache_tryvget(vnode_t *vp)
{
	u_int use, next;

	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
		if (__predict_false((use & VUSECOUNT_GATE) == 0)) {
			return EBUSY;
		}
		next = atomic_cas_uint(&vp->v_usecount,
		    use, (use + 1) | VUSECOUNT_VGET);
		if (__predict_true(next == use)) {
#ifndef __HAVE_ATOMIC_AS_MEMBAR
			membar_enter();
#endif
			return 0;
		}
	}
}
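
/*
 * Illustrative sketch: a lockless consumer may try the non-blocking
 * path first and fall back to the blocking one under the interlock:
 *
 *	if (vcache_tryvget(vp) != 0) {
 *		mutex_enter(vp->v_interlock);
 *		error = vcache_vget(vp);	(may return ENOENT)
 *	}
 */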

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success or ENOENT if the vnode has been reclaimed.
 * Will wait for the vnode state to be stable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_vget(vnode_t *vp)
{
	int error;

	KASSERT(mutex_owned(vp->v_interlock));

	/* Increment hold count to prevent vnode from disappearing. */
	vp->v_holdcnt++;
	VSTATE_WAIT_STABLE(vp);
	vp->v_holdcnt--;

	/* If this was the last reference to a reclaimed vnode free it now. */
	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
		if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
			vcache_free(VNODE_TO_VIMPL(vp));
		else
			mutex_exit(vp->v_interlock);
		return ENOENT;
	}
	VSTATE_ASSERT(vp, VS_LOADED);
	error = vcache_tryvget(vp);
	KASSERT(error == 0);
	mutex_exit(vp->v_interlock);

	return 0;
}

/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
	int error;
	uint32_t hash;
	const void *new_key;
	struct vnode *vp;
	struct vcache_key vcache_key;
	vnode_impl_t *vip, *new_vip;

	new_key = NULL;
	*vpp = NULL;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

again:
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);

	/* If found, take a reference or retry. */
	if (__predict_true(vip != NULL)) {
		/*
		 * If the vnode is loading we cannot take the v_interlock
		 * here as it might change during load (see uvm_obj_setlock()).
		 * As changing state from VS_LOADING requires both vcache_lock
		 * and v_interlock it is safe to test with vcache_lock held.
		 *
		 * Wait for vnodes changing state from VS_LOADING and retry.
		 */
		if (__predict_false(vip->vi_state == VS_LOADING)) {
			cv_wait(&vcache_cv, &vcache_lock);
			mutex_exit(&vcache_lock);
			goto again;
		}
		vp = VIMPL_TO_VNODE(vip);
		mutex_enter(vp->v_interlock);
		mutex_exit(&vcache_lock);
		error = vcache_vget(vp);
		if (error == ENOENT)
			goto again;
		if (error == 0)
			*vpp = vp;
		KASSERT((error != 0) == (*vpp == NULL));
		return error;
	}
	mutex_exit(&vcache_lock);

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp);
	if (error)
		return error;
	new_vip = vcache_alloc();
	new_vip->vi_key = vcache_key;
	vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);
	if (vip == NULL) {
		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vi_hash);
		vip = new_vip;
	}

	/* If another thread beat us inserting this node, retry. */
	if (vip != new_vip) {
		vcache_dealloc(new_vip);
		vfs_unbusy(mp);
		goto again;
	}
	mutex_exit(&vcache_lock);

	/* Load the fs node.  Exclusive as new_vip is VS_LOADING. */
	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
	if (error) {
		mutex_enter(&vcache_lock);
		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vnode_impl, vi_hash);
		vcache_dealloc(new_vip);
		vfs_unbusy(mp);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_key != NULL);
	KASSERT(memcmp(key, new_key, key_len) == 0);
	KASSERT(vp->v_op != NULL);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_ref(mp);
	vfs_unbusy(mp);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	new_vip->vi_key.vk_key = new_key;
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache_lock);
	*vpp = vp;
	return 0;
}

/*
 * Create a new vnode / fs node pair and return it referenced through vpp.
 */
int
vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
    kauth_cred_t cred, void *extra, struct vnode **vpp)
{
	int error;
	uint32_t hash;
	struct vnode *vp, *ovp;
	vnode_impl_t *vip, *ovip;

	*vpp = NULL;

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp);
	if (error)
		return error;
	vip = vcache_alloc();
	vip->vi_key.vk_mount = mp;
	vp = VIMPL_TO_VNODE(vip);

	/* Create and load the fs node. */
	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
	if (error) {
		mutex_enter(&vcache_lock);
		vcache_dealloc(vip);
		vfs_unbusy(mp);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(vp->v_op != NULL);
	KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
	if (vip->vi_key.vk_key_len > 0) {
		KASSERT(vip->vi_key.vk_key != NULL);
		hash = vcache_hash(&vip->vi_key);

		/*
		 * Wait for previous instance to be reclaimed,
		 * then insert new node.
		 */
		mutex_enter(&vcache_lock);
		while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
			ovp = VIMPL_TO_VNODE(ovip);
			mutex_enter(ovp->v_interlock);
			mutex_exit(&vcache_lock);
			error = vcache_vget(ovp);
			KASSERT(error == ENOENT);
			mutex_enter(&vcache_lock);
		}
		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
		    vip, vi_hash);
		mutex_exit(&vcache_lock);
	}
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_ref(mp);
	vfs_unbusy(mp);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
	mutex_exit(&vcache_lock);
	mutex_exit(vp->v_interlock);
	*vpp = vp;
	return 0;
}

/*
 * Prepare key change: update the old cache node's key and lock the new
 * cache node.  Return an error if the new node already exists.
 */
int
vcache_rekey_enter(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *vip, *new_vip;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	new_vip = vcache_alloc();
	new_vip->vi_key = new_vcache_key;

	/* Insert locked new node used as placeholder. */
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
	if (vip != NULL) {
		vcache_dealloc(new_vip);
		return EEXIST;
	}
	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_vip, vi_hash);

	/* Replace the old node's key with the temporary copy. */
	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(vip != NULL);
	KASSERT(VIMPL_TO_VNODE(vip) == vp);
	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
	vip->vi_key = old_vcache_key;
	mutex_exit(&vcache_lock);
	return 0;
}
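
/*
 * A sketch of the expected rekey sequence, assuming the file system
 * switches its own notion of the key in between the two calls:
 *
 *	error = vcache_rekey_enter(mp, vp, &okey, sizeof(okey),
 *	    &nkey, sizeof(nkey));
 *	if (error == 0) {
 *		(update the fs node so it presents nkey)
 *		vcache_rekey_exit(mp, vp, &okey, sizeof(okey),
 *		    &nkey, sizeof(nkey));
 *	}
 */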

/*
 * Key change complete: update old node and remove placeholder.
 */
void
vcache_rekey_exit(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *vip, *new_vip;
	struct vnode *new_vp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	mutex_enter(&vcache_lock);

	/* Lookup old and new node. */
	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(vip != NULL);
	KASSERT(VIMPL_TO_VNODE(vip) == vp);

	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
	KASSERT(new_vip != NULL);
	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
	new_vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(new_vp->v_interlock);
	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
	mutex_exit(new_vp->v_interlock);

	/* Rekey old node and put it onto its new hashlist. */
	vip->vi_key = new_vcache_key;
	if (old_hash != new_hash) {
		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
		    vip, vnode_impl, vi_hash);
		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
		    vip, vi_hash);
	}

	/* Remove new node used as placeholder. */
	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_vip, vnode_impl, vi_hash);
	vcache_dealloc(new_vip);
}

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with vnode locked and will return unlocked.
 * Must be called with the interlock held, and will return with it held.
 */
static void
vcache_reclaim(vnode_t *vp)
{
	lwp_t *l = curlwp;
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp = vp->v_mount;
	uint32_t hash;
	uint8_t temp_buf[64], *temp_key;
	size_t temp_key_len;
	bool recycle, active;
	int error;

	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vrefcnt(vp) != 0);

	active = (vrefcnt(vp) > 1);
	temp_key_len = vip->vi_key.vk_key_len;
	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_RECLAIMING);
	mutex_exit(vp->v_interlock);

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_EXECMAP) != 0) {
		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	vp->v_iflag |= VI_DEADCHECK;	/* for genfs_getpages() */
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);

	/*
	 * With the vnode state set to reclaiming, purge the name cache
	 * immediately to prevent new handles on the vnode, and wait for
	 * existing threads trying to get a handle to notice the
	 * VS_RECLAIMED status and abort.
	 */
	cache_purge(vp);

	/* Replace the vnode key with a temporary copy. */
	if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
	} else {
		temp_key = temp_buf;
	}
	if (vip->vi_key.vk_key_len > 0) {
		mutex_enter(&vcache_lock);
		memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
		vip->vi_key.vk_key = temp_key;
		mutex_exit(&vcache_lock);
	}

	fstrans_start(mp);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.
	 */
	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
	if (error != 0) {
		if (wapbl_vphaswapbl(vp))
			WAPBL_DISCARD(wapbl_vptomp(vp));
		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
	}
	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
		spec_node_revoke(vp);
	}

	/*
	 * Disassociate the underlying file system from the vnode.
	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
	 * would no longer function.
	 */
	VOP_INACTIVE(vp, &recycle);
	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	KASSERT(vp->v_data == NULL);
	KASSERT((vp->v_iflag & VI_PAGES) == 0);

	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}

	if (vip->vi_key.vk_key_len > 0) {
		/* Remove from vnode cache. */
		hash = vcache_hash(&vip->vi_key);
		mutex_enter(&vcache_lock);
		KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
		    vip, vnode_impl, vi_hash);
		mutex_exit(&vcache_lock);
	}
	if (temp_key != temp_buf)
		kmem_free(temp_key, temp_key_len);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_vflag |= VV_LOCKSWORK;
	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
	vp->v_tag = VT_NON;
	/*
	 * Don't check for interest in NOTE_REVOKE; it's always posted
	 * because it sets EV_EOF.
	 */
	KNOTE(&vp->v_klist, NOTE_REVOKE);
	mutex_exit(vp->v_interlock);

	/*
	 * Move to dead mount.  Must be after changing the operations
	 * vector as vnode operations enter the mount before using the
	 * operations vector.  See sys/kern/vnode_if.c.
	 */
	vp->v_vflag &= ~VV_ROOT;
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);

#ifdef PAX_SEGVGUARD
	pax_segvguard_cleanup(vp);
#endif /* PAX_SEGVGUARD */

	mutex_enter(vp->v_interlock);
	fstrans_done(mp);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}

/*
 * Disassociate the underlying file system from an open device vnode
 * and make it anonymous.
 *
 * Vnode unlocked on entry, drops a reference to the vnode.
 */
void
vcache_make_anon(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	uint32_t hash;
	bool recycle;

	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
	VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);

	/* Remove from vnode cache. */
	hash = vcache_hash(&vip->vi_key);
	mutex_enter(&vcache_lock);
	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
	    vip, vnode_impl, vi_hash);
	vip->vi_key.vk_mount = dead_rootmount;
	vip->vi_key.vk_key_len = 0;
	vip->vi_key.vk_key = NULL;
	mutex_exit(&vcache_lock);

	/*
	 * Disassociate the underlying file system from the vnode.
	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
	 * would no longer function.
	 */
	if (vn_lock(vp, LK_EXCLUSIVE)) {
		vnpanic(vp, "%s: cannot lock", __func__);
	}
	VOP_INACTIVE(vp, &recycle);
	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	/* Purge name cache. */
	cache_purge(vp);

	/* Done with purge, change operations vector. */
	mutex_enter(vp->v_interlock);
	vp->v_op = spec_vnodeop_p;
	vp->v_vflag |= VV_MPSAFE | VV_LOCKSWORK;
	mutex_exit(vp->v_interlock);

	/*
	 * Move to dead mount.  Must be after changing the operations
	 * vector as vnode operations enter the mount before using the
	 * operations vector.  See sys/kern/vnode_if.c.
	 */
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);

	vrele(vp);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(struct buf *bp)
{
	vnode_t *vp;

	if ((vp = bp->b_vp) == NULL)
		return;

	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));

	if (--vp->v_numoutput < 0)
		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
	if (vp->v_numoutput == 0)
		cv_broadcast(&vp->v_cv);
}

/*
 * Test a vnode for being or becoming dead.  Returns one of:
 * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
 * ENOENT: vnode is dead.
 * 0:      otherwise.
 *
 * Whenever this function returns a non-zero value all future
 * calls will also return a non-zero value.
 */
int
vdead_check(struct vnode *vp, int flags)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (! ISSET(flags, VDEAD_NOWAIT))
		VSTATE_WAIT_STABLE(vp);

	if (VSTATE_GET(vp) == VS_RECLAIMING) {
		KASSERT(ISSET(flags, VDEAD_NOWAIT));
		return EBUSY;
	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
		return ENOENT;
	}

	return 0;
}
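
/*
 * Example (a sketch): callers probe under the interlock, e.g.
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 */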

int
vfs_drainvnodes(void)
{
	int i, gen;

	mutex_enter(&vdrain_lock);
	for (i = 0; i < 2; i++) {
		gen = vdrain_gen;
		while (gen == vdrain_gen) {
			cv_broadcast(&vdrain_cv);
			cv_wait(&vdrain_gen_cv, &vdrain_lock);
		}
	}
	mutex_exit(&vdrain_lock);

	if (numvnodes >= desiredvnodes)
		return EBUSY;

	if (vcache_hashsize != desiredvnodes)
		vcache_reinit();

	return 0;
}

void
vnpanic(vnode_t *vp, const char *fmt, ...)
{
	va_list ap;

#ifdef DIAGNOSTIC
	vprint(NULL, vp);
#endif
	va_start(ap, fmt);
	vpanic(fmt, ap);
	va_end(ap);
}

void
vshareilock(vnode_t *tvp, vnode_t *fvp)
{
	kmutex_t *oldlock;

	oldlock = tvp->v_interlock;
	mutex_obj_hold(fvp->v_interlock);
	tvp->v_interlock = fvp->v_interlock;
	mutex_obj_free(oldlock);
}