1 /* $NetBSD: vfs_vnode.c,v 1.134 2022/02/28 08:44:04 hannken Exp $ */
2
3 /*-
4 * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Copyright (c) 1989, 1993
35 * The Regents of the University of California. All rights reserved.
36 * (c) UNIX System Laboratories, Inc.
37 * All or some portions of this file are derived from material licensed
38 * to the University of California by American Telephone and Telegraph
39 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
40 * the permission of UNIX System Laboratories, Inc.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
67 */
68
69 /*
70 * The vnode cache subsystem.
71 *
72 * Life-cycle
73 *
74 * Normally, there are two points where new vnodes are created:
75 * VOP_CREATE(9) and VOP_LOOKUP(9). The life-cycle of a vnode
76 * starts in one of the following ways:
77 *
78 * - Allocation, via vcache_get(9) or vcache_new(9).
79 * - Reclamation of inactive vnode, via vcache_vget(9).
80 *
81 * Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
82 * was another, traditional way. Currently, only the draining thread
83 * recycles the vnodes. This behaviour might be revisited.
84 *
85 * The life-cycle ends when the last reference is dropped, usually
86 * in VOP_REMOVE(9). In that case, VOP_INACTIVE(9) is called to inform
87 * the file system that the vnode is inactive. Via this call, the file
88 * system indicates whether the vnode can be recycled (usually, it checks
89 * its own references, e.g. the link count, or whether the file was removed).
90 *
91 * Depending on that indication, the vnode is either put onto a free list
92 * (the cache), or cleaned via vcache_reclaim(), which calls VOP_RECLAIM(9)
93 * to disassociate the underlying file system from the vnode, and is
94 * finally destroyed.
95 *
96 * Vnode state
97 *
98 * Vnode is always in one of six states:
99 * - MARKER This is a marker vnode to help list traversal. It
100 * will never change its state.
101 * - LOADING Vnode is associating with the underlying file system
102 * and is not yet ready to use.
103 * - LOADED Vnode is associated with the underlying file system and
104 * is ready to use.
105 * - BLOCKED Vnode is active but cannot get new references.
106 * - RECLAIMING Vnode is disassociating from the underlying file
107 * system.
108 * - RECLAIMED Vnode has disassociated from the underlying file
109 * system and is dead.
110 *
111 * Valid state changes are:
112 * LOADING -> LOADED
113 * Vnode has been initialised in vcache_get() or
114 * vcache_new() and is ready to use.
115 * BLOCKED -> RECLAIMING
116 * Vnode starts disassociation from underlying file
117 * system in vcache_reclaim().
118 * RECLAIMING -> RECLAIMED
119 * Vnode finished disassociation from underlying file
120 * system in vcache_reclaim().
121 * LOADED -> BLOCKED
122 * Either vcache_rekey*() is changing the vnode key or
123 * vrelel() is about to call VOP_INACTIVE().
124 * BLOCKED -> LOADED
125 * The block condition is over.
126 * LOADING -> RECLAIMED
127 * Either vcache_get() or vcache_new() failed to
128 * associate the underlying file system or vcache_rekey*()
129 * drops a vnode used as placeholder.
130 *
131 * Of these states LOADING, BLOCKED and RECLAIMING are intermediate
132 * and it is possible to wait for state change.
133 *
134 * The state is protected with v_interlock, with one exception:
135 * to change from LOADING both v_interlock and vcache_lock must be held,
136 * so it is possible to check "state == LOADING" while holding only
137 * vcache_lock. See vcache_get() for details.
138 *
139 * Reference counting
140 *
141 * A vnode is considered active if its reference count
142 * (vnode_t::v_usecount) is non-zero. It is maintained using the vref(9),
143 * vrele(9) and vput(9) routines. Common holders of references are, for
144 * example, open files, current working directories and mount points.
145 *
146 * v_usecount is adjusted with atomic operations; however, to change
147 * from a non-zero value to zero the interlock must also be held.
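 *
 * Illustrative usage sketch (a sketch only, not a prescription; "ino"
 * stands for a file system specific key such as an inode number).
 * A typical consumer obtains a referenced vnode, locks it for vnode
 * operations and finally drops both the lock and the reference:
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *	if (error)
 *		return error;
 *	error = vn_lock(vp, LK_EXCLUSIVE);
 *	if (error) {
 *		vrele(vp);
 *		return error;
 *	}
 *	...use the vnode, e.g. VOP_GETATTR(9)...
 *	vput(vp);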
148 */
149
150 #include <sys/cdefs.h>
151 __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.134 2022/02/28 08:44:04 hannken Exp $");
152
153 #ifdef _KERNEL_OPT
154 #include "opt_pax.h"
155 #endif
156
157 #include <sys/param.h>
158 #include <sys/kernel.h>
159
160 #include <sys/atomic.h>
161 #include <sys/buf.h>
162 #include <sys/conf.h>
163 #include <sys/device.h>
164 #include <sys/hash.h>
165 #include <sys/kauth.h>
166 #include <sys/kmem.h>
167 #include <sys/kthread.h>
168 #include <sys/module.h>
169 #include <sys/mount.h>
170 #include <sys/namei.h>
171 #include <sys/pax.h>
172 #include <sys/syscallargs.h>
173 #include <sys/sysctl.h>
174 #include <sys/systm.h>
175 #include <sys/vnode_impl.h>
176 #include <sys/wapbl.h>
177 #include <sys/fstrans.h>
178
179 #include <uvm/uvm.h>
180 #include <uvm/uvm_readahead.h>
181 #include <uvm/uvm_stat.h>
182
183 /* Flags to vrelel. */
184 #define VRELEL_ASYNC 0x0001 /* Always defer to vrele thread. */
185
186 #define LRU_VRELE 0
187 #define LRU_FREE 1
188 #define LRU_HOLD 2
189 #define LRU_COUNT 3
190
191 /*
192 * There are three lru lists: one holds vnodes waiting for async release,
193 * one is for vnodes which have no buffer/page references and one for those
194 * which do (i.e. v_holdcnt is non-zero). We put the lists into a single,
195 * private cache line as vnodes migrate between them while under the same
196 * lock (vdrain_lock).
197 */
198 u_int numvnodes __cacheline_aligned;
199 static vnodelst_t lru_list[LRU_COUNT] __cacheline_aligned;
200 static kmutex_t vdrain_lock __cacheline_aligned;
201 static kcondvar_t vdrain_cv;
202 static int vdrain_gen;
203 static kcondvar_t vdrain_gen_cv;
204 static bool vdrain_retry;
205 static lwp_t * vdrain_lwp;
206 SLIST_HEAD(hashhead, vnode_impl);
207 static kmutex_t vcache_lock __cacheline_aligned;
208 static kcondvar_t vcache_cv;
209 static u_int vcache_hashsize;
210 static u_long vcache_hashmask;
211 static struct hashhead *vcache_hashtab;
212 static pool_cache_t vcache_pool;
213 static void lru_requeue(vnode_t *, vnodelst_t *);
214 static vnodelst_t * lru_which(vnode_t *);
215 static vnode_impl_t * vcache_alloc(void);
216 static void vcache_dealloc(vnode_impl_t *);
217 static void vcache_free(vnode_impl_t *);
218 static void vcache_init(void);
219 static void vcache_reinit(void);
220 static void vcache_reclaim(vnode_t *);
221 static void vrelel(vnode_t *, int, int);
222 static void vdrain_thread(void *);
223 static void vnpanic(vnode_t *, const char *, ...)
224 __printflike(2, 3);
225
226 /* Routines having to do with the management of the vnode table. */
227 extern struct mount *dead_rootmount;
228 extern int (**dead_vnodeop_p)(void *);
229 extern int (**spec_vnodeop_p)(void *);
230 extern struct vfsops dead_vfsops;
231
232 /*
233 * The high bit of v_usecount is a gate for vcache_tryvget(). It's set
234 * only when the vnode state is LOADED.
235 * The next bit of v_usecount is a flag for vrelel(). It's set
236 * from vcache_vget() and vcache_tryvget() whenever the operation succeeds.
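 *
 * Decoding sketch (illustrative only): given a raw value read with
 * atomic_load_relaxed(&vp->v_usecount),
 *
 *	gate open:	(use & VUSECOUNT_GATE) != 0
 *	VGET flag:	(use & VUSECOUNT_VGET) != 0
 *	references:	use & VUSECOUNT_MASK
 *
 * e.g. a value of 0xc0000002 means the gate is open, the VGET flag is
 * set and the reference count is 2.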
237 */
238 #define VUSECOUNT_MASK 0x3fffffff
239 #define VUSECOUNT_GATE 0x80000000
240 #define VUSECOUNT_VGET 0x40000000
241
242 /*
243 * Return the current usecount of a vnode.
244 */
245 inline int
246 vrefcnt(struct vnode *vp)
247 {
248
249 return atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_MASK;
250 }
251
252 /* Vnode state operations and diagnostics. */
253
254 #if defined(DIAGNOSTIC)
255
256 #define VSTATE_VALID(state) \
257 ((state) != VS_ACTIVE && (state) != VS_MARKER)
258 #define VSTATE_GET(vp) \
259 vstate_assert_get((vp), __func__, __LINE__)
260 #define VSTATE_CHANGE(vp, from, to) \
261 vstate_assert_change((vp), (from), (to), __func__, __LINE__)
262 #define VSTATE_WAIT_STABLE(vp) \
263 vstate_assert_wait_stable((vp), __func__, __LINE__)
264
265 void
266 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
267 bool has_lock)
268 {
269 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
270 int refcnt = vrefcnt(vp);
271
272 if (!has_lock) {
273 /*
274 * Prevent predictive loads from the CPU, but check the state
275 * without locking first.
276 */
277 membar_enter();
278 if (state == VS_ACTIVE && refcnt > 0 &&
279 (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
280 return;
281 if (vip->vi_state == state)
282 return;
283 mutex_enter((vp)->v_interlock);
284 }
285
286 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
287
288 if ((state == VS_ACTIVE && refcnt > 0 &&
289 (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
290 vip->vi_state == state) {
291 if (!has_lock)
292 mutex_exit((vp)->v_interlock);
293 return;
294 }
295 vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
296 vstate_name(vip->vi_state), refcnt,
297 vstate_name(state), func, line);
298 }
299
300 static enum vnode_state
301 vstate_assert_get(vnode_t *vp, const char *func, int line)
302 {
303 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
304
305 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
306 if (! VSTATE_VALID(vip->vi_state))
307 vnpanic(vp, "state is %s at %s:%d",
308 vstate_name(vip->vi_state), func, line);
309
310 return vip->vi_state;
311 }
312
313 static void
314 vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
315 {
316 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
317
318 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
319 if (! VSTATE_VALID(vip->vi_state))
320 vnpanic(vp, "state is %s at %s:%d",
321 vstate_name(vip->vi_state), func, line);
322
323 while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
324 cv_wait(&vp->v_cv, vp->v_interlock);
325
326 if (! VSTATE_VALID(vip->vi_state))
327 vnpanic(vp, "state is %s at %s:%d",
328 vstate_name(vip->vi_state), func, line);
329 }
330
331 static void
332 vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
333 const char *func, int line)
334 {
335 bool gated = (atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_GATE);
336 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
337
338 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
339 if (from == VS_LOADING)
340 KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);
341
342 if (! VSTATE_VALID(from))
343 vnpanic(vp, "from is %s at %s:%d",
344 vstate_name(from), func, line);
345 if (! VSTATE_VALID(to))
346 vnpanic(vp, "to is %s at %s:%d",
347 vstate_name(to), func, line);
348 if (vip->vi_state != from)
349 vnpanic(vp, "from is %s, expected %s at %s:%d\n",
350 vstate_name(vip->vi_state), vstate_name(from), func, line);
351 if ((from == VS_LOADED) != gated)
352 vnpanic(vp, "state is %s, gate %d does not match at %s:%d\n",
353 vstate_name(vip->vi_state), gated, func, line);
354
355 /* Open/close the gate for vcache_tryvget(). */
356 if (to == VS_LOADED)
357 atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
358 else
359 atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
360
361 vip->vi_state = to;
362 if (from == VS_LOADING)
363 cv_broadcast(&vcache_cv);
364 if (to == VS_LOADED || to == VS_RECLAIMED)
365 cv_broadcast(&vp->v_cv);
366 }
367
368 #else /* defined(DIAGNOSTIC) */
369
370 #define VSTATE_GET(vp) \
371 (VNODE_TO_VIMPL((vp))->vi_state)
372 #define VSTATE_CHANGE(vp, from, to) \
373 vstate_change((vp), (from), (to))
374 #define VSTATE_WAIT_STABLE(vp) \
375 vstate_wait_stable((vp))
376 void
377 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
378 bool has_lock)
379 {
380
381 }
382
383 static void
384 vstate_wait_stable(vnode_t *vp)
385 {
386 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
387
388 while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
389 cv_wait(&vp->v_cv, vp->v_interlock);
390 }
391
392 static void
393 vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
394 {
395 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
396
397 /* Open/close the gate for vcache_tryvget(). */
398 if (to == VS_LOADED)
399 atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
400 else
401 atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
402
403 vip->vi_state = to;
404 if (from == VS_LOADING)
405 cv_broadcast(&vcache_cv);
406 if (to == VS_LOADED || to == VS_RECLAIMED)
407 cv_broadcast(&vp->v_cv);
408 }
409
410 #endif /* defined(DIAGNOSTIC) */
411
412 void
413 vfs_vnode_sysinit(void)
414 {
415 int error __diagused, i;
416
417 dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
418 KASSERT(dead_rootmount != NULL);
419 dead_rootmount->mnt_iflag |= IMNT_MPSAFE;
420
421 mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
422 for (i = 0; i < LRU_COUNT; i++) {
423 TAILQ_INIT(&lru_list[i]);
424 }
425 vcache_init();
426
427 cv_init(&vdrain_cv, "vdrain");
428 cv_init(&vdrain_gen_cv, "vdrainwt");
429 error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
430 NULL, &vdrain_lwp, "vdrain");
431 KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
432 }
433
434 /*
435 * Allocate a new marker vnode.
436 */
437 vnode_t *
438 vnalloc_marker(struct mount *mp)
439 {
440 vnode_impl_t *vip;
441 vnode_t *vp;
442
443 vip = pool_cache_get(vcache_pool, PR_WAITOK);
444 memset(vip, 0, sizeof(*vip));
445 vp = VIMPL_TO_VNODE(vip);
446 uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
447 vp->v_mount = mp;
448 vp->v_type = VBAD;
449 vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
450 klist_init(&vp->v_klist);
451 vip->vi_state = VS_MARKER;
452
453 return vp;
454 }
455
456 /*
457 * Free a marker vnode.
458 */
459 void
460 vnfree_marker(vnode_t *vp)
461 {
462 vnode_impl_t *vip;
463
464 vip = VNODE_TO_VIMPL(vp);
465 KASSERT(vip->vi_state == VS_MARKER);
466 mutex_obj_free(vp->v_interlock);
467 uvm_obj_destroy(&vp->v_uobj, true);
468 klist_fini(&vp->v_klist);
469 pool_cache_put(vcache_pool, vip);
470 }
471
472 /*
473 * Test a vnode for being a marker vnode.
474 */
475 bool
476 vnis_marker(vnode_t *vp)
477 {
478
479 return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
480 }
481
482 /*
483 * Return the lru list this node should be on.
484 */
485 static vnodelst_t *
486 lru_which(vnode_t *vp)
487 {
488
489 KASSERT(mutex_owned(vp->v_interlock));
490
491 if (vp->v_holdcnt > 0)
492 return &lru_list[LRU_HOLD];
493 else
494 return &lru_list[LRU_FREE];
495 }
496
497 /*
498 * Put the vnode at the end of the given list.
499 * Both the current and the new list may be NULL; this is used on vnode alloc/free.
500 * Adjust numvnodes and signal vdrain thread if there is work.
501 */
502 static void
503 lru_requeue(vnode_t *vp, vnodelst_t *listhd)
504 {
505 vnode_impl_t *vip;
506 int d;
507
508 /*
509 * If the vnode is on the correct list, and was put there recently,
510 * then leave it be, thus avoiding huge cache and lock contention.
511 */
512 vip = VNODE_TO_VIMPL(vp);
513 if (listhd == vip->vi_lrulisthd &&
514 (getticks() - vip->vi_lrulisttm) < hz) {
515 return;
516 }
517
518 mutex_enter(&vdrain_lock);
519 d = 0;
520 if (vip->vi_lrulisthd != NULL)
521 TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
522 else
523 d++;
524 vip->vi_lrulisthd = listhd;
525 vip->vi_lrulisttm = getticks();
526 if (vip->vi_lrulisthd != NULL)
527 TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
528 else
529 d--;
530 if (d != 0) {
531 /*
532 * Looks strange? This is not a bug. Don't store
533 * numvnodes unless there is a change - avoid false
534 * sharing on MP.
535 */
536 numvnodes += d;
537 }
538 if ((d > 0 && numvnodes > desiredvnodes) ||
539 listhd == &lru_list[LRU_VRELE])
540 cv_signal(&vdrain_cv);
541 mutex_exit(&vdrain_lock);
542 }
543
544 /*
545 * Release deferred vrele vnodes for this mount.
546 * Called with file system suspended.
547 */
548 void
549 vrele_flush(struct mount *mp)
550 {
551 vnode_impl_t *vip, *marker;
552 vnode_t *vp;
553 int when = 0;
554
555 KASSERT(fstrans_is_owner(mp));
556
557 marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
558
559 mutex_enter(&vdrain_lock);
560 TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);
561
562 while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
563 TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
564 TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
565 vi_lrulist);
566 vp = VIMPL_TO_VNODE(vip);
567 if (vnis_marker(vp))
568 continue;
569
570 KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
571 TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
572 vip->vi_lrulisthd = &lru_list[LRU_HOLD];
573 vip->vi_lrulisttm = getticks();
574 TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
575 mutex_exit(&vdrain_lock);
576
577 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
578 mutex_enter(vp->v_interlock);
579 vrelel(vp, 0, LK_EXCLUSIVE);
580
581 if (getticks() > when) {
582 yield();
583 when = getticks() + hz / 10;
584 }
585
586 mutex_enter(&vdrain_lock);
587 }
588
589 TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
590 mutex_exit(&vdrain_lock);
591
592 vnfree_marker(VIMPL_TO_VNODE(marker));
593 }
594
595 /*
596 * Reclaim a cached vnode. Used from vdrain_thread only.
597 */
598 static __inline void
599 vdrain_remove(vnode_t *vp)
600 {
601 struct mount *mp;
602
603 KASSERT(mutex_owned(&vdrain_lock));
604
605 /* Probe usecount (unlocked). */
606 if (vrefcnt(vp) > 0)
607 return;
608 /* Try v_interlock -- we lock the wrong direction! */
609 if (!mutex_tryenter(vp->v_interlock))
610 return;
611 /* Probe usecount and state. */
612 if (vrefcnt(vp) > 0 || VSTATE_GET(vp) != VS_LOADED) {
613 mutex_exit(vp->v_interlock);
614 return;
615 }
616 mp = vp->v_mount;
617 if (fstrans_start_nowait(mp) != 0) {
618 mutex_exit(vp->v_interlock);
619 return;
620 }
621 vdrain_retry = true;
622 mutex_exit(&vdrain_lock);
623
624 if (vcache_vget(vp) == 0) {
625 if (!vrecycle(vp)) {
626 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
627 mutex_enter(vp->v_interlock);
628 vrelel(vp, 0, LK_EXCLUSIVE);
629 }
630 }
631 fstrans_done(mp);
632
633 mutex_enter(&vdrain_lock);
634 }
635
636 /*
637 * Release a cached vnode. Used from vdrain_thread only.
638 */
639 static __inline void
640 vdrain_vrele(vnode_t *vp)
641 {
642 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
643 struct mount *mp;
644
645 KASSERT(mutex_owned(&vdrain_lock));
646
647 mp = vp->v_mount;
648 if (fstrans_start_nowait(mp) != 0)
649 return;
650
651 /*
652 * First remove the vnode from the vrele list.
653 * Put it onto the last lru list; the last vrele()
654 * will put it back onto the right list before
655 * its usecount reaches zero.
656 */
657 KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
658 TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
659 vip->vi_lrulisthd = &lru_list[LRU_HOLD];
660 vip->vi_lrulisttm = getticks();
661 TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
662
663 vdrain_retry = true;
664 mutex_exit(&vdrain_lock);
665
666 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
667 mutex_enter(vp->v_interlock);
668 vrelel(vp, 0, LK_EXCLUSIVE);
669 fstrans_done(mp);
670
671 mutex_enter(&vdrain_lock);
672 }
673
674 /*
675 * Helper thread to keep the number of vnodes below desiredvnodes
676 * and release vnodes from asynchronous vrele.
677 */
678 static void
679 vdrain_thread(void *cookie)
680 {
681 int i;
682 u_int target;
683 vnode_impl_t *vip, *marker;
684
685 marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
686
687 mutex_enter(&vdrain_lock);
688
689 for (;;) {
690 vdrain_retry = false;
691 target = desiredvnodes - desiredvnodes/10;
692
693 for (i = 0; i < LRU_COUNT; i++) {
694 TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
695 while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
696 TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
697 TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
698 vi_lrulist);
699 if (vnis_marker(VIMPL_TO_VNODE(vip)))
700 continue;
701 if (i == LRU_VRELE)
702 vdrain_vrele(VIMPL_TO_VNODE(vip));
703 else if (numvnodes < target)
704 break;
705 else
706 vdrain_remove(VIMPL_TO_VNODE(vip));
707 }
708 TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
709 }
710
711 if (vdrain_retry) {
712 kpause("vdrainrt", false, 1, &vdrain_lock);
713 } else {
714 vdrain_gen++;
715 cv_broadcast(&vdrain_gen_cv);
716 cv_wait(&vdrain_cv, &vdrain_lock);
717 }
718 }
719 }
720
721 /*
722 * Try to drop a reference on a vnode. Abort if we are releasing the
723 * last reference. Note: this _must_ succeed if it is not the last reference.
724 */
725 static bool
726 vtryrele(vnode_t *vp)
727 {
728 u_int use, next;
729
730 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
731 if (__predict_false((use & VUSECOUNT_MASK) == 1)) {
732 return false;
733 }
734 KASSERT((use & VUSECOUNT_MASK) > 1);
735 next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
736 if (__predict_true(next == use)) {
737 return true;
738 }
739 }
740 }
741
742 /*
743 * vput: unlock and release the reference.
744 */
745 void
746 vput(vnode_t *vp)
747 {
748 int lktype;
749
750 /*
751 * Do an unlocked check of the usecount. If it looks like we're not
752 * about to drop the last reference, then unlock the vnode and try
753 * to drop the reference. If it ends up being the last reference
754 * after all, vrelel() can fix it all up. Most of the time this
755 * will all go to plan.
756 */
757 if (vrefcnt(vp) > 1) {
758 VOP_UNLOCK(vp);
759 if (vtryrele(vp)) {
760 return;
761 }
762 lktype = LK_NONE;
763 } else if ((vp->v_vflag & VV_LOCKSWORK) == 0) {
764 VOP_UNLOCK(vp);
765 lktype = LK_NONE;
766 } else {
767 lktype = VOP_ISLOCKED(vp);
768 KASSERT(lktype != LK_NONE);
769 }
770 mutex_enter(vp->v_interlock);
771 vrelel(vp, 0, lktype);
772 }
773
774 /*
775 * Vnode release. If the reference count drops to zero, call the inactive
776 * routine and either return the vnode to the freelist or free it to the pool.
777 */
778 static void
779 vrelel(vnode_t *vp, int flags, int lktype)
780 {
781 const bool async = ((flags & VRELEL_ASYNC) != 0);
782 bool recycle, defer, objlock_held;
783 u_int use, next;
784 int error;
785
786 objlock_held = false;
787
788 retry:
789 KASSERT(mutex_owned(vp->v_interlock));
790
791 if (__predict_false(vp->v_op == dead_vnodeop_p &&
792 VSTATE_GET(vp) != VS_RECLAIMED)) {
793 vnpanic(vp, "dead but not clean");
794 }
795
796 /*
797 * If not the last reference, just unlock and drop the reference count.
798 *
799 * Otherwise make sure we pass a point in time where we hold the
800 * last reference with VGET flag unset.
801 */
802 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
803 if (__predict_false((use & VUSECOUNT_MASK) > 1)) {
804 if (objlock_held) {
805 objlock_held = false;
806 rw_exit(vp->v_uobj.vmobjlock);
807 }
808 if (lktype != LK_NONE) {
809 mutex_exit(vp->v_interlock);
810 lktype = LK_NONE;
811 VOP_UNLOCK(vp);
812 mutex_enter(vp->v_interlock);
813 }
814 if (vtryrele(vp)) {
815 mutex_exit(vp->v_interlock);
816 return;
817 }
818 next = atomic_load_relaxed(&vp->v_usecount);
819 continue;
820 }
821 KASSERT((use & VUSECOUNT_MASK) == 1);
822 next = use & ~VUSECOUNT_VGET;
823 if (next != use) {
824 next = atomic_cas_uint(&vp->v_usecount, use, next);
825 }
826 if (__predict_true(next == use)) {
827 break;
828 }
829 }
830 if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) {
831 vnpanic(vp, "%s: bad ref count", __func__);
832 }
833
834 #ifdef DIAGNOSTIC
835 if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
836 vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
837 vprint("vrelel: missing VOP_CLOSE()", vp);
838 }
839 #endif
840
841 /*
842 * If already clean there is no need to lock, defer or
843 * deactivate this node.
844 */
845 if (VSTATE_GET(vp) == VS_RECLAIMED) {
846 if (objlock_held) {
847 objlock_held = false;
848 rw_exit(vp->v_uobj.vmobjlock);
849 }
850 if (lktype != LK_NONE) {
851 mutex_exit(vp->v_interlock);
852 lktype = LK_NONE;
853 VOP_UNLOCK(vp);
854 mutex_enter(vp->v_interlock);
855 }
856 goto out;
857 }
858
859 /*
860 * First try to get the vnode locked for VOP_INACTIVE().
861 * Defer the vnode release to the vdrain thread if the caller requests
862 * it explicitly, is the pagedaemon, or if taking the lock failed.
863 */
864 defer = false;
865 if ((curlwp == uvm.pagedaemon_lwp) || async) {
866 defer = true;
867 } else if (lktype == LK_SHARED) {
868 /* Excellent chance of getting the lock, if this is the last ref. */
869 error = vn_lock(vp, LK_UPGRADE | LK_RETRY | LK_NOWAIT);
870 if (error != 0) {
871 defer = true;
872 } else {
873 lktype = LK_EXCLUSIVE;
874 }
875 } else if (lktype == LK_NONE) {
876 /* Excellent chance of getting the lock, if this is the last ref. */
877 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
878 if (error != 0) {
879 defer = true;
880 } else {
881 lktype = LK_EXCLUSIVE;
882 }
883 }
884 KASSERT(mutex_owned(vp->v_interlock));
885 if (defer) {
886 /*
887 * Defer reclaim to the kthread; it's not safe to
888 * clean it here. We donate it our last reference.
889 */
890 if (lktype != LK_NONE) {
891 mutex_exit(vp->v_interlock);
892 VOP_UNLOCK(vp);
893 mutex_enter(vp->v_interlock);
894 }
895 lru_requeue(vp, &lru_list[LRU_VRELE]);
896 mutex_exit(vp->v_interlock);
897 return;
898 }
899 KASSERT(lktype == LK_EXCLUSIVE);
900
901 if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP|VI_WRMAP)) != 0 ||
902 (vp->v_vflag & VV_MAPPED) != 0) {
903 /* Take care of space accounting. */
904 if (!objlock_held) {
905 objlock_held = true;
906 if (!rw_tryenter(vp->v_uobj.vmobjlock, RW_WRITER)) {
907 mutex_exit(vp->v_interlock);
908 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
909 mutex_enter(vp->v_interlock);
910 goto retry;
911 }
912 }
913 if ((vp->v_iflag & VI_EXECMAP) != 0) {
914 cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
915 }
916 vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
917 vp->v_vflag &= ~VV_MAPPED;
918 }
919 if (objlock_held) {
920 objlock_held = false;
921 rw_exit(vp->v_uobj.vmobjlock);
922 }
923
924 /*
925 * Deactivate the vnode, but preserve our reference across
926 * the call to VOP_INACTIVE().
927 *
928 * If VOP_INACTIVE() indicates that the file has been
929 * deleted, then recycle the vnode.
930 *
931 * Note that VOP_INACTIVE() will not drop the vnode lock.
932 */
933 mutex_exit(vp->v_interlock);
934 recycle = false;
935 VOP_INACTIVE(vp, &recycle);
936 if (!recycle) {
937 lktype = LK_NONE;
938 VOP_UNLOCK(vp);
939 }
940 mutex_enter(vp->v_interlock);
941
942 /*
943 * Block new references then check again to see if a
944 * new reference was acquired in the meantime. If
945 * it was, restore the vnode state and try again.
946 */
947 if (recycle) {
948 VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
949 use = atomic_load_relaxed(&vp->v_usecount);
950 if ((use & VUSECOUNT_VGET) != 0 ||
951 (use & VUSECOUNT_MASK) != 1) {
952 VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
953 goto retry;
954 }
955 }
956
957 /*
958 * Recycle the vnode if the file is now unused (unlinked).
959 */
960 if (recycle) {
961 VSTATE_ASSERT(vp, VS_BLOCKED);
962 KASSERT(lktype == LK_EXCLUSIVE);
963 /* vcache_reclaim drops the lock. */
964 lktype = LK_NONE;
965 vcache_reclaim(vp);
966 }
967 KASSERT(vrefcnt(vp) > 0);
968 KASSERT(lktype == LK_NONE);
969
970 out:
971 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
972 if (__predict_false((use & VUSECOUNT_VGET) != 0 &&
973 (use & VUSECOUNT_MASK) == 1)) {
974 /* Gained and released another reference, retry. */
975 goto retry;
976 }
977 next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
978 if (__predict_true(next == use)) {
979 if (__predict_false((use & VUSECOUNT_MASK) != 1)) {
980 /* Gained another reference. */
981 mutex_exit(vp->v_interlock);
982 return;
983 }
984 break;
985 }
986 }
987
988 if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
989 /*
990 * It's clean so destroy it. It isn't referenced
991 * anywhere since it has been reclaimed.
992 */
993 vcache_free(VNODE_TO_VIMPL(vp));
994 } else {
995 /*
996 * Otherwise, put it back onto the freelist. It
997 * can't be destroyed while still associated with
998 * a file system.
999 */
1000 lru_requeue(vp, lru_which(vp));
1001 mutex_exit(vp->v_interlock);
1002 }
1003 }
1004
1005 void
1006 vrele(vnode_t *vp)
1007 {
1008
1009 if (vtryrele(vp)) {
1010 return;
1011 }
1012 mutex_enter(vp->v_interlock);
1013 vrelel(vp, 0, LK_NONE);
1014 }
1015
1016 /*
1017 * Asynchronous vnode release: the vnode is released in a different context.
1018 */
1019 void
1020 vrele_async(vnode_t *vp)
1021 {
1022
1023 if (vtryrele(vp)) {
1024 return;
1025 }
1026 mutex_enter(vp->v_interlock);
1027 vrelel(vp, VRELEL_ASYNC, LK_NONE);
1028 }
1029
1030 /*
1031 * Vnode reference, where a reference is already held by some other
1032 * object (for example, a file structure).
1033 *
1034 * NB: lockless code sequences may rely on this not blocking.
1035 */
1036 void
1037 vref(vnode_t *vp)
1038 {
1039
1040 KASSERT(vrefcnt(vp) > 0);
1041
1042 atomic_inc_uint(&vp->v_usecount);
1043 }
1044
1045 /*
1046 * Page or buffer structure gets a reference.
1047 * Called with v_interlock held.
1048 */
1049 void
1050 vholdl(vnode_t *vp)
1051 {
1052
1053 KASSERT(mutex_owned(vp->v_interlock));
1054
1055 if (vp->v_holdcnt++ == 0 && vrefcnt(vp) == 0)
1056 lru_requeue(vp, lru_which(vp));
1057 }
1058
1059 /*
1060 * Page or buffer structure gets a reference.
1061 */
1062 void
1063 vhold(vnode_t *vp)
1064 {
1065
1066 mutex_enter(vp->v_interlock);
1067 vholdl(vp);
1068 mutex_exit(vp->v_interlock);
1069 }
1070
1071 /*
1072 * Page or buffer structure frees a reference.
1073 * Called with v_interlock held.
1074 */
1075 void
1076 holdrelel(vnode_t *vp)
1077 {
1078
1079 KASSERT(mutex_owned(vp->v_interlock));
1080
1081 if (vp->v_holdcnt <= 0) {
1082 vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
1083 }
1084
1085 vp->v_holdcnt--;
1086 if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
1087 lru_requeue(vp, lru_which(vp));
1088 }
1089
1090 /*
1091 * Page or buffer structure frees a reference.
1092 */
1093 void
1094 holdrele(vnode_t *vp)
1095 {
1096
1097 mutex_enter(vp->v_interlock);
1098 holdrelel(vp);
1099 mutex_exit(vp->v_interlock);
1100 }
1101
1102 /*
1103 * Recycle an unused vnode if caller holds the last reference.
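 *
 * Caller sketch (illustrative; see vdrain_remove() above for a real user):
 * the vnode is assumed unlocked on entry, and the caller keeps its
 * reference if recycling fails.
 *
 *	if (!vrecycle(vp))
 *		vrele(vp);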
1104 */
1105 bool
1106 vrecycle(vnode_t *vp)
1107 {
1108 int error __diagused;
1109
1110 mutex_enter(vp->v_interlock);
1111
1112 /* If the vnode is already clean we're done. */
1113 VSTATE_WAIT_STABLE(vp);
1114 if (VSTATE_GET(vp) != VS_LOADED) {
1115 VSTATE_ASSERT(vp, VS_RECLAIMED);
1116 vrelel(vp, 0, LK_NONE);
1117 return true;
1118 }
1119
1120 /* Prevent further references until the vnode is locked. */
1121 VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1122
1123 /* Make sure we hold the last reference. */
1124 if (vrefcnt(vp) != 1) {
1125 VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1126 mutex_exit(vp->v_interlock);
1127 return false;
1128 }
1129
1130 mutex_exit(vp->v_interlock);
1131
1132 /*
1133 * On a leaf file system this lock will always succeed as we hold
1134 * the last reference and prevent further references.
1135 * On layered file systems waiting for the lock would open a can of
1136 * deadlocks as the lower vnodes may have other active references.
1137 */
1138 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
1139
1140 mutex_enter(vp->v_interlock);
1141 if (error) {
1142 VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1143 mutex_exit(vp->v_interlock);
1144 return false;
1145 }
1146
1147 KASSERT(vrefcnt(vp) == 1);
1148 vcache_reclaim(vp);
1149 vrelel(vp, 0, LK_NONE);
1150
1151 return true;
1152 }
1153
1154 /*
1155 * Helper for vrevoke() to propagate suspension from lastmp
1156 * to thismp. Both args may be NULL.
1157 * Returns the currently suspended file system or NULL.
1158 */
1159 static struct mount *
1160 vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
1161 {
1162 int error;
1163
1164 if (lastmp == thismp)
1165 return thismp;
1166
1167 if (lastmp != NULL)
1168 vfs_resume(lastmp);
1169
1170 if (thismp == NULL)
1171 return NULL;
1172
1173 do {
1174 error = vfs_suspend(thismp, 0);
1175 } while (error == EINTR || error == ERESTART);
1176
1177 if (error == 0)
1178 return thismp;
1179
1180 KASSERT(error == EOPNOTSUPP || error == ENOENT);
1181 return NULL;
1182 }
1183
1184 /*
1185 * Eliminate all activity associated with the requested vnode
1186 * and with all vnodes aliased to the requested vnode.
1187 */
1188 void
1189 vrevoke(vnode_t *vp)
1190 {
1191 struct mount *mp;
1192 vnode_t *vq;
1193 enum vtype type;
1194 dev_t dev;
1195
1196 KASSERT(vrefcnt(vp) > 0);
1197
1198 mp = vrevoke_suspend_next(NULL, vp->v_mount);
1199
1200 mutex_enter(vp->v_interlock);
1201 VSTATE_WAIT_STABLE(vp);
1202 if (VSTATE_GET(vp) == VS_RECLAIMED) {
1203 mutex_exit(vp->v_interlock);
1204 } else if (vp->v_type != VBLK && vp->v_type != VCHR) {
1205 atomic_inc_uint(&vp->v_usecount);
1206 mutex_exit(vp->v_interlock);
1207 vgone(vp);
1208 } else {
1209 dev = vp->v_rdev;
1210 type = vp->v_type;
1211 mutex_exit(vp->v_interlock);
1212
1213 while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
1214 mp = vrevoke_suspend_next(mp, vq->v_mount);
1215 vgone(vq);
1216 }
1217 }
1218 vrevoke_suspend_next(mp, NULL);
1219 }
1220
1221 /*
1222 * Eliminate all activity associated with a vnode in preparation for
1223 * reuse. Drops a reference from the vnode.
1224 */
1225 void
1226 vgone(vnode_t *vp)
1227 {
1228 int lktype;
1229
1230 KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
1231
1232 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1233 lktype = LK_EXCLUSIVE;
1234 mutex_enter(vp->v_interlock);
1235 VSTATE_WAIT_STABLE(vp);
1236 if (VSTATE_GET(vp) == VS_LOADED) {
1237 VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1238 vcache_reclaim(vp);
1239 lktype = LK_NONE;
1240 }
1241 VSTATE_ASSERT(vp, VS_RECLAIMED);
1242 vrelel(vp, 0, lktype);
1243 }
1244
1245 static inline uint32_t
1246 vcache_hash(const struct vcache_key *key)
1247 {
1248 uint32_t hash = HASH32_BUF_INIT;
1249
1250 KASSERT(key->vk_key_len > 0);
1251
1252 hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
1253 hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
1254 return hash;
1255 }
1256
1257 static int
1258 vcache_stats(struct hashstat_sysctl *hs, bool fill)
1259 {
1260 vnode_impl_t *vip;
1261 uint64_t chain;
1262
1263 strlcpy(hs->hash_name, "vcache", sizeof(hs->hash_name));
1264 strlcpy(hs->hash_desc, "vnode cache hash", sizeof(hs->hash_desc));
1265 if (!fill)
1266 return 0;
1267
1268 hs->hash_size = vcache_hashmask + 1;
1269
1270 for (size_t i = 0; i < hs->hash_size; i++) {
1271 chain = 0;
1272 mutex_enter(&vcache_lock);
1273 SLIST_FOREACH(vip, &vcache_hashtab[i], vi_hash) {
1274 chain++;
1275 }
1276 mutex_exit(&vcache_lock);
1277 if (chain > 0) {
1278 hs->hash_used++;
1279 hs->hash_items += chain;
1280 if (chain > hs->hash_maxchain)
1281 hs->hash_maxchain = chain;
1282 }
1283 preempt_point();
1284 }
1285
1286 return 0;
1287 }
1288
1289 static void
1290 vcache_init(void)
1291 {
1292
1293 vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
1294 0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
1295 KASSERT(vcache_pool != NULL);
1296 mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
1297 cv_init(&vcache_cv, "vcache");
1298 vcache_hashsize = desiredvnodes;
1299 vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
1300 &vcache_hashmask);
1301 hashstat_register("vcache", vcache_stats);
1302 }
1303
1304 static void
1305 vcache_reinit(void)
1306 {
1307 int i;
1308 uint32_t hash;
1309 u_long oldmask, newmask;
1310 struct hashhead *oldtab, *newtab;
1311 vnode_impl_t *vip;
1312
1313 newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
1314 mutex_enter(&vcache_lock);
1315 oldtab = vcache_hashtab;
1316 oldmask = vcache_hashmask;
1317 vcache_hashsize = desiredvnodes;
1318 vcache_hashtab = newtab;
1319 vcache_hashmask = newmask;
1320 for (i = 0; i <= oldmask; i++) {
1321 while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
1322 SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
1323 hash = vcache_hash(&vip->vi_key);
1324 SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
1325 vip, vi_hash);
1326 }
1327 }
1328 mutex_exit(&vcache_lock);
1329 hashdone(oldtab, HASH_SLIST, oldmask);
1330 }
1331
1332 static inline vnode_impl_t *
1333 vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
1334 {
1335 struct hashhead *hashp;
1336 vnode_impl_t *vip;
1337
1338 KASSERT(mutex_owned(&vcache_lock));
1339
1340 hashp = &vcache_hashtab[hash & vcache_hashmask];
1341 SLIST_FOREACH(vip, hashp, vi_hash) {
1342 if (key->vk_mount != vip->vi_key.vk_mount)
1343 continue;
1344 if (key->vk_key_len != vip->vi_key.vk_key_len)
1345 continue;
1346 if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
1347 continue;
1348 return vip;
1349 }
1350 return NULL;
1351 }
1352
1353 /*
1354 * Allocate a new, uninitialized vcache node.
1355 */
1356 static vnode_impl_t *
1357 vcache_alloc(void)
1358 {
1359 vnode_impl_t *vip;
1360 vnode_t *vp;
1361
1362 vip = pool_cache_get(vcache_pool, PR_WAITOK);
1363 vp = VIMPL_TO_VNODE(vip);
1364 memset(vip, 0, sizeof(*vip));
1365
1366 rw_init(&vip->vi_lock);
1367 vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
1368
1369 uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
1370 klist_init(&vp->v_klist);
1371 cv_init(&vp->v_cv, "vnode");
1372 cache_vnode_init(vp);
1373
1374 vp->v_usecount = 1;
1375 vp->v_type = VNON;
1376 vp->v_size = vp->v_writesize = VSIZENOTSET;
1377
1378 vip->vi_state = VS_LOADING;
1379
1380 lru_requeue(vp, &lru_list[LRU_FREE]);
1381
1382 return vip;
1383 }
1384
1385 /*
1386 * Deallocate a vcache node in state VS_LOADING.
1387 *
1388 * vcache_lock held on entry and released on return.
1389 */
1390 static void
1391 vcache_dealloc(vnode_impl_t *vip)
1392 {
1393 vnode_t *vp;
1394
1395 KASSERT(mutex_owned(&vcache_lock));
1396
1397 vp = VIMPL_TO_VNODE(vip);
1398 vfs_ref(dead_rootmount);
1399 vfs_insmntque(vp, dead_rootmount);
1400 mutex_enter(vp->v_interlock);
1401 vp->v_op = dead_vnodeop_p;
1402 VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
1403 mutex_exit(&vcache_lock);
1404 vrelel(vp, 0, LK_NONE);
1405 }
1406
1407 /*
1408 * Free an unused, unreferenced vcache node.
1409 * v_interlock locked on entry.
1410 */
1411 static void
1412 vcache_free(vnode_impl_t *vip)
1413 {
1414 vnode_t *vp;
1415
1416 vp = VIMPL_TO_VNODE(vip);
1417 KASSERT(mutex_owned(vp->v_interlock));
1418
1419 KASSERT(vrefcnt(vp) == 0);
1420 KASSERT(vp->v_holdcnt == 0);
1421 KASSERT(vp->v_writecount == 0);
1422 lru_requeue(vp, NULL);
1423 mutex_exit(vp->v_interlock);
1424
1425 vfs_insmntque(vp, NULL);
1426 if (vp->v_type == VBLK || vp->v_type == VCHR)
1427 spec_node_destroy(vp);
1428
1429 mutex_obj_free(vp->v_interlock);
1430 rw_destroy(&vip->vi_lock);
1431 uvm_obj_destroy(&vp->v_uobj, true);
1432 klist_fini(&vp->v_klist);
1433 cv_destroy(&vp->v_cv);
1434 cache_vnode_fini(vp);
1435 pool_cache_put(vcache_pool, vip);
1436 }
1437
1438 /*
1439 * Try to get an initial reference on this cached vnode.
1440 * Returns zero on success or EBUSY if the vnode state is not LOADED.
1441 *
1442 * NB: lockless code sequences may rely on this not blocking.
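 *
 * Illustrative fallback pattern (a sketch, not a verbatim caller): lockless
 * lookups take a speculative reference and fall back to the waiting
 * variant on EBUSY.
 *
 *	error = vcache_tryvget(vp);
 *	if (error != 0) {
 *		mutex_enter(vp->v_interlock);
 *		error = vcache_vget(vp);
 *	}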
1443 */
1444 int
1445 vcache_tryvget(vnode_t *vp)
1446 {
1447 u_int use, next;
1448
1449 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
1450 if (__predict_false((use & VUSECOUNT_GATE) == 0)) {
1451 return EBUSY;
1452 }
1453 next = atomic_cas_uint(&vp->v_usecount,
1454 use, (use + 1) | VUSECOUNT_VGET);
1455 if (__predict_true(next == use)) {
1456 return 0;
1457 }
1458 }
1459 }
1460
1461 /*
1462 * Try to get an initial reference on this cached vnode.
1463 * Returns zero on success and ENOENT if the vnode has been reclaimed.
1464 * Will wait for the vnode state to be stable.
1465 *
1466 * v_interlock locked on entry and unlocked on exit.
1467 */
1468 int
1469 vcache_vget(vnode_t *vp)
1470 {
1471 int error;
1472
1473 KASSERT(mutex_owned(vp->v_interlock));
1474
1475 /* Increment hold count to prevent vnode from disappearing. */
1476 vp->v_holdcnt++;
1477 VSTATE_WAIT_STABLE(vp);
1478 vp->v_holdcnt--;
1479
1480 /* If this was the last reference to a reclaimed vnode free it now. */
1481 if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
1482 if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
1483 vcache_free(VNODE_TO_VIMPL(vp));
1484 else
1485 mutex_exit(vp->v_interlock);
1486 return ENOENT;
1487 }
1488 VSTATE_ASSERT(vp, VS_LOADED);
1489 error = vcache_tryvget(vp);
1490 KASSERT(error == 0);
1491 mutex_exit(vp->v_interlock);
1492
1493 return 0;
1494 }
1495
1496 /*
1497 * Get a vnode / fs node pair by key and return it referenced through vpp.
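 *
 * Usage sketch (illustrative; the key shown is hypothetical): a file system
 * typically passes its on-disk identifier as the key, e.g.
 *
 *	ino_t ino;
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *
 * On a cache miss this calls the file system's VFS_LOADVNODE() with the
 * same key; VFS_LOADVNODE() attaches the fs node and returns a key pointer
 * that must remain valid while the vnode stays cached (commonly a field
 * of the fs node).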
1498 */
1499 int
1500 vcache_get(struct mount *mp, const void *key, size_t key_len,
1501 struct vnode **vpp)
1502 {
1503 int error;
1504 uint32_t hash;
1505 const void *new_key;
1506 struct vnode *vp;
1507 struct vcache_key vcache_key;
1508 vnode_impl_t *vip, *new_vip;
1509
1510 new_key = NULL;
1511 *vpp = NULL;
1512
1513 vcache_key.vk_mount = mp;
1514 vcache_key.vk_key = key;
1515 vcache_key.vk_key_len = key_len;
1516 hash = vcache_hash(&vcache_key);
1517
1518 again:
1519 mutex_enter(&vcache_lock);
1520 vip = vcache_hash_lookup(&vcache_key, hash);
1521
1522 /* If found, take a reference or retry. */
1523 if (__predict_true(vip != NULL)) {
1524 /*
1525 * If the vnode is loading we cannot take the v_interlock
1526 * here as it might change during load (see uvm_obj_setlock()).
1527 * As changing state from VS_LOADING requires both vcache_lock
1528 * and v_interlock it is safe to test with vcache_lock held.
1529 *
1530 * Wait for vnodes changing state from VS_LOADING and retry.
1531 */
1532 if (__predict_false(vip->vi_state == VS_LOADING)) {
1533 cv_wait(&vcache_cv, &vcache_lock);
1534 mutex_exit(&vcache_lock);
1535 goto again;
1536 }
1537 vp = VIMPL_TO_VNODE(vip);
1538 mutex_enter(vp->v_interlock);
1539 mutex_exit(&vcache_lock);
1540 error = vcache_vget(vp);
1541 if (error == ENOENT)
1542 goto again;
1543 if (error == 0)
1544 *vpp = vp;
1545 KASSERT((error != 0) == (*vpp == NULL));
1546 return error;
1547 }
1548 mutex_exit(&vcache_lock);
1549
1550 /* Allocate and initialize a new vcache / vnode pair. */
1551 error = vfs_busy(mp);
1552 if (error)
1553 return error;
1554 new_vip = vcache_alloc();
1555 new_vip->vi_key = vcache_key;
1556 vp = VIMPL_TO_VNODE(new_vip);
1557 mutex_enter(&vcache_lock);
1558 vip = vcache_hash_lookup(&vcache_key, hash);
1559 if (vip == NULL) {
1560 SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
1561 new_vip, vi_hash);
1562 vip = new_vip;
1563 }
1564
1565 /* If another thread beat us inserting this node, retry. */
1566 if (vip != new_vip) {
1567 vcache_dealloc(new_vip);
1568 vfs_unbusy(mp);
1569 goto again;
1570 }
1571 mutex_exit(&vcache_lock);
1572
1573 /* Load the fs node. Exclusive as new_vip is VS_LOADING. */
1574 error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
1575 if (error) {
1576 mutex_enter(&vcache_lock);
1577 SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1578 new_vip, vnode_impl, vi_hash);
1579 vcache_dealloc(new_vip);
1580 vfs_unbusy(mp);
1581 KASSERT(*vpp == NULL);
1582 return error;
1583 }
1584 KASSERT(new_key != NULL);
1585 KASSERT(memcmp(key, new_key, key_len) == 0);
1586 KASSERT(vp->v_op != NULL);
1587 vfs_insmntque(vp, mp);
1588 if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1589 vp->v_vflag |= VV_MPSAFE;
1590 vfs_ref(mp);
1591 vfs_unbusy(mp);
1592
1593 /* Finished loading, finalize node. */
1594 mutex_enter(&vcache_lock);
1595 new_vip->vi_key.vk_key = new_key;
1596 mutex_enter(vp->v_interlock);
1597 VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1598 mutex_exit(vp->v_interlock);
1599 mutex_exit(&vcache_lock);
1600 *vpp = vp;
1601 return 0;
1602 }
1603
1604 /*
1605 * Create a new vnode / fs node pair and return it referenced through vpp.
1606 */
1607 int
1608 vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
1609 kauth_cred_t cred, void *extra, struct vnode **vpp)
1610 {
1611 int error;
1612 uint32_t hash;
1613 struct vnode *vp, *ovp;
1614 vnode_impl_t *vip, *ovip;
1615
1616 *vpp = NULL;
1617
1618 /* Allocate and initialize a new vcache / vnode pair. */
1619 error = vfs_busy(mp);
1620 if (error)
1621 return error;
1622 vip = vcache_alloc();
1623 vip->vi_key.vk_mount = mp;
1624 vp = VIMPL_TO_VNODE(vip);
1625
1626 /* Create and load the fs node. */
1627 error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
1628 &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
1629 if (error) {
1630 mutex_enter(&vcache_lock);
1631 vcache_dealloc(vip);
1632 vfs_unbusy(mp);
1633 KASSERT(*vpp == NULL);
1634 return error;
1635 }
1636 KASSERT(vp->v_op != NULL);
1637 KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
1638 if (vip->vi_key.vk_key_len > 0) {
1639 KASSERT(vip->vi_key.vk_key != NULL);
1640 hash = vcache_hash(&vip->vi_key);
1641
1642 /*
1643 * Wait for previous instance to be reclaimed,
1644 * then insert new node.
1645 */
1646 mutex_enter(&vcache_lock);
1647 while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
1648 ovp = VIMPL_TO_VNODE(ovip);
1649 mutex_enter(ovp->v_interlock);
1650 mutex_exit(&vcache_lock);
1651 error = vcache_vget(ovp);
1652 KASSERT(error == ENOENT);
1653 mutex_enter(&vcache_lock);
1654 }
1655 SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
1656 vip, vi_hash);
1657 mutex_exit(&vcache_lock);
1658 }
1659 vfs_insmntque(vp, mp);
1660 if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1661 vp->v_vflag |= VV_MPSAFE;
1662 vfs_ref(mp);
1663 vfs_unbusy(mp);
1664
1665 /* Finished loading, finalize node. */
1666 mutex_enter(&vcache_lock);
1667 mutex_enter(vp->v_interlock);
1668 VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1669 mutex_exit(&vcache_lock);
1670 mutex_exit(vp->v_interlock);
1671 *vpp = vp;
1672 return 0;
1673 }
1674
1675 /*
1676 * Prepare key change: update the old cache node's key and lock the new cache node.
1677 * Return an error if the new node already exists.
1678 */
1679 int
1680 vcache_rekey_enter(struct mount *mp, struct vnode *vp,
1681 const void *old_key, size_t old_key_len,
1682 const void *new_key, size_t new_key_len)
1683 {
1684 uint32_t old_hash, new_hash;
1685 struct vcache_key old_vcache_key, new_vcache_key;
1686 vnode_impl_t *vip, *new_vip;
1687
1688 old_vcache_key.vk_mount = mp;
1689 old_vcache_key.vk_key = old_key;
1690 old_vcache_key.vk_key_len = old_key_len;
1691 old_hash = vcache_hash(&old_vcache_key);
1692
1693 new_vcache_key.vk_mount = mp;
1694 new_vcache_key.vk_key = new_key;
1695 new_vcache_key.vk_key_len = new_key_len;
1696 new_hash = vcache_hash(&new_vcache_key);
1697
1698 new_vip = vcache_alloc();
1699 new_vip->vi_key = new_vcache_key;
1700
1701 /* Insert locked new node used as placeholder. */
1702 mutex_enter(&vcache_lock);
1703 vip = vcache_hash_lookup(&new_vcache_key, new_hash);
1704 if (vip != NULL) {
1705 vcache_dealloc(new_vip);
1706 return EEXIST;
1707 }
1708 SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
1709 new_vip, vi_hash);
1710
1711 /* Replace the old node's key with the temporary copy. */
1712 vip = vcache_hash_lookup(&old_vcache_key, old_hash);
1713 KASSERT(vip != NULL);
1714 KASSERT(VIMPL_TO_VNODE(vip) == vp);
1715 KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
1716 vip->vi_key = old_vcache_key;
1717 mutex_exit(&vcache_lock);
1718 return 0;
1719 }
1720
1721 /*
1722 * Key change complete: update old node and remove placeholder.
1723 */
1724 void
1725 vcache_rekey_exit(struct mount *mp, struct vnode *vp,
1726 const void *old_key, size_t old_key_len,
1727 const void *new_key, size_t new_key_len)
1728 {
1729 uint32_t old_hash, new_hash;
1730 struct vcache_key old_vcache_key, new_vcache_key;
1731 vnode_impl_t *vip, *new_vip;
1732 struct vnode *new_vp;
1733
1734 old_vcache_key.vk_mount = mp;
1735 old_vcache_key.vk_key = old_key;
1736 old_vcache_key.vk_key_len = old_key_len;
1737 old_hash = vcache_hash(&old_vcache_key);
1738
1739 new_vcache_key.vk_mount = mp;
1740 new_vcache_key.vk_key = new_key;
1741 new_vcache_key.vk_key_len = new_key_len;
1742 new_hash = vcache_hash(&new_vcache_key);
1743
1744 mutex_enter(&vcache_lock);
1745
1746 /* Lookup old and new node. */
1747 vip = vcache_hash_lookup(&old_vcache_key, old_hash);
1748 KASSERT(vip != NULL);
1749 KASSERT(VIMPL_TO_VNODE(vip) == vp);
1750
1751 new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
1752 KASSERT(new_vip != NULL);
1753 KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
1754 new_vp = VIMPL_TO_VNODE(new_vip);
1755 mutex_enter(new_vp->v_interlock);
1756 VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
1757 mutex_exit(new_vp->v_interlock);
1758
1759 /* Rekey old node and put it onto its new hashlist. */
1760 vip->vi_key = new_vcache_key;
1761 if (old_hash != new_hash) {
1762 SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
1763 vip, vnode_impl, vi_hash);
1764 SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
1765 vip, vi_hash);
1766 }
1767
1768 /* Remove new node used as placeholder. */
1769 SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
1770 new_vip, vnode_impl, vi_hash);
1771 vcache_dealloc(new_vip);
1772 }
1773
1774 /*
1775 * Disassociate the underlying file system from a vnode.
1776 *
1777 * Must be called with vnode locked and will return unlocked.
1778 * Must be called with the interlock held, and will return with it held.
1779 */
1780 static void
1781 vcache_reclaim(vnode_t *vp)
1782 {
1783 lwp_t *l = curlwp;
1784 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
1785 struct mount *mp = vp->v_mount;
1786 uint32_t hash;
1787 uint8_t temp_buf[64], *temp_key;
1788 size_t temp_key_len;
1789 bool recycle, active;
1790 int error;
1791
1792 KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
1793 VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1794 KASSERT(mutex_owned(vp->v_interlock));
1795 KASSERT(vrefcnt(vp) != 0);
1796
1797 active = (vrefcnt(vp) > 1);
1798 temp_key_len = vip->vi_key.vk_key_len;
1799 /*
1800 * Prevent the vnode from being recycled or brought into use
1801 * while we clean it out.
1802 */
1803 VSTATE_CHANGE(vp, VS_BLOCKED, VS_RECLAIMING);
1804 mutex_exit(vp->v_interlock);
1805
1806 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
1807 mutex_enter(vp->v_interlock);
1808 if ((vp->v_iflag & VI_EXECMAP) != 0) {
1809 cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
1810 }
1811 vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
1812 vp->v_iflag |= VI_DEADCHECK; /* for genfs_getpages() */
1813 mutex_exit(vp->v_interlock);
1814 rw_exit(vp->v_uobj.vmobjlock);
1815
1816 /*
1817 * With vnode state set to reclaiming, purge name cache immediately
1818 * to prevent new handles on vnode, and wait for existing threads
1819 * trying to get a handle to notice VS_RECLAIMED status and abort.
1820 */
1821 cache_purge(vp);
1822
1823 /* Replace the vnode key with a temporary copy. */
1824 if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
1825 temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
1826 } else {
1827 temp_key = temp_buf;
1828 }
1829 if (vip->vi_key.vk_key_len > 0) {
1830 mutex_enter(&vcache_lock);
1831 memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
1832 vip->vi_key.vk_key = temp_key;
1833 mutex_exit(&vcache_lock);
1834 }
1835
1836 fstrans_start(mp);
1837
1838 /*
1839 * Clean out any cached data associated with the vnode.
1840 * If purging an active vnode, it must be closed and
1841 * deactivated before being reclaimed.
1842 */
1843 error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
1844 if (error != 0) {
1845 if (wapbl_vphaswapbl(vp))
1846 WAPBL_DISCARD(wapbl_vptomp(vp));
1847 error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
1848 }
1849 KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
1850 KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1851 if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
1852 spec_node_revoke(vp);
1853 }
1854
1855 /*
1856 * Disassociate the underlying file system from the vnode.
1857 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
1858 * the vnode, and may destroy the vnode so that VOP_UNLOCK
1859 * would no longer function.
1860 */
1861 VOP_INACTIVE(vp, &recycle);
1862 KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
1863 VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1864 if (VOP_RECLAIM(vp)) {
1865 vnpanic(vp, "%s: cannot reclaim", __func__);
1866 }
1867
1868 KASSERT(vp->v_data == NULL);
1869 KASSERT((vp->v_iflag & VI_PAGES) == 0);
1870
1871 if (vp->v_type == VREG && vp->v_ractx != NULL) {
1872 uvm_ra_freectx(vp->v_ractx);
1873 vp->v_ractx = NULL;
1874 }
1875
1876 if (vip->vi_key.vk_key_len > 0) {
1877 /* Remove from vnode cache. */
1878 hash = vcache_hash(&vip->vi_key);
1879 mutex_enter(&vcache_lock);
1880 KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
1881 SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1882 vip, vnode_impl, vi_hash);
1883 mutex_exit(&vcache_lock);
1884 }
1885 if (temp_key != temp_buf)
1886 kmem_free(temp_key, temp_key_len);
1887
1888 /* Done with purge, notify sleepers of the grim news. */
1889 mutex_enter(vp->v_interlock);
1890 vp->v_op = dead_vnodeop_p;
1891 vp->v_vflag |= VV_LOCKSWORK;
1892 VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
1893 vp->v_tag = VT_NON;
1894 /*
1895 * Don't check for interest in NOTE_REVOKE; it's always posted
1896 * because it sets EV_EOF.
1897 */
1898 KNOTE(&vp->v_klist, NOTE_REVOKE);
1899 mutex_exit(vp->v_interlock);
1900
1901 /*
1902 * Move to dead mount. Must be after changing the operations
1903 * vector as vnode operations enter the mount before using the
1904 * operations vector. See sys/kern/vnode_if.c.
1905 */
1906 vp->v_vflag &= ~VV_ROOT;
1907 vfs_ref(dead_rootmount);
1908 vfs_insmntque(vp, dead_rootmount);
1909
1910 #ifdef PAX_SEGVGUARD
1911 pax_segvguard_cleanup(vp);
1912 #endif /* PAX_SEGVGUARD */
1913
1914 mutex_enter(vp->v_interlock);
1915 fstrans_done(mp);
1916 KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1917 }
1918
1919 /*
1920 * Disassociate the underlying file system from an open device vnode
1921 * and make it anonymous.
1922 *
1923 * Vnode unlocked on entry, drops a reference to the vnode.
1924 */
1925 void
1926 vcache_make_anon(vnode_t *vp)
1927 {
1928 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
1929 uint32_t hash;
1930 bool recycle;
1931
1932 KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
1933 KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
1934 VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);
1935
1936 /* Remove from vnode cache. */
1937 hash = vcache_hash(&vip->vi_key);
1938 mutex_enter(&vcache_lock);
1939 KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
1940 SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1941 vip, vnode_impl, vi_hash);
1942 vip->vi_key.vk_mount = dead_rootmount;
1943 vip->vi_key.vk_key_len = 0;
1944 vip->vi_key.vk_key = NULL;
1945 mutex_exit(&vcache_lock);
1946
1947 /*
1948 * Disassociate the underlying file system from the vnode.
1949 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
1950 * the vnode, and may destroy the vnode so that VOP_UNLOCK
1951 * would no longer function.
1952 */
1953 if (vn_lock(vp, LK_EXCLUSIVE)) {
1954 vnpanic(vp, "%s: cannot lock", __func__);
1955 }
1956 VOP_INACTIVE(vp, &recycle);
1957 KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
1958 VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1959 if (VOP_RECLAIM(vp)) {
1960 vnpanic(vp, "%s: cannot reclaim", __func__);
1961 }
1962
1963 /* Purge name cache. */
1964 cache_purge(vp);
1965
1966 /* Done with purge, change operations vector. */
1967 mutex_enter(vp->v_interlock);
1968 vp->v_op = spec_vnodeop_p;
1969 vp->v_vflag |= VV_MPSAFE;
1970 vp->v_vflag &= ~VV_LOCKSWORK;
1971 mutex_exit(vp->v_interlock);
1972
1973 /*
1974 * Move to dead mount. Must be after changing the operations
1975 * vector as vnode operations enter the mount before using the
1976 * operations vector. See sys/kern/vnode_if.c.
1977 */
1978 vfs_ref(dead_rootmount);
1979 vfs_insmntque(vp, dead_rootmount);
1980
1981 vrele(vp);
1982 }
1983
1984 /*
1985 * Update outstanding I/O count and do wakeup if requested.
1986 */
1987 void
1988 vwakeup(struct buf *bp)
1989 {
1990 vnode_t *vp;
1991
1992 if ((vp = bp->b_vp) == NULL)
1993 return;
1994
1995 KASSERT(bp->b_objlock == vp->v_interlock);
1996 KASSERT(mutex_owned(bp->b_objlock));
1997
1998 if (--vp->v_numoutput < 0)
1999 vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
2000 if (vp->v_numoutput == 0)
2001 cv_broadcast(&vp->v_cv);
2002 }
2003
2004 /*
2005 * Test a vnode for being or becoming dead. Returns one of:
2006 * EBUSY: vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
2007 * ENOENT: vnode is dead.
2008 * 0: otherwise.
2009 *
2010 * Whenever this function returns a non-zero value all future
2011 * calls will also return a non-zero value.
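 *
 * Typical caller pattern (illustrative sketch only):
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		return error;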
2012 */
2013 int
2014 vdead_check(struct vnode *vp, int flags)
2015 {
2016
2017 KASSERT(mutex_owned(vp->v_interlock));
2018
2019 if (! ISSET(flags, VDEAD_NOWAIT))
2020 VSTATE_WAIT_STABLE(vp);
2021
2022 if (VSTATE_GET(vp) == VS_RECLAIMING) {
2023 KASSERT(ISSET(flags, VDEAD_NOWAIT));
2024 return EBUSY;
2025 } else if (VSTATE_GET(vp) == VS_RECLAIMED) {
2026 return ENOENT;
2027 }
2028
2029 return 0;
2030 }
2031
2032 int
2033 vfs_drainvnodes(void)
2034 {
2035 int i, gen;
2036
2037 mutex_enter(&vdrain_lock);
2038 for (i = 0; i < 2; i++) {
2039 gen = vdrain_gen;
2040 while (gen == vdrain_gen) {
2041 cv_broadcast(&vdrain_cv);
2042 cv_wait(&vdrain_gen_cv, &vdrain_lock);
2043 }
2044 }
2045 mutex_exit(&vdrain_lock);
2046
2047 if (numvnodes >= desiredvnodes)
2048 return EBUSY;
2049
2050 if (vcache_hashsize != desiredvnodes)
2051 vcache_reinit();
2052
2053 return 0;
2054 }
2055
2056 void
2057 vnpanic(vnode_t *vp, const char *fmt, ...)
2058 {
2059 va_list ap;
2060
2061 #ifdef DIAGNOSTIC
2062 vprint(NULL, vp);
2063 #endif
2064 va_start(ap, fmt);
2065 vpanic(fmt, ap);
2066 va_end(ap);
2067 }
2068
2069 void
2070 vshareilock(vnode_t *tvp, vnode_t *fvp)
2071 {
2072 kmutex_t *oldlock;
2073
2074 oldlock = tvp->v_interlock;
2075 mutex_obj_hold(fvp->v_interlock);
2076 tvp->v_interlock = fvp->v_interlock;
2077 mutex_obj_free(oldlock);
2078 }
2079