1 /* $NetBSD: vfs_vnode.c,v 1.155 2024/12/07 02:23:09 riastradh Exp $ */
2
3 /*-
4 * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer.
16 * 2. Redistributions in binary form must reproduce the above copyright
17 * notice, this list of conditions and the following disclaimer in the
18 * documentation and/or other materials provided with the distribution.
19 *
20 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 * POSSIBILITY OF SUCH DAMAGE.
31 */
32
33 /*
34 * Copyright (c) 1989, 1993
35 * The Regents of the University of California. All rights reserved.
36 * (c) UNIX System Laboratories, Inc.
37 * All or some portions of this file are derived from material licensed
38 * to the University of California by American Telephone and Telegraph
39 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
40 * the permission of UNIX System Laboratories, Inc.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
67 */
68
69 /*
70 * The vnode cache subsystem.
71 *
72 * Life-cycle
73 *
74 * Normally, there are two points where new vnodes are created:
75 * VOP_CREATE(9) and VOP_LOOKUP(9). The life-cycle of a vnode
76 * starts in one of the following ways:
77 *
78 * - Allocation, via vcache_get(9) or vcache_new(9).
79 * - Reclamation of inactive vnode, via vcache_vget(9).
80 *
81 * Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
82 * was another, traditional way. Currently, only the draining thread
83 * recycles vnodes. This behaviour might be revisited.
84 *
85 * The life-cycle ends when the last reference is dropped, usually
86 * in VOP_REMOVE(9). In that case, VOP_INACTIVE(9) is called to inform
87 * the file system that the vnode is inactive. Via this call, the file
88 * system indicates whether the vnode can be recycled (usually by checking
89 * its own references, e.g. the link count, or whether the file was removed).
90 *
91 * Depending on that indication, the vnode is either put onto a free list
92 * (the cache), or cleaned via vcache_reclaim(), which calls VOP_RECLAIM(9)
93 * to disassociate the underlying file system from the vnode, and is then
94 * destroyed.
95 *
96 * Vnode state
97 *
98 * Vnode is always in one of six states:
99 * - MARKER This is a marker vnode to help list traversal. It
100 * will never change its state.
101 * - LOADING Vnode is associating with the underlying file system
102 * and is not yet ready to use.
103 * - LOADED Vnode has an associated underlying file system and is
104 * ready to use.
105 * - BLOCKED Vnode is active but cannot get new references.
106 * - RECLAIMING Vnode is disassociating from the underlying file
107 * system.
108 * - RECLAIMED Vnode has disassociated from the underlying file
109 * system and is dead.
110 *
111 * Valid state changes are:
112 * LOADING -> LOADED
113 * Vnode has been initialised in vcache_get() or
114 * vcache_new() and is ready to use.
115 * BLOCKED -> RECLAIMING
116 * Vnode starts disassociation from underlying file
117 * system in vcache_reclaim().
118 * RECLAIMING -> RECLAIMED
119 * Vnode finished disassociation from underlying file
120 * system in vcache_reclaim().
121 * LOADED -> BLOCKED
122 * Either vcache_rekey*() is changing the vnode key or
123 * vrelel() is about to call VOP_INACTIVE().
124 * BLOCKED -> LOADED
125 * The block condition is over.
126 * LOADING -> RECLAIMED
127 * Either vcache_get() or vcache_new() failed to
128 * associate the underlying file system or vcache_rekey*()
129 * drops a vnode used as placeholder.
130 *
131 * Of these states, LOADING, BLOCKED and RECLAIMING are intermediate,
132 * and it is possible to wait for a state change.
133 *
134 * State is protected with v_interlock, with one exception:
135 * to change from LOADING both v_interlock and vcache_lock must be held,
136 * so it is possible to check for "state == LOADING" while holding only
137 * vcache_lock and not v_interlock. See vcache_get() for details.
138 *
139 * Reference counting
140 *
141 * A vnode is considered active if its reference count
142 * (vnode_t::v_usecount) is non-zero. The count is maintained using the
143 * vref(9), vrele(9) and vput(9) routines. Common holders of references
144 * are, for example, open files, current working directories and mount points.
145 *
146 * v_usecount is adjusted with atomic operations; however, to change
147 * from a non-zero value to zero the interlock must also be held.
148 */
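/*
 * An illustrative sketch of the life-cycle described above, as seen
 * from a typical caller.  The inode-number key is only an example;
 * each file system defines its own key layout.
 *
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *	if (error)
 *		return error;
 *	(vnode is now referenced and in state LOADED)
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	... use the vnode ...
 *	vput(vp);
 *	(the last reference ends in VOP_INACTIVE() and, if the file is
 *	no longer used, in vcache_reclaim())
 */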
149
150 #include <sys/cdefs.h>
151 __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.155 2024/12/07 02:23:09 riastradh Exp $");
152
153 #ifdef _KERNEL_OPT
154 #include "opt_pax.h"
155 #endif
156
157 #include <sys/param.h>
158 #include <sys/types.h>
159
160 #include <sys/atomic.h>
161 #include <sys/buf.h>
162 #include <sys/conf.h>
163 #include <sys/device.h>
164 #include <sys/fstrans.h>
165 #include <sys/hash.h>
166 #include <sys/kauth.h>
167 #include <sys/kernel.h>
168 #include <sys/kmem.h>
169 #include <sys/module.h>
170 #include <sys/mount.h>
171 #include <sys/namei.h>
172 #include <sys/pax.h>
173 #include <sys/syscallargs.h>
174 #include <sys/sysctl.h>
175 #include <sys/systm.h>
176 #include <sys/threadpool.h>
177 #include <sys/vnode_impl.h>
178 #include <sys/wapbl.h>
179
180 #include <miscfs/deadfs/deadfs.h>
181 #include <miscfs/specfs/specdev.h>
182
183 #include <uvm/uvm.h>
184 #include <uvm/uvm_readahead.h>
185 #include <uvm/uvm_stat.h>
186
187 /* Flags to vrelel. */
188 #define VRELEL_ASYNC 0x0001 /* Always defer to vrele thread. */
189
190 #define LRU_VRELE 0
191 #define LRU_FREE 1
192 #define LRU_HOLD 2
193 #define LRU_COUNT 3
194
195 /*
196 * There are three lru lists: one holds vnodes waiting for async release,
197 * one is for vnodes which have no buffer/page references and one for those
198 * which do (i.e. v_holdcnt is non-zero). We put the lists into a single,
199 * private cache line as vnodes migrate between them while under the same
200 * lock (vdrain_lock).
201 */
202
203 typedef struct {
204 vnode_impl_t *li_marker;
205 } lru_iter_t;
206
207 u_int numvnodes __cacheline_aligned;
208 static vnodelst_t lru_list[LRU_COUNT] __cacheline_aligned;
209 static struct threadpool *threadpool;
210 static struct threadpool_job vdrain_job;
211 static struct threadpool_job vrele_job;
212 static kmutex_t vdrain_lock __cacheline_aligned;
213 SLIST_HEAD(hashhead, vnode_impl);
214 static kmutex_t vcache_lock __cacheline_aligned;
215 static kcondvar_t vcache_cv;
216 static u_int vcache_hashsize;
217 static u_long vcache_hashmask;
218 static struct hashhead *vcache_hashtab;
219 static pool_cache_t vcache_pool;
220 static void lru_requeue(vnode_t *, vnodelst_t *);
221 static vnodelst_t * lru_which(vnode_t *);
222 static vnode_impl_t * lru_iter_first(int, lru_iter_t *);
223 static vnode_impl_t * lru_iter_next(lru_iter_t *);
224 static void lru_iter_release(lru_iter_t *);
225 static vnode_impl_t * vcache_alloc(void);
226 static void vcache_dealloc(vnode_impl_t *);
227 static void vcache_free(vnode_impl_t *);
228 static void vcache_init(void);
229 static void vcache_reinit(void);
230 static void vcache_reclaim(vnode_t *);
231 static void vrele_deferred(vnode_impl_t *);
232 static void vrelel(vnode_t *, int, int);
233 static void vnpanic(vnode_t *, const char *, ...)
234 __printflike(2, 3);
235 static bool vdrain_one(u_int);
236 static void vdrain_task(struct threadpool_job *);
237 static void vrele_task(struct threadpool_job *);
238
239 /* Routines having to do with the management of the vnode table. */
240
241 /*
242 * The high bit of v_usecount is a gate for vcache_tryvget(). It's set
243 * only when the vnode state is LOADED.
244 * The next bit of v_usecount is a flag for vrelel(). It's set
245 * from vcache_vget() and vcache_tryvget() whenever the operation succeeds.
246 */
247 #define VUSECOUNT_MASK 0x3fffffff
248 #define VUSECOUNT_GATE 0x80000000
249 #define VUSECOUNT_VGET 0x40000000
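/*
 * Illustration of the resulting v_usecount layout (not a structure
 * definition, just the bit positions implied by the masks above):
 *
 *	bit 31:    VUSECOUNT_GATE -- open only while the state is LOADED
 *	bit 30:    VUSECOUNT_VGET -- a reference was gained via
 *	                             vcache_vget() or vcache_tryvget()
 *	bits 0-29: the reference count proper (VUSECOUNT_MASK)
 */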
250
251 /*
252 * Return the current usecount of a vnode.
253 */
254 inline int
255 vrefcnt(struct vnode *vp)
256 {
257
258 return atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_MASK;
259 }
260
261 /* Vnode state operations and diagnostics. */
262
263 #if defined(DIAGNOSTIC)
264
265 #define VSTATE_VALID(state) \
266 ((state) != VS_ACTIVE && (state) != VS_MARKER)
267 #define VSTATE_GET(vp) \
268 vstate_assert_get((vp), __func__, __LINE__)
269 #define VSTATE_CHANGE(vp, from, to) \
270 vstate_assert_change((vp), (from), (to), __func__, __LINE__)
271 #define VSTATE_WAIT_STABLE(vp) \
272 vstate_assert_wait_stable((vp), __func__, __LINE__)
273
274 void
275 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
276 bool has_lock)
277 {
278 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
279 int refcnt = vrefcnt(vp);
280
281 if (!has_lock) {
282 enum vnode_state vstate = atomic_load_relaxed(&vip->vi_state);
283
284 if (state == VS_ACTIVE && refcnt > 0 &&
285 (vstate == VS_LOADED || vstate == VS_BLOCKED))
286 return;
287 if (vstate == state)
288 return;
289 mutex_enter((vp)->v_interlock);
290 }
291
292 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
293
294 if ((state == VS_ACTIVE && refcnt > 0 &&
295 (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
296 vip->vi_state == state) {
297 if (!has_lock)
298 mutex_exit((vp)->v_interlock);
299 return;
300 }
301 vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
302 vstate_name(vip->vi_state), refcnt,
303 vstate_name(state), func, line);
304 }
305
306 static enum vnode_state
307 vstate_assert_get(vnode_t *vp, const char *func, int line)
308 {
309 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
310
311 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
312 if (! VSTATE_VALID(vip->vi_state))
313 vnpanic(vp, "state is %s at %s:%d",
314 vstate_name(vip->vi_state), func, line);
315
316 return vip->vi_state;
317 }
318
319 static void
320 vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
321 {
322 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
323
324 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
325 if (! VSTATE_VALID(vip->vi_state))
326 vnpanic(vp, "state is %s at %s:%d",
327 vstate_name(vip->vi_state), func, line);
328
329 while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
330 cv_wait(&vp->v_cv, vp->v_interlock);
331
332 if (! VSTATE_VALID(vip->vi_state))
333 vnpanic(vp, "state is %s at %s:%d",
334 vstate_name(vip->vi_state), func, line);
335 }
336
337 static void
338 vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
339 const char *func, int line)
340 {
341 bool gated = (atomic_load_relaxed(&vp->v_usecount) & VUSECOUNT_GATE);
342 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
343
344 KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
345 if (from == VS_LOADING)
346 KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);
347
348 if (! VSTATE_VALID(from))
349 vnpanic(vp, "from is %s at %s:%d",
350 vstate_name(from), func, line);
351 if (! VSTATE_VALID(to))
352 vnpanic(vp, "to is %s at %s:%d",
353 vstate_name(to), func, line);
354 if (vip->vi_state != from)
355 vnpanic(vp, "from is %s, expected %s at %s:%d\n",
356 vstate_name(vip->vi_state), vstate_name(from), func, line);
357 if ((from == VS_LOADED) != gated)
358 vnpanic(vp, "state is %s, gate %d does not match at %s:%d\n",
359 vstate_name(vip->vi_state), gated, func, line);
360
361 /* Open/close the gate for vcache_tryvget(). */
362 if (to == VS_LOADED) {
363 membar_release();
364 atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
365 } else {
366 atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
367 }
368
369 atomic_store_relaxed(&vip->vi_state, to);
370 if (from == VS_LOADING)
371 cv_broadcast(&vcache_cv);
372 if (to == VS_LOADED || to == VS_RECLAIMED)
373 cv_broadcast(&vp->v_cv);
374 }
375
376 #else /* defined(DIAGNOSTIC) */
377
378 #define VSTATE_GET(vp) \
379 (VNODE_TO_VIMPL((vp))->vi_state)
380 #define VSTATE_CHANGE(vp, from, to) \
381 vstate_change((vp), (from), (to))
382 #define VSTATE_WAIT_STABLE(vp) \
383 vstate_wait_stable((vp))
384 void
385 _vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
386 bool has_lock)
387 {
388
389 }
390
391 static void
392 vstate_wait_stable(vnode_t *vp)
393 {
394 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
395
396 while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
397 cv_wait(&vp->v_cv, vp->v_interlock);
398 }
399
400 static void
401 vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
402 {
403 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
404
405 /* Open/close the gate for vcache_tryvget(). */
406 if (to == VS_LOADED) {
407 membar_release();
408 atomic_or_uint(&vp->v_usecount, VUSECOUNT_GATE);
409 } else {
410 atomic_and_uint(&vp->v_usecount, ~VUSECOUNT_GATE);
411 }
412
413 atomic_store_relaxed(&vip->vi_state, to);
414 if (from == VS_LOADING)
415 cv_broadcast(&vcache_cv);
416 if (to == VS_LOADED || to == VS_RECLAIMED)
417 cv_broadcast(&vp->v_cv);
418 }
419
420 #endif /* defined(DIAGNOSTIC) */
421
422 void
423 vfs_vnode_sysinit(void)
424 {
425 int error __diagused, i;
426
427 dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
428 KASSERT(dead_rootmount != NULL);
429 dead_rootmount->mnt_iflag |= IMNT_MPSAFE;
430
431 mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
432 for (i = 0; i < LRU_COUNT; i++) {
433 TAILQ_INIT(&lru_list[i]);
434 }
435 vcache_init();
436
437 error = threadpool_get(&threadpool, PRI_NONE);
438 KASSERTMSG((error == 0), "threadpool_get failed: %d", error);
439 threadpool_job_init(&vdrain_job, vdrain_task, &vdrain_lock, "vdrain");
440 threadpool_job_init(&vrele_job, vrele_task, &vdrain_lock, "vrele");
441 }
442
443 /*
444 * Allocate a new marker vnode.
445 */
446 vnode_t *
447 vnalloc_marker(struct mount *mp)
448 {
449 vnode_impl_t *vip;
450 vnode_t *vp;
451
452 vip = pool_cache_get(vcache_pool, PR_WAITOK);
453 memset(vip, 0, sizeof(*vip));
454 vp = VIMPL_TO_VNODE(vip);
455 uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
456 vp->v_mount = mp;
457 vp->v_type = VBAD;
458 vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
459 klist_init(&vip->vi_klist.vk_klist);
460 vp->v_klist = &vip->vi_klist;
461 vip->vi_state = VS_MARKER;
462
463 return vp;
464 }
465
466 /*
467 * Free a marker vnode.
468 */
469 void
470 vnfree_marker(vnode_t *vp)
471 {
472 vnode_impl_t *vip;
473
474 vip = VNODE_TO_VIMPL(vp);
475 KASSERT(vip->vi_state == VS_MARKER);
476 mutex_obj_free(vp->v_interlock);
477 uvm_obj_destroy(&vp->v_uobj, true);
478 klist_fini(&vip->vi_klist.vk_klist);
479 pool_cache_put(vcache_pool, vip);
480 }
481
482 /*
483 * Test a vnode for being a marker vnode.
484 */
485 bool
486 vnis_marker(vnode_t *vp)
487 {
488
489 return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
490 }
491
492 /*
493 * Return the lru list this node should be on.
494 */
495 static vnodelst_t *
496 lru_which(vnode_t *vp)
497 {
498
499 KASSERT(mutex_owned(vp->v_interlock));
500
501 if (vp->v_holdcnt > 0)
502 return &lru_list[LRU_HOLD];
503 else
504 return &lru_list[LRU_FREE];
505 }
506
507 /*
508 * Put the vnode at the end of the given list.
509 * Both the current and the new list may be NULL; this is used on vnode alloc/free.
510 * Adjust numvnodes and schedule the vdrain task if there is work.
511 */
512 static void
513 lru_requeue(vnode_t *vp, vnodelst_t *listhd)
514 {
515 vnode_impl_t *vip;
516 int d;
517
518 /*
519 * If the vnode is on the correct list, and was put there recently,
520 * then leave it be, thus avoiding huge cache and lock contention.
521 */
522 vip = VNODE_TO_VIMPL(vp);
523 if (listhd == vip->vi_lrulisthd &&
524 (getticks() - vip->vi_lrulisttm) < hz) {
525 return;
526 }
527
528 mutex_enter(&vdrain_lock);
529 d = 0;
530 if (vip->vi_lrulisthd != NULL)
531 TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
532 else
533 d++;
534 vip->vi_lrulisthd = listhd;
535 vip->vi_lrulisttm = getticks();
536 if (vip->vi_lrulisthd != NULL)
537 TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
538 else
539 d--;
540 if (d != 0) {
541 /*
542 * Looks strange? This is not a bug. Don't store
543 * numvnodes unless there is a change - avoid false
544 * sharing on MP.
545 */
546 numvnodes += d;
547 }
548 if (listhd == &lru_list[LRU_VRELE])
549 threadpool_schedule_job(threadpool, &vrele_job);
550 if (d > 0 && numvnodes > desiredvnodes)
551 threadpool_schedule_job(threadpool, &vdrain_job);
552 if (d > 0 && numvnodes > desiredvnodes + desiredvnodes / 16)
553 kpause("vnfull", false, MAX(1, mstohz(10)), &vdrain_lock);
554 mutex_exit(&vdrain_lock);
555 }
556
557 /*
558 * LRU list iterator.
559 * Caller holds vdrain_lock.
560 */
561 static vnode_impl_t *
562 lru_iter_first(int idx, lru_iter_t *iterp)
563 {
564 vnode_impl_t *marker;
565
566 KASSERT(mutex_owned(&vdrain_lock));
567
568 mutex_exit(&vdrain_lock);
569 marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
570 mutex_enter(&vdrain_lock);
571 marker->vi_lrulisthd = &lru_list[idx];
572 iterp->li_marker = marker;
573
574 TAILQ_INSERT_HEAD(marker->vi_lrulisthd, marker, vi_lrulist);
575
576 return lru_iter_next(iterp);
577 }
578
579 static vnode_impl_t *
580 lru_iter_next(lru_iter_t *iter)
581 {
582 vnode_impl_t *vip, *marker;
583 vnodelst_t *listhd;
584
585 KASSERT(mutex_owned(&vdrain_lock));
586
587 marker = iter->li_marker;
588 listhd = marker->vi_lrulisthd;
589
590 while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
591 TAILQ_REMOVE(listhd, marker, vi_lrulist);
592 TAILQ_INSERT_AFTER(listhd, vip, marker, vi_lrulist);
593 if (!vnis_marker(VIMPL_TO_VNODE(vip)))
594 break;
595 }
596
597 return vip;
598 }
599
600 static void
601 lru_iter_release(lru_iter_t *iter)
602 {
603 vnode_impl_t *marker;
604
605 KASSERT(mutex_owned(&vdrain_lock));
606
607 marker = iter->li_marker;
608 TAILQ_REMOVE(marker->vi_lrulisthd, marker, vi_lrulist);
609
610 mutex_exit(&vdrain_lock);
611 vnfree_marker(VIMPL_TO_VNODE(marker));
612 mutex_enter(&vdrain_lock);
613 }
614
615 /*
616 * Release deferred vrele vnodes for this mount.
617 * Called with file system suspended.
618 */
619 void
620 vrele_flush(struct mount *mp)
621 {
622 lru_iter_t iter;
623 vnode_impl_t *vip;
624
625 KASSERT(fstrans_is_owner(mp));
626
627 mutex_enter(&vdrain_lock);
628 for (vip = lru_iter_first(LRU_VRELE, &iter); vip != NULL;
629 vip = lru_iter_next(&iter)) {
630 if (VIMPL_TO_VNODE(vip)->v_mount != mp)
631 continue;
632 vrele_deferred(vip);
633 }
634 lru_iter_release(&iter);
635 mutex_exit(&vdrain_lock);
636 }
637
638 /*
639 * One pass through the LRU lists to keep the number of allocated
640 * vnodes below target. Returns true if target met.
641 */
642 static bool
643 vdrain_one(u_int target)
644 {
645 int ix, lists[] = { LRU_FREE, LRU_HOLD };
646 lru_iter_t iter;
647 vnode_impl_t *vip;
648 vnode_t *vp;
649 struct mount *mp;
650
651 KASSERT(mutex_owned(&vdrain_lock));
652
653 for (ix = 0; ix < __arraycount(lists); ix++) {
654 for (vip = lru_iter_first(lists[ix], &iter); vip != NULL;
655 vip = lru_iter_next(&iter)) {
656 if (numvnodes < target) {
657 lru_iter_release(&iter);
658 return true;
659 }
660
661 vp = VIMPL_TO_VNODE(vip);
662
663 /* Probe usecount (unlocked). */
664 if (vrefcnt(vp) > 0)
665 continue;
666 /* Try v_interlock -- we lock the wrong direction! */
667 if (!mutex_tryenter(vp->v_interlock))
668 continue;
669 /* Probe usecount and state. */
670 if (vrefcnt(vp) > 0 || VSTATE_GET(vp) != VS_LOADED) {
671 mutex_exit(vp->v_interlock);
672 continue;
673 }
674 mutex_exit(&vdrain_lock);
675
676 mp = vp->v_mount;
677 if (fstrans_start_nowait(mp) != 0) {
678 mutex_exit(vp->v_interlock);
679 mutex_enter(&vdrain_lock);
680 continue;
681 }
682
683 if (vcache_vget(vp) == 0) {
684 if (!vrecycle(vp)) {
685 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
686 mutex_enter(vp->v_interlock);
687 vrelel(vp, 0, LK_EXCLUSIVE);
688 }
689 }
690 fstrans_done(mp);
691
692 mutex_enter(&vdrain_lock);
693 }
694 lru_iter_release(&iter);
695 }
696
697 return false;
698 }
699
700 /*
701 * threadpool task to keep the number of vnodes below desiredvnodes.
702 */
703 static void
704 vdrain_task(struct threadpool_job *job)
705 {
706 u_int target;
707
708 target = desiredvnodes - desiredvnodes / 16;
709
710 mutex_enter(&vdrain_lock);
711
712 while (!vdrain_one(target))
713 kpause("vdrain", false, 1, &vdrain_lock);
714
715 threadpool_job_done(job);
716 mutex_exit(&vdrain_lock);
717 }
718
719 /*
720 * threadpool task to process asynchronous vrele.
721 */
722 static void
723 vrele_task(struct threadpool_job *job)
724 {
725 int skipped;
726 lru_iter_t iter;
727 vnode_impl_t *vip;
728 struct mount *mp;
729
730 mutex_enter(&vdrain_lock);
731 while ((vip = lru_iter_first(LRU_VRELE, &iter)) != NULL) {
732 for (skipped = 0; vip != NULL; vip = lru_iter_next(&iter)) {
733 mp = VIMPL_TO_VNODE(vip)->v_mount;
734 if (fstrans_start_nowait(mp) == 0) {
735 vrele_deferred(vip);
736 fstrans_done(mp);
737 } else {
738 skipped++;
739 }
740 }
741
742 lru_iter_release(&iter);
743 if (skipped) {
744 kpause("vrele", false, MAX(1, mstohz(10)),
745 &vdrain_lock);
746 }
747 }
748
749 threadpool_job_done(job);
750 lru_iter_release(&iter);
751 mutex_exit(&vdrain_lock);
752 }
753
754 /*
755 * Try to drop reference on a vnode. Abort if we are releasing the
756 * last reference. Note: this _must_ succeed if not the last reference.
757 */
758 static bool
759 vtryrele(vnode_t *vp)
760 {
761 u_int use, next;
762
763 membar_release();
764 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
765 if (__predict_false((use & VUSECOUNT_MASK) == 1)) {
766 return false;
767 }
768 KASSERT((use & VUSECOUNT_MASK) > 1);
769 next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
770 if (__predict_true(next == use)) {
771 return true;
772 }
773 }
774 }
775
776 /*
777 * vput: unlock and release the reference.
778 */
779 void
780 vput(vnode_t *vp)
781 {
782 int lktype;
783
784 /*
785 * Do an unlocked check of the usecount. If it looks like we're not
786 * about to drop the last reference, then unlock the vnode and try
787 * to drop the reference. If it ends up being the last reference
788 * after all, vrelel() can fix it all up. Most of the time this
789 * will all go to plan.
790 */
791 if (vrefcnt(vp) > 1) {
792 VOP_UNLOCK(vp);
793 if (vtryrele(vp)) {
794 return;
795 }
796 lktype = LK_NONE;
797 } else {
798 lktype = VOP_ISLOCKED(vp);
799 KASSERT(lktype != LK_NONE);
800 }
801 mutex_enter(vp->v_interlock);
802 vrelel(vp, 0, lktype);
803 }
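/*
 * Illustrative usage, assuming the common pattern rather than any one
 * caller: vput() pairs a held reference with the vnode lock, while
 * vrele() below is for callers that do not hold the vnode lock.
 *
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *	... operate on the locked vnode ...
 *	vput(vp);
 *	(unlocks and releases the reference in one step)
 */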
804
805 /*
806 * Release a vnode from the deferred list.
807 */
808 static void
809 vrele_deferred(vnode_impl_t *vip)
810 {
811 vnode_t *vp;
812
813 KASSERT(mutex_owned(&vdrain_lock));
814 KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
815
816 vp = VIMPL_TO_VNODE(vip);
817
818 /*
819 * First remove the vnode from the vrele list.
820 * Put it on the LRU_HOLD list; the last vrele()
821 * will put it back onto the right list before
822 * its usecount reaches zero.
823 */
824 TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
825 vip->vi_lrulisthd = &lru_list[LRU_HOLD];
826 vip->vi_lrulisttm = getticks();
827 TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
828
829 mutex_exit(&vdrain_lock);
830
831 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
832 mutex_enter(vp->v_interlock);
833 vrelel(vp, 0, LK_EXCLUSIVE);
834
835 mutex_enter(&vdrain_lock);
836 }
837
838 /*
839 * Vnode release. If reference count drops to zero, call inactive
840 * routine and either return to freelist or free to the pool.
841 */
842 static void
843 vrelel(vnode_t *vp, int flags, int lktype)
844 {
845 const bool async = ((flags & VRELEL_ASYNC) != 0);
846 bool recycle, defer, objlock_held;
847 u_int use, next;
848 int error;
849
850 objlock_held = false;
851
852 retry:
853 KASSERT(mutex_owned(vp->v_interlock));
854
855 if (__predict_false(vp->v_op == dead_vnodeop_p &&
856 VSTATE_GET(vp) != VS_RECLAIMED)) {
857 vnpanic(vp, "dead but not clean");
858 }
859
860 /*
861 * If not the last reference, just unlock and drop the reference count.
862 *
863 * Otherwise make sure we pass a point in time where we hold the
864 * last reference with VGET flag unset.
865 */
866 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
867 if (__predict_false((use & VUSECOUNT_MASK) > 1)) {
868 if (objlock_held) {
869 objlock_held = false;
870 rw_exit(vp->v_uobj.vmobjlock);
871 }
872 if (lktype != LK_NONE) {
873 mutex_exit(vp->v_interlock);
874 lktype = LK_NONE;
875 VOP_UNLOCK(vp);
876 mutex_enter(vp->v_interlock);
877 }
878 if (vtryrele(vp)) {
879 mutex_exit(vp->v_interlock);
880 return;
881 }
882 next = atomic_load_relaxed(&vp->v_usecount);
883 continue;
884 }
885 KASSERT((use & VUSECOUNT_MASK) == 1);
886 next = use & ~VUSECOUNT_VGET;
887 if (next != use) {
888 next = atomic_cas_uint(&vp->v_usecount, use, next);
889 }
890 if (__predict_true(next == use)) {
891 break;
892 }
893 }
894 membar_acquire();
895 if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) {
896 vnpanic(vp, "%s: bad ref count", __func__);
897 }
898
899 #ifdef DIAGNOSTIC
900 if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
901 vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
902 vprint("vrelel: missing VOP_CLOSE()", vp);
903 }
904 #endif
905
906 /*
907 * If already clean there is no need to lock, defer or
908 * deactivate this node.
909 */
910 if (VSTATE_GET(vp) == VS_RECLAIMED) {
911 if (objlock_held) {
912 objlock_held = false;
913 rw_exit(vp->v_uobj.vmobjlock);
914 }
915 if (lktype != LK_NONE) {
916 mutex_exit(vp->v_interlock);
917 lktype = LK_NONE;
918 VOP_UNLOCK(vp);
919 mutex_enter(vp->v_interlock);
920 }
921 goto out;
922 }
923
924 /*
925 * First try to get the vnode locked for VOP_INACTIVE().
926 * Defer the vnode release to the vrele task if the caller requests
927 * it explicitly, if the caller is the pagedaemon, or if the lock failed.
928 */
929 defer = false;
930 if ((curlwp == uvm.pagedaemon_lwp) || async) {
931 defer = true;
932 } else if (lktype == LK_SHARED) {
933 /* Excellent chance of getting, if the last ref. */
934 error = vn_lock(vp, LK_UPGRADE | LK_RETRY | LK_NOWAIT);
935 if (error != 0) {
936 defer = true;
937 } else {
938 lktype = LK_EXCLUSIVE;
939 }
940 } else if (lktype == LK_NONE) {
941 /* Excellent chance of getting, if the last ref. */
942 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
943 if (error != 0) {
944 defer = true;
945 } else {
946 lktype = LK_EXCLUSIVE;
947 }
948 }
949 KASSERT(mutex_owned(vp->v_interlock));
950 if (defer) {
951 /*
952 * Defer reclaim to the vrele task; it's not safe to
953 * clean it here. We donate it our last reference.
954 */
955 if (lktype != LK_NONE) {
956 mutex_exit(vp->v_interlock);
957 VOP_UNLOCK(vp);
958 mutex_enter(vp->v_interlock);
959 }
960 lru_requeue(vp, &lru_list[LRU_VRELE]);
961 mutex_exit(vp->v_interlock);
962 return;
963 }
964 KASSERT(lktype == LK_EXCLUSIVE);
965
966 /* If the node gained another reference, retry. */
967 use = atomic_load_relaxed(&vp->v_usecount);
968 if ((use & VUSECOUNT_VGET) != 0) {
969 goto retry;
970 }
971 KASSERT((use & VUSECOUNT_MASK) == 1);
972
973 if ((vp->v_iflag & (VI_TEXT|VI_EXECMAP|VI_WRMAP)) != 0 ||
974 (vp->v_vflag & VV_MAPPED) != 0) {
975 /* Take care of space accounting. */
976 if (!objlock_held) {
977 objlock_held = true;
978 if (!rw_tryenter(vp->v_uobj.vmobjlock, RW_WRITER)) {
979 mutex_exit(vp->v_interlock);
980 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
981 mutex_enter(vp->v_interlock);
982 goto retry;
983 }
984 }
985 if ((vp->v_iflag & VI_EXECMAP) != 0) {
986 cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
987 }
988 vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
989 vp->v_vflag &= ~VV_MAPPED;
990 }
991 if (objlock_held) {
992 objlock_held = false;
993 rw_exit(vp->v_uobj.vmobjlock);
994 }
995
996 /*
997 * Deactivate the vnode, but preserve our reference across
998 * the call to VOP_INACTIVE().
999 *
1000 * If VOP_INACTIVE() indicates that the file has been
1001 * deleted, then recycle the vnode.
1002 *
1003 * Note that VOP_INACTIVE() will not drop the vnode lock.
1004 */
1005 mutex_exit(vp->v_interlock);
1006 recycle = false;
1007 VOP_INACTIVE(vp, &recycle);
1008 if (!recycle) {
1009 lktype = LK_NONE;
1010 VOP_UNLOCK(vp);
1011 }
1012 mutex_enter(vp->v_interlock);
1013
1014 /*
1015 * Block new references then check again to see if a
1016 * new reference was acquired in the meantime. If
1017 * it was, restore the vnode state and try again.
1018 */
1019 if (recycle) {
1020 VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1021 use = atomic_load_relaxed(&vp->v_usecount);
1022 if ((use & VUSECOUNT_VGET) != 0) {
1023 VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1024 goto retry;
1025 }
1026 KASSERT((use & VUSECOUNT_MASK) == 1);
1027 }
1028
1029 /*
1030 * Recycle the vnode if the file is now unused (unlinked).
1031 */
1032 if (recycle) {
1033 VSTATE_ASSERT(vp, VS_BLOCKED);
1034 KASSERT(lktype == LK_EXCLUSIVE);
1035 /* vcache_reclaim drops the lock. */
1036 lktype = LK_NONE;
1037 vcache_reclaim(vp);
1038 }
1039 KASSERT(vrefcnt(vp) > 0);
1040 KASSERT(lktype == LK_NONE);
1041
1042 out:
1043 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
1044 if (__predict_false((use & VUSECOUNT_VGET) != 0 &&
1045 (use & VUSECOUNT_MASK) == 1)) {
1046 /* Gained and released another reference, retry. */
1047 goto retry;
1048 }
1049 next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
1050 if (__predict_true(next == use)) {
1051 if (__predict_false((use & VUSECOUNT_MASK) != 1)) {
1052 /* Gained another reference. */
1053 mutex_exit(vp->v_interlock);
1054 return;
1055 }
1056 break;
1057 }
1058 }
1059 membar_acquire();
1060
1061 if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
1062 /*
1063 * It's clean so destroy it. It isn't referenced
1064 * anywhere since it has been reclaimed.
1065 */
1066 vcache_free(VNODE_TO_VIMPL(vp));
1067 } else {
1068 /*
1069 * Otherwise, put it back onto the freelist. It
1070 * can't be destroyed while still associated with
1071 * a file system.
1072 */
1073 lru_requeue(vp, lru_which(vp));
1074 mutex_exit(vp->v_interlock);
1075 }
1076 }
1077
1078 void
1079 vrele(vnode_t *vp)
1080 {
1081
1082 if (vtryrele(vp)) {
1083 return;
1084 }
1085 mutex_enter(vp->v_interlock);
1086 vrelel(vp, 0, LK_NONE);
1087 }
1088
1089 /*
1090 * Asynchronous vnode release: the vnode is released in a different context.
1091 */
1092 void
1093 vrele_async(vnode_t *vp)
1094 {
1095
1096 if (vtryrele(vp)) {
1097 return;
1098 }
1099 mutex_enter(vp->v_interlock);
1100 vrelel(vp, VRELEL_ASYNC, LK_NONE);
1101 }
1102
1103 /*
1104 * Vnode reference, where a reference is already held by some other
1105 * object (for example, a file structure).
1106 *
1107 * NB: lockless code sequences may rely on this not blocking.
1108 */
1109 void
1110 vref(vnode_t *vp)
1111 {
1112
1113 KASSERT(vrefcnt(vp) > 0);
1114
1115 atomic_inc_uint(&vp->v_usecount);
1116 }
1117
1118 /*
1119 * Page or buffer structure gets a reference.
1120 * Called with v_interlock held.
1121 */
1122 void
1123 vholdl(vnode_t *vp)
1124 {
1125
1126 KASSERT(mutex_owned(vp->v_interlock));
1127
1128 if (vp->v_holdcnt++ == 0 && vrefcnt(vp) == 0)
1129 lru_requeue(vp, lru_which(vp));
1130 }
1131
1132 /*
1133 * Page or buffer structure gets a reference.
1134 */
1135 void
1136 vhold(vnode_t *vp)
1137 {
1138
1139 mutex_enter(vp->v_interlock);
1140 vholdl(vp);
1141 mutex_exit(vp->v_interlock);
1142 }
1143
1144 /*
1145 * Page or buffer structure frees a reference.
1146 * Called with v_interlock held.
1147 */
1148 void
1149 holdrelel(vnode_t *vp)
1150 {
1151
1152 KASSERT(mutex_owned(vp->v_interlock));
1153
1154 if (vp->v_holdcnt <= 0) {
1155 vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
1156 }
1157
1158 vp->v_holdcnt--;
1159 if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
1160 lru_requeue(vp, lru_which(vp));
1161 }
1162
1163 /*
1164 * Page or buffer structure frees a reference.
1165 */
1166 void
1167 holdrele(vnode_t *vp)
1168 {
1169
1170 mutex_enter(vp->v_interlock);
1171 holdrelel(vp);
1172 mutex_exit(vp->v_interlock);
1173 }
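/*
 * Illustrative pairing (a sketch only): code that attaches cached data,
 * such as a buffer or page, to the vnode takes a hold and drops it when
 * done, keeping the vnode on the LRU_HOLD list in between.
 *
 *	vhold(vp);
 *	... vnode has cached data attached ...
 *	holdrele(vp);
 */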
1174
1175 /*
1176 * Recycle an unused vnode if caller holds the last reference.
1177 */
1178 bool
1179 vrecycle(vnode_t *vp)
1180 {
1181 int error __diagused;
1182
1183 mutex_enter(vp->v_interlock);
1184
1185 /* If the vnode is already clean we're done. */
1186 VSTATE_WAIT_STABLE(vp);
1187 if (VSTATE_GET(vp) != VS_LOADED) {
1188 VSTATE_ASSERT(vp, VS_RECLAIMED);
1189 vrelel(vp, 0, LK_NONE);
1190 return true;
1191 }
1192
1193 /* Prevent further references until the vnode is locked. */
1194 VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1195
1196 /* Make sure we hold the last reference. */
1197 if (vrefcnt(vp) != 1) {
1198 VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1199 mutex_exit(vp->v_interlock);
1200 return false;
1201 }
1202
1203 mutex_exit(vp->v_interlock);
1204
1205 /*
1206 * On a leaf file system this lock will always succeed as we hold
1207 * the last reference and prevent further references.
1208 * On layered file systems waiting for the lock would open a can of
1209 * deadlocks as the lower vnodes may have other active references.
1210 */
1211 error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
1212
1213 mutex_enter(vp->v_interlock);
1214 if (error) {
1215 VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
1216 mutex_exit(vp->v_interlock);
1217 return false;
1218 }
1219
1220 KASSERT(vrefcnt(vp) == 1);
1221 vcache_reclaim(vp);
1222 vrelel(vp, 0, LK_NONE);
1223
1224 return true;
1225 }
1226
1227 /*
1228 * Helper for vrevoke() to propagate suspension from lastmp
1229 * to thismp. Both args may be NULL.
1230 * Returns the currently suspended file system or NULL.
1231 */
1232 static struct mount *
1233 vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
1234 {
1235 int error;
1236
1237 if (lastmp == thismp)
1238 return thismp;
1239
1240 if (lastmp != NULL)
1241 vfs_resume(lastmp);
1242
1243 if (thismp == NULL)
1244 return NULL;
1245
1246 do {
1247 error = vfs_suspend(thismp, 0);
1248 } while (error == EINTR || error == ERESTART);
1249
1250 if (error == 0)
1251 return thismp;
1252
1253 KASSERT(error == EOPNOTSUPP || error == ENOENT);
1254 return NULL;
1255 }
1256
1257 /*
1258 * Eliminate all activity associated with the requested vnode
1259 * and with all vnodes aliased to the requested vnode.
1260 */
1261 void
1262 vrevoke(vnode_t *vp)
1263 {
1264 struct mount *mp;
1265 vnode_t *vq;
1266 enum vtype type;
1267 dev_t dev;
1268
1269 KASSERT(vrefcnt(vp) > 0);
1270
1271 mp = vrevoke_suspend_next(NULL, vp->v_mount);
1272
1273 mutex_enter(vp->v_interlock);
1274 VSTATE_WAIT_STABLE(vp);
1275 if (VSTATE_GET(vp) == VS_RECLAIMED) {
1276 mutex_exit(vp->v_interlock);
1277 } else if (vp->v_type != VBLK && vp->v_type != VCHR) {
1278 atomic_inc_uint(&vp->v_usecount);
1279 mutex_exit(vp->v_interlock);
1280 vgone(vp);
1281 } else {
1282 dev = vp->v_rdev;
1283 type = vp->v_type;
1284 mutex_exit(vp->v_interlock);
1285
1286 while (spec_node_lookup_by_dev(type, dev, VDEAD_NOWAIT, &vq)
1287 == 0) {
1288 mp = vrevoke_suspend_next(mp, vq->v_mount);
1289 vgone(vq);
1290 }
1291 }
1292 vrevoke_suspend_next(mp, NULL);
1293 }
1294
1295 /*
1296 * Eliminate all activity associated with a vnode in preparation for
1297 * reuse. Drops a reference from the vnode.
1298 */
1299 void
1300 vgone(vnode_t *vp)
1301 {
1302 int lktype;
1303
1304 KASSERT(vp->v_mount == dead_rootmount ||
1305 fstrans_is_owner(vp->v_mount));
1306
1307 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
1308 lktype = LK_EXCLUSIVE;
1309 mutex_enter(vp->v_interlock);
1310 VSTATE_WAIT_STABLE(vp);
1311 if (VSTATE_GET(vp) == VS_LOADED) {
1312 VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
1313 vcache_reclaim(vp);
1314 lktype = LK_NONE;
1315 }
1316 VSTATE_ASSERT(vp, VS_RECLAIMED);
1317 vrelel(vp, 0, lktype);
1318 }
1319
1320 static inline uint32_t
1321 vcache_hash(const struct vcache_key *key)
1322 {
1323 uint32_t hash = HASH32_BUF_INIT;
1324
1325 KASSERT(key->vk_key_len > 0);
1326
1327 hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
1328 hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
1329 return hash;
1330 }
1331
1332 static int
1333 vcache_stats(struct hashstat_sysctl *hs, bool fill)
1334 {
1335 vnode_impl_t *vip;
1336 uint64_t chain;
1337
1338 strlcpy(hs->hash_name, "vcache", sizeof(hs->hash_name));
1339 strlcpy(hs->hash_desc, "vnode cache hash", sizeof(hs->hash_desc));
1340 if (!fill)
1341 return 0;
1342
1343 hs->hash_size = vcache_hashmask + 1;
1344
1345 for (size_t i = 0; i < hs->hash_size; i++) {
1346 chain = 0;
1347 mutex_enter(&vcache_lock);
1348 SLIST_FOREACH(vip, &vcache_hashtab[i], vi_hash) {
1349 chain++;
1350 }
1351 mutex_exit(&vcache_lock);
1352 if (chain > 0) {
1353 hs->hash_used++;
1354 hs->hash_items += chain;
1355 if (chain > hs->hash_maxchain)
1356 hs->hash_maxchain = chain;
1357 }
1358 preempt_point();
1359 }
1360
1361 return 0;
1362 }
1363
1364 static void
1365 vcache_init(void)
1366 {
1367
1368 vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
1369 0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
1370 KASSERT(vcache_pool != NULL);
1371 mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
1372 cv_init(&vcache_cv, "vcache");
1373 vcache_hashsize = desiredvnodes;
1374 vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
1375 &vcache_hashmask);
1376 hashstat_register("vcache", vcache_stats);
1377 }
1378
1379 static void
1380 vcache_reinit(void)
1381 {
1382 int i;
1383 uint32_t hash;
1384 u_long oldmask, newmask;
1385 struct hashhead *oldtab, *newtab;
1386 vnode_impl_t *vip;
1387
1388 newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
1389 mutex_enter(&vcache_lock);
1390 oldtab = vcache_hashtab;
1391 oldmask = vcache_hashmask;
1392 vcache_hashsize = desiredvnodes;
1393 vcache_hashtab = newtab;
1394 vcache_hashmask = newmask;
1395 for (i = 0; i <= oldmask; i++) {
1396 while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
1397 SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
1398 hash = vcache_hash(&vip->vi_key);
1399 SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
1400 vip, vi_hash);
1401 }
1402 }
1403 mutex_exit(&vcache_lock);
1404 hashdone(oldtab, HASH_SLIST, oldmask);
1405 }
1406
1407 static inline vnode_impl_t *
1408 vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
1409 {
1410 struct hashhead *hashp;
1411 vnode_impl_t *vip;
1412
1413 KASSERT(mutex_owned(&vcache_lock));
1414
1415 hashp = &vcache_hashtab[hash & vcache_hashmask];
1416 SLIST_FOREACH(vip, hashp, vi_hash) {
1417 if (key->vk_mount != vip->vi_key.vk_mount)
1418 continue;
1419 if (key->vk_key_len != vip->vi_key.vk_key_len)
1420 continue;
1421 if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
1422 continue;
1423 return vip;
1424 }
1425 return NULL;
1426 }
1427
1428 /*
1429 * Allocate a new, uninitialized vcache node.
1430 */
1431 static vnode_impl_t *
1432 vcache_alloc(void)
1433 {
1434 vnode_impl_t *vip;
1435 vnode_t *vp;
1436
1437 vip = pool_cache_get(vcache_pool, PR_WAITOK);
1438 vp = VIMPL_TO_VNODE(vip);
1439 memset(vip, 0, sizeof(*vip));
1440
1441 rw_init(&vip->vi_lock);
1442 vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
1443
1444 uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
1445 klist_init(&vip->vi_klist.vk_klist);
1446 vp->v_klist = &vip->vi_klist;
1447 cv_init(&vp->v_cv, "vnode");
1448 cache_vnode_init(vp);
1449
1450 vp->v_usecount = 1;
1451 vp->v_type = VNON;
1452 vp->v_size = vp->v_writesize = VSIZENOTSET;
1453
1454 vip->vi_state = VS_LOADING;
1455
1456 lru_requeue(vp, &lru_list[LRU_FREE]);
1457
1458 return vip;
1459 }
1460
1461 /*
1462 * Deallocate a vcache node in state VS_LOADING.
1463 *
1464 * vcache_lock held on entry and released on return.
1465 */
1466 static void
1467 vcache_dealloc(vnode_impl_t *vip)
1468 {
1469 vnode_t *vp;
1470
1471 KASSERT(mutex_owned(&vcache_lock));
1472
1473 vp = VIMPL_TO_VNODE(vip);
1474 vfs_ref(dead_rootmount);
1475 vfs_insmntque(vp, dead_rootmount);
1476 mutex_enter(vp->v_interlock);
1477 vp->v_op = dead_vnodeop_p;
1478 VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
1479 mutex_exit(&vcache_lock);
1480 vrelel(vp, 0, LK_NONE);
1481 }
1482
1483 /*
1484 * Free an unused, unreferenced vcache node.
1485 * v_interlock locked on entry.
1486 */
1487 static void
1488 vcache_free(vnode_impl_t *vip)
1489 {
1490 vnode_t *vp;
1491
1492 vp = VIMPL_TO_VNODE(vip);
1493 KASSERT(mutex_owned(vp->v_interlock));
1494
1495 KASSERT(vrefcnt(vp) == 0);
1496 KASSERT(vp->v_holdcnt == 0);
1497 KASSERT(vp->v_writecount == 0);
1498 lru_requeue(vp, NULL);
1499 mutex_exit(vp->v_interlock);
1500
1501 vfs_insmntque(vp, NULL);
1502 if (vp->v_type == VBLK || vp->v_type == VCHR)
1503 spec_node_destroy(vp);
1504
1505 mutex_obj_free(vp->v_interlock);
1506 rw_destroy(&vip->vi_lock);
1507 uvm_obj_destroy(&vp->v_uobj, true);
1508 KASSERT(vp->v_klist == &vip->vi_klist);
1509 klist_fini(&vip->vi_klist.vk_klist);
1510 cv_destroy(&vp->v_cv);
1511 cache_vnode_fini(vp);
1512 pool_cache_put(vcache_pool, vip);
1513 }
1514
1515 /*
1516 * Try to get an initial reference on this cached vnode.
1517 * Returns zero on success or EBUSY if the vnode state is not LOADED.
1518 *
1519 * NB: lockless code sequences may rely on this not blocking.
1520 */
1521 int
1522 vcache_tryvget(vnode_t *vp)
1523 {
1524 u_int use, next;
1525
1526 for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
1527 if (__predict_false((use & VUSECOUNT_GATE) == 0)) {
1528 return EBUSY;
1529 }
1530 next = atomic_cas_uint(&vp->v_usecount,
1531 use, (use + 1) | VUSECOUNT_VGET);
1532 if (__predict_true(next == use)) {
1533 membar_acquire();
1534 return 0;
1535 }
1536 }
1537 }
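/*
 * Illustrative fallback pattern (an assumption about typical callers,
 * e.g. lockless lookups): try the non-blocking path first, then retry
 * under v_interlock with the waiting variant below.
 *
 *	if (vcache_tryvget(vp) != 0) {
 *		mutex_enter(vp->v_interlock);
 *		error = vcache_vget(vp);
 *		(may wait, or return ENOENT)
 *	}
 */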
1538
1539 /*
1540 * Try to get an initial reference on this cached vnode.
1541 * Returns zero on success or ENOENT if the vnode has been reclaimed.
1542 * Will wait for the vnode state to be stable.
1543 *
1544 * v_interlock locked on entry and unlocked on exit.
1545 */
1546 int
1547 vcache_vget(vnode_t *vp)
1548 {
1549 int error;
1550
1551 KASSERT(mutex_owned(vp->v_interlock));
1552
1553 /* Increment hold count to prevent vnode from disappearing. */
1554 vp->v_holdcnt++;
1555 VSTATE_WAIT_STABLE(vp);
1556 vp->v_holdcnt--;
1557
1558 /* If this was the last reference to a reclaimed vnode free it now. */
1559 if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
1560 if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
1561 vcache_free(VNODE_TO_VIMPL(vp));
1562 else
1563 mutex_exit(vp->v_interlock);
1564 return ENOENT;
1565 }
1566 VSTATE_ASSERT(vp, VS_LOADED);
1567 error = vcache_tryvget(vp);
1568 KASSERT(error == 0);
1569 mutex_exit(vp->v_interlock);
1570
1571 return 0;
1572 }
1573
1574 /*
1575 * Get a vnode / fs node pair by key and return it referenced through vpp.
1576 */
1577 int
1578 vcache_get(struct mount *mp, const void *key, size_t key_len,
1579 struct vnode **vpp)
1580 {
1581 int error;
1582 uint32_t hash;
1583 const void *new_key;
1584 struct vnode *vp;
1585 struct vcache_key vcache_key;
1586 vnode_impl_t *vip, *new_vip;
1587
1588 new_key = NULL;
1589 *vpp = NULL;
1590
1591 vcache_key.vk_mount = mp;
1592 vcache_key.vk_key = key;
1593 vcache_key.vk_key_len = key_len;
1594 hash = vcache_hash(&vcache_key);
1595
1596 again:
1597 mutex_enter(&vcache_lock);
1598 vip = vcache_hash_lookup(&vcache_key, hash);
1599
1600 /* If found, take a reference or retry. */
1601 if (__predict_true(vip != NULL)) {
1602 /*
1603 * If the vnode is loading we cannot take the v_interlock
1604 * here as it might change during load (see uvm_obj_setlock()).
1605 * As changing state from VS_LOADING requires both vcache_lock
1606 * and v_interlock it is safe to test with vcache_lock held.
1607 *
1608 * Wait for vnodes changing state from VS_LOADING and retry.
1609 */
1610 if (__predict_false(vip->vi_state == VS_LOADING)) {
1611 cv_wait(&vcache_cv, &vcache_lock);
1612 mutex_exit(&vcache_lock);
1613 goto again;
1614 }
1615 vp = VIMPL_TO_VNODE(vip);
1616 mutex_enter(vp->v_interlock);
1617 mutex_exit(&vcache_lock);
1618 error = vcache_vget(vp);
1619 if (error == ENOENT)
1620 goto again;
1621 if (error == 0)
1622 *vpp = vp;
1623 KASSERT((error != 0) == (*vpp == NULL));
1624 return error;
1625 }
1626 mutex_exit(&vcache_lock);
1627
1628 /* Allocate and initialize a new vcache / vnode pair. */
1629 error = vfs_busy(mp);
1630 if (error)
1631 return error;
1632 new_vip = vcache_alloc();
1633 new_vip->vi_key = vcache_key;
1634 vp = VIMPL_TO_VNODE(new_vip);
1635 mutex_enter(&vcache_lock);
1636 vip = vcache_hash_lookup(&vcache_key, hash);
1637 if (vip == NULL) {
1638 SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
1639 new_vip, vi_hash);
1640 vip = new_vip;
1641 }
1642
1643 /* If another thread beat us inserting this node, retry. */
1644 if (vip != new_vip) {
1645 vcache_dealloc(new_vip);
1646 vfs_unbusy(mp);
1647 goto again;
1648 }
1649 mutex_exit(&vcache_lock);
1650
1651 /* Load the fs node. Exclusive as new_vip is VS_LOADING. */
1652 error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
1653 if (error) {
1654 mutex_enter(&vcache_lock);
1655 SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1656 new_vip, vnode_impl, vi_hash);
1657 vcache_dealloc(new_vip);
1658 vfs_unbusy(mp);
1659 KASSERT(*vpp == NULL);
1660 return error;
1661 }
1662 KASSERT(new_key != NULL);
1663 KASSERT(memcmp(key, new_key, key_len) == 0);
1664 KASSERT(vp->v_op != NULL);
1665 vfs_insmntque(vp, mp);
1666 if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1667 vp->v_vflag |= VV_MPSAFE;
1668 vfs_ref(mp);
1669 vfs_unbusy(mp);
1670
1671 /* Finished loading, finalize node. */
1672 mutex_enter(&vcache_lock);
1673 new_vip->vi_key.vk_key = new_key;
1674 mutex_enter(vp->v_interlock);
1675 VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1676 mutex_exit(vp->v_interlock);
1677 mutex_exit(&vcache_lock);
1678 *vpp = vp;
1679 return 0;
1680 }
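/*
 * Sketch of a typical consumer (the function name and the inode-number
 * key are hypothetical; real file systems use their own names and key
 * layout):
 *
 *	int
 *	xxxfs_vget(struct mount *mp, ino_t ino, int lktype,
 *	    struct vnode **vpp)
 *	{
 *
 *		return vcache_get(mp, &ino, sizeof(ino), vpp);
 *	}
 *
 * with the file-system specific initialisation done in its
 * VFS_LOADVNODE() hook and any locking left to the caller.
 */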
1681
1682 /*
1683 * Create a new vnode / fs node pair and return it referenced through vpp.
1684 */
1685 int
1686 vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
1687 kauth_cred_t cred, void *extra, struct vnode **vpp)
1688 {
1689 int error;
1690 uint32_t hash;
1691 struct vnode *vp, *ovp;
1692 vnode_impl_t *vip, *ovip;
1693
1694 *vpp = NULL;
1695
1696 /* Allocate and initialize a new vcache / vnode pair. */
1697 error = vfs_busy(mp);
1698 if (error)
1699 return error;
1700 vip = vcache_alloc();
1701 vip->vi_key.vk_mount = mp;
1702 vp = VIMPL_TO_VNODE(vip);
1703
1704 /* Create and load the fs node. */
1705 error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
1706 &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
1707 if (error) {
1708 mutex_enter(&vcache_lock);
1709 vcache_dealloc(vip);
1710 vfs_unbusy(mp);
1711 KASSERT(*vpp == NULL);
1712 return error;
1713 }
1714 KASSERT(vp->v_op != NULL);
1715 KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
1716 if (vip->vi_key.vk_key_len > 0) {
1717 KASSERT(vip->vi_key.vk_key != NULL);
1718 hash = vcache_hash(&vip->vi_key);
1719
1720 /*
1721 * Wait for previous instance to be reclaimed,
1722 * then insert new node.
1723 */
1724 mutex_enter(&vcache_lock);
1725 while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
1726 ovp = VIMPL_TO_VNODE(ovip);
1727 mutex_enter(ovp->v_interlock);
1728 mutex_exit(&vcache_lock);
1729 error = vcache_vget(ovp);
1730 KASSERT(error == ENOENT);
1731 mutex_enter(&vcache_lock);
1732 }
1733 SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
1734 vip, vi_hash);
1735 mutex_exit(&vcache_lock);
1736 }
1737 vfs_insmntque(vp, mp);
1738 if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1739 vp->v_vflag |= VV_MPSAFE;
1740 vfs_ref(mp);
1741 vfs_unbusy(mp);
1742
1743 /* Finished loading, finalize node. */
1744 mutex_enter(&vcache_lock);
1745 mutex_enter(vp->v_interlock);
1746 VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
1747 mutex_exit(&vcache_lock);
1748 mutex_exit(vp->v_interlock);
1749 *vpp = vp;
1750 return 0;
1751 }
1752
1753 /*
1754 * Prepare key change: update the old cache node's key and lock the new cache node.
1755 * Return an error if the new node already exists.
1756 */
1757 int
1758 vcache_rekey_enter(struct mount *mp, struct vnode *vp,
1759 const void *old_key, size_t old_key_len,
1760 const void *new_key, size_t new_key_len)
1761 {
1762 uint32_t old_hash, new_hash;
1763 struct vcache_key old_vcache_key, new_vcache_key;
1764 vnode_impl_t *vip, *new_vip;
1765
1766 old_vcache_key.vk_mount = mp;
1767 old_vcache_key.vk_key = old_key;
1768 old_vcache_key.vk_key_len = old_key_len;
1769 old_hash = vcache_hash(&old_vcache_key);
1770
1771 new_vcache_key.vk_mount = mp;
1772 new_vcache_key.vk_key = new_key;
1773 new_vcache_key.vk_key_len = new_key_len;
1774 new_hash = vcache_hash(&new_vcache_key);
1775
1776 new_vip = vcache_alloc();
1777 new_vip->vi_key = new_vcache_key;
1778
1779 /* Insert locked new node used as placeholder. */
1780 mutex_enter(&vcache_lock);
1781 vip = vcache_hash_lookup(&new_vcache_key, new_hash);
1782 if (vip != NULL) {
1783 vcache_dealloc(new_vip);
1784 return EEXIST;
1785 }
1786 SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
1787 new_vip, vi_hash);
1788
1789 /* Replace the old node's key with the temporary copy. */
1790 vip = vcache_hash_lookup(&old_vcache_key, old_hash);
1791 KASSERT(vip != NULL);
1792 KASSERT(VIMPL_TO_VNODE(vip) == vp);
1793 KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
1794 vip->vi_key = old_vcache_key;
1795 mutex_exit(&vcache_lock);
1796 return 0;
1797 }
1798
1799 /*
1800 * Key change complete: update old node and remove placeholder.
1801 */
1802 void
1803 vcache_rekey_exit(struct mount *mp, struct vnode *vp,
1804 const void *old_key, size_t old_key_len,
1805 const void *new_key, size_t new_key_len)
1806 {
1807 uint32_t old_hash, new_hash;
1808 struct vcache_key old_vcache_key, new_vcache_key;
1809 vnode_impl_t *vip, *new_vip;
1810 struct vnode *new_vp;
1811
1812 old_vcache_key.vk_mount = mp;
1813 old_vcache_key.vk_key = old_key;
1814 old_vcache_key.vk_key_len = old_key_len;
1815 old_hash = vcache_hash(&old_vcache_key);
1816
1817 new_vcache_key.vk_mount = mp;
1818 new_vcache_key.vk_key = new_key;
1819 new_vcache_key.vk_key_len = new_key_len;
1820 new_hash = vcache_hash(&new_vcache_key);
1821
1822 mutex_enter(&vcache_lock);
1823
1824 /* Lookup old and new node. */
1825 vip = vcache_hash_lookup(&old_vcache_key, old_hash);
1826 KASSERT(vip != NULL);
1827 KASSERT(VIMPL_TO_VNODE(vip) == vp);
1828
1829 new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
1830 KASSERT(new_vip != NULL);
1831 KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
1832 new_vp = VIMPL_TO_VNODE(new_vip);
1833 mutex_enter(new_vp->v_interlock);
1834 VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
1835 mutex_exit(new_vp->v_interlock);
1836
1837 /* Rekey old node and put it onto its new hashlist. */
1838 vip->vi_key = new_vcache_key;
1839 if (old_hash != new_hash) {
1840 SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
1841 vip, vnode_impl, vi_hash);
1842 SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
1843 vip, vi_hash);
1844 }
1845
1846 /* Remove new node used as placeholder. */
1847 SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
1848 new_vip, vnode_impl, vi_hash);
1849 vcache_dealloc(new_vip);
1850 }
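/*
 * Illustrative use of the rekey protocol above (the key names are
 * placeholders only):
 *
 *	error = vcache_rekey_enter(mp, vp, &oldkey, sizeof(oldkey),
 *	    &newkey, sizeof(newkey));
 *	if (error)
 *		...	(new key already cached)
 *	... update the file system's own notion of the key ...
 *	vcache_rekey_exit(mp, vp, &oldkey, sizeof(oldkey),
 *	    &newkey, sizeof(newkey));
 */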
1851
1852 /*
1853 * Disassociate the underlying file system from a vnode.
1854 *
1855 * Must be called with vnode locked and will return unlocked.
1856 * Must be called with the interlock held, and will return with it held.
1857 */
1858 static void
1859 vcache_reclaim(vnode_t *vp)
1860 {
1861 lwp_t *l = curlwp;
1862 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
1863 struct mount *mp = vp->v_mount;
1864 uint32_t hash;
1865 uint8_t temp_buf[64], *temp_key;
1866 size_t temp_key_len;
1867 bool recycle;
1868 int error;
1869
1870 KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1871 KASSERT(mutex_owned(vp->v_interlock));
1872 KASSERT(vrefcnt(vp) != 0);
1873
1874 temp_key_len = vip->vi_key.vk_key_len;
1875 /*
1876 * Prevent the vnode from being recycled or brought into use
1877 * while we clean it out.
1878 */
1879 VSTATE_CHANGE(vp, VS_BLOCKED, VS_RECLAIMING);
1880
1881 /*
1882 * Send NOTE_REVOKE now, before we call VOP_RECLAIM(),
1883 * because VOP_RECLAIM() could cause vp->v_klist to
1884 * become invalid. Don't check for interest in NOTE_REVOKE
1885 * here; it's always posted because it sets EV_EOF.
1886 *
1887 * Once it's been posted, reset vp->v_klist to point to
1888 * our own local storage, in case we were sharing with
1889 * someone else.
1890 */
1891 KNOTE(&vp->v_klist->vk_klist, NOTE_REVOKE);
1892 vp->v_klist = &vip->vi_klist;
1893 mutex_exit(vp->v_interlock);
1894
1895 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
1896 mutex_enter(vp->v_interlock);
1897 if ((vp->v_iflag & VI_EXECMAP) != 0) {
1898 cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
1899 }
1900 vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
1901 vp->v_iflag |= VI_DEADCHECK; /* for genfs_getpages() */
1902 mutex_exit(vp->v_interlock);
1903 rw_exit(vp->v_uobj.vmobjlock);
1904
1905 /*
1906 * With vnode state set to reclaiming, purge name cache immediately
1907 * to prevent new handles on vnode, and wait for existing threads
1908 * trying to get a handle to notice VS_RECLAIMED status and abort.
1909 */
1910 cache_purge(vp);
1911
1912 /* Replace the vnode key with a temporary copy. */
1913 if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
1914 temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
1915 } else {
1916 temp_key = temp_buf;
1917 }
1918 if (vip->vi_key.vk_key_len > 0) {
1919 mutex_enter(&vcache_lock);
1920 memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
1921 vip->vi_key.vk_key = temp_key;
1922 mutex_exit(&vcache_lock);
1923 }
1924
1925 fstrans_start(mp);
1926
1927 /*
1928 * Clean out any cached data associated with the vnode.
1929 */
1930 error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
1931 if (error != 0) {
1932 if (wapbl_vphaswapbl(vp))
1933 WAPBL_DISCARD(wapbl_vptomp(vp));
1934 error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
1935 }
1936 KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
1937 KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1938 if (vp->v_type == VBLK || vp->v_type == VCHR) {
1939 spec_node_revoke(vp);
1940 }
1941
1942 /*
1943 * Disassociate the underlying file system from the vnode.
1944 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
1945 * the vnode, and may destroy the vnode so that VOP_UNLOCK
1946 * would no longer function.
1947 */
1948 VOP_INACTIVE(vp, &recycle);
1949 KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1950 if (VOP_RECLAIM(vp)) {
1951 vnpanic(vp, "%s: cannot reclaim", __func__);
1952 }
1953
1954 KASSERT(vp->v_data == NULL);
1955 KASSERT((vp->v_iflag & VI_PAGES) == 0);
1956
1957 if (vp->v_type == VREG && vp->v_ractx != NULL) {
1958 uvm_ra_freectx(vp->v_ractx);
1959 vp->v_ractx = NULL;
1960 }
1961
1962 if (vip->vi_key.vk_key_len > 0) {
1963 /* Remove from vnode cache. */
1964 hash = vcache_hash(&vip->vi_key);
1965 mutex_enter(&vcache_lock);
1966 KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
1967 SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
1968 vip, vnode_impl, vi_hash);
1969 mutex_exit(&vcache_lock);
1970 }
1971 if (temp_key != temp_buf)
1972 kmem_free(temp_key, temp_key_len);
1973
1974 /* Done with purge, notify sleepers of the grim news. */
1975 mutex_enter(vp->v_interlock);
1976 vp->v_op = dead_vnodeop_p;
1977 VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
1978 vp->v_tag = VT_NON;
1979 mutex_exit(vp->v_interlock);
1980
1981 /*
1982 * Move to dead mount. Must be after changing the operations
1983 * vector as vnode operations enter the mount before using the
1984 * operations vector. See sys/kern/vnode_if.c.
1985 */
1986 vp->v_vflag &= ~VV_ROOT;
1987 vfs_ref(dead_rootmount);
1988 vfs_insmntque(vp, dead_rootmount);
1989
1990 #ifdef PAX_SEGVGUARD
1991 pax_segvguard_cleanup(vp);
1992 #endif /* PAX_SEGVGUARD */
1993
1994 mutex_enter(vp->v_interlock);
1995 fstrans_done(mp);
1996 KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1997 }
1998
1999 /*
2000 * Disassociate the underlying file system from an open device vnode
2001 * and make it anonymous.
2002 *
2003 * Vnode unlocked on entry, drops a reference to the vnode.
2004 */
2005 void
2006 vcache_make_anon(vnode_t *vp)
2007 {
2008 vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
2009 uint32_t hash;
2010 bool recycle;
2011
2012 KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
2013 KASSERT(vp->v_mount == dead_rootmount ||
2014 fstrans_is_owner(vp->v_mount));
2015 VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);
2016
2017 /* Remove from vnode cache. */
2018 hash = vcache_hash(&vip->vi_key);
2019 mutex_enter(&vcache_lock);
2020 KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
2021 SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
2022 vip, vnode_impl, vi_hash);
2023 vip->vi_key.vk_mount = dead_rootmount;
2024 vip->vi_key.vk_key_len = 0;
2025 vip->vi_key.vk_key = NULL;
2026 mutex_exit(&vcache_lock);
2027
2028 /*
2029 * Disassociate the underlying file system from the vnode.
2030 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
2031 * the vnode, and may destroy the vnode so that VOP_UNLOCK
2032 * would no longer function.
2033 */
2034 if (vn_lock(vp, LK_EXCLUSIVE)) {
2035 vnpanic(vp, "%s: cannot lock", __func__);
2036 }
2037 VOP_INACTIVE(vp, &recycle);
2038 KASSERT(VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
2039 if (VOP_RECLAIM(vp)) {
2040 vnpanic(vp, "%s: cannot reclaim", __func__);
2041 }
2042
2043 /* Purge name cache. */
2044 cache_purge(vp);
2045
2046 /* Done with purge, change operations vector. */
2047 mutex_enter(vp->v_interlock);
2048 vp->v_op = spec_vnodeop_p;
2049 vp->v_vflag |= VV_MPSAFE;
2050 mutex_exit(vp->v_interlock);
2051
2052 /*
2053 * Move to dead mount. Must be after changing the operations
2054 * vector as vnode operations enter the mount before using the
2055 * operations vector. See sys/kern/vnode_if.c.
2056 */
2057 vfs_ref(dead_rootmount);
2058 vfs_insmntque(vp, dead_rootmount);
2059
2060 vrele(vp);
2061 }
2062
2063 /*
2064 * Update outstanding I/O count and do wakeup if requested.
2065 */
2066 void
2067 vwakeup(struct buf *bp)
2068 {
2069 vnode_t *vp;
2070
2071 if ((vp = bp->b_vp) == NULL)
2072 return;
2073
2074 KASSERT(bp->b_objlock == vp->v_interlock);
2075 KASSERT(mutex_owned(bp->b_objlock));
2076
2077 if (--vp->v_numoutput < 0)
2078 vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
2079 if (vp->v_numoutput == 0)
2080 cv_broadcast(&vp->v_cv);
2081 }
2082
2083 /*
2084 * Test a vnode for being or becoming dead. Returns one of:
2085 * EBUSY: vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
2086 * ENOENT: vnode is dead.
2087 * 0: otherwise.
2088 *
2089 * Whenever this function returns a non-zero value all future
2090 * calls will also return a non-zero value.
2091 */
2092 int
2093 vdead_check(struct vnode *vp, int flags)
2094 {
2095
2096 KASSERT(mutex_owned(vp->v_interlock));
2097
2098 if (! ISSET(flags, VDEAD_NOWAIT))
2099 VSTATE_WAIT_STABLE(vp);
2100
2101 if (VSTATE_GET(vp) == VS_RECLAIMING) {
2102 KASSERT(ISSET(flags, VDEAD_NOWAIT));
2103 return EBUSY;
2104 } else if (VSTATE_GET(vp) == VS_RECLAIMED) {
2105 return ENOENT;
2106 }
2107
2108 return 0;
2109 }
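/*
 * Illustrative caller (a sketch only): check for revocation under
 * v_interlock before using a possibly revoked vnode.
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		return ENXIO;	(or map the error as appropriate)
 */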
2110
2111 int
2112 vfs_drainvnodes(void)
2113 {
2114
2115 mutex_enter(&vdrain_lock);
2116
2117 if (!vdrain_one(desiredvnodes)) {
2118 mutex_exit(&vdrain_lock);
2119 return EBUSY;
2120 }
2121
2122 mutex_exit(&vdrain_lock);
2123
2124 if (vcache_hashsize != desiredvnodes)
2125 vcache_reinit();
2126
2127 return 0;
2128 }
2129
2130 void
2131 vnpanic(vnode_t *vp, const char *fmt, ...)
2132 {
2133 va_list ap;
2134
2135 #ifdef DIAGNOSTIC
2136 vprint(NULL, vp);
2137 #endif
2138 va_start(ap, fmt);
2139 vpanic(fmt, ap);
2140 va_end(ap);
2141 }
2142
2143 void
2144 vshareilock(vnode_t *tvp, vnode_t *fvp)
2145 {
2146 kmutex_t *oldlock;
2147
2148 oldlock = tvp->v_interlock;
2149 mutex_obj_hold(fvp->v_interlock);
2150 tvp->v_interlock = fvp->v_interlock;
2151 mutex_obj_free(oldlock);
2152 }
2153
2154 void
2155 vshareklist(vnode_t *tvp, vnode_t *fvp)
2156 {
2157 /*
2158 * If two vnodes share klist state, they must also share
2159 * an interlock.
2160 */
2161 KASSERT(tvp->v_interlock == fvp->v_interlock);
2162
2163 /*
2164 * We make the following assumptions:
2165 *
2166 * ==> Some other synchronization is happening outside of
2167 * our view to make this safe.
2168 *
2169 * ==> That the "to" vnode will have the necessary references
2170 * on the "from" vnode so that the storage for the klist
2171 * won't be yanked out from beneath us (the vnode_impl).
2172 *
2173 * ==> If "from" is also sharing, we then assume that "from"
2174 * has the necessary references, and so on.
2175 */
2176 tvp->v_klist = fvp->v_klist;
2177 }
2178