/*	$NetBSD: vfs_vnode.c,v 1.121 2020/04/19 13:25:00 hannken Exp $	*/

/*-
 * Copyright (c) 1997-2011, 2019, 2020 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vcache_vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was another, traditional way.  Currently, only the draining thread
 *	recycles vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
 *	the file system that the vnode is inactive.  Via this call, the file
 *	system indicates whether the vnode can be recycled (usually, it
 *	checks its own references, e.g. count of links, whether the file
 *	was removed).
 *
 *	Depending on that indication, the vnode can be put onto a free list
 *	(cache), or cleaned via vcache_reclaim, which calls VOP_RECLAIM(9)
 *	to disassociate the underlying file system from the vnode, and
 *	finally destroyed.
 *
 * Vnode state
 *
 *	A vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating with the underlying file system
 *			and is not yet ready to use.
 *	- LOADED	Vnode has an associated underlying file system and is
 *			ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from the underlying file
 *			system and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> LOADED
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	LOADED -> RECLAIMING
 *			Vnode starts disassociation from the underlying file
 *			system in vcache_reclaim().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from the underlying file
 *			system in vcache_reclaim().
 *	LOADED -> BLOCKED
 *			Either vcache_rekey*() is changing the vnode key or
 *			vrelel() is about to call VOP_INACTIVE().
 *	BLOCKED -> LOADED
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system, or
 *			vcache_rekey*() drops a vnode used as a placeholder.
 *
 *	Of these states LOADING, BLOCKED and RECLAIMING are intermediate
 *	and it is possible to wait for a state change.
 *
 *	State is protected with v_interlock with one exception:
 *	to change from LOADING both v_interlock and vcache_lock must be held
 *	so it is possible to check "state == LOADING" without holding
 *	v_interlock.  See vcache_get() for details.
 *
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained using
 *	vref(9) and vrele(9), as well as vput(9).  Common points holding
 *	references are, e.g., file openings, the current working directory,
 *	mount points, etc.
 *
 * Note on v_usecount and its locking
 *
 *	At nearly all points where it is known that v_usecount could be
 *	zero, vnode_t::v_interlock will be held.  To change the count away
 *	from zero, the interlock must be held.  To change from a non-zero
 *	value to zero, again the interlock must be held.
 *
 *	Changing the usecount from a non-zero value to a non-zero value can
 *	safely be done using atomic operations, without the interlock held.
 */
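
/*
 * Illustration only (not part of the original file): a minimal sketch of
 * the common life-cycle above, as seen from a file system.  The "myfs"
 * names and the ino_t key are invented for this example.  vcache_get()
 * returns a referenced, unlocked vnode; vput(9) drops both the vnode
 * lock and the reference.
 *
 *	int
 *	myfs_ino_to_vnode(struct mount *mp, ino_t ino, struct vnode **vpp)
 *	{
 *		int error;
 *
 *		error = vcache_get(mp, &ino, sizeof(ino), vpp);
 *		if (error != 0)
 *			return error;
 *		vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY);
 *
 *		... operate on the locked, referenced vnode ...
 *
 *		vput(*vpp);
 *		return 0;
 *	}
 */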

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.121 2020/04/19 13:25:00 hannken Exp $");

#ifdef _KERNEL_OPT
#include "opt_pax.h"
#endif

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pax.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode_impl.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_stat.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC	0x0001	/* Always defer to vrele thread. */

#define	LRU_VRELE	0
#define	LRU_FREE	1
#define	LRU_HOLD	2
#define	LRU_COUNT	3

/*
 * There are three lru lists: one holds vnodes waiting for async release,
 * one is for vnodes which have no buffer/page references and one for those
 * which do (i.e. v_holdcnt is non-zero).  We put the lists into a single,
 * private cache line as vnodes migrate between them while under the same
 * lock (vdrain_lock).
 */
u_int			numvnodes		__cacheline_aligned;
static vnodelst_t	lru_list[LRU_COUNT]	__cacheline_aligned;
static kmutex_t		vdrain_lock		__cacheline_aligned;
static kcondvar_t	vdrain_cv;
static int		vdrain_gen;
static kcondvar_t	vdrain_gen_cv;
static bool		vdrain_retry;
static lwp_t *		vdrain_lwp;
SLIST_HEAD(hashhead, vnode_impl);
static kmutex_t		vcache_lock		__cacheline_aligned;
static kcondvar_t	vcache_cv;
static u_int		vcache_hashsize;
static u_long		vcache_hashmask;
static struct hashhead	*vcache_hashtab;
static pool_cache_t	vcache_pool;
static void		lru_requeue(vnode_t *, vnodelst_t *);
static vnodelst_t *	lru_which(vnode_t *);
static vnode_impl_t *	vcache_alloc(void);
static void		vcache_dealloc(vnode_impl_t *);
static void		vcache_free(vnode_impl_t *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vcache_reclaim(vnode_t *);
static void		vrelel(vnode_t *, int, int);
static void		vdrain_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount	*dead_rootmount;
extern int		(**dead_vnodeop_p)(void *);
extern int		(**spec_vnodeop_p)(void *);
extern struct vfsops	dead_vfsops;

/*
 * Return the current usecount of a vnode.
 */
inline int
vrefcnt(struct vnode *vp)
{

	return atomic_load_relaxed(&vp->v_usecount);
}

/* Vnode state operations and diagnostics. */

#if defined(DIAGNOSTIC)

#define VSTATE_VALID(state) \
	((state) != VS_ACTIVE && (state) != VS_MARKER)
#define VSTATE_GET(vp) \
	vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
	vstate_assert_wait_stable((vp), __func__, __LINE__)

void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	int refcnt = vrefcnt(vp);

	if (!has_lock) {
		/*
		 * Prevent predictive loads from the CPU, but check the state
		 * without looking first.
		 */
		membar_enter();
		if (state == VS_ACTIVE && refcnt > 0 &&
		    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
			return;
		if (vip->vi_state == state)
			return;
		mutex_enter((vp)->v_interlock);
	}

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if ((state == VS_ACTIVE && refcnt > 0 &&
	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
	    vip->vi_state == state) {
		if (!has_lock)
			mutex_exit((vp)->v_interlock);
		return;
	}
	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
	    vstate_name(vip->vi_state), refcnt,
	    vstate_name(state), func, line);
}

static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	return vip->vi_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);
}

static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	int refcnt = vrefcnt(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);

	if (! VSTATE_VALID(from))
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (! VSTATE_VALID(to))
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (vip->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(vip->vi_state), vstate_name(from), func, line);
	if ((from == VS_BLOCKED || to == VS_BLOCKED) && refcnt != 1)
		vnpanic(vp, "%s to %s with usecount %d at %s:%d",
		    vstate_name(from), vstate_name(to), refcnt,
		    func, line);

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
	(VNODE_TO_VIMPL((vp))->vi_state)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
	vstate_wait_stable((vp))
void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{

}

static void
vstate_wait_stable(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */

void
vfs_vnode_sysinit(void)
{
	int error __diagused, i;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag |= IMNT_MPSAFE;

	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
	for (i = 0; i < LRU_COUNT; i++) {
		TAILQ_INIT(&lru_list[i]);
	}
	vcache_init();

	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vdrain_gen_cv, "vdrainwt");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, &vdrain_lwp, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	memset(vip, 0, sizeof(*vip));
	vp = VIMPL_TO_VNODE(vip);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);
	vip->vi_state = VS_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	vnode_impl_t *vip;

	vip = VNODE_TO_VIMPL(vp);
	KASSERT(vip->vi_state == VS_MARKER);
	mutex_obj_free(vp->v_interlock);
	uvm_obj_destroy(&vp->v_uobj, true);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
}

/*
 * Return the lru list this node should be on.
 */
static vnodelst_t *
lru_which(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt > 0)
		return &lru_list[LRU_HOLD];
	else
		return &lru_list[LRU_FREE];
}

/*
 * Put vnode to end of given list.
 * Both the current and the new list may be NULL, used on vnode alloc/free.
 * Adjust numvnodes and signal vdrain thread if there is work.
 */
static void
lru_requeue(vnode_t *vp, vnodelst_t *listhd)
{
	vnode_impl_t *vip;
	int d;

	/*
	 * If the vnode is on the correct list, and was put there recently,
	 * then leave it be, thus avoiding huge cache and lock contention.
	 */
	vip = VNODE_TO_VIMPL(vp);
	if (listhd == vip->vi_lrulisthd &&
	    (getticks() - vip->vi_lrulisttm) < hz) {
		return;
	}

	mutex_enter(&vdrain_lock);
	d = 0;
	if (vip->vi_lrulisthd != NULL)
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d++;
	vip->vi_lrulisthd = listhd;
	vip->vi_lrulisttm = getticks();
	if (vip->vi_lrulisthd != NULL)
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d--;
	if (d != 0) {
		/*
		 * Looks strange?  This is not a bug.  Don't store
		 * numvnodes unless there is a change - avoid false
		 * sharing on MP.
		 */
		numvnodes += d;
	}
	if ((d > 0 && numvnodes > desiredvnodes) ||
	    listhd == &lru_list[LRU_VRELE])
		cv_signal(&vdrain_cv);
	mutex_exit(&vdrain_lock);
}

/*
 * Release deferred vrele vnodes for this mount.
 * Called with file system suspended.
 */
void
vrele_flush(struct mount *mp)
{
	vnode_impl_t *vip, *marker;
	vnode_t *vp;

	KASSERT(fstrans_is_owner(mp));

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);
	TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);

	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
		TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
		TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
		    vi_lrulist);
		vp = VIMPL_TO_VNODE(vip);
		if (vnis_marker(vp))
			continue;

		KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
		vip->vi_lrulisthd = &lru_list[LRU_HOLD];
		vip->vi_lrulisttm = getticks();
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
		mutex_exit(&vdrain_lock);

		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		mutex_enter(vp->v_interlock);
		vrelel(vp, 0, LK_EXCLUSIVE);

		mutex_enter(&vdrain_lock);
	}

	TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
	mutex_exit(&vdrain_lock);

	vnfree_marker(VIMPL_TO_VNODE(marker));
}

/*
 * Reclaim a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_remove(vnode_t *vp)
{
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	/* Probe usecount (unlocked). */
	if (vrefcnt(vp) > 0)
		return;
	/* Try v_interlock -- we lock the wrong direction! */
	if (!mutex_tryenter(vp->v_interlock))
		return;
	/* Probe usecount and state. */
	if (vrefcnt(vp) > 0 || VSTATE_GET(vp) != VS_LOADED) {
		mutex_exit(vp->v_interlock);
		return;
	}
	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0) {
		mutex_exit(vp->v_interlock);
		return;
	}
	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	if (vcache_vget(vp) == 0) {
		if (!vrecycle(vp)) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			mutex_enter(vp->v_interlock);
			vrelel(vp, 0, LK_EXCLUSIVE);
		}
	}
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Release a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_vrele(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0)
		return;

	/*
	 * First remove the vnode from the vrele list.
	 * Put it on the last lru list, the last vrele()
	 * will put it back onto the right list before
	 * its usecount reaches zero.
	 */
	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	vip->vi_lrulisthd = &lru_list[LRU_HOLD];
	vip->vi_lrulisttm = getticks();
	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);

	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, LK_EXCLUSIVE);
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes
 * and release vnodes from asynchronous vrele.
 */
static void
vdrain_thread(void *cookie)
{
	int i;
	u_int target;
	vnode_impl_t *vip, *marker;

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);

	for (;;) {
		vdrain_retry = false;
		target = desiredvnodes - desiredvnodes/10;

		for (i = 0; i < LRU_COUNT; i++) {
			TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
				TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
				TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
				    vi_lrulist);
				if (vnis_marker(VIMPL_TO_VNODE(vip)))
					continue;
				if (i == LRU_VRELE)
					vdrain_vrele(VIMPL_TO_VNODE(vip));
				else if (numvnodes < target)
					break;
				else
					vdrain_remove(VIMPL_TO_VNODE(vip));
			}
			TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
		}

		if (vdrain_retry) {
			kpause("vdrainrt", false, 1, &vdrain_lock);
		} else {
			vdrain_gen++;
			cv_broadcast(&vdrain_gen_cv);
			cv_wait(&vdrain_cv, &vdrain_lock);
		}
	}
}

/*
 * Try to drop reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = atomic_load_relaxed(&vp->v_usecount);; use = next) {
		if (__predict_false(use == 1)) {
			return false;
		}
		KASSERT(use > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{
	int lktype;

	/*
	 * Do an unlocked check of the usecount.  If it looks like we're not
	 * about to drop the last reference, then unlock the vnode and try
	 * to drop the reference.  If it ends up being the last reference
	 * after all, vrelel() can fix it all up.  Most of the time this
	 * will all go to plan.
	 */
	if (vrefcnt(vp) > 1) {
		VOP_UNLOCK(vp);
		if (vtryrele(vp)) {
			return;
		}
		lktype = LK_NONE;
	} else if ((vp->v_vflag & VV_LOCKSWORK) == 0) {
		lktype = LK_EXCLUSIVE;
	} else {
		lktype = VOP_ISLOCKED(vp);
		KASSERT(lktype != LK_NONE);
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, lktype);
}

/*
 * Vnode release.  If reference count drops to zero, call inactive
 * routine and either return to freelist or free to the pool.
 */
static void
vrelel(vnode_t *vp, int flags, int lktype)
{
	const bool async = ((flags & VRELEL_ASYNC) != 0);
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VS_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count and
	 * unlock.  VOP_UNLOCK() is called here without a vnode reference
	 * held, but that is OK as the hold of v_interlock will stop the
	 * vnode from disappearing.
	 */
	if (vtryrele(vp)) {
		if (lktype != LK_NONE) {
			VOP_UNLOCK(vp);
		}
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vrefcnt(vp) <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * First try to get the vnode locked for VOP_INACTIVE().
	 * Defer vnode release to vdrain_thread if the caller requests
	 * it explicitly, is the pagedaemon, or if the lock attempt failed.
	 */
	defer = false;
	if ((curlwp == uvm.pagedaemon_lwp) || async) {
		defer = true;
	} else if (lktype == LK_SHARED) {
		/* Excellent chance of getting the lock, if the last ref. */
		error = vn_lock(vp, LK_UPGRADE | LK_RETRY |
		    LK_NOWAIT);
		if (error != 0) {
			defer = true;
		} else {
			lktype = LK_EXCLUSIVE;
		}
	} else if (lktype == LK_NONE) {
		/* Excellent chance of getting the lock, if the last ref. */
		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY |
		    LK_NOWAIT);
		if (error != 0) {
			defer = true;
		} else {
			lktype = LK_EXCLUSIVE;
		}
	}
	KASSERT(mutex_owned(vp->v_interlock));
	if (defer) {
		/*
		 * Defer reclaim to the kthread; it's not safe to
		 * clean it here.  We donate it our last reference.
		 */
		if (lktype != LK_NONE) {
			VOP_UNLOCK(vp);
		}
		lru_requeue(vp, &lru_list[LRU_VRELE]);
		mutex_exit(vp->v_interlock);
		return;
	}
	KASSERT(lktype == LK_EXCLUSIVE);

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		VOP_UNLOCK(vp);
	} else {
		/*
		 * The vnode must not gain another reference while being
		 * deactivated.  If VOP_INACTIVE() indicates that
		 * the described file has been deleted, then recycle
		 * the vnode.
		 *
		 * Note that VOP_INACTIVE() will not drop the vnode lock.
		 */
		mutex_exit(vp->v_interlock);
		recycle = false;
		VOP_INACTIVE(vp, &recycle);
		rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
		mutex_enter(vp->v_interlock);
		if (vtryrele(vp)) {
			VOP_UNLOCK(vp);
			mutex_exit(vp->v_interlock);
			rw_exit(vp->v_uobj.vmobjlock);
			return;
		}

		/* Take care of space accounting. */
		if ((vp->v_iflag & VI_EXECMAP) != 0 &&
		    vp->v_uobj.uo_npages != 0) {
			cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
			cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;
		rw_exit(vp->v_uobj.vmobjlock);

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			VSTATE_ASSERT(vp, VS_LOADED);
			/* vcache_reclaim() drops the lock. */
			vcache_reclaim(vp);
		} else {
			VOP_UNLOCK(vp);
		}
		KASSERT(vrefcnt(vp) > 0);
	}

	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(vp->v_interlock);
		return;
	}

	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		vcache_free(VNODE_TO_VIMPL(vp));
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		lru_requeue(vp, lru_which(vp));
		mutex_exit(vp->v_interlock);
	}
}
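
/*
 * Illustration only (not part of the original file): a hypothetical
 * file system's inactive routine, showing where the "recycle"
 * indication consumed by vrelel() above comes from -- it requests
 * recycling once the file has no links left.  The "myfs" names are
 * invented, and the vop_inactive_v2_args form assumes the
 * VOP_INACTIVE(9) protocol used here, where the vnode stays locked.
 *
 *	static int
 *	myfs_inactive(void *v)
 *	{
 *		struct vop_inactive_v2_args *ap = v;
 *		struct vnode *vp = ap->a_vp;
 *		struct myfs_node *np = vp->v_data;
 *
 *		*ap->a_recycle = (np->mn_nlink == 0);
 *		return 0;
 *	}
 */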

void
vrele(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, LK_NONE);
}

/*
 * Asynchronous vnode release, vnode is released in different context.
 */
void
vrele_async(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC, LK_NONE);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 *
 * NB: we have lockless code sequences that rely on this not blocking.
 */
void
vref(vnode_t *vp)
{

	KASSERT(vrefcnt(vp) > 0);

	atomic_inc_uint(&vp->v_usecount);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt++ == 0 && vrefcnt(vp) == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vnode_t *vp)
{

	mutex_enter(vp->v_interlock);
	vholdl(vp);
	mutex_exit(vp->v_interlock);
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vnode_t *vp)
{

	mutex_enter(vp->v_interlock);
	holdrelel(vp);
	mutex_exit(vp->v_interlock);
}

/*
 * Recycle an unused vnode if caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{
	int error __diagused;

	mutex_enter(vp->v_interlock);

	/* Make sure we hold the last reference. */
	VSTATE_WAIT_STABLE(vp);
	if (vrefcnt(vp) != 1) {
		mutex_exit(vp->v_interlock);
		return false;
	}

	/* If the vnode is already clean we're done. */
	if (VSTATE_GET(vp) != VS_LOADED) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrelel(vp, 0, LK_NONE);
		return true;
	}

	/* Prevent further references until the vnode is locked. */
	VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
	mutex_exit(vp->v_interlock);

	/*
	 * On a leaf file system this lock will always succeed as we hold
	 * the last reference and prevent further references.
	 * On layered file systems waiting for the lock would open a can of
	 * deadlocks as the lower vnodes may have other active references.
	 */
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);

	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);

	if (error) {
		mutex_exit(vp->v_interlock);
		return false;
	}

	KASSERT(vrefcnt(vp) == 1);
	vcache_reclaim(vp);
	vrelel(vp, 0, LK_NONE);

	return true;
}

/*
 * Helper for vrevoke() to propagate suspension from lastmp
 * to thismp.  Both args may be NULL.
 * Returns the currently suspended file system or NULL.
 */
static struct mount *
vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
{
	int error;

	if (lastmp == thismp)
		return thismp;

	if (lastmp != NULL)
		vfs_resume(lastmp);

	if (thismp == NULL)
		return NULL;

	do {
		error = vfs_suspend(thismp, 0);
	} while (error == EINTR || error == ERESTART);

	if (error == 0)
		return thismp;

	KASSERT(error == EOPNOTSUPP);
	return NULL;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	struct mount *mp;
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vrefcnt(vp) > 0);

	mp = vrevoke_suspend_next(NULL, vp->v_mount);

	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		mutex_exit(vp->v_interlock);
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		mutex_exit(vp->v_interlock);
		vgone(vp);
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);

		while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
			mp = vrevoke_suspend_next(mp, vq->v_mount);
			vgone(vq);
		}
	}
	vrevoke_suspend_next(mp, NULL);
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{
	int lktype;

	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	lktype = LK_EXCLUSIVE;
	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_LOADED) {
		vcache_reclaim(vp);
		lktype = LK_NONE;
	}
	VSTATE_ASSERT(vp, VS_RECLAIMED);
	vrelel(vp, 0, lktype);
}

static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
	uint32_t hash = HASH32_BUF_INIT;

	KASSERT(key->vk_key_len > 0);

	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
	return hash;
}

static void
vcache_init(void)
{

	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
	    0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vcache_pool != NULL);
	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vcache_cv, "vcache");
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
	    &vcache_hashmask);
}

static void
vcache_reinit(void)
{
	int i;
	uint32_t hash;
	u_long oldmask, newmask;
	struct hashhead *oldtab, *newtab;
	vnode_impl_t *vip;

	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
	mutex_enter(&vcache_lock);
	oldtab = vcache_hashtab;
	oldmask = vcache_hashmask;
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = newtab;
	vcache_hashmask = newmask;
	for (i = 0; i <= oldmask; i++) {
		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
			hash = vcache_hash(&vip->vi_key);
			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
			    vip, vi_hash);
		}
	}
	mutex_exit(&vcache_lock);
	hashdone(oldtab, HASH_SLIST, oldmask);
}

static inline vnode_impl_t *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
	struct hashhead *hashp;
	vnode_impl_t *vip;

	KASSERT(mutex_owned(&vcache_lock));

	hashp = &vcache_hashtab[hash & vcache_hashmask];
	SLIST_FOREACH(vip, hashp, vi_hash) {
		if (key->vk_mount != vip->vi_key.vk_mount)
			continue;
		if (key->vk_key_len != vip->vi_key.vk_key_len)
			continue;
		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
			continue;
		return vip;
	}
	return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static vnode_impl_t *
vcache_alloc(void)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	vp = VIMPL_TO_VNODE(vip);
	memset(vip, 0, sizeof(*vip));

	rw_init(&vip->vi_lock);
	vp->v_interlock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_NONE);

	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 1);
	cv_init(&vp->v_cv, "vnode");
	cache_vnode_init(vp);

	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	vip->vi_state = VS_LOADING;

	lru_requeue(vp, &lru_list[LRU_FREE]);

	return vip;
}

/*
 * Deallocate a vcache node in state VS_LOADING.
 *
 * vcache_lock held on entry and released on return.
 */
static void
vcache_dealloc(vnode_impl_t *vip)
{
	vnode_t *vp;

	KASSERT(mutex_owned(&vcache_lock));

	vp = VIMPL_TO_VNODE(vip);
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
	mutex_exit(&vcache_lock);
	vrelel(vp, 0, LK_NONE);
}

/*
 * Free an unused, unreferenced vcache node.
 * v_interlock locked on entry.
 */
static void
vcache_free(vnode_impl_t *vip)
{
	vnode_t *vp;

	vp = VIMPL_TO_VNODE(vip);
	KASSERT(mutex_owned(vp->v_interlock));

	KASSERT(vrefcnt(vp) == 0);
	KASSERT(vp->v_holdcnt == 0);
	KASSERT(vp->v_writecount == 0);
	lru_requeue(vp, NULL);
	mutex_exit(vp->v_interlock);

	vfs_insmntque(vp, NULL);
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		spec_node_destroy(vp);

	mutex_obj_free(vp->v_interlock);
	rw_destroy(&vip->vi_lock);
	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	cache_vnode_fini(vp);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success, ENOENT if the vnode has been reclaimed and
 * EBUSY if the vnode state is unstable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_tryvget(vnode_t *vp)
{
	int error = 0;

	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED))
		error = ENOENT;
	else if (__predict_false(VSTATE_GET(vp) != VS_LOADED))
		error = EBUSY;
	else if (vp->v_usecount == 0)
		vp->v_usecount = 1;
	else
		atomic_inc_uint(&vp->v_usecount);

	mutex_exit(vp->v_interlock);

	return error;
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success and ENOENT if the vnode has been reclaimed.
 * Will wait for the vnode state to be stable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_vget(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	/* Increment hold count to prevent vnode from disappearing. */
	vp->v_holdcnt++;
	VSTATE_WAIT_STABLE(vp);
	vp->v_holdcnt--;

	/* If this was the last reference to a reclaimed vnode free it now. */
	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
		if (vp->v_holdcnt == 0 && vrefcnt(vp) == 0)
			vcache_free(VNODE_TO_VIMPL(vp));
		else
			mutex_exit(vp->v_interlock);
		return ENOENT;
	}
	VSTATE_ASSERT(vp, VS_LOADED);
	if (vp->v_usecount == 0)
		vp->v_usecount = 1;
	else
		atomic_inc_uint(&vp->v_usecount);
	mutex_exit(vp->v_interlock);

	return 0;
}

/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
	int error;
	uint32_t hash;
	const void *new_key;
	struct vnode *vp;
	struct vcache_key vcache_key;
	vnode_impl_t *vip, *new_vip;

	new_key = NULL;
	*vpp = NULL;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

again:
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);

	/* If found, take a reference or retry. */
	if (__predict_true(vip != NULL)) {
		/*
		 * If the vnode is loading we cannot take the v_interlock
		 * here as it might change during load (see uvm_obj_setlock()).
		 * As changing state from VS_LOADING requires both vcache_lock
		 * and v_interlock it is safe to test with vcache_lock held.
		 *
		 * Wait for vnodes changing state from VS_LOADING and retry.
		 */
		if (__predict_false(vip->vi_state == VS_LOADING)) {
			cv_wait(&vcache_cv, &vcache_lock);
			mutex_exit(&vcache_lock);
			goto again;
		}
		vp = VIMPL_TO_VNODE(vip);
		mutex_enter(vp->v_interlock);
		mutex_exit(&vcache_lock);
		error = vcache_vget(vp);
		if (error == ENOENT)
			goto again;
		if (error == 0)
			*vpp = vp;
		KASSERT((error != 0) == (*vpp == NULL));
		return error;
	}
	mutex_exit(&vcache_lock);

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp);
	if (error)
		return error;
	new_vip = vcache_alloc();
	new_vip->vi_key = vcache_key;
	vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);
	if (vip == NULL) {
		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vi_hash);
		vip = new_vip;
	}

	/* If another thread beat us inserting this node, retry. */
	if (vip != new_vip) {
		vcache_dealloc(new_vip);
		vfs_unbusy(mp);
		goto again;
	}
	mutex_exit(&vcache_lock);

	/* Load the fs node.  Exclusive as new_vip is VS_LOADING. */
	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
	if (error) {
		mutex_enter(&vcache_lock);
		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vnode_impl, vi_hash);
		vcache_dealloc(new_vip);
		vfs_unbusy(mp);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_key != NULL);
	KASSERT(memcmp(key, new_key, key_len) == 0);
	KASSERT(vp->v_op != NULL);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_ref(mp);
	vfs_unbusy(mp);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	new_vip->vi_key.vk_key = new_key;
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache_lock);
	*vpp = vp;
	return 0;
}
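
/*
 * Illustration only (not part of the original file): a hypothetical
 * VFS_LOADVNODE() implementation sketching the contract vcache_get()
 * relies on above.  The "myfs" names are invented.  The file system
 * attaches its private data and returns, through new_key, a pointer to
 * a stable copy of the key with the same lifetime as the vnode; the
 * KASSERTs in vcache_get() check exactly this.
 *
 *	static int
 *	myfs_loadvnode(struct mount *mp, struct vnode *vp,
 *	    const void *key, size_t key_len, const void **new_key)
 *	{
 *		struct myfs_node *np;
 *
 *		KASSERT(key_len == sizeof(np->mn_ino));
 *		np = myfs_read_node(mp, *(const ino_t *)key);
 *		if (np == NULL)
 *			return ENOENT;
 *		np->mn_ino = *(const ino_t *)key;
 *		vp->v_data = np;
 *		vp->v_op = myfs_vnodeop_p;
 *		vp->v_type = VREG;
 *		uvm_vnp_setsize(vp, np->mn_size);
 *		*new_key = &np->mn_ino;
 *		return 0;
 *	}
 */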

/*
 * Create a new vnode / fs node pair and return it referenced through vpp.
 */
int
vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
    kauth_cred_t cred, void *extra, struct vnode **vpp)
{
	int error;
	uint32_t hash;
	struct vnode *vp, *ovp;
	vnode_impl_t *vip, *ovip;

	*vpp = NULL;

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp);
	if (error)
		return error;
	vip = vcache_alloc();
	vip->vi_key.vk_mount = mp;
	vp = VIMPL_TO_VNODE(vip);

	/* Create and load the fs node. */
	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
	if (error) {
		mutex_enter(&vcache_lock);
		vcache_dealloc(vip);
		vfs_unbusy(mp);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(vp->v_op != NULL);
	KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
	if (vip->vi_key.vk_key_len > 0) {
		KASSERT(vip->vi_key.vk_key != NULL);
		hash = vcache_hash(&vip->vi_key);

		/*
		 * Wait for previous instance to be reclaimed,
		 * then insert new node.
		 */
		mutex_enter(&vcache_lock);
		while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
			ovp = VIMPL_TO_VNODE(ovip);
			mutex_enter(ovp->v_interlock);
			mutex_exit(&vcache_lock);
			error = vcache_vget(ovp);
			KASSERT(error == ENOENT);
			mutex_enter(&vcache_lock);
		}
		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
		    vip, vi_hash);
		mutex_exit(&vcache_lock);
	}
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_ref(mp);
	vfs_unbusy(mp);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
	mutex_exit(&vcache_lock);
	mutex_exit(vp->v_interlock);
	*vpp = vp;
	return 0;
}

/*
 * Prepare key change: update the old cache node's key and lock the new
 * cache node.  Return an error if the new node already exists.
 */
int
vcache_rekey_enter(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *vip, *new_vip;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	new_vip = vcache_alloc();
	new_vip->vi_key = new_vcache_key;

	/* Insert locked new node used as placeholder. */
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
	if (vip != NULL) {
		vcache_dealloc(new_vip);
		return EEXIST;
	}
	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_vip, vi_hash);

	/* Replace the old node's key with the temporary copy. */
	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(vip != NULL);
	KASSERT(VIMPL_TO_VNODE(vip) == vp);
	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
	vip->vi_key = old_vcache_key;
	mutex_exit(&vcache_lock);
	return 0;
}

/*
 * Key change complete: update old node and remove placeholder.
 */
void
vcache_rekey_exit(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *vip, *new_vip;
	struct vnode *new_vp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	mutex_enter(&vcache_lock);

	/* Lookup old and new node. */
	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(vip != NULL);
	KASSERT(VIMPL_TO_VNODE(vip) == vp);

	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
	KASSERT(new_vip != NULL);
	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
	new_vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(new_vp->v_interlock);
	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
	mutex_exit(new_vp->v_interlock);

	/* Rekey old node and put it onto its new hashlist. */
	vip->vi_key = new_vcache_key;
	if (old_hash != new_hash) {
		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
		    vip, vnode_impl, vi_hash);
		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
		    vip, vi_hash);
	}

	/* Remove new node used as placeholder. */
	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_vip, vnode_impl, vi_hash);
	vcache_dealloc(new_vip);
}

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with vnode locked and will return unlocked.
 * Must be called with the interlock held, and will return with it held.
 */
static void
vcache_reclaim(vnode_t *vp)
{
	lwp_t *l = curlwp;
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp = vp->v_mount;
	uint32_t hash;
	uint8_t temp_buf[64], *temp_key;
	size_t temp_key_len;
	bool recycle, active;
	int error;

	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vrefcnt(vp) != 0);

	active = (vrefcnt(vp) > 1);
	temp_key_len = vip->vi_key.vk_key_len;
	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	VSTATE_CHANGE(vp, VS_LOADED, VS_RECLAIMING);
	mutex_exit(vp->v_interlock);

	rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_EXECMAP) != 0 && vp->v_uobj.uo_npages != 0) {
		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
		cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	vp->v_iflag |= VI_DEADCHECK; /* for genfs_getpages() */
	mutex_exit(vp->v_interlock);
	rw_exit(vp->v_uobj.vmobjlock);

	/*
	 * With the vnode state set to reclaiming, purge the name cache
	 * immediately to prevent new handles on the vnode, and wait for
	 * existing threads trying to get a handle to notice the
	 * VS_RECLAIMED status and abort.
	 */
	cache_purge(vp);

	/* Replace the vnode key with a temporary copy. */
	if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
	} else {
		temp_key = temp_buf;
	}
	if (vip->vi_key.vk_key_len > 0) {
		mutex_enter(&vcache_lock);
		memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
		vip->vi_key.vk_key = temp_key;
		mutex_exit(&vcache_lock);
	}

	fstrans_start(mp);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.
	 */
	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
	if (error != 0) {
		if (wapbl_vphaswapbl(vp))
			WAPBL_DISCARD(wapbl_vptomp(vp));
		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
	}
	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
		spec_node_revoke(vp);
	}

	/*
	 * Disassociate the underlying file system from the vnode.
	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
	 * would no longer function.
	 */
	VOP_INACTIVE(vp, &recycle);
	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	KASSERT(vp->v_data == NULL);
	KASSERT((vp->v_iflag & VI_PAGES) == 0);

	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}

	if (vip->vi_key.vk_key_len > 0) {
		/* Remove from vnode cache. */
		hash = vcache_hash(&vip->vi_key);
		mutex_enter(&vcache_lock);
		KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
		    vip, vnode_impl, vi_hash);
		mutex_exit(&vcache_lock);
	}
	if (temp_key != temp_buf)
		kmem_free(temp_key, temp_key_len);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_vflag |= VV_LOCKSWORK;
	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
	vp->v_tag = VT_NON;
	KNOTE(&vp->v_klist, NOTE_REVOKE);
	mutex_exit(vp->v_interlock);

	/*
	 * Move to dead mount.  Must be after changing the operations
	 * vector as vnode operations enter the mount before using the
	 * operations vector.  See sys/kern/vnode_if.c.
	 */
	vp->v_vflag &= ~VV_ROOT;
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);

#ifdef PAX_SEGVGUARD
	pax_segvguard_cleanup(vp);
#endif /* PAX_SEGVGUARD */

	mutex_enter(vp->v_interlock);
	fstrans_done(mp);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}

/*
 * Disassociate the underlying file system from an open device vnode
 * and make it anonymous.
 *
 * Vnode unlocked on entry, drops a reference to the vnode.
 */
void
vcache_make_anon(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	uint32_t hash;
	bool recycle;

	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
	VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);

	/* Remove from vnode cache. */
	hash = vcache_hash(&vip->vi_key);
	mutex_enter(&vcache_lock);
	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
	    vip, vnode_impl, vi_hash);
	vip->vi_key.vk_mount = dead_rootmount;
	vip->vi_key.vk_key_len = 0;
	vip->vi_key.vk_key = NULL;
	mutex_exit(&vcache_lock);

	/*
	 * Disassociate the underlying file system from the vnode.
	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
	 * would no longer function.
	 */
	if (vn_lock(vp, LK_EXCLUSIVE)) {
		vnpanic(vp, "%s: cannot lock", __func__);
	}
	VOP_INACTIVE(vp, &recycle);
	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	/* Purge name cache. */
	cache_purge(vp);

	/* Done with purge, change operations vector. */
	mutex_enter(vp->v_interlock);
	vp->v_op = spec_vnodeop_p;
	vp->v_vflag |= VV_MPSAFE;
	vp->v_vflag &= ~VV_LOCKSWORK;
	mutex_exit(vp->v_interlock);

	/*
	 * Move to dead mount.  Must be after changing the operations
	 * vector as vnode operations enter the mount before using the
	 * operations vector.  See sys/kern/vnode_if.c.
	 */
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);

	vrele(vp);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(struct buf *bp)
{
	vnode_t *vp;

	if ((vp = bp->b_vp) == NULL)
		return;

	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));

	if (--vp->v_numoutput < 0)
		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
	if (vp->v_numoutput == 0)
		cv_broadcast(&vp->v_cv);
}

/*
 * Test a vnode for being or becoming dead.  Returns one of:
 * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
 * ENOENT: vnode is dead.
 * 0:      otherwise.
 *
 * Whenever this function returns a non-zero value all future
 * calls will also return a non-zero value.
 */
int
vdead_check(struct vnode *vp, int flags)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (! ISSET(flags, VDEAD_NOWAIT))
		VSTATE_WAIT_STABLE(vp);

	if (VSTATE_GET(vp) == VS_RECLAIMING) {
		KASSERT(ISSET(flags, VDEAD_NOWAIT));
		return EBUSY;
	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
		return ENOENT;
	}

	return 0;
}

int
vfs_drainvnodes(void)
{
	int i, gen;

	mutex_enter(&vdrain_lock);
	for (i = 0; i < 2; i++) {
		gen = vdrain_gen;
		while (gen == vdrain_gen) {
			cv_broadcast(&vdrain_cv);
			cv_wait(&vdrain_gen_cv, &vdrain_lock);
		}
	}
	mutex_exit(&vdrain_lock);

	if (numvnodes >= desiredvnodes)
		return EBUSY;

	if (vcache_hashsize != desiredvnodes)
		vcache_reinit();

	return 0;
}

void
vnpanic(vnode_t *vp, const char *fmt, ...)
{
	va_list ap;

#ifdef DIAGNOSTIC
	vprint(NULL, vp);
#endif
	va_start(ap, fmt);
	vpanic(fmt, ap);
	va_end(ap);
}

void
vshareilock(vnode_t *tvp, vnode_t *fvp)
{
	kmutex_t *oldlock;

	oldlock = tvp->v_interlock;
	mutex_obj_hold(fvp->v_interlock);
	tvp->v_interlock = fvp->v_interlock;
	mutex_obj_free(oldlock);
}