/*	$NetBSD: vfs_vnode.c,v 1.105.2.1 2020/01/08 11:02:16 ad Exp $	*/

/*-
 * Copyright (c) 1997-2011, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 * Normally, there are two points where new vnodes are created:
 * VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 * starts in one of the following ways:
 *
 * - Allocation, via vcache_get(9) or vcache_new(9).
 * - Reclamation of inactive vnode, via vcache_vget(9).
 *
 * Recycle from a free list, via getnewvnode(9) -> getcleanvnode(9)
 * was another, traditional way.  Currently, only the draining thread
 * recycles the vnodes.  This behaviour might be revisited.
 *
 * The life-cycle ends when the last reference is dropped, usually
 * in VOP_REMOVE(9).  In such a case, VOP_INACTIVE(9) is called to inform
 * the file system that the vnode is inactive.  Via this call, the file
 * system indicates whether the vnode can be recycled (usually, it checks
 * its own references, e.g. the link count, or whether the file was removed).
 *
 * Depending on that indication, the vnode can be put onto a free list
 * (cache), or cleaned via vcache_reclaim, which calls VOP_RECLAIM(9) to
 * disassociate the underlying file system from the vnode, and finally
 * destroyed.
 *
 * Vnode state
 *
 * Vnode is always in one of six states:
 * - MARKER      This is a marker vnode to help list traversal.  It
 *               will never change its state.
 * - LOADING     Vnode is associating underlying file system and not
 *               yet ready to use.
 * - LOADED      Vnode has associated underlying file system and is
 *               ready to use.
 * - BLOCKED     Vnode is active but cannot get new references.
 * - RECLAIMING  Vnode is disassociating from the underlying file
 *               system.
 * - RECLAIMED   Vnode has disassociated from underlying file system
 *               and is dead.
 *
 * Valid state changes are:
 * LOADING -> LOADED
 *       Vnode has been initialised in vcache_get() or
 *       vcache_new() and is ready to use.
 * LOADED -> RECLAIMING
 *       Vnode starts disassociation from underlying file
 *       system in vcache_reclaim().
 * RECLAIMING -> RECLAIMED
 *       Vnode finished disassociation from underlying file
 *       system in vcache_reclaim().
 * LOADED -> BLOCKED
 *       Either vcache_rekey*() is changing the vnode key or
 *       vrelel() is about to call VOP_INACTIVE().
 * BLOCKED -> LOADED
 *       The block condition is over.
 * LOADING -> RECLAIMED
 *       Either vcache_get() or vcache_new() failed to
 *       associate the underlying file system or vcache_rekey*()
 *       drops a vnode used as placeholder.
 *
 * Of these states LOADING, BLOCKED and RECLAIMING are intermediate
 * and it is possible to wait for a state change.
 *
 * State is protected with v_interlock with one exception:
 * to change from LOADING both v_interlock and vcache_lock must be held
 * so it is possible to check "state == LOADING" without holding
 * v_interlock.  See vcache_get() for details.
 *
 * Reference counting
 *
 * A vnode is considered active if its reference count
 * (vnode_t::v_usecount) is non-zero.  The count is maintained with the
 * vref(9), vrele(9) and vput(9) routines.  Common points holding
 * references are e.g. open files, the current working directory,
 * mount points, etc.
 *
 */
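
/*
 * Example (an illustrative sketch, not used by this file): a typical
 * consumer takes a reference via a lookup, locks the vnode around its
 * vnode operations, and drops both the lock and the reference in one
 * step with vput(9), which is VOP_UNLOCK() plus vrele().  Here
 * namei_simple_kernel(9) stands in for any lookup that returns a
 * referenced, unlocked vnode; the path is hypothetical.
 *
 *        struct vnode *vp;
 *        int error;
 *
 *        error = namei_simple_kernel("/some/path",
 *            NSM_FOLLOW_NOEMULROOT, &vp);
 *        if (error == 0) {
 *                vn_lock(vp, LK_SHARED | LK_RETRY);
 *                ... VOP_GETATTR() and friends ...
 *                vput(vp);
 *        }
 */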

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.105.2.1 2020/01/08 11:02:16 ad Exp $");

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode_impl.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_stat.h>

/* Flags to vrelel. */
#define VRELEL_ASYNC    0x0001  /* Always defer to vrele thread. */
#define VRELEL_FORCE    0x0002  /* Must always succeed. */
#define VRELEL_NOINACT  0x0004  /* Don't bother calling VOP_INACTIVE(). */

#define LRU_VRELE       0
#define LRU_FREE        1
#define LRU_HOLD        2
#define LRU_COUNT       3

/*
 * There are three lru lists: one holds vnodes waiting for async release,
 * one is for vnodes which have no buffer/page references and one for those
 * which do (i.e. v_holdcnt is non-zero).  We put the lists into a single,
 * private cache line as vnodes migrate between them while under the same
 * lock (vdrain_lock).
 */
u_int                   numvnodes __cacheline_aligned;
static vnodelst_t       lru_list[LRU_COUNT] __cacheline_aligned;
static kmutex_t         vdrain_lock __cacheline_aligned;
static kcondvar_t       vdrain_cv;
static int              vdrain_gen;
static kcondvar_t       vdrain_gen_cv;
static bool             vdrain_retry;
static lwp_t *          vdrain_lwp;
SLIST_HEAD(hashhead, vnode_impl);
static kmutex_t         vcache_lock __cacheline_aligned;
static kcondvar_t       vcache_cv;
static u_int            vcache_hashsize;
static u_long           vcache_hashmask;
static struct hashhead  *vcache_hashtab;
static pool_cache_t     vcache_pool;
static void             lru_requeue(vnode_t *, vnodelst_t *);
static vnodelst_t *     lru_which(vnode_t *);
static vnode_impl_t *   vcache_alloc(void);
static void             vcache_dealloc(vnode_impl_t *);
static void             vcache_free(vnode_impl_t *);
static void             vcache_init(void);
static void             vcache_reinit(void);
static void             vcache_reclaim(vnode_t *);
static void             vrelel(vnode_t *, int);
static void             vdrain_thread(void *);
static void             vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount     *dead_rootmount;
extern int              (**dead_vnodeop_p)(void *);
extern int              (**spec_vnodeop_p)(void *);
extern struct vfsops    dead_vfsops;

/* Vnode state operations and diagnostics. */

#if defined(DIAGNOSTIC)

#define VSTATE_VALID(state) \
        ((state) != VS_ACTIVE && (state) != VS_MARKER)
#define VSTATE_GET(vp) \
        vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
        vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
        vstate_assert_wait_stable((vp), __func__, __LINE__)

void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{
        vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

        if (!has_lock) {
                /*
                 * Prevent predictive loads from the CPU, but check the state
                 * without locking first.
                 */
                membar_enter();
                if (state == VS_ACTIVE && vp->v_usecount > 0 &&
                    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
                        return;
                if (vip->vi_state == state)
                        return;
                mutex_enter((vp)->v_interlock);
        }

        KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

        if ((state == VS_ACTIVE && vp->v_usecount > 0 &&
            (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
            vip->vi_state == state) {
                if (!has_lock)
                        mutex_exit((vp)->v_interlock);
                return;
        }
        vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
            vstate_name(vip->vi_state), vp->v_usecount,
            vstate_name(state), func, line);
}

static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
        vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

        KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
        if (! VSTATE_VALID(vip->vi_state))
                vnpanic(vp, "state is %s at %s:%d",
                    vstate_name(vip->vi_state), func, line);

        return vip->vi_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
        vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

        KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
        if (! VSTATE_VALID(vip->vi_state))
                vnpanic(vp, "state is %s at %s:%d",
                    vstate_name(vip->vi_state), func, line);

        while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
                cv_wait(&vp->v_cv, vp->v_interlock);

        if (! VSTATE_VALID(vip->vi_state))
                vnpanic(vp, "state is %s at %s:%d",
                    vstate_name(vip->vi_state), func, line);
}

static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
        vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

        KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
        if (from == VS_LOADING)
                KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);

        if (! VSTATE_VALID(from))
                vnpanic(vp, "from is %s at %s:%d",
                    vstate_name(from), func, line);
        if (! VSTATE_VALID(to))
                vnpanic(vp, "to is %s at %s:%d",
                    vstate_name(to), func, line);
        if (vip->vi_state != from)
                vnpanic(vp, "from is %s, expected %s at %s:%d\n",
                    vstate_name(vip->vi_state), vstate_name(from), func, line);
        if ((from == VS_BLOCKED || to == VS_BLOCKED) && vp->v_usecount != 1)
                vnpanic(vp, "%s to %s with usecount %d at %s:%d",
                    vstate_name(from), vstate_name(to), vp->v_usecount,
                    func, line);

        vip->vi_state = to;
        if (from == VS_LOADING)
                cv_broadcast(&vcache_cv);
        if (to == VS_LOADED || to == VS_RECLAIMED)
                cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
        (VNODE_TO_VIMPL((vp))->vi_state)
#define VSTATE_CHANGE(vp, from, to) \
        vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
        vstate_wait_stable((vp))
void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{

}

static void
vstate_wait_stable(vnode_t *vp)
{
        vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

        while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
                cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
{
        vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

        vip->vi_state = to;
        if (from == VS_LOADING)
                cv_broadcast(&vcache_cv);
        if (to == VS_LOADED || to == VS_RECLAIMED)
                cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */

void
vfs_vnode_sysinit(void)
{
        int error __diagused, i;

        dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
        KASSERT(dead_rootmount != NULL);
        dead_rootmount->mnt_iflag |= IMNT_MPSAFE;

        mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
        for (i = 0; i < LRU_COUNT; i++) {
                TAILQ_INIT(&lru_list[i]);
        }
        vcache_init();

        cv_init(&vdrain_cv, "vdrain");
        cv_init(&vdrain_gen_cv, "vdrainwt");
        error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
            NULL, &vdrain_lwp, "vdrain");
        KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
        vnode_impl_t *vip;
        vnode_t *vp;

        vip = pool_cache_get(vcache_pool, PR_WAITOK);
        memset(vip, 0, sizeof(*vip));
        vp = VIMPL_TO_VNODE(vip);
        uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
        vp->v_mount = mp;
        vp->v_type = VBAD;
        vip->vi_state = VS_MARKER;

        return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
        vnode_impl_t *vip;

        vip = VNODE_TO_VIMPL(vp);
        KASSERT(vip->vi_state == VS_MARKER);
        uvm_obj_destroy(&vp->v_uobj, true);
        pool_cache_put(vcache_pool, vip);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

        return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
}

/*
 * Return the lru list this node should be on.
 */
static vnodelst_t *
lru_which(vnode_t *vp)
{

        KASSERT(mutex_owned(vp->v_interlock));

        if (vp->v_holdcnt > 0)
                return &lru_list[LRU_HOLD];
        else
                return &lru_list[LRU_FREE];
}

/*
 * Put the vnode at the end of the given list.
 * Both the current and the new list may be NULL, used on vnode alloc/free.
 * Adjust numvnodes and signal the vdrain thread if there is work.
 */
static void
lru_requeue(vnode_t *vp, vnodelst_t *listhd)
{
        vnode_impl_t *vip;
        int d;

        /*
         * If the vnode is on the correct list, and was put there recently,
         * then leave it be, thus avoiding huge cache and lock contention.
         */
        vip = VNODE_TO_VIMPL(vp);
        if (listhd == vip->vi_lrulisthd &&
            (hardclock_ticks - vip->vi_lrulisttm) < hz) {
                return;
        }

        mutex_enter(&vdrain_lock);
        d = 0;
        if (vip->vi_lrulisthd != NULL)
                TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
        else
                d++;
        vip->vi_lrulisthd = listhd;
        vip->vi_lrulisttm = hardclock_ticks;
        if (vip->vi_lrulisthd != NULL)
                TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
        else
                d--;
        if (d != 0) {
                /*
                 * Looks strange?  This is not a bug.  Don't store
                 * numvnodes unless there is a change - avoid false
                 * sharing on MP.
                 */
                numvnodes += d;
        }
        if (numvnodes > desiredvnodes || listhd == &lru_list[LRU_VRELE])
                cv_broadcast(&vdrain_cv);
        mutex_exit(&vdrain_lock);
}

/*
 * Release deferred vrele vnodes for this mount.
 * Called with file system suspended.
 */
void
vrele_flush(struct mount *mp)
{
        vnode_impl_t *vip, *marker;
        vnode_t *vp;

        KASSERT(fstrans_is_owner(mp));

        marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

        mutex_enter(&vdrain_lock);
        TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);

        while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
                TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
                TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
                    vi_lrulist);
                vp = VIMPL_TO_VNODE(vip);
                if (vnis_marker(vp))
                        continue;

                KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
                TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
                vip->vi_lrulisthd = &lru_list[LRU_HOLD];
                vip->vi_lrulisttm = hardclock_ticks;
                TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
                mutex_exit(&vdrain_lock);

                mutex_enter(vp->v_interlock);
                vrelel(vp, VRELEL_FORCE);

                mutex_enter(&vdrain_lock);
        }

        TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
        mutex_exit(&vdrain_lock);

        vnfree_marker(VIMPL_TO_VNODE(marker));
}

/*
 * Reclaim a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_remove(vnode_t *vp)
{
        struct mount *mp;

        KASSERT(mutex_owned(&vdrain_lock));

        /* Probe usecount (unlocked). */
        if (vp->v_usecount > 0)
                return;
        /* Try v_interlock -- we lock the wrong direction! */
        if (!mutex_tryenter(vp->v_interlock))
                return;
        /* Probe usecount and state. */
        if (vp->v_usecount > 0 || VSTATE_GET(vp) != VS_LOADED) {
                mutex_exit(vp->v_interlock);
                return;
        }
        mp = vp->v_mount;
        if (fstrans_start_nowait(mp) != 0) {
                mutex_exit(vp->v_interlock);
                return;
        }
        vdrain_retry = true;
        mutex_exit(&vdrain_lock);

        if (vcache_vget(vp) == 0) {
                if (!vrecycle(vp)) {
                        mutex_enter(vp->v_interlock);
                        vrelel(vp, VRELEL_FORCE);
                }
        }
        fstrans_done(mp);

        mutex_enter(&vdrain_lock);
}

/*
 * Release a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_vrele(vnode_t *vp)
{
        vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
        struct mount *mp;

        KASSERT(mutex_owned(&vdrain_lock));

        mp = vp->v_mount;
        if (fstrans_start_nowait(mp) != 0)
                return;

        /*
         * First remove the vnode from the vrele list.
         * Put it on the last lru list; the last vrele()
         * will put it back onto the right list before
         * its v_usecount reaches zero.
         */
        KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
        TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
        vip->vi_lrulisthd = &lru_list[LRU_HOLD];
        vip->vi_lrulisttm = hardclock_ticks;
        TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);

        vdrain_retry = true;
        mutex_exit(&vdrain_lock);

        mutex_enter(vp->v_interlock);
        vrelel(vp, VRELEL_FORCE);
        fstrans_done(mp);

        mutex_enter(&vdrain_lock);
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes
 * and release vnodes from asynchronous vrele.
 */
static void
vdrain_thread(void *cookie)
{
        int i;
        u_int target;
        vnode_impl_t *vip, *marker;

        marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

        mutex_enter(&vdrain_lock);

        for (;;) {
                vdrain_retry = false;
                target = desiredvnodes - desiredvnodes/10;

                for (i = 0; i < LRU_COUNT; i++) {
                        TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
                        while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
                                TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
                                TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
                                    vi_lrulist);
                                if (vnis_marker(VIMPL_TO_VNODE(vip)))
                                        continue;
                                if (i == LRU_VRELE)
                                        vdrain_vrele(VIMPL_TO_VNODE(vip));
                                else if (numvnodes < target)
                                        break;
                                else
                                        vdrain_remove(VIMPL_TO_VNODE(vip));
                        }
                        TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
                }

                if (vdrain_retry) {
                        mutex_exit(&vdrain_lock);
                        yield();
                        mutex_enter(&vdrain_lock);
                } else {
                        vdrain_gen++;
                        cv_broadcast(&vdrain_gen_cv);
                        cv_wait(&vdrain_cv, &vdrain_lock);
                }
        }
}

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{

        VOP_UNLOCK(vp);
        vrele(vp);
}

/*
 * Vnode release.  If reference count drops to zero, call inactive
 * routine and either return to freelist or free to the pool.
 */
static void
vrelel(vnode_t *vp, int flags)
{
        const bool async = ((flags & VRELEL_ASYNC) != 0);
        const bool force = ((flags & VRELEL_FORCE) != 0);
        bool recycle, defer;
        int error;

        KASSERT(mutex_owned(vp->v_interlock));

        if (__predict_false(vp->v_op == dead_vnodeop_p &&
            VSTATE_GET(vp) != VS_RECLAIMED)) {
                vnpanic(vp, "dead but not clean");
        }

        /*
         * If not the last reference, just drop the reference count
         * and unlock.
         */
        if (vp->v_usecount > 1) {
                vp->v_usecount--;
                mutex_exit(vp->v_interlock);
                return;
        }
        if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
                vnpanic(vp, "%s: bad ref count", __func__);
        }

#ifdef DIAGNOSTIC
        if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
            vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
                vprint("vrelel: missing VOP_CLOSE()", vp);
        }
#endif

        /*
         * First try to get the vnode locked for VOP_INACTIVE().
         * Defer the vnode release to the vdrain thread if the caller
         * requests it explicitly, if it is the pagedaemon, or if the
         * lock attempt failed.
         */
        if ((curlwp == uvm.pagedaemon_lwp) || async) {
                defer = true;
        } else if (force) {
                mutex_exit(vp->v_interlock);
                error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
                defer = (error != 0);
                mutex_enter(vp->v_interlock);
        } else {
                error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
                defer = (error != 0);
        }
        KASSERT(mutex_owned(vp->v_interlock));
        KASSERT(! (force && defer));
        if (defer) {
                /*
                 * Defer reclaim to the kthread; it's not safe to
                 * clean it here.  We donate it our last reference.
                 */
                lru_requeue(vp, &lru_list[LRU_VRELE]);
                mutex_exit(vp->v_interlock);
                return;
        }

        /*
         * If the node got another reference while we
         * released the interlock, don't try to inactivate it yet.
         */
        if (vp->v_usecount > 1) {
                vp->v_usecount--;
                VOP_UNLOCK(vp);
                mutex_exit(vp->v_interlock);
                return;
        }

        /*
         * If not clean, deactivate the vnode, but preserve
         * our reference across the call to VOP_INACTIVE().
         */
        if (VSTATE_GET(vp) == VS_RECLAIMED) {
                VOP_UNLOCK(vp);
        } else {
                VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
                mutex_exit(vp->v_interlock);

                /*
                 * The vnode must not gain another reference while being
                 * deactivated.  If VOP_INACTIVE() indicates that
                 * the described file has been deleted, then recycle
                 * the vnode.
                 *
                 * Note that VOP_INACTIVE() will not drop the vnode lock.
                 */
                recycle = false;
                VOP_INACTIVE(vp, &recycle);
                if (!recycle)
                        VOP_UNLOCK(vp);
                mutex_enter(vp->v_interlock);
                VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
                if (!recycle) {
                        if (vp->v_usecount > 1) {
                                vp->v_usecount--;
                                mutex_exit(vp->v_interlock);
                                return;
                        }
                }

                /* Take care of space accounting. */
                if ((vp->v_iflag & VI_EXECMAP) != 0 &&
                    vp->v_uobj.uo_npages != 0) {
                        cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
                        cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
                }
                vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
                vp->v_vflag &= ~VV_MAPPED;

                /*
                 * Recycle the vnode if the file is now unused (unlinked),
                 * otherwise just free it.
                 */
                if (recycle) {
                        VSTATE_ASSERT(vp, VS_LOADED);
                        /* vcache_reclaim drops the lock. */
                        vcache_reclaim(vp);
                }
                KASSERT(vp->v_usecount > 0);
        }

        vp->v_usecount--;
        if (vp->v_usecount != 0) {
                /* Gained another reference while being reclaimed. */
                mutex_exit(vp->v_interlock);
                return;
        }

        if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
                /*
                 * It's clean so destroy it.  It isn't referenced
                 * anywhere since it has been reclaimed.
                 */
                vcache_free(VNODE_TO_VIMPL(vp));
        } else {
                /*
                 * Otherwise, put it back onto the freelist.  It
                 * can't be destroyed while still associated with
                 * a file system.
                 */
                lru_requeue(vp, lru_which(vp));
                mutex_exit(vp->v_interlock);
        }
}

void
vrele(vnode_t *vp)
{

        mutex_enter(vp->v_interlock);
        vrelel(vp, 0);
}

/*
 * Asynchronous vnode release: the vnode is released in a different context.
 */
void
vrele_async(vnode_t *vp)
{

        mutex_enter(vp->v_interlock);
        vrelel(vp, VRELEL_ASYNC);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 */
void
vref(vnode_t *vp)
{

        KASSERT(vp->v_usecount != 0);

        mutex_enter(vp->v_interlock);
        vp->v_usecount++;
        mutex_exit(vp->v_interlock);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

        KASSERT(mutex_owned(vp->v_interlock));

        if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0)
                lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

        KASSERT(mutex_owned(vp->v_interlock));

        if (vp->v_holdcnt <= 0) {
                vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
        }

        vp->v_holdcnt--;
        if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
                lru_requeue(vp, lru_which(vp));
}

/*
 * Recycle an unused vnode if caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{
        int error __diagused;

        mutex_enter(vp->v_interlock);

        /* Make sure we hold the last reference. */
        VSTATE_WAIT_STABLE(vp);
        if (vp->v_usecount != 1) {
                mutex_exit(vp->v_interlock);
                return false;
        }

        /* If the vnode is already clean we're done. */
        if (VSTATE_GET(vp) != VS_LOADED) {
                VSTATE_ASSERT(vp, VS_RECLAIMED);
                vrelel(vp, 0);
                return true;
        }

        /* Prevent further references until the vnode is locked. */
        VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
        mutex_exit(vp->v_interlock);

        /*
         * On a leaf file system this lock will always succeed as we hold
         * the last reference and prevent further references.
         * On layered file systems waiting for the lock would open a can of
         * deadlocks as the lower vnodes may have other active references.
         */
        error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);

        mutex_enter(vp->v_interlock);
        VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);

        if (error) {
                mutex_exit(vp->v_interlock);
                return false;
        }

        KASSERT(vp->v_usecount == 1);
        vcache_reclaim(vp);
        vrelel(vp, 0);

        return true;
}

/*
 * Helper for vrevoke() to propagate suspension from lastmp
 * to thismp.  Both args may be NULL.
 * Returns the currently suspended file system or NULL.
 */
static struct mount *
vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
{
        int error;

        if (lastmp == thismp)
                return thismp;

        if (lastmp != NULL)
                vfs_resume(lastmp);

        if (thismp == NULL)
                return NULL;

        do {
                error = vfs_suspend(thismp, 0);
        } while (error == EINTR || error == ERESTART);

        if (error == 0)
                return thismp;

        KASSERT(error == EOPNOTSUPP);
        return NULL;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
        struct mount *mp;
        vnode_t *vq;
        enum vtype type;
        dev_t dev;

        KASSERT(vp->v_usecount > 0);

        mp = vrevoke_suspend_next(NULL, vp->v_mount);

        mutex_enter(vp->v_interlock);
        VSTATE_WAIT_STABLE(vp);
        if (VSTATE_GET(vp) == VS_RECLAIMED) {
                mutex_exit(vp->v_interlock);
        } else if (vp->v_type != VBLK && vp->v_type != VCHR) {
                vp->v_usecount++;
                mutex_exit(vp->v_interlock);
                vgone(vp);
        } else {
                dev = vp->v_rdev;
                type = vp->v_type;
                mutex_exit(vp->v_interlock);

                while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
                        mp = vrevoke_suspend_next(mp, vq->v_mount);
                        vgone(vq);
                }
        }
        vrevoke_suspend_next(mp, NULL);
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{

        KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));

        vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
        mutex_enter(vp->v_interlock);
        VSTATE_WAIT_STABLE(vp);
        if (VSTATE_GET(vp) == VS_LOADED)
                vcache_reclaim(vp);
        VSTATE_ASSERT(vp, VS_RECLAIMED);
        vrelel(vp, 0);
}

static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
        uint32_t hash = HASH32_BUF_INIT;

        KASSERT(key->vk_key_len > 0);

        hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
        hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
        return hash;
}

static void
vcache_init(void)
{

        vcache_pool = pool_cache_init(sizeof(vnode_impl_t), 0, 0, 0,
            "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
        KASSERT(vcache_pool != NULL);
        mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&vcache_cv, "vcache");
        vcache_hashsize = desiredvnodes;
        vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
            &vcache_hashmask);
}

static void
vcache_reinit(void)
{
        int i;
        uint32_t hash;
        u_long oldmask, newmask;
        struct hashhead *oldtab, *newtab;
        vnode_impl_t *vip;

        newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
        mutex_enter(&vcache_lock);
        oldtab = vcache_hashtab;
        oldmask = vcache_hashmask;
        vcache_hashsize = desiredvnodes;
        vcache_hashtab = newtab;
        vcache_hashmask = newmask;
        for (i = 0; i <= oldmask; i++) {
                while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
                        SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
                        hash = vcache_hash(&vip->vi_key);
                        SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
                            vip, vi_hash);
                }
        }
        mutex_exit(&vcache_lock);
        hashdone(oldtab, HASH_SLIST, oldmask);
}

static inline vnode_impl_t *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
        struct hashhead *hashp;
        vnode_impl_t *vip;

        KASSERT(mutex_owned(&vcache_lock));

        hashp = &vcache_hashtab[hash & vcache_hashmask];
        SLIST_FOREACH(vip, hashp, vi_hash) {
                if (key->vk_mount != vip->vi_key.vk_mount)
                        continue;
                if (key->vk_key_len != vip->vi_key.vk_key_len)
                        continue;
                if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
                        continue;
                return vip;
        }
        return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static vnode_impl_t *
vcache_alloc(void)
{
        vnode_impl_t *vip;
        vnode_t *vp;

        vip = pool_cache_get(vcache_pool, PR_WAITOK);
        memset(vip, 0, sizeof(*vip));

        vip->vi_lock = rw_obj_alloc();
        /* SLIST_INIT(&vip->vi_hash); */
        /* LIST_INIT(&vip->vi_dnclist); */

        vp = VIMPL_TO_VNODE(vip);
        uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
        cv_init(&vp->v_cv, "vnode");
        cache_vnode_init(vp);

        vp->v_usecount = 1;
        vp->v_type = VNON;
        vp->v_size = vp->v_writesize = VSIZENOTSET;

        vip->vi_state = VS_LOADING;

        lru_requeue(vp, &lru_list[LRU_FREE]);

        return vip;
}

/*
 * Deallocate a vcache node in state VS_LOADING.
 *
 * vcache_lock held on entry and released on return.
 */
static void
vcache_dealloc(vnode_impl_t *vip)
{
        vnode_t *vp;

        KASSERT(mutex_owned(&vcache_lock));

        vp = VIMPL_TO_VNODE(vip);
        vfs_ref(dead_rootmount);
        vfs_insmntque(vp, dead_rootmount);
        mutex_enter(vp->v_interlock);
        vp->v_op = dead_vnodeop_p;
        VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
        mutex_exit(&vcache_lock);
        vrelel(vp, 0);
}

/*
 * Free an unused, unreferenced vcache node.
 * v_interlock locked on entry.
 */
static void
vcache_free(vnode_impl_t *vip)
{
        vnode_t *vp;

        vp = VIMPL_TO_VNODE(vip);
        KASSERT(mutex_owned(vp->v_interlock));

        KASSERT(vp->v_usecount == 0);
        KASSERT(vp->v_holdcnt == 0);
        KASSERT(vp->v_writecount == 0);
        lru_requeue(vp, NULL);
        mutex_exit(vp->v_interlock);

        vfs_insmntque(vp, NULL);
        if (vp->v_type == VBLK || vp->v_type == VCHR)
                spec_node_destroy(vp);

        rw_obj_free(vip->vi_lock);
        uvm_obj_destroy(&vp->v_uobj, true);
        cv_destroy(&vp->v_cv);
        cache_vnode_fini(vp);
        pool_cache_put(vcache_pool, vip);
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success, ENOENT if the vnode has been reclaimed and
 * EBUSY if the vnode state is unstable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_tryvget(vnode_t *vp)
{
        int error = 0;

        KASSERT(mutex_owned(vp->v_interlock));

        if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED))
                error = ENOENT;
        else if (__predict_false(VSTATE_GET(vp) != VS_LOADED))
                error = EBUSY;
        else
                vp->v_usecount++;

        mutex_exit(vp->v_interlock);

        return error;
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success and ENOENT if the vnode has been reclaimed.
 * Will wait for the vnode state to be stable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_vget(vnode_t *vp)
{

        KASSERT(mutex_owned(vp->v_interlock));

        /* Increment hold count to prevent vnode from disappearing. */
        vp->v_holdcnt++;
        VSTATE_WAIT_STABLE(vp);
        vp->v_holdcnt--;

        /* If this was the last reference to a reclaimed vnode free it now. */
        if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
                if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
                        vcache_free(VNODE_TO_VIMPL(vp));
                else
                        mutex_exit(vp->v_interlock);
                return ENOENT;
        }
        VSTATE_ASSERT(vp, VS_LOADED);
        vp->v_usecount++;
        mutex_exit(vp->v_interlock);

        return 0;
}
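
/*
 * Illustrative sketch (not code from this file): a caller holding
 * v_interlock may try the non-blocking vcache_tryvget() first and fall
 * back to the blocking vcache_vget() when the state is unstable:
 *
 *        mutex_enter(vp->v_interlock);
 *        error = vcache_tryvget(vp);
 *        if (error == EBUSY) {
 *                mutex_enter(vp->v_interlock);
 *                error = vcache_vget(vp);
 *        }
 *
 * Both calls drop v_interlock on return, so it must be re-entered
 * before the second attempt.
 */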

/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
        int error;
        uint32_t hash;
        const void *new_key;
        struct vnode *vp;
        struct vcache_key vcache_key;
        vnode_impl_t *vip, *new_vip;

        new_key = NULL;
        *vpp = NULL;

        vcache_key.vk_mount = mp;
        vcache_key.vk_key = key;
        vcache_key.vk_key_len = key_len;
        hash = vcache_hash(&vcache_key);

again:
        mutex_enter(&vcache_lock);
        vip = vcache_hash_lookup(&vcache_key, hash);

        /* If found, take a reference or retry. */
        if (__predict_true(vip != NULL)) {
                /*
                 * If the vnode is loading we cannot take the v_interlock
                 * here as it might change during load (see uvm_obj_setlock()).
                 * As changing state from VS_LOADING requires both vcache_lock
                 * and v_interlock it is safe to test with vcache_lock held.
                 *
                 * Wait for vnodes changing state from VS_LOADING and retry.
                 */
                if (__predict_false(vip->vi_state == VS_LOADING)) {
                        cv_wait(&vcache_cv, &vcache_lock);
                        mutex_exit(&vcache_lock);
                        goto again;
                }
                vp = VIMPL_TO_VNODE(vip);
                mutex_enter(vp->v_interlock);
                mutex_exit(&vcache_lock);
                error = vcache_vget(vp);
                if (error == ENOENT)
                        goto again;
                if (error == 0)
                        *vpp = vp;
                KASSERT((error != 0) == (*vpp == NULL));
                return error;
        }
        mutex_exit(&vcache_lock);

        /* Allocate and initialize a new vcache / vnode pair. */
        error = vfs_busy(mp);
        if (error)
                return error;
        new_vip = vcache_alloc();
        new_vip->vi_key = vcache_key;
        vp = VIMPL_TO_VNODE(new_vip);
        mutex_enter(&vcache_lock);
        vip = vcache_hash_lookup(&vcache_key, hash);
        if (vip == NULL) {
                SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
                    new_vip, vi_hash);
                vip = new_vip;
        }

        /* If another thread beat us inserting this node, retry. */
        if (vip != new_vip) {
                vcache_dealloc(new_vip);
                vfs_unbusy(mp);
                goto again;
        }
        mutex_exit(&vcache_lock);

        /* Load the fs node.  Exclusive as new_vip is VS_LOADING. */
        error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
        if (error) {
                mutex_enter(&vcache_lock);
                SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
                    new_vip, vnode_impl, vi_hash);
                vcache_dealloc(new_vip);
                vfs_unbusy(mp);
                KASSERT(*vpp == NULL);
                return error;
        }
        KASSERT(new_key != NULL);
        KASSERT(memcmp(key, new_key, key_len) == 0);
        KASSERT(vp->v_op != NULL);
        vfs_insmntque(vp, mp);
        if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
                vp->v_vflag |= VV_MPSAFE;
        vfs_ref(mp);
        vfs_unbusy(mp);

        /* Finished loading, finalize node. */
        mutex_enter(&vcache_lock);
        new_vip->vi_key.vk_key = new_key;
        mutex_enter(vp->v_interlock);
        VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
        mutex_exit(vp->v_interlock);
        mutex_exit(&vcache_lock);
        *vpp = vp;
        return 0;
}
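
/*
 * Illustrative sketch (hypothetical file system, not code from this
 * file): VFS_VGET(9) is typically implemented on top of vcache_get(9),
 * with the inode number as the cache key:
 *
 *        int
 *        examplefs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
 *        {
 *                int error;
 *
 *                error = vcache_get(mp, &ino, sizeof(ino), vpp);
 *                if (error)
 *                        return error;
 *                error = vn_lock(*vpp, LK_EXCLUSIVE);
 *                if (error) {
 *                        vrele(*vpp);
 *                        *vpp = NULL;
 *                        return error;
 *                }
 *                return 0;
 *        }
 */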

/*
 * Create a new vnode / fs node pair and return it referenced through vpp.
 */
int
vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
    kauth_cred_t cred, void *extra, struct vnode **vpp)
{
        int error;
        uint32_t hash;
        struct vnode *vp, *ovp;
        vnode_impl_t *vip, *ovip;

        *vpp = NULL;

        /* Allocate and initialize a new vcache / vnode pair. */
        error = vfs_busy(mp);
        if (error)
                return error;
        vip = vcache_alloc();
        vip->vi_key.vk_mount = mp;
        vp = VIMPL_TO_VNODE(vip);

        /* Create and load the fs node. */
        error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
            &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
        if (error) {
                mutex_enter(&vcache_lock);
                vcache_dealloc(vip);
                vfs_unbusy(mp);
                KASSERT(*vpp == NULL);
                return error;
        }
        KASSERT(vp->v_op != NULL);
        KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
        if (vip->vi_key.vk_key_len > 0) {
                KASSERT(vip->vi_key.vk_key != NULL);
                hash = vcache_hash(&vip->vi_key);

                /*
                 * Wait for previous instance to be reclaimed,
                 * then insert new node.
                 */
                mutex_enter(&vcache_lock);
                while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
                        ovp = VIMPL_TO_VNODE(ovip);
                        mutex_enter(ovp->v_interlock);
                        mutex_exit(&vcache_lock);
                        error = vcache_vget(ovp);
                        KASSERT(error == ENOENT);
                        mutex_enter(&vcache_lock);
                }
                SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
                    vip, vi_hash);
                mutex_exit(&vcache_lock);
        }
        vfs_insmntque(vp, mp);
        if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
                vp->v_vflag |= VV_MPSAFE;
        vfs_ref(mp);
        vfs_unbusy(mp);

        /* Finished loading, finalize node. */
        mutex_enter(&vcache_lock);
        mutex_enter(vp->v_interlock);
        VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
        mutex_exit(&vcache_lock);
        mutex_exit(vp->v_interlock);
        *vpp = vp;
        return 0;
}
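
/*
 * Illustrative sketch (hypothetical create path; the dvp/vap/cnp names
 * are assumed from the usual VOP_CREATE(9) arguments): a file system
 * obtains its new vnode / fs node pair like this, passing NULL for the
 * extra argument it does not use:
 *
 *        error = vcache_new(dvp->v_mount, dvp, vap, cnp->cn_cred,
 *            NULL, &vp);
 */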

/*
 * Prepare key change: update the old cache node's key and lock the new
 * cache node.  Return an error if the new node already exists.
 */
int
vcache_rekey_enter(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
        uint32_t old_hash, new_hash;
        struct vcache_key old_vcache_key, new_vcache_key;
        vnode_impl_t *vip, *new_vip;

        old_vcache_key.vk_mount = mp;
        old_vcache_key.vk_key = old_key;
        old_vcache_key.vk_key_len = old_key_len;
        old_hash = vcache_hash(&old_vcache_key);

        new_vcache_key.vk_mount = mp;
        new_vcache_key.vk_key = new_key;
        new_vcache_key.vk_key_len = new_key_len;
        new_hash = vcache_hash(&new_vcache_key);

        new_vip = vcache_alloc();
        new_vip->vi_key = new_vcache_key;

        /* Insert locked new node used as placeholder. */
        mutex_enter(&vcache_lock);
        vip = vcache_hash_lookup(&new_vcache_key, new_hash);
        if (vip != NULL) {
                vcache_dealloc(new_vip);
                return EEXIST;
        }
        SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
            new_vip, vi_hash);

        /* Replace the old node's key with the temporary copy. */
        vip = vcache_hash_lookup(&old_vcache_key, old_hash);
        KASSERT(vip != NULL);
        KASSERT(VIMPL_TO_VNODE(vip) == vp);
        KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
        vip->vi_key = old_vcache_key;
        mutex_exit(&vcache_lock);
        return 0;
}

/*
 * Key change complete: update old node and remove placeholder.
 */
void
vcache_rekey_exit(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
        uint32_t old_hash, new_hash;
        struct vcache_key old_vcache_key, new_vcache_key;
        vnode_impl_t *vip, *new_vip;
        struct vnode *new_vp;

        old_vcache_key.vk_mount = mp;
        old_vcache_key.vk_key = old_key;
        old_vcache_key.vk_key_len = old_key_len;
        old_hash = vcache_hash(&old_vcache_key);

        new_vcache_key.vk_mount = mp;
        new_vcache_key.vk_key = new_key;
        new_vcache_key.vk_key_len = new_key_len;
        new_hash = vcache_hash(&new_vcache_key);

        mutex_enter(&vcache_lock);

        /* Lookup old and new node. */
        vip = vcache_hash_lookup(&old_vcache_key, old_hash);
        KASSERT(vip != NULL);
        KASSERT(VIMPL_TO_VNODE(vip) == vp);

        new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
        KASSERT(new_vip != NULL);
        KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
        new_vp = VIMPL_TO_VNODE(new_vip);
        mutex_enter(new_vp->v_interlock);
        VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
        mutex_exit(new_vp->v_interlock);

        /* Rekey old node and put it onto its new hashlist. */
        vip->vi_key = new_vcache_key;
        if (old_hash != new_hash) {
                SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
                    vip, vnode_impl, vi_hash);
                SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
                    vip, vi_hash);
        }

        /* Remove new node used as placeholder. */
        SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
            new_vip, vnode_impl, vi_hash);
        vcache_dealloc(new_vip);
}
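
/*
 * Illustrative sketch (hypothetical): a key change is always a
 * bracketed enter/exit pair around the file system's own update:
 *
 *        error = vcache_rekey_enter(mp, vp, &old_ino, sizeof(old_ino),
 *            &new_ino, sizeof(new_ino));
 *        if (error == 0) {
 *                ... update the fs node's identity ...
 *                vcache_rekey_exit(mp, vp, &old_ino, sizeof(old_ino),
 *                    &new_ino, sizeof(new_ino));
 *        }
 */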

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with vnode locked and will return unlocked.
 * Must be called with the interlock held, and will return with it held.
 */
static void
vcache_reclaim(vnode_t *vp)
{
        lwp_t *l = curlwp;
        vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
        struct mount *mp = vp->v_mount;
        uint32_t hash;
        uint8_t temp_buf[64], *temp_key;
        size_t temp_key_len;
        bool recycle, active;
        int error;

        KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
            VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
        KASSERT(mutex_owned(vp->v_interlock));
        KASSERT(vp->v_usecount != 0);

        active = (vp->v_usecount > 1);
        temp_key_len = vip->vi_key.vk_key_len;
        /*
         * Prevent the vnode from being recycled or brought into use
         * while we clean it out.
         */
        VSTATE_CHANGE(vp, VS_LOADED, VS_RECLAIMING);
        if ((vp->v_iflag & VI_EXECMAP) != 0 && vp->v_uobj.uo_npages != 0) {
                cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
                cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
        }
        vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
        mutex_exit(vp->v_interlock);

        /* Replace the vnode key with a temporary copy. */
        if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
                temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
        } else {
                temp_key = temp_buf;
        }
        if (vip->vi_key.vk_key_len > 0) {
                mutex_enter(&vcache_lock);
                memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
                vip->vi_key.vk_key = temp_key;
                mutex_exit(&vcache_lock);
        }

        fstrans_start(mp);

        /*
         * Clean out any cached data associated with the vnode.
         * If purging an active vnode, it must be closed and
         * deactivated before being reclaimed.
         */
        error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
        if (error != 0) {
                if (wapbl_vphaswapbl(vp))
                        WAPBL_DISCARD(wapbl_vptomp(vp));
                error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
        }
        KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
        KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
        if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
                spec_node_revoke(vp);
        }

        /*
         * Disassociate the underlying file system from the vnode.
         * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
         * the vnode, and may destroy the vnode so that VOP_UNLOCK
         * would no longer function.
         */
        VOP_INACTIVE(vp, &recycle);
        KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
            VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
        if (VOP_RECLAIM(vp)) {
                vnpanic(vp, "%s: cannot reclaim", __func__);
        }

        KASSERT(vp->v_data == NULL);
        KASSERT(vp->v_uobj.uo_npages == 0);

        if (vp->v_type == VREG && vp->v_ractx != NULL) {
                uvm_ra_freectx(vp->v_ractx);
                vp->v_ractx = NULL;
        }

        /* Purge name cache. */
        cache_purge(vp);

        if (vip->vi_key.vk_key_len > 0) {
                /* Remove from vnode cache. */
                hash = vcache_hash(&vip->vi_key);
                mutex_enter(&vcache_lock);
                KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
                SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
                    vip, vnode_impl, vi_hash);
                mutex_exit(&vcache_lock);
        }
        if (temp_key != temp_buf)
                kmem_free(temp_key, temp_key_len);

        /* Done with purge, notify sleepers of the grim news. */
        mutex_enter(vp->v_interlock);
        vp->v_op = dead_vnodeop_p;
        vp->v_vflag |= VV_LOCKSWORK;
        VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
        vp->v_tag = VT_NON;
        KNOTE(&vp->v_klist, NOTE_REVOKE);
        mutex_exit(vp->v_interlock);

        /*
         * Move to dead mount.  Must be after changing the operations
         * vector as vnode operations enter the mount before using the
         * operations vector.  See sys/kern/vnode_if.c.
         */
        vp->v_vflag &= ~VV_ROOT;
        vfs_ref(dead_rootmount);
        vfs_insmntque(vp, dead_rootmount);

        mutex_enter(vp->v_interlock);
        fstrans_done(mp);
        KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}

/*
 * Disassociate the underlying file system from an open device vnode
 * and make it anonymous.
 *
 * Vnode unlocked on entry, drops a reference to the vnode.
 */
void
vcache_make_anon(vnode_t *vp)
{
        vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
        uint32_t hash;
        bool recycle;

        KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
        KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
        VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);

        /* Remove from vnode cache. */
        hash = vcache_hash(&vip->vi_key);
        mutex_enter(&vcache_lock);
        KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
        SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
            vip, vnode_impl, vi_hash);
        vip->vi_key.vk_mount = dead_rootmount;
        vip->vi_key.vk_key_len = 0;
        vip->vi_key.vk_key = NULL;
        mutex_exit(&vcache_lock);

        /*
         * Disassociate the underlying file system from the vnode.
         * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
         * the vnode, and may destroy the vnode so that VOP_UNLOCK
         * would no longer function.
         */
        if (vn_lock(vp, LK_EXCLUSIVE)) {
                vnpanic(vp, "%s: cannot lock", __func__);
        }
        VOP_INACTIVE(vp, &recycle);
        KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
            VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
        if (VOP_RECLAIM(vp)) {
                vnpanic(vp, "%s: cannot reclaim", __func__);
        }

        /* Purge name cache. */
        cache_purge(vp);

        /* Done with purge, change operations vector. */
        mutex_enter(vp->v_interlock);
        vp->v_op = spec_vnodeop_p;
        vp->v_vflag |= VV_MPSAFE;
        vp->v_vflag &= ~VV_LOCKSWORK;
        mutex_exit(vp->v_interlock);

        /*
         * Move to dead mount.  Must be after changing the operations
         * vector as vnode operations enter the mount before using the
         * operations vector.  See sys/kern/vnode_if.c.
         */
        vfs_ref(dead_rootmount);
        vfs_insmntque(vp, dead_rootmount);

        vrele(vp);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(struct buf *bp)
{
        vnode_t *vp;

        if ((vp = bp->b_vp) == NULL)
                return;

        KASSERT(bp->b_objlock == vp->v_interlock);
        KASSERT(mutex_owned(bp->b_objlock));

        if (--vp->v_numoutput < 0)
                vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
        if (vp->v_numoutput == 0)
                cv_broadcast(&vp->v_cv);
}

/*
 * Test a vnode for being or becoming dead.  Returns one of:
 * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
 * ENOENT: vnode is dead.
 * 0:      otherwise.
 *
 * Whenever this function returns a non-zero value all future
 * calls will also return a non-zero value.
 */
int
vdead_check(struct vnode *vp, int flags)
{

        KASSERT(mutex_owned(vp->v_interlock));

        if (! ISSET(flags, VDEAD_NOWAIT))
                VSTATE_WAIT_STABLE(vp);

        if (VSTATE_GET(vp) == VS_RECLAIMING) {
                KASSERT(ISSET(flags, VDEAD_NOWAIT));
                return EBUSY;
        } else if (VSTATE_GET(vp) == VS_RECLAIMED) {
                return ENOENT;
        }

        return 0;
}
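
/*
 * Illustrative sketch (not code from this file): a driver path that
 * must not touch a dying vnode probes it like this and maps a dead or
 * becoming-dead vnode to ENXIO:
 *
 *        mutex_enter(vp->v_interlock);
 *        error = vdead_check(vp, VDEAD_NOWAIT);
 *        mutex_exit(vp->v_interlock);
 *        if (error != 0)
 *                return ENXIO;
 */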

int
vfs_drainvnodes(void)
{
        int i, gen;

        mutex_enter(&vdrain_lock);
        for (i = 0; i < 2; i++) {
                gen = vdrain_gen;
                while (gen == vdrain_gen) {
                        cv_broadcast(&vdrain_cv);
                        cv_wait(&vdrain_gen_cv, &vdrain_lock);
                }
        }
        mutex_exit(&vdrain_lock);

        if (numvnodes >= desiredvnodes)
                return EBUSY;

        if (vcache_hashsize != desiredvnodes)
                vcache_reinit();

        return 0;
}

void
vnpanic(vnode_t *vp, const char *fmt, ...)
{
        va_list ap;

#ifdef DIAGNOSTIC
        vprint(NULL, vp);
#endif
        va_start(ap, fmt);
        vpanic(fmt, ap);
        va_end(ap);
}