/*	$NetBSD: vfs_vnode.c,v 1.108 2020/01/23 10:21:14 ad Exp $	*/

/*-
 * Copyright (c) 1997-2011, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vcache_vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was the traditional third way.  Currently, only the draining thread
 *	recycles vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
 *	the file system that the vnode is inactive.  Via this call, the file
 *	system indicates whether the vnode can be recycled (usually by
 *	checking its own references, e.g. the link count, or whether the
 *	file was removed).
 *
 *	Depending on that indication, the vnode can be put onto a free list
 *	(cache), or cleaned via vcache_reclaim, which calls VOP_RECLAIM(9)
 *	to disassociate the underlying file system from the vnode, and
 *	finally destroyed.
 *
 * Vnode state
 *
 *	A vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating with the underlying file system
 *			and is not yet ready to use.
 *	- LOADED	Vnode has an associated underlying file system and is
 *			ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from the underlying file
 *			system and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> LOADED
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	LOADED -> RECLAIMING
 *			Vnode starts disassociation from the underlying file
 *			system in vcache_reclaim().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from the underlying file
 *			system in vcache_reclaim().
 *	LOADED -> BLOCKED
 *			Either vcache_rekey*() is changing the vnode key or
 *			vrelel() is about to call VOP_INACTIVE().
 *	BLOCKED -> LOADED
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system, or
 *			vcache_rekey*() drops a vnode used as a placeholder.
 *
 *	Of these states, LOADING, BLOCKED and RECLAIMING are intermediate,
 *	and it is possible to wait for a state change.
 *
 *	State is protected with v_interlock, with one exception: to change
 *	from LOADING both v_interlock and vcache_lock must be held, so it is
 *	possible to check "state == LOADING" while holding only vcache_lock.
 *	See vcache_get() for details.
 *
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained with the
 *	vref(9), vrele(9) and vput(9) routines.  Common points holding
 *	references are e.g. open files, the current working directory,
 *	mount points, etc.
 *
 */
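
/*
 * Illustrative sketch (not part of the original file): how a typical
 * file system consumer drives the life-cycle described above.  The use
 * of an inode number as the cache key and the VOP called are assumptions
 * made for this example only.
 *
 *	struct vnode *vp;
 *	struct vattr va;
 *	ino_t ino = 2;		// hypothetical key: root inode number
 *	int error;
 *
 *	// Look up (or load) the vnode; on success it is returned referenced.
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *	if (error)
 *		return error;
 *	// Lock the vnode before calling vnode operations.
 *	error = vn_lock(vp, LK_SHARED | LK_RETRY);
 *	if (error) {
 *		vrele(vp);	// drop the reference; vnode was not locked
 *		return error;
 *	}
 *	error = VOP_GETATTR(vp, &va, curlwp->l_cred);
 *	vput(vp);		// unlock and drop the reference in one call
 */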

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.108 2020/01/23 10:21:14 ad Exp $");

#include "opt_pax.h"

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/pax.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode_impl.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_stat.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC	0x0001	/* Always defer to vrele thread. */

#define	LRU_VRELE	0
#define	LRU_FREE	1
#define	LRU_HOLD	2
#define	LRU_COUNT	3

/*
 * There are three lru lists: one holds vnodes waiting for async release,
 * one is for vnodes which have no buffer/page references and one for those
 * which do (i.e. v_holdcnt is non-zero).  We put the lists into a single,
 * private cache line as vnodes migrate between them while under the same
 * lock (vdrain_lock).
 */
u_int			numvnodes		__cacheline_aligned;
static vnodelst_t	lru_list[LRU_COUNT]	__cacheline_aligned;
static kmutex_t		vdrain_lock		__cacheline_aligned;
static kcondvar_t	vdrain_cv;
static int		vdrain_gen;
static kcondvar_t	vdrain_gen_cv;
static bool		vdrain_retry;
static lwp_t *		vdrain_lwp;
SLIST_HEAD(hashhead, vnode_impl);
static kmutex_t		vcache_lock		__cacheline_aligned;
static kcondvar_t	vcache_cv;
static u_int		vcache_hashsize;
static u_long		vcache_hashmask;
static struct hashhead	*vcache_hashtab;
static pool_cache_t	vcache_pool;
static void		lru_requeue(vnode_t *, vnodelst_t *);
static vnodelst_t *	lru_which(vnode_t *);
static vnode_impl_t *	vcache_alloc(void);
static void		vcache_dealloc(vnode_impl_t *);
static void		vcache_free(vnode_impl_t *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vcache_reclaim(vnode_t *);
static void		vrelel(vnode_t *, int, int);
static void		vdrain_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount	*dead_rootmount;
extern int		(**dead_vnodeop_p)(void *);
extern int		(**spec_vnodeop_p)(void *);
extern struct vfsops	dead_vfsops;

/* Vnode state operations and diagnostics. */

#if defined(DIAGNOSTIC)

#define VSTATE_VALID(state) \
	((state) != VS_ACTIVE && (state) != VS_MARKER)
#define VSTATE_GET(vp) \
	vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
	vstate_assert_wait_stable((vp), __func__, __LINE__)

void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	if (!has_lock) {
		/*
		 * Prevent predictive loads from the CPU, but check the state
		 * without locking first.
		 */
		membar_enter();
		if (state == VS_ACTIVE && vp->v_usecount > 0 &&
		    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
			return;
		if (vip->vi_state == state)
			return;
		mutex_enter((vp)->v_interlock);
	}

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if ((state == VS_ACTIVE && vp->v_usecount > 0 &&
	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
	    vip->vi_state == state) {
		if (!has_lock)
			mutex_exit((vp)->v_interlock);
		return;
	}
	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
	    vstate_name(vip->vi_state), vp->v_usecount,
	    vstate_name(state), func, line);
}

static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	return vip->vi_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);
}

static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);

	if (! VSTATE_VALID(from))
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (! VSTATE_VALID(to))
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (vip->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(vip->vi_state), vstate_name(from), func, line);
	if ((from == VS_BLOCKED || to == VS_BLOCKED) && vp->v_usecount != 1)
		vnpanic(vp, "%s to %s with usecount %d at %s:%d",
		    vstate_name(from), vstate_name(to), vp->v_usecount,
		    func, line);

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
	(VNODE_TO_VIMPL((vp))->vi_state)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
	vstate_wait_stable((vp))
void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{

}

static void
vstate_wait_stable(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */

void
vfs_vnode_sysinit(void)
{
	int error __diagused, i;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag |= IMNT_MPSAFE;

	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
	for (i = 0; i < LRU_COUNT; i++) {
		TAILQ_INIT(&lru_list[i]);
	}
	vcache_init();

	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vdrain_gen_cv, "vdrainwt");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, &vdrain_lwp, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	memset(vip, 0, sizeof(*vip));
	vp = VIMPL_TO_VNODE(vip);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	vip->vi_state = VS_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	vnode_impl_t *vip;

	vip = VNODE_TO_VIMPL(vp);
	KASSERT(vip->vi_state == VS_MARKER);
	uvm_obj_destroy(&vp->v_uobj, true);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
}

/*
 * Return the lru list this node should be on.
 */
static vnodelst_t *
lru_which(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt > 0)
		return &lru_list[LRU_HOLD];
	else
		return &lru_list[LRU_FREE];
}

/*
 * Put the vnode at the end of the given list.
 * Both the current and the new list may be NULL (used on vnode alloc/free).
 * Adjust numvnodes and signal the vdrain thread if there is work.
 */
static void
lru_requeue(vnode_t *vp, vnodelst_t *listhd)
{
	vnode_impl_t *vip;
	int d;

	/*
	 * If the vnode is on the correct list, and was put there recently,
	 * then leave it be, thus avoiding huge cache and lock contention.
	 */
	vip = VNODE_TO_VIMPL(vp);
	if (listhd == vip->vi_lrulisthd &&
	    (hardclock_ticks - vip->vi_lrulisttm) < hz) {
		return;
	}

	mutex_enter(&vdrain_lock);
	d = 0;
	if (vip->vi_lrulisthd != NULL)
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d++;
	vip->vi_lrulisthd = listhd;
	vip->vi_lrulisttm = hardclock_ticks;
	if (vip->vi_lrulisthd != NULL)
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d--;
	if (d != 0) {
		/*
		 * Looks strange?  This is not a bug.  Don't store
		 * numvnodes unless there is a change - avoid false
		 * sharing on MP.
		 */
		numvnodes += d;
	}
	if (numvnodes > desiredvnodes || listhd == &lru_list[LRU_VRELE])
		cv_broadcast(&vdrain_cv);
	mutex_exit(&vdrain_lock);
}

/*
 * Release deferred vrele vnodes for this mount.
 * Called with file system suspended.
 */
void
vrele_flush(struct mount *mp)
{
	vnode_impl_t *vip, *marker;
	vnode_t *vp;

	KASSERT(fstrans_is_owner(mp));

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);
	TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);

	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
		TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
		TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
		    vi_lrulist);
		vp = VIMPL_TO_VNODE(vip);
		if (vnis_marker(vp))
			continue;

		KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
		vip->vi_lrulisthd = &lru_list[LRU_HOLD];
		vip->vi_lrulisttm = hardclock_ticks;
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
		mutex_exit(&vdrain_lock);

		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		mutex_enter(vp->v_interlock);
		vrelel(vp, 0, LK_EXCLUSIVE);

		mutex_enter(&vdrain_lock);
	}

	TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
	mutex_exit(&vdrain_lock);

	vnfree_marker(VIMPL_TO_VNODE(marker));
}

/*
 * Reclaim a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_remove(vnode_t *vp)
{
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	/* Probe usecount (unlocked). */
	if (vp->v_usecount > 0)
		return;
	/* Try v_interlock -- we lock the wrong direction! */
	if (!mutex_tryenter(vp->v_interlock))
		return;
	/* Probe usecount and state. */
	if (vp->v_usecount > 0 || VSTATE_GET(vp) != VS_LOADED) {
		mutex_exit(vp->v_interlock);
		return;
	}
	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0) {
		mutex_exit(vp->v_interlock);
		return;
	}
	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	if (vcache_vget(vp) == 0) {
		if (!vrecycle(vp)) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			mutex_enter(vp->v_interlock);
			vrelel(vp, 0, LK_EXCLUSIVE);
		}
	}
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Release a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_vrele(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0)
		return;

	/*
	 * First remove the vnode from the vrele list.
	 * Put it on the last lru list; the last vrele()
	 * will put it back onto the right list before
	 * its v_usecount reaches zero.
	 */
	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	vip->vi_lrulisthd = &lru_list[LRU_HOLD];
	vip->vi_lrulisttm = hardclock_ticks;
	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);

	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, LK_EXCLUSIVE);
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes
 * and release vnodes from asynchronous vrele.
 */
static void
vdrain_thread(void *cookie)
{
	int i;
	u_int target;
	vnode_impl_t *vip, *marker;

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);

	for (;;) {
		vdrain_retry = false;
		target = desiredvnodes - desiredvnodes/10;

		for (i = 0; i < LRU_COUNT; i++) {
			TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
				TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
				TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
				    vi_lrulist);
				if (vnis_marker(VIMPL_TO_VNODE(vip)))
					continue;
				if (i == LRU_VRELE)
					vdrain_vrele(VIMPL_TO_VNODE(vip));
				else if (numvnodes < target)
					break;
				else
					vdrain_remove(VIMPL_TO_VNODE(vip));
			}
			TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
		}

		if (vdrain_retry) {
			mutex_exit(&vdrain_lock);
			yield();
			mutex_enter(&vdrain_lock);
		} else {
			vdrain_gen++;
			cv_broadcast(&vdrain_gen_cv);
			cv_wait(&vdrain_cv, &vdrain_lock);
		}
	}
}

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{
	int lktype;

	if ((vp->v_vflag & VV_LOCKSWORK) == 0) {
		lktype = LK_EXCLUSIVE;
	} else {
		lktype = VOP_ISLOCKED(vp);
		KASSERT(lktype != LK_NONE);
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, lktype);
}

/*
 * Vnode release.  If reference count drops to zero, call inactive
 * routine and either return to freelist or free to the pool.
 */
static void
vrelel(vnode_t *vp, int flags, int lktype)
{
	const bool async = ((flags & VRELEL_ASYNC) != 0);
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VS_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count
	 * and unlock.
	 */
	if (vp->v_usecount > 1) {
		if (lktype != LK_NONE) {
			VOP_UNLOCK(vp);
		}
		vp->v_usecount--;
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * First try to get the vnode locked for VOP_INACTIVE().
	 * Defer vnode release to vdrain_thread if the caller requests
	 * it explicitly, if we are the pagedaemon, or if the lock
	 * attempt failed.
	 */
	defer = false;
	if ((curlwp == uvm.pagedaemon_lwp) || async) {
		defer = true;
	} else if (lktype == LK_SHARED) {
		/* Excellent chance of getting the lock, if it's the last ref. */
		error = vn_lock(vp, LK_UPGRADE | LK_RETRY |
		    LK_NOWAIT);
		if (error != 0) {
			defer = true;
		} else {
			lktype = LK_EXCLUSIVE;
		}
	} else if (lktype == LK_NONE) {
		/* Excellent chance of getting the lock, if it's the last ref. */
		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY |
		    LK_NOWAIT);
		if (error != 0) {
			defer = true;
		} else {
			lktype = LK_EXCLUSIVE;
		}
	}
	KASSERT(mutex_owned(vp->v_interlock));
	if (defer) {
		/*
		 * Defer reclaim to the kthread; it's not safe to
		 * clean it here.  We donate it our last reference.
		 */
		if (lktype != LK_NONE) {
			VOP_UNLOCK(vp);
		}
		lru_requeue(vp, &lru_list[LRU_VRELE]);
		mutex_exit(vp->v_interlock);
		return;
	}
	KASSERT(lktype == LK_EXCLUSIVE);

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		VOP_UNLOCK(vp);
	} else {
		VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
		mutex_exit(vp->v_interlock);

		/*
		 * The vnode must not gain another reference while being
		 * deactivated.  If VOP_INACTIVE() indicates that
		 * the described file has been deleted, then recycle
		 * the vnode.
		 *
		 * Note that VOP_INACTIVE() will not drop the vnode lock.
		 */
		recycle = false;
		VOP_INACTIVE(vp, &recycle);
		if (!recycle)
			VOP_UNLOCK(vp);
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
		if (!recycle) {
			if (vp->v_usecount > 1) {
				vp->v_usecount--;
				mutex_exit(vp->v_interlock);
				return;
			}
		}

		/* Take care of space accounting. */
		if ((vp->v_iflag & VI_EXECMAP) != 0 &&
		    vp->v_uobj.uo_npages != 0) {
			cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
			cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			VSTATE_ASSERT(vp, VS_LOADED);
			/* vcache_reclaim drops the lock. */
			vcache_reclaim(vp);
		}
		KASSERT(vp->v_usecount > 0);
	}

	vp->v_usecount--;
	if (vp->v_usecount != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(vp->v_interlock);
		return;
	}

	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		vcache_free(VNODE_TO_VIMPL(vp));
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		lru_requeue(vp, lru_which(vp));
		mutex_exit(vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, LK_NONE);
}

/*
 * Asynchronous vnode release: the vnode is released in a different context.
 */
void
vrele_async(vnode_t *vp)
{

	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC, LK_NONE);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 */
void
vref(vnode_t *vp)
{

	KASSERT(vp->v_usecount != 0);

	mutex_enter(vp->v_interlock);
	vp->v_usecount++;
	mutex_exit(vp->v_interlock);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Recycle an unused vnode if caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{
	int error __diagused;

	mutex_enter(vp->v_interlock);

	/* Make sure we hold the last reference. */
	VSTATE_WAIT_STABLE(vp);
	if (vp->v_usecount != 1) {
		mutex_exit(vp->v_interlock);
		return false;
	}

	/* If the vnode is already clean we're done. */
	if (VSTATE_GET(vp) != VS_LOADED) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrelel(vp, 0, LK_NONE);
		return true;
	}

	/* Prevent further references until the vnode is locked. */
	VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
	mutex_exit(vp->v_interlock);

	/*
	 * On a leaf file system this lock will always succeed as we hold
	 * the last reference and prevent further references.
	 * On layered file systems waiting for the lock would open a can of
	 * deadlocks as the lower vnodes may have other active references.
	 */
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);

	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);

	if (error) {
		mutex_exit(vp->v_interlock);
		return false;
	}

	KASSERT(vp->v_usecount == 1);
	vcache_reclaim(vp);
	vrelel(vp, 0, LK_NONE);

	return true;
}

/*
 * Helper for vrevoke() to propagate suspension from lastmp
 * to thismp.  Both args may be NULL.
 * Returns the currently suspended file system or NULL.
 */
static struct mount *
vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
{
	int error;

	if (lastmp == thismp)
		return thismp;

	if (lastmp != NULL)
		vfs_resume(lastmp);

	if (thismp == NULL)
		return NULL;

	do {
		error = vfs_suspend(thismp, 0);
	} while (error == EINTR || error == ERESTART);

	if (error == 0)
		return thismp;

	KASSERT(error == EOPNOTSUPP);
	return NULL;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	struct mount *mp;
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vp->v_usecount > 0);

	mp = vrevoke_suspend_next(NULL, vp->v_mount);

	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		mutex_exit(vp->v_interlock);
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		vp->v_usecount++;
		mutex_exit(vp->v_interlock);
		vgone(vp);
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);

		while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
			mp = vrevoke_suspend_next(mp, vq->v_mount);
			vgone(vq);
		}
	}
	vrevoke_suspend_next(mp, NULL);
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{
	int lktype;

	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	lktype = LK_EXCLUSIVE;
	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_LOADED) {
		vcache_reclaim(vp);
		lktype = LK_NONE;
	}
	VSTATE_ASSERT(vp, VS_RECLAIMED);
	vrelel(vp, 0, lktype);
}

static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
	uint32_t hash = HASH32_BUF_INIT;

	KASSERT(key->vk_key_len > 0);

	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
	return hash;
}
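
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * a file system would typically build a vcache key from its mount point
 * and an inode number, which vcache_hash() then folds into a single hash
 * selecting a vcache_hashtab chain.
 *
 *	struct vcache_key key;
 *	ino_t ino = 42;		// hypothetical file-system-specific key
 *	uint32_t hash;
 *
 *	key.vk_mount = mp;
 *	key.vk_key = &ino;
 *	key.vk_key_len = sizeof(ino);
 *	hash = vcache_hash(&key);
 */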

static void
vcache_init(void)
{

	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), 0, 0, 0,
	    "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vcache_pool != NULL);
	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vcache_cv, "vcache");
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
	    &vcache_hashmask);
}

static void
vcache_reinit(void)
{
	int i;
	uint32_t hash;
	u_long oldmask, newmask;
	struct hashhead *oldtab, *newtab;
	vnode_impl_t *vip;

	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
	mutex_enter(&vcache_lock);
	oldtab = vcache_hashtab;
	oldmask = vcache_hashmask;
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = newtab;
	vcache_hashmask = newmask;
	for (i = 0; i <= oldmask; i++) {
		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
			hash = vcache_hash(&vip->vi_key);
			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
			    vip, vi_hash);
		}
	}
	mutex_exit(&vcache_lock);
	hashdone(oldtab, HASH_SLIST, oldmask);
}

static inline vnode_impl_t *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
	struct hashhead *hashp;
	vnode_impl_t *vip;

	KASSERT(mutex_owned(&vcache_lock));

	hashp = &vcache_hashtab[hash & vcache_hashmask];
	SLIST_FOREACH(vip, hashp, vi_hash) {
		if (key->vk_mount != vip->vi_key.vk_mount)
			continue;
		if (key->vk_key_len != vip->vi_key.vk_key_len)
			continue;
		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
			continue;
		return vip;
	}
	return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static vnode_impl_t *
vcache_alloc(void)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	memset(vip, 0, sizeof(*vip));

	vip->vi_lock = rw_obj_alloc();
	/* SLIST_INIT(&vip->vi_hash); */
	TAILQ_INIT(&vip->vi_nclist);
	/* LIST_INIT(&vip->vi_dnclist); */

	vp = VIMPL_TO_VNODE(vip);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	cv_init(&vp->v_cv, "vnode");

	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	vip->vi_state = VS_LOADING;

	lru_requeue(vp, &lru_list[LRU_FREE]);

	return vip;
}

/*
 * Deallocate a vcache node in state VS_LOADING.
 *
 * vcache_lock held on entry and released on return.
 */
static void
vcache_dealloc(vnode_impl_t *vip)
{
	vnode_t *vp;

	KASSERT(mutex_owned(&vcache_lock));

	vp = VIMPL_TO_VNODE(vip);
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
	mutex_exit(&vcache_lock);
	vrelel(vp, 0, LK_NONE);
}

/*
 * Free an unused, unreferenced vcache node.
 * v_interlock locked on entry.
 */
static void
vcache_free(vnode_impl_t *vip)
{
	vnode_t *vp;

	vp = VIMPL_TO_VNODE(vip);
	KASSERT(mutex_owned(vp->v_interlock));

	KASSERT(vp->v_usecount == 0);
	KASSERT(vp->v_holdcnt == 0);
	KASSERT(vp->v_writecount == 0);
	lru_requeue(vp, NULL);
	mutex_exit(vp->v_interlock);

	vfs_insmntque(vp, NULL);
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		spec_node_destroy(vp);

	rw_obj_free(vip->vi_lock);
	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success, ENOENT if the vnode has been reclaimed and
 * EBUSY if the vnode state is unstable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_tryvget(vnode_t *vp)
{
	int error = 0;

	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED))
		error = ENOENT;
	else if (__predict_false(VSTATE_GET(vp) != VS_LOADED))
		error = EBUSY;
	else
		vp->v_usecount++;

	mutex_exit(vp->v_interlock);

	return error;
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success and ENOENT if the vnode has been reclaimed.
 * Will wait for the vnode state to be stable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_vget(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	/* Increment hold count to prevent vnode from disappearing. */
	vp->v_holdcnt++;
	VSTATE_WAIT_STABLE(vp);
	vp->v_holdcnt--;

	/* If this was the last reference to a reclaimed vnode free it now. */
	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
		if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
			vcache_free(VNODE_TO_VIMPL(vp));
		else
			mutex_exit(vp->v_interlock);
		return ENOENT;
	}
	VSTATE_ASSERT(vp, VS_LOADED);
	vp->v_usecount++;
	mutex_exit(vp->v_interlock);

	return 0;
}
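
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * a caller that prefers not to sleep can try the non-blocking
 * vcache_tryvget() first and fall back to the sleeping vcache_vget()
 * only when the state is unstable.
 *
 *	int error;
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vcache_tryvget(vp);		// releases v_interlock
 *	if (error == EBUSY) {
 *		// State was changing; wait for it to settle instead.
 *		mutex_enter(vp->v_interlock);
 *		error = vcache_vget(vp);	// releases v_interlock
 *	}
 *	if (error == 0) {
 *		... use the referenced vnode ...
 *		vrele(vp);
 *	}
 */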

/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
	int error;
	uint32_t hash;
	const void *new_key;
	struct vnode *vp;
	struct vcache_key vcache_key;
	vnode_impl_t *vip, *new_vip;

	new_key = NULL;
	*vpp = NULL;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

again:
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);

	/* If found, take a reference or retry. */
	if (__predict_true(vip != NULL)) {
		/*
		 * If the vnode is loading we cannot take the v_interlock
		 * here as it might change during load (see uvm_obj_setlock()).
		 * As changing state from VS_LOADING requires both vcache_lock
		 * and v_interlock it is safe to test with vcache_lock held.
		 *
		 * Wait for vnodes changing state from VS_LOADING and retry.
		 */
		if (__predict_false(vip->vi_state == VS_LOADING)) {
			cv_wait(&vcache_cv, &vcache_lock);
			mutex_exit(&vcache_lock);
			goto again;
		}
		vp = VIMPL_TO_VNODE(vip);
		mutex_enter(vp->v_interlock);
		mutex_exit(&vcache_lock);
		error = vcache_vget(vp);
		if (error == ENOENT)
			goto again;
		if (error == 0)
			*vpp = vp;
		KASSERT((error != 0) == (*vpp == NULL));
		return error;
	}
	mutex_exit(&vcache_lock);

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp);
	if (error)
		return error;
	new_vip = vcache_alloc();
	new_vip->vi_key = vcache_key;
	vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);
	if (vip == NULL) {
		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vi_hash);
		vip = new_vip;
	}

	/* If another thread beat us inserting this node, retry. */
	if (vip != new_vip) {
		vcache_dealloc(new_vip);
		vfs_unbusy(mp);
		goto again;
	}
	mutex_exit(&vcache_lock);

	/* Load the fs node.  Exclusive as new_vip is VS_LOADING. */
	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
	if (error) {
		mutex_enter(&vcache_lock);
		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vnode_impl, vi_hash);
		vcache_dealloc(new_vip);
		vfs_unbusy(mp);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_key != NULL);
	KASSERT(memcmp(key, new_key, key_len) == 0);
	KASSERT(vp->v_op != NULL);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_ref(mp);
	vfs_unbusy(mp);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	new_vip->vi_key.vk_key = new_key;
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache_lock);
	*vpp = vp;
	return 0;
}

/*
 * Create a new vnode / fs node pair and return it referenced through vpp.
 */
int
vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
    kauth_cred_t cred, void *extra, struct vnode **vpp)
{
	int error;
	uint32_t hash;
	struct vnode *vp, *ovp;
	vnode_impl_t *vip, *ovip;

	*vpp = NULL;

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp);
	if (error)
		return error;
	vip = vcache_alloc();
	vip->vi_key.vk_mount = mp;
	vp = VIMPL_TO_VNODE(vip);

	/* Create and load the fs node. */
	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
	if (error) {
		mutex_enter(&vcache_lock);
		vcache_dealloc(vip);
		vfs_unbusy(mp);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(vp->v_op != NULL);
	KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
	if (vip->vi_key.vk_key_len > 0) {
		KASSERT(vip->vi_key.vk_key != NULL);
		hash = vcache_hash(&vip->vi_key);

		/*
		 * Wait for previous instance to be reclaimed,
		 * then insert new node.
		 */
		mutex_enter(&vcache_lock);
		while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
			ovp = VIMPL_TO_VNODE(ovip);
			mutex_enter(ovp->v_interlock);
			mutex_exit(&vcache_lock);
			error = vcache_vget(ovp);
			KASSERT(error == ENOENT);
			mutex_enter(&vcache_lock);
		}
		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
		    vip, vi_hash);
		mutex_exit(&vcache_lock);
	}
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_ref(mp);
	vfs_unbusy(mp);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
	mutex_exit(&vcache_lock);
	mutex_exit(vp->v_interlock);
	*vpp = vp;
	return 0;
}

/*
 * Prepare key change: update the old cache node's key and lock the new
 * cache node.  Return an error if the new node already exists.
 */
int
vcache_rekey_enter(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *vip, *new_vip;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	new_vip = vcache_alloc();
	new_vip->vi_key = new_vcache_key;

	/* Insert locked new node used as placeholder. */
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
	if (vip != NULL) {
		vcache_dealloc(new_vip);
		return EEXIST;
	}
	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_vip, vi_hash);

	/* Replace the old node's key with the temporary copy. */
	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(vip != NULL);
	KASSERT(VIMPL_TO_VNODE(vip) == vp);
	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
	vip->vi_key = old_vcache_key;
	mutex_exit(&vcache_lock);
	return 0;
}

/*
 * Key change complete: update old node and remove placeholder.
 */
void
vcache_rekey_exit(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *vip, *new_vip;
	struct vnode *new_vp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	mutex_enter(&vcache_lock);

	/* Lookup old and new node. */
	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(vip != NULL);
	KASSERT(VIMPL_TO_VNODE(vip) == vp);

	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
	KASSERT(new_vip != NULL);
	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
	new_vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(new_vp->v_interlock);
	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
	mutex_exit(new_vp->v_interlock);

	/* Rekey old node and put it onto its new hashlist. */
	vip->vi_key = new_vcache_key;
	if (old_hash != new_hash) {
		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
		    vip, vnode_impl, vi_hash);
		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
		    vip, vi_hash);
	}

	/* Remove new node used as placeholder. */
	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_vip, vnode_impl, vi_hash);
	vcache_dealloc(new_vip);
}

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with vnode locked and will return unlocked.
 * Must be called with the interlock held, and will return with it held.
 */
static void
vcache_reclaim(vnode_t *vp)
{
	lwp_t *l = curlwp;
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp = vp->v_mount;
	uint32_t hash;
	uint8_t temp_buf[64], *temp_key;
	size_t temp_key_len;
	bool recycle, active;
	int error;

	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount != 0);

	active = (vp->v_usecount > 1);
	temp_key_len = vip->vi_key.vk_key_len;
	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	VSTATE_CHANGE(vp, VS_LOADED, VS_RECLAIMING);
	if ((vp->v_iflag & VI_EXECMAP) != 0 && vp->v_uobj.uo_npages != 0) {
		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
		cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	mutex_exit(vp->v_interlock);

	/* Replace the vnode key with a temporary copy. */
	if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
	} else {
		temp_key = temp_buf;
	}
	if (vip->vi_key.vk_key_len > 0) {
		mutex_enter(&vcache_lock);
		memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
		vip->vi_key.vk_key = temp_key;
		mutex_exit(&vcache_lock);
	}

	fstrans_start(mp);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.
	 */
	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
	if (error != 0) {
		if (wapbl_vphaswapbl(vp))
			WAPBL_DISCARD(wapbl_vptomp(vp));
		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
	}
	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
		spec_node_revoke(vp);
	}

	/*
	 * Disassociate the underlying file system from the vnode.
	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
	 * would no longer function.
	 */
	VOP_INACTIVE(vp, &recycle);
	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	KASSERT(vp->v_data == NULL);
	KASSERT(vp->v_uobj.uo_npages == 0);

	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}

	/* Purge name cache. */
	cache_purge(vp);

	if (vip->vi_key.vk_key_len > 0) {
		/* Remove from vnode cache. */
		hash = vcache_hash(&vip->vi_key);
		mutex_enter(&vcache_lock);
		KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
		    vip, vnode_impl, vi_hash);
		mutex_exit(&vcache_lock);
	}
	if (temp_key != temp_buf)
		kmem_free(temp_key, temp_key_len);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_vflag |= VV_LOCKSWORK;
	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
	vp->v_tag = VT_NON;
	KNOTE(&vp->v_klist, NOTE_REVOKE);
	mutex_exit(vp->v_interlock);

	/*
	 * Move to dead mount.  Must be after changing the operations
	 * vector as vnode operations enter the mount before using the
	 * operations vector.  See sys/kern/vnode_if.c.
	 */
	vp->v_vflag &= ~VV_ROOT;
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);

	mutex_enter(vp->v_interlock);
	fstrans_done(mp);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);

#ifdef PAX_SEGVGUARD
	pax_segvguard_cleanup(vp);
#endif /* PAX_SEGVGUARD */
}

/*
 * Disassociate the underlying file system from an open device vnode
 * and make it anonymous.
 *
 * Vnode unlocked on entry, drops a reference to the vnode.
 */
void
vcache_make_anon(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	uint32_t hash;
	bool recycle;

	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
	VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);

	/* Remove from vnode cache. */
	hash = vcache_hash(&vip->vi_key);
	mutex_enter(&vcache_lock);
	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
	    vip, vnode_impl, vi_hash);
	vip->vi_key.vk_mount = dead_rootmount;
	vip->vi_key.vk_key_len = 0;
	vip->vi_key.vk_key = NULL;
	mutex_exit(&vcache_lock);

	/*
	 * Disassociate the underlying file system from the vnode.
	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
	 * would no longer function.
	 */
	if (vn_lock(vp, LK_EXCLUSIVE)) {
		vnpanic(vp, "%s: cannot lock", __func__);
	}
	VOP_INACTIVE(vp, &recycle);
	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	/* Purge name cache. */
	cache_purge(vp);

	/* Done with purge, change operations vector. */
	mutex_enter(vp->v_interlock);
	vp->v_op = spec_vnodeop_p;
	vp->v_vflag |= VV_MPSAFE;
	vp->v_vflag &= ~VV_LOCKSWORK;
	mutex_exit(vp->v_interlock);

	/*
	 * Move to dead mount.  Must be after changing the operations
	 * vector as vnode operations enter the mount before using the
	 * operations vector.  See sys/kern/vnode_if.c.
	 */
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);

	vrele(vp);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(struct buf *bp)
{
	vnode_t *vp;

	if ((vp = bp->b_vp) == NULL)
		return;

	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));

	if (--vp->v_numoutput < 0)
		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
	if (vp->v_numoutput == 0)
		cv_broadcast(&vp->v_cv);
}

/*
 * Test a vnode for being or becoming dead.  Returns one of:
 * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
 * ENOENT: vnode is dead.
 * 0:      otherwise.
 *
 * Whenever this function returns a non-zero value all future
 * calls will also return a non-zero value.
 */
int
vdead_check(struct vnode *vp, int flags)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (! ISSET(flags, VDEAD_NOWAIT))
		VSTATE_WAIT_STABLE(vp);

	if (VSTATE_GET(vp) == VS_RECLAIMING) {
		KASSERT(ISSET(flags, VDEAD_NOWAIT));
		return EBUSY;
	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
		return ENOENT;
	}

	return 0;
}
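
/*
 * Illustrative sketch (an assumption, not part of the original file):
 * a caller that holds v_interlock and must not sleep would probe the
 * vnode with VDEAD_NOWAIT before using it, e.g. in a revoke-aware path.
 *
 *	mutex_enter(vp->v_interlock);
 *	switch (vdead_check(vp, VDEAD_NOWAIT)) {
 *	case 0:
 *		... vnode is alive, safe to proceed ...
 *		break;
 *	case EBUSY:	// being reclaimed right now
 *	case ENOENT:	// already dead
 *	default:
 *		... back out; the vnode will never come back ...
 *		break;
 *	}
 *	mutex_exit(vp->v_interlock);
 */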

int
vfs_drainvnodes(void)
{
	int i, gen;

	mutex_enter(&vdrain_lock);
	for (i = 0; i < 2; i++) {
		gen = vdrain_gen;
		while (gen == vdrain_gen) {
			cv_broadcast(&vdrain_cv);
			cv_wait(&vdrain_gen_cv, &vdrain_lock);
		}
	}
	mutex_exit(&vdrain_lock);

	if (numvnodes >= desiredvnodes)
		return EBUSY;

	if (vcache_hashsize != desiredvnodes)
		vcache_reinit();

	return 0;
}

void
vnpanic(vnode_t *vp, const char *fmt, ...)
{
	va_list ap;

#ifdef DIAGNOSTIC
	vprint(NULL, vp);
#endif
	va_start(ap, fmt);
	vpanic(fmt, ap);
	va_end(ap);
}
