/*	$NetBSD: vfs_vnode.c,v 1.53.2.1 2016/11/04 14:49:17 pgoyette Exp $	*/

/*-
 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was another, traditional way.  Currently, only the draining thread
 *	recycles vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
 *	the file system that the vnode is inactive.  Via this call, the file
 *	system indicates whether the vnode can be recycled (usually, it
 *	checks its own references, e.g. count of links, whether the file
 *	was removed).
 *
 *	Depending on that indication, the vnode can be put onto a free list
 *	(cache), or cleaned via vclean(9), which calls VOP_RECLAIM(9) to
 *	disassociate the underlying file system from the vnode, and finally
 *	destroyed.
 *
 * Vnode state
 *
 *	A vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating underlying file system and not
 *			yet ready to use.
 *	- ACTIVE	Vnode has associated underlying file system and is
 *			ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from underlying file system
 *			and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> ACTIVE
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	ACTIVE -> RECLAIMING
 *			Vnode starts disassociation from underlying file
 *			system in vclean().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from underlying file
 *			system in vclean().
 *	ACTIVE -> BLOCKED
 *			Either vcache_rekey*() is changing the vnode key or
 *			vrelel() is about to call VOP_INACTIVE().
 *	BLOCKED -> ACTIVE
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system or vcache_rekey*()
 *			drops a vnode used as placeholder.
 *
 *	Of these states LOADING, BLOCKED and RECLAIMING are intermediate
 *	and it is possible to wait for a state change.
 *
 *	State is protected with v_interlock with one exception:
 *	to change from LOADING both v_interlock and vcache.lock must be
 *	held, so it is possible to check "state == LOADING" without holding
 *	v_interlock.  See vcache_get() for details.
 *
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained using
 *	the vref(9) and vrele(9) routines, as well as vput(9).  Common
 *	points holding references are e.g. open files, the current working
 *	directory, mount points, etc.
 *
 * Note on v_usecount and its locking
 *
 *	At nearly all points where it is known that v_usecount could be
 *	zero, vnode_t::v_interlock will be held.  To change v_usecount away
 *	from zero, the interlock must be held.  To change it from a non-zero
 *	value to zero, again the interlock must be held.
 *
 *	Changing the usecount from one non-zero value to another non-zero
 *	value can safely be done using atomic operations, without the
 *	interlock held.
 */
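
/*
 * Illustrative example (a sketch only, not part of the build): a file
 * system that keys its vnodes by inode number would typically obtain
 * a referenced vnode from the cache and later drop that reference:
 *
 *	struct vnode *vp;
 *	ino_t ino;			(fs-specific cache key)
 *
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *	if (error)
 *		return error;
 *	...				(use vp; vref(vp) for extra refs)
 *	vrele(vp);			(last release -> VOP_INACTIVE(9))
 */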

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.53.2.1 2016/11/04 14:49:17 pgoyette Exp $");

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode_impl.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC_RELE	0x0001	/* Always defer to vrele thread. */

u_int			numvnodes		__cacheline_aligned;

/*
 * There are two free lists: one is for vnodes which have no buffer/page
 * references and one for those which do (i.e. v_holdcnt is non-zero).
 * The vnode recycling mechanism first attempts to look into the former list.
 */
static kmutex_t		vnode_free_list_lock	__cacheline_aligned;
static vnodelst_t	vnode_free_list		__cacheline_aligned;
static vnodelst_t	vnode_hold_list		__cacheline_aligned;
static kcondvar_t	vdrain_cv		__cacheline_aligned;

static vnodelst_t	vrele_list		__cacheline_aligned;
static kmutex_t		vrele_lock		__cacheline_aligned;
static kcondvar_t	vrele_cv		__cacheline_aligned;
static lwp_t *		vrele_lwp		__cacheline_aligned;
static int		vrele_pending		__cacheline_aligned;
static int		vrele_gen		__cacheline_aligned;

SLIST_HEAD(hashhead, vnode_impl);
static struct {
	kmutex_t	lock;
	kcondvar_t	cv;
	u_long		hashmask;
	struct hashhead	*hashtab;
	pool_cache_t	pool;
}			vcache			__cacheline_aligned;

static int		cleanvnode(void);
static vnode_impl_t *	vcache_alloc(void);
static void		vcache_free(vnode_impl_t *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vclean(vnode_t *);
static void		vrelel(vnode_t *, int);
static void		vdrain_thread(void *);
static void		vrele_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount	*dead_rootmount;
extern int		(**dead_vnodeop_p)(void *);
extern struct vfsops	dead_vfsops;

/* Vnode state operations and diagnostics. */

#if defined(DIAGNOSTIC)

/* Return the name of a vnode state for diagnostic messages. */
static const char *
vstate_name(enum vnode_state state)
{

	switch (state) {
	case VS_MARKER:
		return "MARKER";
	case VS_LOADING:
		return "LOADING";
	case VS_ACTIVE:
		return "ACTIVE";
	case VS_BLOCKED:
		return "BLOCKED";
	case VS_RECLAIMING:
		return "RECLAIMING";
	case VS_RECLAIMED:
		return "RECLAIMED";
	default:
		return "ILLEGAL";
	}
}

#define VSTATE_GET(vp) \
	vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
	vstate_assert_wait_stable((vp), __func__, __LINE__)
#define VSTATE_ASSERT(vp, state) \
	vstate_assert((vp), (state), __func__, __LINE__)

static void
vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if (__predict_true(node->vi_state == state))
		return;
	vnpanic(vp, "state is %s, expected %s at %s:%d",
	    vstate_name(node->vi_state), vstate_name(state), func, line);
}

static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);

	return node->vi_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);

	while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);
}

static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache.lock), "at %s:%d", func, line);

	if (from == VS_MARKER)
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (to == VS_MARKER)
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (node->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d",
		    vstate_name(node->vi_state), vstate_name(from), func, line);

	node->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache.cv);
	if (to == VS_ACTIVE || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
	(VNODE_TO_VIMPL((vp))->vi_state)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
	vstate_wait_stable((vp))
#define VSTATE_ASSERT(vp, state)

static void
vstate_wait_stable(vnode_t *vp)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	node->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache.cv);
	if (to == VS_ACTIVE || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */
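
/*
 * Illustrative example (a sketch only): callers inspect or change the
 * state with the interlock held, waiting out intermediate states first:
 *
 *	mutex_enter(vp->v_interlock);
 *	VSTATE_WAIT_STABLE(vp);
 *	if (VSTATE_GET(vp) == VS_RECLAIMED)
 *		...			(the vnode is dead)
 *	mutex_exit(vp->v_interlock);
 */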

void
vfs_vnode_sysinit(void)
{
	int error __diagused;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag = IMNT_MPSAFE;

	mutex_init(&vnode_free_list_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vrele_list);

	vcache_init();

	mutex_init(&vrele_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vrele_cv, "vrele");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, NULL, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vrele_thread,
	    NULL, &vrele_lwp, "vrele");
	KASSERTMSG((error == 0), "kthread_create(vrele) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	vnode_impl_t *node;
	vnode_t *vp;

	node = pool_cache_get(vcache.pool, PR_WAITOK);
	memset(node, 0, sizeof(*node));
	vp = VIMPL_TO_VNODE(node);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	node->vi_state = VS_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	vnode_impl_t *node;

	node = VNODE_TO_VIMPL(vp);
	KASSERT(node->vi_state == VS_MARKER);
	uvm_obj_destroy(&vp->v_uobj, true);
	pool_cache_put(vcache.pool, node);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
}
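
/*
 * Illustrative example (a sketch only, traversal details hypothetical):
 * marker vnodes keep an iterator's place in a vnode list while locks
 * are dropped; other traversals must skip them:
 *
 *	vnode_t *mvp = vnalloc_marker(mp);
 *	TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
 *		if (vnis_marker(vp))
 *			continue;
 *		...	(may insert mvp after vp, drop locks, resume)
 *	}
 *	vnfree_marker(mvp);
 */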

/*
 * cleanvnode: grab a vnode from freelist, clean and free it.
 *
 * => Releases vnode_free_list_lock.
 */
static int
cleanvnode(void)
{
	vnode_t *vp;
	vnodelst_t *listhd;
	struct mount *mp;

	KASSERT(mutex_owned(&vnode_free_list_lock));

	listhd = &vnode_free_list;
try_nextlist:
	TAILQ_FOREACH(vp, listhd, v_freelist) {
		/*
		 * It's safe to test v_usecount and v_iflag
		 * without holding the interlock here, since
		 * these vnodes should never appear on the
		 * lists.
		 */
		KASSERT(vp->v_usecount == 0);
		KASSERT(vp->v_freelisthd == listhd);

		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
			continue;
		if (!mutex_tryenter(vp->v_interlock)) {
			VOP_UNLOCK(vp);
			continue;
		}
		mp = vp->v_mount;
		if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
			mutex_exit(vp->v_interlock);
			VOP_UNLOCK(vp);
			continue;
		}
		break;
	}

	if (vp == NULL) {
		if (listhd == &vnode_free_list) {
			listhd = &vnode_hold_list;
			goto try_nextlist;
		}
		mutex_exit(&vnode_free_list_lock);
		return EBUSY;
	}

	/* Remove it from the freelist. */
	TAILQ_REMOVE(listhd, vp, v_freelist);
	vp->v_freelisthd = NULL;
	mutex_exit(&vnode_free_list_lock);

	KASSERT(vp->v_usecount == 0);

	/*
	 * The vnode is still associated with a file system, so we must
	 * clean it out before freeing it.  We need to add a reference
	 * before doing this.
	 */
	vp->v_usecount = 1;
	vclean(vp);
	vrelel(vp, 0);
	fstrans_done(mp);

	return 0;
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes.
 */
static void
vdrain_thread(void *cookie)
{
	int error;

	mutex_enter(&vnode_free_list_lock);

	for (;;) {
		cv_timedwait(&vdrain_cv, &vnode_free_list_lock, hz);
		while (numvnodes > desiredvnodes) {
			error = cleanvnode();
			if (error)
				kpause("vndsbusy", false, hz, NULL);
			mutex_enter(&vnode_free_list_lock);
			if (error)
				break;
		}
	}
}

/*
 * Remove a vnode from its freelist.
 */
void
vremfree(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount == 0);

	/*
	 * Note that the reference count must not change until
	 * the vnode is removed.
	 */
	mutex_enter(&vnode_free_list_lock);
	if (vp->v_holdcnt > 0) {
		KASSERT(vp->v_freelisthd == &vnode_hold_list);
	} else {
		KASSERT(vp->v_freelisthd == &vnode_free_list);
	}
	TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
	vp->v_freelisthd = NULL;
	mutex_exit(&vnode_free_list_lock);
}

/*
 * vget: get a particular vnode from the free list, increment its reference
 * count and return it.
 *
 * => Must be called with v_interlock held.
 *
 * If state is VS_RECLAIMING, the vnode may be eliminated in vclean().
 * In that case, we cannot grab the vnode, so the process is awakened when
 * the transition is completed, and an error returned to indicate that the
 * vnode is no longer usable.
 *
 * If state is VS_LOADING or VS_BLOCKED, wait until the vnode enters a
 * stable state (VS_ACTIVE or VS_RECLAIMED).
 */
int
vget(vnode_t *vp, int flags, bool waitok)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((flags & ~LK_NOWAIT) == 0);
	KASSERT(waitok == ((flags & LK_NOWAIT) == 0));

	/*
	 * Before adding a reference, we must remove the vnode
	 * from its freelist.
	 */
	if (vp->v_usecount == 0) {
		vremfree(vp);
		vp->v_usecount = 1;
	} else {
		atomic_inc_uint(&vp->v_usecount);
	}

	/*
	 * If the vnode is in the process of changing state we wait
	 * for the change to complete and take care not to return
	 * a clean vnode.
	 */
	if (! ISSET(flags, LK_NOWAIT))
		VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		vrelel(vp, 0);
		return ENOENT;
	} else if (VSTATE_GET(vp) != VS_ACTIVE) {
		KASSERT(ISSET(flags, LK_NOWAIT));
		vrelel(vp, 0);
		return EBUSY;
	}

	/*
	 * Ok, we got it in good shape.
	 */
	VSTATE_ASSERT(vp, VS_ACTIVE);
	mutex_exit(vp->v_interlock);

	return 0;
}
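
/*
 * Illustrative example (a sketch only): vget() consumes the interlock
 * held by the caller and, on success, returns with a reference added:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vget(vp, 0, true);
 *	if (error != 0)
 *		return error;	(vnode was reclaimed under us)
 *	...			(use vp, then vrele(vp))
 */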

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Try to drop a reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static inline bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = vp->v_usecount;; use = next) {
		if (use == 1) {
			return false;
		}
		KASSERT(use > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * Vnode release.  If the reference count drops to zero, call the
 * inactive routine and either return the vnode to the freelist or
 * free it to the pool.
 */
static void
vrelel(vnode_t *vp, int flags)
{
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_freelisthd == NULL);

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VS_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count
	 * and unlock.
	 */
	if (vtryrele(vp)) {
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
	if (VSTATE_GET(vp) != VS_RECLAIMED) {
		recycle = false;

		/*
		 * XXX This ugly block can be largely eliminated if
		 * locking is pushed down into the file systems.
		 *
		 * Defer vnode release to vrele_thread if caller
		 * requests it explicitly or is the pagedaemon.
		 */
		if ((curlwp == uvm.pagedaemon_lwp) ||
		    (flags & VRELEL_ASYNC_RELE) != 0) {
			defer = true;
		} else if (curlwp == vrele_lwp) {
			/*
			 * We have to try harder.
			 */
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			KASSERTMSG((error == 0), "vn_lock failed: %d", error);
			mutex_enter(vp->v_interlock);
			defer = false;
		} else {
			/* If we can't acquire the lock, then defer. */
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp,
			    LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
			defer = (error != 0);
			mutex_enter(vp->v_interlock);
		}

		KASSERT(mutex_owned(vp->v_interlock));
		KASSERT(! (curlwp == vrele_lwp && defer));

		if (defer) {
			/*
			 * Defer reclaim to the kthread; it's not safe to
			 * clean it here.  We donate it our last reference.
			 */
			mutex_enter(&vrele_lock);
			TAILQ_INSERT_TAIL(&vrele_list, vp, v_freelist);
			if (++vrele_pending > (desiredvnodes >> 8))
				cv_signal(&vrele_cv);
			mutex_exit(&vrele_lock);
			mutex_exit(vp->v_interlock);
			return;
		}

		/*
		 * If the node got another reference while we
		 * released the interlock, don't try to inactivate it yet.
		 */
		if (__predict_false(vtryrele(vp))) {
			VOP_UNLOCK(vp);
			mutex_exit(vp->v_interlock);
			return;
		}
		VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
		mutex_exit(vp->v_interlock);

		/*
		 * The vnode must not gain another reference while being
		 * deactivated.  If VOP_INACTIVE() indicates that
		 * the described file has been deleted, then recycle
		 * the vnode.
		 *
		 * Note that VOP_INACTIVE() will drop the vnode lock.
		 */
		VOP_INACTIVE(vp, &recycle);
		if (recycle) {
			/* vclean() below will drop the lock. */
			if (vn_lock(vp, LK_EXCLUSIVE) != 0)
				recycle = false;
		}
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
		if (!recycle) {
			if (vtryrele(vp)) {
				mutex_exit(vp->v_interlock);
				return;
			}
		}

		/* Take care of space accounting. */
		if (vp->v_iflag & VI_EXECMAP) {
			atomic_add_int(&uvmexp.execpages,
			    -vp->v_uobj.uo_npages);
			atomic_add_int(&uvmexp.filepages,
			    vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			VSTATE_ASSERT(vp, VS_ACTIVE);
			vclean(vp);
		}
		KASSERT(vp->v_usecount > 0);
	}

	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(vp->v_interlock);
		return;
	}

	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		KASSERT(vp->v_holdcnt == 0);
		KASSERT(vp->v_writecount == 0);
		mutex_exit(vp->v_interlock);
		vfs_insmntque(vp, NULL);
		if (vp->v_type == VBLK || vp->v_type == VCHR) {
			spec_node_destroy(vp);
		}
		vcache_free(VNODE_TO_VIMPL(vp));
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		mutex_enter(&vnode_free_list_lock);
		if (vp->v_holdcnt > 0) {
			vp->v_freelisthd = &vnode_hold_list;
		} else {
			vp->v_freelisthd = &vnode_free_list;
		}
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
		mutex_exit(vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0);
}

/*
 * Asynchronous vnode release, vnode is released in different context.
 */
void
vrele_async(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC_RELE);
}

static void
vrele_thread(void *cookie)
{
	vnodelst_t skip_list;
	vnode_t *vp;
	struct mount *mp;

	TAILQ_INIT(&skip_list);

	mutex_enter(&vrele_lock);
	for (;;) {
		while (TAILQ_EMPTY(&vrele_list)) {
			vrele_gen++;
			cv_broadcast(&vrele_cv);
			cv_timedwait(&vrele_cv, &vrele_lock, hz);
			TAILQ_CONCAT(&vrele_list, &skip_list, v_freelist);
		}
		vp = TAILQ_FIRST(&vrele_list);
		mp = vp->v_mount;
		TAILQ_REMOVE(&vrele_list, vp, v_freelist);
		if (fstrans_start_nowait(mp, FSTRANS_LAZY) != 0) {
			TAILQ_INSERT_TAIL(&skip_list, vp, v_freelist);
			continue;
		}
		vrele_pending--;
		mutex_exit(&vrele_lock);

		/*
		 * If not the last reference, then ignore the vnode
		 * and look for more work.
		 */
		mutex_enter(vp->v_interlock);
		vrelel(vp, 0);
		fstrans_done(mp);
		mutex_enter(&vrele_lock);
	}
}

void
vrele_flush(void)
{
	int gen;

	mutex_enter(&vrele_lock);
	gen = vrele_gen;
	while (vrele_pending && gen == vrele_gen) {
		cv_broadcast(&vrele_cv);
		cv_wait(&vrele_cv, &vrele_lock);
	}
	mutex_exit(&vrele_lock);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 */
void
vref(vnode_t *vp)
{

	KASSERT(vp->v_usecount != 0);

	atomic_inc_uint(&vp->v_usecount);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0) {
		mutex_enter(&vnode_free_list_lock);
		KASSERT(vp->v_freelisthd == &vnode_free_list);
		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
		vp->v_freelisthd = &vnode_hold_list;
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
	}
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		mutex_enter(&vnode_free_list_lock);
		KASSERT(vp->v_freelisthd == &vnode_hold_list);
		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
		vp->v_freelisthd = &vnode_free_list;
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
	}
}

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with the vnode locked and will return unlocked.
 * Must be called with the interlock held, and will return with it held.
 */
static void
vclean(vnode_t *vp)
{
	lwp_t *l = curlwp;
	bool recycle, active;
	int error;

	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount != 0);

	active = (vp->v_usecount > 1);
	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	VSTATE_CHANGE(vp, VS_ACTIVE, VS_RECLAIMING);
	if (vp->v_iflag & VI_EXECMAP) {
		atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
		atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	mutex_exit(vp->v_interlock);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.  Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
	if (error != 0) {
		if (wapbl_vphaswapbl(vp))
			WAPBL_DISCARD(wapbl_vptomp(vp));
		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
	}
	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
		spec_node_revoke(vp);
	}
	if (active) {
		VOP_INACTIVE(vp, &recycle);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VS_RECLAIMED, then call the new lock operation.
		 */
		VOP_UNLOCK(vp);
	}

	/* Disassociate the underlying file system from the vnode. */
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	KASSERT(vp->v_data == NULL);
	KASSERT(vp->v_uobj.uo_npages == 0);

	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}

	/* Purge name cache. */
	cache_purge(vp);

	/* Move to dead mount. */
	vp->v_vflag &= ~VV_ROOT;
	atomic_inc_uint(&dead_rootmount->mnt_refcnt);
	vfs_insmntque(vp, dead_rootmount);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_vflag |= VV_LOCKSWORK;
	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
	vp->v_tag = VT_NON;
	KNOTE(&vp->v_klist, NOTE_REVOKE);

	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}

/*
 * Recycle an unused vnode if caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{

	if (vn_lock(vp, LK_EXCLUSIVE) != 0)
		return false;

	mutex_enter(vp->v_interlock);

	if (vp->v_usecount != 1) {
		mutex_exit(vp->v_interlock);
		VOP_UNLOCK(vp);
		return false;
	}
	vclean(vp);
	vrelel(vp, 0);
	return true;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vp->v_usecount > 0);

	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		mutex_exit(vp->v_interlock);
		return;
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		mutex_exit(vp->v_interlock);
		vgone(vp);
		return;
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);
	}

	while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
		vgone(vq);
	}
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{

	if (vn_lock(vp, LK_EXCLUSIVE) != 0) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrele(vp);
		return;
	}

	mutex_enter(vp->v_interlock);
	vclean(vp);
	vrelel(vp, 0);
}

static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
	uint32_t hash = HASH32_BUF_INIT;

	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
	return hash;
}

static void
vcache_init(void)
{

	vcache.pool = pool_cache_init(sizeof(vnode_impl_t), 0, 0, 0,
	    "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vcache.pool != NULL);
	mutex_init(&vcache.lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vcache.cv, "vcache");
	vcache.hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
	    &vcache.hashmask);
}

static void
vcache_reinit(void)
{
	int i;
	uint32_t hash;
	u_long oldmask, newmask;
	struct hashhead *oldtab, *newtab;
	vnode_impl_t *node;

	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
	mutex_enter(&vcache.lock);
	oldtab = vcache.hashtab;
	oldmask = vcache.hashmask;
	vcache.hashtab = newtab;
	vcache.hashmask = newmask;
	for (i = 0; i <= oldmask; i++) {
		while ((node = SLIST_FIRST(&oldtab[i])) != NULL) {
			SLIST_REMOVE(&oldtab[i], node, vnode_impl, vi_hash);
			hash = vcache_hash(&node->vi_key);
			SLIST_INSERT_HEAD(&newtab[hash & vcache.hashmask],
			    node, vi_hash);
		}
	}
	mutex_exit(&vcache.lock);
	hashdone(oldtab, HASH_SLIST, oldmask);
}

static inline vnode_impl_t *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
	struct hashhead *hashp;
	vnode_impl_t *node;

	KASSERT(mutex_owned(&vcache.lock));

	hashp = &vcache.hashtab[hash & vcache.hashmask];
	SLIST_FOREACH(node, hashp, vi_hash) {
		if (key->vk_mount != node->vi_key.vk_mount)
			continue;
		if (key->vk_key_len != node->vi_key.vk_key_len)
			continue;
		if (memcmp(key->vk_key, node->vi_key.vk_key, key->vk_key_len))
			continue;
		return node;
	}
	return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static vnode_impl_t *
vcache_alloc(void)
{
	vnode_impl_t *node;
	vnode_t *vp;

	node = pool_cache_get(vcache.pool, PR_WAITOK);
	memset(node, 0, sizeof(*node));

	/* SLIST_INIT(&node->vi_hash); */

	vp = VIMPL_TO_VNODE(node);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	cv_init(&vp->v_cv, "vnode");
	/* LIST_INIT(&vp->v_nclist); */
	/* LIST_INIT(&vp->v_dnclist); */

	mutex_enter(&vnode_free_list_lock);
	numvnodes++;
	if (numvnodes > desiredvnodes + desiredvnodes / 10)
		cv_signal(&vdrain_cv);
	mutex_exit(&vnode_free_list_lock);

	rw_init(&vp->v_lock);
	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	node->vi_state = VS_LOADING;

	return node;
}

/*
 * Free an unused, unreferenced vcache node.
 */
static void
vcache_free(vnode_impl_t *node)
{
	vnode_t *vp;

	vp = VIMPL_TO_VNODE(node);

	KASSERT(vp->v_usecount == 0);

	rw_destroy(&vp->v_lock);
	mutex_enter(&vnode_free_list_lock);
	numvnodes--;
	mutex_exit(&vnode_free_list_lock);

	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	pool_cache_put(vcache.pool, node);
}

/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
	int error;
	uint32_t hash;
	const void *new_key;
	struct vnode *vp;
	struct vcache_key vcache_key;
	vnode_impl_t *node, *new_node;

	new_key = NULL;
	*vpp = NULL;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

again:
	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&vcache_key, hash);

	/* If found, take a reference or retry. */
	if (__predict_true(node != NULL)) {
		/*
		 * If the vnode is loading we cannot take the v_interlock
		 * here as it might change during load (see uvm_obj_setlock()).
		 * As changing state from VS_LOADING requires both vcache.lock
		 * and v_interlock it is safe to test with vcache.lock held.
		 *
		 * Wait for vnodes changing state from VS_LOADING and retry.
		 */
		if (__predict_false(node->vi_state == VS_LOADING)) {
			cv_wait(&vcache.cv, &vcache.lock);
			mutex_exit(&vcache.lock);
			goto again;
		}
		vp = VIMPL_TO_VNODE(node);
		mutex_enter(vp->v_interlock);
		mutex_exit(&vcache.lock);
		error = vget(vp, 0, true /* wait */);
		if (error == ENOENT)
			goto again;
		if (error == 0)
			*vpp = vp;
		KASSERT((error != 0) == (*vpp == NULL));
		return error;
	}
	mutex_exit(&vcache.lock);

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp, NULL);
	if (error)
		return error;
	new_node = vcache_alloc();
	new_node->vi_key = vcache_key;
	vp = VIMPL_TO_VNODE(new_node);
	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&vcache_key, hash);
	if (node == NULL) {
		SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
		    new_node, vi_hash);
		node = new_node;
	}

	/* If another thread beat us inserting this node, retry. */
	if (node != new_node) {
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		goto again;
	}
	mutex_exit(&vcache.lock);

	/* Load the fs node.  Exclusive as new_node is VS_LOADING. */
	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
	if (error) {
		mutex_enter(&vcache.lock);
		SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
		    new_node, vnode_impl, vi_hash);
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_key != NULL);
	KASSERT(memcmp(key, new_key, key_len) == 0);
	KASSERT(vp->v_op != NULL);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_unbusy(mp, true, NULL);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache.lock);
	new_node->vi_key.vk_key = new_key;
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache.lock);
	*vpp = vp;
	return 0;
}
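
/*
 * Illustrative example (a sketch only, xxx_vget hypothetical): a file
 * system's VFS_VGET(9) implementation can be a thin wrapper around
 * vcache_get(), with the inode number as the key:
 *
 *	int
 *	xxx_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
 *	{
 *
 *		return vcache_get(mp, &ino, sizeof(ino), vpp);
 *	}
 *
 * VFS_LOADVNODE(9) then initialises the fs node for that key.
 */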

/*
 * Create a new vnode / fs node pair and return it referenced through vpp.
 */
int
vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
    kauth_cred_t cred, struct vnode **vpp)
{
	int error;
	uint32_t hash;
	struct vnode *ovp, *vp;
	vnode_impl_t *new_node;
	vnode_impl_t *old_node __diagused;

	*vpp = NULL;

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp, NULL);
	if (error)
		return error;
	new_node = vcache_alloc();
	new_node->vi_key.vk_mount = mp;
	vp = VIMPL_TO_VNODE(new_node);

	/* Create and load the fs node. */
	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred,
	    &new_node->vi_key.vk_key_len, &new_node->vi_key.vk_key);
	if (error) {
		mutex_enter(&vcache.lock);
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_node->vi_key.vk_key != NULL);
	KASSERT(vp->v_op != NULL);
	hash = vcache_hash(&new_node->vi_key);

	/* Wait for previous instance to be reclaimed, then insert new node. */
	mutex_enter(&vcache.lock);
	while ((old_node = vcache_hash_lookup(&new_node->vi_key, hash))) {
		ovp = VIMPL_TO_VNODE(old_node);
		mutex_enter(ovp->v_interlock);
		mutex_exit(&vcache.lock);
		error = vget(ovp, 0, true /* wait */);
		KASSERT(error == ENOENT);
		mutex_enter(&vcache.lock);
	}
	SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
	    new_node, vi_hash);
	mutex_exit(&vcache.lock);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_unbusy(mp, true, NULL);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache.lock);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
	mutex_exit(&vcache.lock);
	mutex_exit(vp->v_interlock);
	*vpp = vp;
	return 0;
}
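
/*
 * Illustrative example (a sketch only): a file system's VOP_CREATE(9)
 * path typically creates the on-disk node and obtains the vnode in
 * one step:
 *
 *	error = vcache_new(dvp->v_mount, dvp, vap, cnp->cn_cred, &vp);
 *
 * VFS_NEWVNODE(9) allocates the fs node and hands back the key that
 * vcache_get() will use to find it again later.
 */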

/*
 * Prepare key change: lock old and new cache node.
 * Return an error if the new node already exists.
 */
int
vcache_rekey_enter(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *node, *new_node;
	struct vnode *tvp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	new_node = vcache_alloc();
	new_node->vi_key = new_vcache_key;
	tvp = VIMPL_TO_VNODE(new_node);

	/* Insert locked new node used as placeholder. */
	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&new_vcache_key, new_hash);
	if (node != NULL) {
		mutex_enter(tvp->v_interlock);
		VSTATE_CHANGE(tvp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(tvp, 0);
		return EEXIST;
	}
	SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
	    new_node, vi_hash);

	/* Lock old node. */
	node = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(node != NULL);
	KASSERT(VIMPL_TO_VNODE(node) == vp);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
	node->vi_key = old_vcache_key;
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache.lock);
	return 0;
}

/*
 * Key change complete: remove old node and unlock new node.
 */
void
vcache_rekey_exit(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *old_node, *new_node;
	struct vnode *tvp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	mutex_enter(&vcache.lock);

	/* Lookup old and new node. */
	old_node = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(old_node != NULL);
	KASSERT(VIMPL_TO_VNODE(old_node) == vp);
	mutex_enter(vp->v_interlock);
	VSTATE_ASSERT(vp, VS_BLOCKED);

	new_node = vcache_hash_lookup(&new_vcache_key, new_hash);
	KASSERT(new_node != NULL);
	KASSERT(new_node->vi_key.vk_key_len == new_key_len);
	tvp = VIMPL_TO_VNODE(new_node);
	mutex_enter(tvp->v_interlock);
	VSTATE_ASSERT(VIMPL_TO_VNODE(new_node), VS_LOADING);

	/* Rekey old node and put it onto its new hashlist. */
	old_node->vi_key = new_vcache_key;
	if (old_hash != new_hash) {
		SLIST_REMOVE(&vcache.hashtab[old_hash & vcache.hashmask],
		    old_node, vnode_impl, vi_hash);
		SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
		    old_node, vi_hash);
	}
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
	mutex_exit(vp->v_interlock);

	/* Remove new node used as placeholder. */
	SLIST_REMOVE(&vcache.hashtab[new_hash & vcache.hashmask],
	    new_node, vnode_impl, vi_hash);
	VSTATE_CHANGE(tvp, VS_LOADING, VS_RECLAIMED);
	mutex_exit(&vcache.lock);
	vrelel(tvp, 0);
}
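
/*
 * Illustrative example (a sketch only, okey/nkey hypothetical): a file
 * system changing a vnode's cache key brackets the fs node update with
 * the rekey pair:
 *
 *	error = vcache_rekey_enter(mp, vp, &okey, sizeof(okey),
 *	    &nkey, sizeof(nkey));
 *	if (error == EEXIST)
 *		...			(target key already cached)
 *	...				(make the fs node answer to nkey)
 *	vcache_rekey_exit(mp, vp, &okey, sizeof(okey),
 *	    &nkey, sizeof(nkey));
 */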

/*
 * Remove a vnode / fs node pair from the cache.
 */
void
vcache_remove(struct mount *mp, const void *key, size_t key_len)
{
	uint32_t hash;
	struct vcache_key vcache_key;
	vnode_impl_t *node;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&vcache_key, hash);
	KASSERT(node != NULL);
	SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
	    node, vnode_impl, vi_hash);
	mutex_exit(&vcache.lock);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(struct buf *bp)
{
	vnode_t *vp;

	if ((vp = bp->b_vp) == NULL)
		return;

	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));

	if (--vp->v_numoutput < 0)
		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
	if (vp->v_numoutput == 0)
		cv_broadcast(&vp->v_cv);
}

/*
 * Test a vnode for being or becoming dead.  Returns one of:
 * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
 * ENOENT: vnode is dead.
 * 0:      otherwise.
 *
 * Whenever this function returns a non-zero value all future
 * calls will also return a non-zero value.
 */
int
vdead_check(struct vnode *vp, int flags)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (! ISSET(flags, VDEAD_NOWAIT))
		VSTATE_WAIT_STABLE(vp);

	if (VSTATE_GET(vp) == VS_RECLAIMING) {
		KASSERT(ISSET(flags, VDEAD_NOWAIT));
		return EBUSY;
	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
		return ENOENT;
	}

	return 0;
}
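
/*
 * Illustrative example (a sketch only): vnode operations that may race
 * with revocation check for a dying vnode before committing to work:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		return error;	(EBUSY: dying, ENOENT: dead)
 */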

int
vfs_drainvnodes(long target)
{
	int error;

	mutex_enter(&vnode_free_list_lock);

	while (numvnodes > target) {
		error = cleanvnode();
		if (error != 0)
			return error;
		mutex_enter(&vnode_free_list_lock);
	}

	mutex_exit(&vnode_free_list_lock);

	vcache_reinit();

	return 0;
}

static void
vnpanic(vnode_t *vp, const char *fmt, ...)
{
	va_list ap;

#ifdef DIAGNOSTIC
	vprint(NULL, vp);
#endif
	va_start(ap, fmt);
	vpanic(fmt, ap);
	va_end(ap);
}