/*	$NetBSD: vfs_vnode.c,v 1.69 2017/01/04 17:13:50 hannken Exp $	*/

/*-
 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */
/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vcache_vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was the traditional third way.  Currently, only the vdrain thread
 *	recycles vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to
 *	inform the file system that the vnode is inactive.  Via this call,
 *	the file system indicates whether the vnode can be recycled
 *	(usually, it checks its own references, e.g. the link count, to
 *	see whether the file was removed).
 *
 *	Depending on that indication, the vnode can be put onto a free
 *	list (cache), or cleaned via vcache_reclaim(), which calls
 *	VOP_RECLAIM(9) to disassociate the underlying file system from
 *	the vnode, and finally destroyed.
 *
 * Vnode state
 *
 *	A vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating with the underlying file
 *			system and is not yet ready to use.
 *	- ACTIVE	Vnode has an associated underlying file system
 *			and is ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from the underlying file
 *			system and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> ACTIVE
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	ACTIVE -> RECLAIMING
 *			Vnode starts disassociation from the underlying
 *			file system in vcache_reclaim().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from the underlying
 *			file system in vcache_reclaim().
 *	ACTIVE -> BLOCKED
 *			Either vcache_rekey*() is changing the vnode key or
 *			vrelel() is about to call VOP_INACTIVE().
 *	BLOCKED -> ACTIVE
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system, or
 *			vcache_rekey*() drops a vnode used as placeholder.
 *
 *	Of these states, LOADING, BLOCKED and RECLAIMING are intermediate,
 *	and it is possible to wait for a state change.
 *
 *	The state is protected with v_interlock, with one exception:
 *	changing away from LOADING requires both v_interlock and
 *	vcache_lock to be held, so it is possible to check
 *	"state == LOADING" while holding only vcache_lock.  See
 *	vcache_get() for details.
 *
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained using
 *	the vref(9), vrele(9) and vput(9) routines.  Common holders of
 *	references are, e.g., open files, current working directories and
 *	mount points.
 *
 * Note on v_usecount and its locking
 *
 *	At nearly all points where it is known that v_usecount could be
 *	zero, vnode_t::v_interlock will be held.  To change v_usecount
 *	away from zero, the interlock must be held.  To change from a
 *	non-zero value to zero, again the interlock must be held.
 *
 *	Changing the usecount from a non-zero value to a non-zero value
 *	can safely be done using atomic operations, without the interlock
 *	held.
 */
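
/*
 * Illustrative example (a sketch, not an interface of this file): a
 * typical consumer obtains a referenced vnode from the cache, locks it
 * for use and finally drops both the lock and the reference.  The key
 * variable and the error handling are hypothetical.
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = vcache_get(mp, &key, sizeof(key), &vp);
 *	if (error)
 *		return error;
 *	error = vn_lock(vp, LK_EXCLUSIVE);
 *	if (error) {
 *		vrele(vp);
 *		return error;
 *	}
 *	(use the locked, referenced vnode)
 *	vput(vp);
 *
 * vput(9) is VOP_UNLOCK(9) followed by vrele(9), as defined below.
 */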

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.69 2017/01/04 17:13:50 hannken Exp $");

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode_impl.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC_RELE	0x0001	/* Always defer to vrele thread. */

u_int			numvnodes		__cacheline_aligned;

/*
 * There are three lru lists: one holds vnodes waiting for async release,
 * one is for vnodes which have no buffer/page references and
 * one for those which do (i.e. v_holdcnt is non-zero).
 */
static vnodelst_t	lru_vrele_list		__cacheline_aligned;
static vnodelst_t	lru_free_list		__cacheline_aligned;
static vnodelst_t	lru_hold_list		__cacheline_aligned;
static kmutex_t		vdrain_lock		__cacheline_aligned;
static kcondvar_t	vdrain_cv		__cacheline_aligned;
static int		vdrain_gen;
static kcondvar_t	vdrain_gen_cv;
static bool		vdrain_retry;
static lwp_t *		vdrain_lwp;
SLIST_HEAD(hashhead, vnode_impl);
static kmutex_t		vcache_lock		__cacheline_aligned;
static kcondvar_t	vcache_cv		__cacheline_aligned;
static u_int		vcache_hashsize;
static u_long		vcache_hashmask;
static struct hashhead	*vcache_hashtab		__cacheline_aligned;
static pool_cache_t	vcache_pool;
static void		lru_requeue(vnode_t *, vnodelst_t *);
static vnodelst_t *	lru_which(vnode_t *);
static vnode_impl_t *	vcache_alloc(void);
static void		vcache_free(vnode_impl_t *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vcache_reclaim(vnode_t *);
static void		vrelel(vnode_t *, int);
static void		vdrain_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount	*dead_rootmount;
extern int		(**dead_vnodeop_p)(void *);
extern struct vfsops	dead_vfsops;

/* Vnode state operations and diagnostics. */

#if defined(DIAGNOSTIC)

#define VSTATE_GET(vp) \
	vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
	vstate_assert_wait_stable((vp), __func__, __LINE__)
#define VSTATE_ASSERT(vp, state) \
	vstate_assert((vp), (state), __func__, __LINE__)

static void
vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if (__predict_true(node->vi_state == state))
		return;
	vnpanic(vp, "state is %s, expected %s at %s:%d",
	    vstate_name(node->vi_state), vstate_name(state), func, line);
}

static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);

	return node->vi_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);

	while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);
}

static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);

	if (from == VS_MARKER)
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (to == VS_MARKER)
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (node->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(node->vi_state), vstate_name(from), func, line);
	if ((from == VS_BLOCKED || to == VS_BLOCKED) && vp->v_usecount != 1)
		vnpanic(vp, "%s to %s with usecount %d at %s:%d",
		    vstate_name(from), vstate_name(to), vp->v_usecount,
		    func, line);

	node->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_ACTIVE || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
	(VNODE_TO_VIMPL((vp))->vi_state)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
	vstate_wait_stable((vp))
#define VSTATE_ASSERT(vp, state)

static void
vstate_wait_stable(vnode_t *vp)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	node->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_ACTIVE || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */

void
vfs_vnode_sysinit(void)
{
	int error __diagused;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag = IMNT_MPSAFE;

	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&lru_free_list);
	TAILQ_INIT(&lru_hold_list);
	TAILQ_INIT(&lru_vrele_list);

	vcache_init();

	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vdrain_gen_cv, "vdrainwt");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, &vdrain_lwp, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	vnode_impl_t *node;
	vnode_t *vp;

	node = pool_cache_get(vcache_pool, PR_WAITOK);
	memset(node, 0, sizeof(*node));
	vp = VIMPL_TO_VNODE(node);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	node->vi_state = VS_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	vnode_impl_t *node;

	node = VNODE_TO_VIMPL(vp);
	KASSERT(node->vi_state == VS_MARKER);
	uvm_obj_destroy(&vp->v_uobj, true);
	pool_cache_put(vcache_pool, node);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
}

/*
 * Return the lru list this node should be on.
 */
static vnodelst_t *
lru_which(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt > 0)
		return &lru_hold_list;
	else
		return &lru_free_list;
}

/*
 * Put vnode to end of given list.
 * Both the current and the new list may be NULL, used on vnode alloc/free.
 * Adjust numvnodes and signal vdrain thread if there is work.
 */
static void
lru_requeue(vnode_t *vp, vnodelst_t *listhd)
{
	vnode_impl_t *node;

	mutex_enter(&vdrain_lock);
	node = VNODE_TO_VIMPL(vp);
	if (node->vi_lrulisthd != NULL)
		TAILQ_REMOVE(node->vi_lrulisthd, node, vi_lrulist);
	else
		numvnodes++;
	node->vi_lrulisthd = listhd;
	if (node->vi_lrulisthd != NULL)
		TAILQ_INSERT_TAIL(node->vi_lrulisthd, node, vi_lrulist);
	else
		numvnodes--;
	if (numvnodes > desiredvnodes || listhd == &lru_vrele_list)
		cv_broadcast(&vdrain_cv);
	mutex_exit(&vdrain_lock);
}

/*
 * Reclaim a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_remove(vnode_t *vp)
{
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	/* Probe usecount (unlocked). */
	if (vp->v_usecount > 0)
		return;
	/* Try v_interlock -- we lock the wrong direction! */
	if (!mutex_tryenter(vp->v_interlock))
		return;
	/* Probe usecount and state. */
	if (vp->v_usecount > 0 || VSTATE_GET(vp) != VS_ACTIVE) {
		mutex_exit(vp->v_interlock);
		return;
	}
	mp = vp->v_mount;
	if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
		mutex_exit(vp->v_interlock);
		return;
	}
	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	if (vcache_vget(vp) == 0) {
		if (!vrecycle(vp))
			vrele(vp);
	}
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Release a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_vrele(vnode_t *vp)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	mp = vp->v_mount;
	if (fstrans_start_nowait(mp, FSTRANS_LAZY) != 0)
		return;

	/*
	 * First remove the vnode from the vrele list.
	 * Put it onto the last lru list; the last vrele()
	 * will put it back onto the right list before
	 * its v_usecount reaches zero.
	 */
	KASSERT(node->vi_lrulisthd == &lru_vrele_list);
	TAILQ_REMOVE(node->vi_lrulisthd, node, vi_lrulist);
	node->vi_lrulisthd = &lru_hold_list;
	TAILQ_INSERT_TAIL(node->vi_lrulisthd, node, vi_lrulist);

	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	mutex_enter(vp->v_interlock);
	vrelel(vp, 0);
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes
 * and release vnodes from asynchronous vrele.
 */
static void
vdrain_thread(void *cookie)
{
	vnodelst_t *listhd[] = {
	    &lru_vrele_list, &lru_free_list, &lru_hold_list
	};
	int i;
	u_int target;
	vnode_impl_t *node, *marker;

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);

	for (;;) {
		vdrain_retry = false;
		target = desiredvnodes - desiredvnodes/10;

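		/*
		 * Scan each list behind a private marker node, so that
		 * the current position survives while vdrain_vrele()
		 * and vdrain_remove() drop and re-take vdrain_lock.
		 */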
		for (i = 0; i < __arraycount(listhd); i++) {
			TAILQ_INSERT_HEAD(listhd[i], marker, vi_lrulist);
			while ((node = TAILQ_NEXT(marker, vi_lrulist))) {
				TAILQ_REMOVE(listhd[i], marker, vi_lrulist);
				TAILQ_INSERT_AFTER(listhd[i], node, marker,
				    vi_lrulist);
				if (listhd[i] == &lru_vrele_list)
					vdrain_vrele(VIMPL_TO_VNODE(node));
				else if (numvnodes < target)
					break;
				else
					vdrain_remove(VIMPL_TO_VNODE(node));
			}
			TAILQ_REMOVE(listhd[i], marker, vi_lrulist);
		}

		if (vdrain_retry) {
			mutex_exit(&vdrain_lock);
			yield();
			mutex_enter(&vdrain_lock);
		} else {
			vdrain_gen++;
			cv_broadcast(&vdrain_gen_cv);
			cv_wait(&vdrain_cv, &vdrain_lock);
		}
	}
}

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Try to drop reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static inline bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = vp->v_usecount;; use = next) {
		if (use == 1) {
			return false;
		}
		KASSERT(use > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * Vnode release.  If the reference count drops to zero, call the
 * inactive routine and either return the vnode to the freelist or
 * free it to the pool.
 */
static void
vrelel(vnode_t *vp, int flags)
{
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VS_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count
	 * and unlock.
	 */
	if (vtryrele(vp)) {
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
	if (VSTATE_GET(vp) != VS_RECLAIMED) {
		recycle = false;

		/*
		 * XXX This ugly block can be largely eliminated if
		 * locking is pushed down into the file systems.
		 *
		 * Defer vnode release to vdrain_thread if caller
		 * requests it explicitly or is the pagedaemon.
		 */
		if ((curlwp == uvm.pagedaemon_lwp) ||
		    (flags & VRELEL_ASYNC_RELE) != 0) {
			defer = true;
		} else if (curlwp == vdrain_lwp) {
			/*
			 * We have to try harder.
			 */
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			KASSERTMSG((error == 0), "vn_lock failed: %d", error);
			mutex_enter(vp->v_interlock);
			defer = false;
		} else {
			/* If we can't acquire the lock, then defer. */
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp,
			    LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
			defer = (error != 0);
			mutex_enter(vp->v_interlock);
		}

		KASSERT(mutex_owned(vp->v_interlock));
		KASSERT(! (curlwp == vdrain_lwp && defer));

		if (defer) {
			/*
			 * Defer reclaim to the kthread; it's not safe to
			 * clean it here.  We donate it our last reference.
			 */
			lru_requeue(vp, &lru_vrele_list);
			mutex_exit(vp->v_interlock);
			return;
		}

		/*
		 * If the node got another reference while we
		 * released the interlock, don't try to inactivate it yet.
		 */
		if (__predict_false(vtryrele(vp))) {
			VOP_UNLOCK(vp);
			mutex_exit(vp->v_interlock);
			return;
		}
		VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
		mutex_exit(vp->v_interlock);

		/*
		 * The vnode must not gain another reference while being
		 * deactivated.  If VOP_INACTIVE() indicates that
		 * the described file has been deleted, then recycle
		 * the vnode.
		 *
		 * Note that VOP_INACTIVE() will drop the vnode lock.
		 */
		VOP_INACTIVE(vp, &recycle);
		if (recycle) {
			/* vcache_reclaim() below will drop the lock. */
			if (vn_lock(vp, LK_EXCLUSIVE) != 0)
				recycle = false;
		}
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
		if (!recycle) {
			if (vtryrele(vp)) {
				mutex_exit(vp->v_interlock);
				return;
			}
		}

		/* Take care of space accounting. */
		if (vp->v_iflag & VI_EXECMAP) {
			atomic_add_int(&uvmexp.execpages,
			    -vp->v_uobj.uo_npages);
			atomic_add_int(&uvmexp.filepages,
			    vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			VSTATE_ASSERT(vp, VS_ACTIVE);
			vcache_reclaim(vp);
		}
		KASSERT(vp->v_usecount > 0);
	}

	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(vp->v_interlock);
		return;
	}

	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		vcache_free(VNODE_TO_VIMPL(vp));
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		lru_requeue(vp, lru_which(vp));
		mutex_exit(vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0);
}

/*
 * Asynchronous vnode release: the vnode is released in a different
 * context.
 */
void
vrele_async(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC_RELE);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 */
void
vref(vnode_t *vp)
{

	KASSERT(vp->v_usecount != 0);

	atomic_inc_uint(&vp->v_usecount);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Recycle an unused vnode if caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{
	int error __diagused;

	mutex_enter(vp->v_interlock);

	/* Make sure we hold the last reference. */
	VSTATE_WAIT_STABLE(vp);
	if (vp->v_usecount != 1) {
		mutex_exit(vp->v_interlock);
		return false;
	}

	/* If the vnode is already clean we're done. */
	if (VSTATE_GET(vp) != VS_ACTIVE) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrelel(vp, 0);
		return true;
	}

	/* Prevent further references until the vnode is locked. */
	VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
	mutex_exit(vp->v_interlock);

	error = vn_lock(vp, LK_EXCLUSIVE);
	KASSERT(error == 0);

	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);

	KASSERT(vp->v_usecount == 1);
	vcache_reclaim(vp);
	vrelel(vp, 0);

	return true;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vp->v_usecount > 0);

	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		mutex_exit(vp->v_interlock);
		return;
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		mutex_exit(vp->v_interlock);
		vgone(vp);
		return;
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);
	}

	while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
		vgone(vq);
	}
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{

	if (vn_lock(vp, LK_EXCLUSIVE) != 0) {
		/*
		 * Locking failed, so the vnode is already reclaimed;
		 * drop the reference and return (falling through to
		 * vcache_reclaim() here would be wrong).
		 */
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrele(vp);
		return;
	}

	mutex_enter(vp->v_interlock);
	vcache_reclaim(vp);
	vrelel(vp, 0);
}

static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
	uint32_t hash = HASH32_BUF_INIT;

	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
	return hash;
}
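
/*
 * For illustration: a cache key is an opaque byte string scoped by the
 * mount; e.g. ffs effectively uses the inode number.  A sketch with
 * hypothetical local variables:
 *
 *	ino_t ino = ...;
 *	struct vcache_key key = {
 *		.vk_mount = mp,
 *		.vk_key = &ino,
 *		.vk_key_len = sizeof(ino),
 *	};
 *	uint32_t hash = vcache_hash(&key);
 */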

static void
vcache_init(void)
{

	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), 0, 0, 0,
	    "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vcache_pool != NULL);
	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vcache_cv, "vcache");
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
	    &vcache_hashmask);
}

static void
vcache_reinit(void)
{
	int i;
	uint32_t hash;
	u_long oldmask, newmask;
	struct hashhead *oldtab, *newtab;
	vnode_impl_t *node;

	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
	mutex_enter(&vcache_lock);
	oldtab = vcache_hashtab;
	oldmask = vcache_hashmask;
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = newtab;
	vcache_hashmask = newmask;
	for (i = 0; i <= oldmask; i++) {
		while ((node = SLIST_FIRST(&oldtab[i])) != NULL) {
			SLIST_REMOVE(&oldtab[i], node, vnode_impl, vi_hash);
			hash = vcache_hash(&node->vi_key);
			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
			    node, vi_hash);
		}
	}
	mutex_exit(&vcache_lock);
	hashdone(oldtab, HASH_SLIST, oldmask);
}

static inline vnode_impl_t *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
	struct hashhead *hashp;
	vnode_impl_t *node;

	KASSERT(mutex_owned(&vcache_lock));

	hashp = &vcache_hashtab[hash & vcache_hashmask];
	SLIST_FOREACH(node, hashp, vi_hash) {
		if (key->vk_mount != node->vi_key.vk_mount)
			continue;
		if (key->vk_key_len != node->vi_key.vk_key_len)
			continue;
		if (memcmp(key->vk_key, node->vi_key.vk_key, key->vk_key_len))
			continue;
		return node;
	}
	return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static vnode_impl_t *
vcache_alloc(void)
{
	vnode_impl_t *node;
	vnode_t *vp;

	node = pool_cache_get(vcache_pool, PR_WAITOK);
	memset(node, 0, sizeof(*node));

	/* SLIST_INIT(&node->vi_hash); */

	vp = VIMPL_TO_VNODE(node);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	cv_init(&vp->v_cv, "vnode");
	/* LIST_INIT(&vp->v_nclist); */
	/* LIST_INIT(&vp->v_dnclist); */

	rw_init(&vp->v_lock);
	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	node->vi_state = VS_LOADING;

	lru_requeue(vp, &lru_free_list);

	return node;
}

/*
 * Free an unused, unreferenced vcache node.
 * v_interlock locked on entry.
 */
static void
vcache_free(vnode_impl_t *node)
{
	vnode_t *vp;

	vp = VIMPL_TO_VNODE(node);
	KASSERT(mutex_owned(vp->v_interlock));

	KASSERT(vp->v_usecount == 0);
	KASSERT(vp->v_holdcnt == 0);
	KASSERT(vp->v_writecount == 0);
	lru_requeue(vp, NULL);
	mutex_exit(vp->v_interlock);

	vfs_insmntque(vp, NULL);
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		spec_node_destroy(vp);

	rw_destroy(&vp->v_lock);
	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	pool_cache_put(vcache_pool, node);
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success, ENOENT if the vnode has been reclaimed and
 * EBUSY if the vnode state is unstable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_tryvget(vnode_t *vp)
{
	int error = 0;

	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED))
		error = ENOENT;
	else if (__predict_false(VSTATE_GET(vp) != VS_ACTIVE))
		error = EBUSY;
	else if (vp->v_usecount == 0)
		vp->v_usecount = 1;
	else
		atomic_inc_uint(&vp->v_usecount);

	mutex_exit(vp->v_interlock);

	return error;
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success and ENOENT if the vnode has been reclaimed.
 * Will wait for the vnode state to be stable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_vget(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	/* Increment hold count to prevent vnode from disappearing. */
	vp->v_holdcnt++;
	VSTATE_WAIT_STABLE(vp);
	vp->v_holdcnt--;

	/* If this was the last reference to a reclaimed vnode free it now. */
	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
		if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
			vcache_free(VNODE_TO_VIMPL(vp));
		else
			mutex_exit(vp->v_interlock);
		return ENOENT;
	}
	VSTATE_ASSERT(vp, VS_ACTIVE);
	if (vp->v_usecount == 0)
		vp->v_usecount = 1;
	else
		atomic_inc_uint(&vp->v_usecount);

	mutex_exit(vp->v_interlock);

	return 0;
}

/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
	int error;
	uint32_t hash;
	const void *new_key;
	struct vnode *vp;
	struct vcache_key vcache_key;
	vnode_impl_t *node, *new_node;

	new_key = NULL;
	*vpp = NULL;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

again:
	mutex_enter(&vcache_lock);
	node = vcache_hash_lookup(&vcache_key, hash);

	/* If found, take a reference or retry. */
	if (__predict_true(node != NULL)) {
		/*
		 * If the vnode is loading we cannot take the v_interlock
		 * here as it might change during load (see uvm_obj_setlock()).
		 * As changing state from VS_LOADING requires both vcache_lock
		 * and v_interlock it is safe to test with vcache_lock held.
		 *
		 * Wait for vnodes changing state from VS_LOADING and retry.
		 */
		if (__predict_false(node->vi_state == VS_LOADING)) {
			cv_wait(&vcache_cv, &vcache_lock);
			mutex_exit(&vcache_lock);
			goto again;
		}
		vp = VIMPL_TO_VNODE(node);
		mutex_enter(vp->v_interlock);
		mutex_exit(&vcache_lock);
		error = vcache_vget(vp);
		if (error == ENOENT)
			goto again;
		if (error == 0)
			*vpp = vp;
		KASSERT((error != 0) == (*vpp == NULL));
		return error;
	}
	mutex_exit(&vcache_lock);

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp, NULL);
	if (error)
		return error;
	new_node = vcache_alloc();
	new_node->vi_key = vcache_key;
	vp = VIMPL_TO_VNODE(new_node);
	mutex_enter(&vcache_lock);
	node = vcache_hash_lookup(&vcache_key, hash);
	if (node == NULL) {
		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
		    new_node, vi_hash);
		node = new_node;
	}

	/* If another thread beat us inserting this node, retry. */
	if (node != new_node) {
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache_lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		goto again;
	}
	mutex_exit(&vcache_lock);

	/* Load the fs node.  Exclusive as new_node is VS_LOADING. */
	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
	if (error) {
		mutex_enter(&vcache_lock);
		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
		    new_node, vnode_impl, vi_hash);
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache_lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_key != NULL);
	KASSERT(memcmp(key, new_key, key_len) == 0);
	KASSERT(vp->v_op != NULL);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_unbusy(mp, true, NULL);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	new_node->vi_key.vk_key = new_key;
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache_lock);
	*vpp = vp;
	return 0;
}
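
/*
 * Example call site (a sketch; "xfs_vget" names a hypothetical file
 * system's VFS_VGET(9) implementation): many file systems simply wrap
 * vcache_get() with their inode number as the key.  The vnode is
 * returned referenced and unlocked.
 *
 *	int
 *	xfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
 *	{
 *
 *		return vcache_get(mp, &ino, sizeof(ino), vpp);
 *	}
 */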

/*
 * Create a new vnode / fs node pair and return it referenced through vpp.
 */
int
vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
    kauth_cred_t cred, struct vnode **vpp)
{
	int error;
	uint32_t hash;
	struct vnode *ovp, *vp;
	vnode_impl_t *new_node;
	vnode_impl_t *old_node __diagused;

	*vpp = NULL;

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp, NULL);
	if (error)
		return error;
	new_node = vcache_alloc();
	new_node->vi_key.vk_mount = mp;
	vp = VIMPL_TO_VNODE(new_node);

	/* Create and load the fs node. */
	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred,
	    &new_node->vi_key.vk_key_len, &new_node->vi_key.vk_key);
	if (error) {
		mutex_enter(&vcache_lock);
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache_lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_node->vi_key.vk_key != NULL);
	KASSERT(vp->v_op != NULL);
	hash = vcache_hash(&new_node->vi_key);

	/* Wait for previous instance to be reclaimed, then insert new node. */
	mutex_enter(&vcache_lock);
	while ((old_node = vcache_hash_lookup(&new_node->vi_key, hash))) {
		ovp = VIMPL_TO_VNODE(old_node);
		mutex_enter(ovp->v_interlock);
		mutex_exit(&vcache_lock);
		error = vcache_vget(ovp);
		KASSERT(error == ENOENT);
		mutex_enter(&vcache_lock);
	}
	SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
	    new_node, vi_hash);
	mutex_exit(&vcache_lock);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_unbusy(mp, true, NULL);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
	mutex_exit(&vcache_lock);
	mutex_exit(vp->v_interlock);
	*vpp = vp;
	return 0;
}
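
/*
 * Example call site (a sketch; the names are hypothetical): a file
 * system's VOP_CREATE(9) typically reaches this function as
 *
 *	error = vcache_new(dvp->v_mount, dvp, vap, cnp->cn_cred, &vp);
 *	if (error)
 *		return error;
 *	(vp is referenced and unlocked, as with vcache_get())
 *
 * with VFS_NEWVNODE() doing the actual inode allocation.
 */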

/*
 * Prepare key change: update the old cache node's key and lock the new
 * cache node.
 * Return an error if the new node already exists.
 */
int
vcache_rekey_enter(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *node, *new_node;
	struct vnode *tvp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	new_node = vcache_alloc();
	new_node->vi_key = new_vcache_key;
	tvp = VIMPL_TO_VNODE(new_node);

	/* Insert locked new node used as placeholder. */
	mutex_enter(&vcache_lock);
	node = vcache_hash_lookup(&new_vcache_key, new_hash);
	if (node != NULL) {
		mutex_enter(tvp->v_interlock);
		VSTATE_CHANGE(tvp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache_lock);
		vrelel(tvp, 0);
		return EEXIST;
	}
	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_node, vi_hash);

	/* Replace the old node's key with the temporary copy. */
	node = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(node != NULL);
	KASSERT(VIMPL_TO_VNODE(node) == vp);
	KASSERT(node->vi_key.vk_key != old_vcache_key.vk_key);
	node->vi_key = old_vcache_key;
	mutex_exit(&vcache_lock);
	return 0;
}

/*
 * Key change complete: update old node and remove placeholder.
 */
void
vcache_rekey_exit(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *old_node, *new_node;
	struct vnode *tvp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	mutex_enter(&vcache_lock);

	/* Lookup old and new node. */
	old_node = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(old_node != NULL);
	KASSERT(VIMPL_TO_VNODE(old_node) == vp);

	new_node = vcache_hash_lookup(&new_vcache_key, new_hash);
	KASSERT(new_node != NULL);
	KASSERT(new_node->vi_key.vk_key_len == new_key_len);
	tvp = VIMPL_TO_VNODE(new_node);
	mutex_enter(tvp->v_interlock);
	VSTATE_ASSERT(VIMPL_TO_VNODE(new_node), VS_LOADING);

	/* Rekey old node and put it onto its new hashlist. */
	old_node->vi_key = new_vcache_key;
	if (old_hash != new_hash) {
		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
		    old_node, vnode_impl, vi_hash);
		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
		    old_node, vi_hash);
	}

	/* Remove new node used as placeholder. */
	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_node, vnode_impl, vi_hash);
	VSTATE_CHANGE(tvp, VS_LOADING, VS_RECLAIMED);
	mutex_exit(&vcache_lock);
	vrelel(tvp, 0);
}
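
/*
 * The two functions above form a small protocol.  A sketch of a caller
 * changing a vnode's key (the key variables are hypothetical and error
 * handling is abbreviated):
 *
 *	error = vcache_rekey_enter(mp, vp, &okey, olen, &nkey, nlen);
 *	if (error)
 *		return error;	(EEXIST: new key already present)
 *	(update the file system's own notion of the key)
 *	vcache_rekey_exit(mp, vp, &okey, olen, &nkey, nlen);
 */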

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with the vnode locked and will return unlocked.
 * Must be called with the interlock held, and will return with it held.
 */
static void
vcache_reclaim(vnode_t *vp)
{
	lwp_t *l = curlwp;
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
	uint32_t hash;
	uint8_t temp_buf[64], *temp_key;
	size_t temp_key_len;
	bool recycle, active;
	int error;

	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount != 0);

	active = (vp->v_usecount > 1);
	temp_key_len = node->vi_key.vk_key_len;
	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	VSTATE_CHANGE(vp, VS_ACTIVE, VS_RECLAIMING);
	if (vp->v_iflag & VI_EXECMAP) {
		atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
		atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	mutex_exit(vp->v_interlock);

	/* Replace the vnode key with a temporary copy. */
	if (node->vi_key.vk_key_len > sizeof(temp_buf)) {
		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
	} else {
		temp_key = temp_buf;
	}
	mutex_enter(&vcache_lock);
	memcpy(temp_key, node->vi_key.vk_key, temp_key_len);
	node->vi_key.vk_key = temp_key;
	mutex_exit(&vcache_lock);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.
	 */
	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
	if (error != 0) {
		if (wapbl_vphaswapbl(vp))
			WAPBL_DISCARD(wapbl_vptomp(vp));
		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
	}
	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
		spec_node_revoke(vp);
	}

	/*
	 * Disassociate the underlying file system from the vnode.
	 * Note that VOP_INACTIVE() will unlock the vnode.
	 */
	VOP_INACTIVE(vp, &recycle);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	KASSERT(vp->v_data == NULL);
	KASSERT(vp->v_uobj.uo_npages == 0);

	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}

	/* Purge name cache. */
	cache_purge(vp);

	/* Move to dead mount. */
	vp->v_vflag &= ~VV_ROOT;
	atomic_inc_uint(&dead_rootmount->mnt_refcnt);
	vfs_insmntque(vp, dead_rootmount);

	/* Remove from vnode cache. */
	hash = vcache_hash(&node->vi_key);
	mutex_enter(&vcache_lock);
	KASSERT(node == vcache_hash_lookup(&node->vi_key, hash));
	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
	    node, vnode_impl, vi_hash);
	mutex_exit(&vcache_lock);
	if (temp_key != temp_buf)
		kmem_free(temp_key, temp_key_len);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_vflag |= VV_LOCKSWORK;
	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
	vp->v_tag = VT_NON;
	KNOTE(&vp->v_klist, NOTE_REVOKE);

	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(struct buf *bp)
{
	vnode_t *vp;

	if ((vp = bp->b_vp) == NULL)
		return;

	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));

	if (--vp->v_numoutput < 0)
		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
	if (vp->v_numoutput == 0)
		cv_broadcast(&vp->v_cv);
}

/*
 * Test a vnode for being or becoming dead.  Returns one of:
 * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
 * ENOENT: vnode is dead.
 * 0:      otherwise.
 *
 * Whenever this function returns a non-zero value all future
 * calls will also return a non-zero value.
 */
int
vdead_check(struct vnode *vp, int flags)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (! ISSET(flags, VDEAD_NOWAIT))
		VSTATE_WAIT_STABLE(vp);

	if (VSTATE_GET(vp) == VS_RECLAIMING) {
		KASSERT(ISSET(flags, VDEAD_NOWAIT));
		return EBUSY;
	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
		return ENOENT;
	}

	return 0;
}

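/*
 * Wait for the vdrain thread to complete two generations of draining,
 * then fail with EBUSY if the vnode count is still at or above
 * desiredvnodes; otherwise resize the vnode cache hash table if
 * desiredvnodes changed.  (Descriptive comment; typically reached,
 * e.g., when kern.maxvnodes is changed via sysctl.)
 */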
int
vfs_drainvnodes(void)
{
	int i, gen;

	mutex_enter(&vdrain_lock);
	for (i = 0; i < 2; i++) {
		gen = vdrain_gen;
		while (gen == vdrain_gen) {
			cv_broadcast(&vdrain_cv);
			cv_wait(&vdrain_gen_cv, &vdrain_lock);
		}
	}
	mutex_exit(&vdrain_lock);

	if (numvnodes >= desiredvnodes)
		return EBUSY;

	if (vcache_hashsize != desiredvnodes)
		vcache_reinit();

	return 0;
}

void
vnpanic(vnode_t *vp, const char *fmt, ...)
{
	va_list ap;

#ifdef DIAGNOSTIC
	vprint(NULL, vp);
#endif
	va_start(ap, fmt);
	vpanic(fmt, ap);
	va_end(ap);
}