/*	$NetBSD: vfs_vnode.c,v 1.64 2016/12/20 10:02:21 hannken Exp $	*/

/*-
 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was another, traditional way.  Currently, only the draining thread
 *	recycles vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In such a case, VOP_INACTIVE(9) is called to
 *	inform the file system that the vnode is inactive.  Via this call,
 *	the file system indicates whether the vnode can be recycled
 *	(usually by checking its own references, e.g. the link count or
 *	whether the file was removed).
 *
 *	Depending on that indication, the vnode can be put onto a free list
 *	(cache), or cleaned via vcache_reclaim(), which calls VOP_RECLAIM(9)
 *	to disassociate the underlying file system from the vnode, after
 *	which it is finally destroyed.
 *
 * Vnode state
 *
 *	A vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating with the underlying file
 *			system and is not yet ready to use.
 *	- ACTIVE	Vnode has an associated underlying file system
 *			and is ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from the underlying file
 *			system and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> ACTIVE
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	ACTIVE -> RECLAIMING
 *			Vnode starts disassociation from the underlying file
 *			system in vcache_reclaim().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from the underlying file
 *			system in vcache_reclaim().
 *	ACTIVE -> BLOCKED
 *			Either vcache_rekey*() is changing the vnode key or
 *			vrelel() is about to call VOP_INACTIVE().
 *	BLOCKED -> ACTIVE
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system, or
 *			vcache_rekey*() drops a vnode used as a placeholder.
 *
 *	Of these states, LOADING, BLOCKED and RECLAIMING are intermediate,
 *	and it is possible to wait for a state change.
 *
 *	The state is protected with v_interlock, with one exception:
 *	to change from LOADING both v_interlock and vcache.lock must be
 *	held, so it is possible to check "state == LOADING" while holding
 *	only vcache.lock.  See vcache_get() for details.
 *
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained by
 *	the vref(9), vrele(9) and vput(9) routines.  Common points holding
 *	references are e.g. open files, current working directories,
 *	mount points, etc.
 *
 * Note on v_usecount and its locking
 *
 *	At nearly all points where v_usecount could be zero,
 *	vnode_t::v_interlock will be held.  To change v_usecount away
 *	from zero, the interlock must be held.  To change from a non-zero
 *	value to zero, again the interlock must be held.
 *
 *	Changing the usecount from one non-zero value to another non-zero
 *	value can safely be done using atomic operations, without the
 *	interlock held.
 */
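
/*
 * Illustrative sketch of the life-cycle above, as seen by a typical
 * caller (not part of the build; the key and error handling are
 * elided):
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = vcache_get(mp, &key, key_len, &vp);
 *	if (error == 0) {
 *		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *		// ... use the vnode ...
 *		vput(vp);	// unlock and release; the final vrele()
 *				// leads to VOP_INACTIVE() and possibly
 *				// vcache_reclaim()
 *	}
 */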

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.64 2016/12/20 10:02:21 hannken Exp $");

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode_impl.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC_RELE	0x0001	/* Always defer to vrele thread. */

u_int			numvnodes		__cacheline_aligned;

/*
 * There are three lru lists: one holds vnodes waiting for async release,
 * one is for vnodes which have no buffer/page references and
 * one for those which do (i.e. v_holdcnt is non-zero).
 */
static vnodelst_t	lru_vrele_list		__cacheline_aligned;
static vnodelst_t	lru_free_list		__cacheline_aligned;
static vnodelst_t	lru_hold_list		__cacheline_aligned;
static kmutex_t		vdrain_lock		__cacheline_aligned;
static kcondvar_t	vdrain_cv		__cacheline_aligned;
static int		vdrain_gen;
static kcondvar_t	vdrain_gen_cv;
static bool		vdrain_retry;
static lwp_t *		vdrain_lwp;
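
/*
 * The vnode cache: a hash table mapping (mount, key) pairs to vnode
 * cache nodes, protected by vcache.lock.  vcache.cv is broadcast when
 * a vnode leaves the LOADING state (see vstate_change()).
 */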
SLIST_HEAD(hashhead, vnode_impl);
static struct {
	kmutex_t	lock;
	kcondvar_t	cv;
	u_int		hashsize;
	u_long		hashmask;
	struct hashhead	*hashtab;
	pool_cache_t	pool;
}			vcache			__cacheline_aligned;

static void		lru_requeue(vnode_t *, vnodelst_t *);
static vnodelst_t *	lru_which(vnode_t *);
static vnode_impl_t *	vcache_alloc(void);
static void		vcache_free(vnode_impl_t *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vcache_reclaim(vnode_t *);
static void		vrelel(vnode_t *, int);
static void		vdrain_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount	*dead_rootmount;
extern int		(**dead_vnodeop_p)(void *);
extern struct vfsops	dead_vfsops;

/* Vnode state operations and diagnostics. */

#if defined(DIAGNOSTIC)

#define VSTATE_GET(vp) \
	vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
	vstate_assert_wait_stable((vp), __func__, __LINE__)
#define VSTATE_ASSERT(vp, state) \
	vstate_assert((vp), (state), __func__, __LINE__)

static void
vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if (__predict_true(node->vi_state == state))
		return;
	vnpanic(vp, "state is %s, expected %s at %s:%d",
	    vstate_name(node->vi_state), vstate_name(state), func, line);
}

static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);

	return node->vi_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);

	while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);
}

static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache.lock), "at %s:%d", func, line);

	if (from == VS_MARKER)
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (to == VS_MARKER)
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (node->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(node->vi_state), vstate_name(from), func, line);

	node->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache.cv);
	if (to == VS_ACTIVE || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
	(VNODE_TO_VIMPL((vp))->vi_state)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
	vstate_wait_stable((vp))
#define VSTATE_ASSERT(vp, state)

static void
vstate_wait_stable(vnode_t *vp)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	node->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache.cv);
	if (to == VS_ACTIVE || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */

void
vfs_vnode_sysinit(void)
{
	int error __diagused;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag = IMNT_MPSAFE;

	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&lru_free_list);
	TAILQ_INIT(&lru_hold_list);
	TAILQ_INIT(&lru_vrele_list);

	vcache_init();

	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vdrain_gen_cv, "vdrainwt");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, &vdrain_lwp, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	vnode_impl_t *node;
	vnode_t *vp;

	node = pool_cache_get(vcache.pool, PR_WAITOK);
	memset(node, 0, sizeof(*node));
	vp = VIMPL_TO_VNODE(node);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	node->vi_state = VS_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	vnode_impl_t *node;

	node = VNODE_TO_VIMPL(vp);
	KASSERT(node->vi_state == VS_MARKER);
	uvm_obj_destroy(&vp->v_uobj, true);
	pool_cache_put(vcache.pool, node);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
}

/*
 * Return the lru list this node should be on.
 */
static vnodelst_t *
lru_which(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt > 0)
		return &lru_hold_list;
	else
		return &lru_free_list;
}

/*
 * Put vnode to end of given list.
 * Both the current and the new list may be NULL, used on vnode alloc/free.
 * Adjust numvnodes and signal vdrain thread if there is work.
 */
static void
lru_requeue(vnode_t *vp, vnodelst_t *listhd)
{
	vnode_impl_t *node;

	mutex_enter(&vdrain_lock);
	node = VNODE_TO_VIMPL(vp);
	if (node->vi_lrulisthd != NULL)
		TAILQ_REMOVE(node->vi_lrulisthd, node, vi_lrulist);
	else
		numvnodes++;
	node->vi_lrulisthd = listhd;
	if (node->vi_lrulisthd != NULL)
		TAILQ_INSERT_TAIL(node->vi_lrulisthd, node, vi_lrulist);
	else
		numvnodes--;
	if (numvnodes > desiredvnodes || listhd == &lru_vrele_list)
		cv_broadcast(&vdrain_cv);
	mutex_exit(&vdrain_lock);
}

/*
 * Reclaim a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_remove(vnode_t *vp)
{
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	/* Probe usecount (unlocked). */
	if (vp->v_usecount > 0)
		return;
	/* Try v_interlock -- we lock the wrong direction! */
	if (!mutex_tryenter(vp->v_interlock))
		return;
	/* Probe usecount and state. */
	if (vp->v_usecount > 0 || VSTATE_GET(vp) != VS_ACTIVE) {
		mutex_exit(vp->v_interlock);
		return;
	}
	mp = vp->v_mount;
	if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
		mutex_exit(vp->v_interlock);
		return;
	}
	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	if (vget(vp, 0, true /* wait */) == 0) {
		if (!vrecycle(vp))
			vrele(vp);
	}
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Release a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_vrele(vnode_t *vp)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	mp = vp->v_mount;
	if (fstrans_start_nowait(mp, FSTRANS_LAZY) != 0)
		return;

	/*
	 * First remove the vnode from the vrele list.
	 * Put it onto the hold list; the final vrele()
	 * will put it back onto the right list before
	 * its v_usecount reaches zero.
	 */
	KASSERT(node->vi_lrulisthd == &lru_vrele_list);
	TAILQ_REMOVE(node->vi_lrulisthd, node, vi_lrulist);
	node->vi_lrulisthd = &lru_hold_list;
	TAILQ_INSERT_TAIL(node->vi_lrulisthd, node, vi_lrulist);

	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	mutex_enter(vp->v_interlock);
	vrelel(vp, 0);
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes
 * and release vnodes from asynchronous vrele.
 */
static void
vdrain_thread(void *cookie)
{
	vnodelst_t *listhd[] = {
	    &lru_vrele_list, &lru_free_list, &lru_hold_list
	};
	int i;
	u_int target;
	vnode_impl_t *node, *marker;

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);

	for (;;) {
		vdrain_retry = false;
		target = desiredvnodes - desiredvnodes/10;

		for (i = 0; i < __arraycount(listhd); i++) {
			TAILQ_INSERT_HEAD(listhd[i], marker, vi_lrulist);
			while ((node = TAILQ_NEXT(marker, vi_lrulist))) {
				TAILQ_REMOVE(listhd[i], marker, vi_lrulist);
				TAILQ_INSERT_AFTER(listhd[i], node, marker,
				    vi_lrulist);
				if (listhd[i] == &lru_vrele_list)
					vdrain_vrele(VIMPL_TO_VNODE(node));
				else if (numvnodes < target)
					break;
				else
					vdrain_remove(VIMPL_TO_VNODE(node));
			}
			TAILQ_REMOVE(listhd[i], marker, vi_lrulist);
		}

		if (vdrain_retry) {
			mutex_exit(&vdrain_lock);
			yield();
			mutex_enter(&vdrain_lock);
		} else {
			vdrain_gen++;
			cv_broadcast(&vdrain_gen_cv);
			cv_wait(&vdrain_cv, &vdrain_lock);
		}
	}
}

/*
 * vget: get a particular vnode, increment its reference count and
 * return it.
 *
 * => Must be called with v_interlock held.
 *
 * If state is VS_RECLAIMING, the vnode may be eliminated in vcache_reclaim().
 * In that case, we cannot grab the vnode, so the process is awakened when
 * the transition is completed, and an error returned to indicate that the
 * vnode is no longer usable.
 *
 * If state is VS_LOADING or VS_BLOCKED, wait until the vnode enters a
 * stable state (VS_ACTIVE or VS_RECLAIMED).
 */
int
vget(vnode_t *vp, int flags, bool waitok)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((flags & ~LK_NOWAIT) == 0);
	KASSERT(waitok == ((flags & LK_NOWAIT) == 0));

	/*
	 * Take a reference: v_interlock is required here, since changing
	 * v_usecount away from zero must be done under it (see the note
	 * on v_usecount and its locking above).
	 */
	if (vp->v_usecount == 0) {
		vp->v_usecount = 1;
	} else {
		atomic_inc_uint(&vp->v_usecount);
	}

	/*
	 * If the vnode is in the process of changing state we wait
	 * for the change to complete and take care not to return
	 * a clean vnode.
	 */
	if (! ISSET(flags, LK_NOWAIT))
		VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		vrelel(vp, 0);
		return ENOENT;
	} else if (VSTATE_GET(vp) != VS_ACTIVE) {
		KASSERT(ISSET(flags, LK_NOWAIT));
		vrelel(vp, 0);
		return EBUSY;
	}

	/*
	 * Ok, we got it in good shape.
	 */
	VSTATE_ASSERT(vp, VS_ACTIVE);
	mutex_exit(vp->v_interlock);

	return 0;
}
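
/*
 * Example (sketch): vget() expects v_interlock to be held on entry and
 * releases it in all cases, so a typical caller looks like:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vget(vp, 0, true);	// waits for a stable state
 *	if (error != 0)
 *		return error;		// reclaimed; no reference taken
 *	// vp is now referenced; release later with vrele()
 */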

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Try to drop a reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static inline bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = vp->v_usecount;; use = next) {
		if (use == 1) {
			return false;
		}
		KASSERT(use > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * Vnode release.  If reference count drops to zero, call inactive
 * routine and either return to freelist or free to the pool.
 */
static void
vrelel(vnode_t *vp, int flags)
{
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VS_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count
	 * and unlock.
	 */
	if (vtryrele(vp)) {
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
	if (VSTATE_GET(vp) != VS_RECLAIMED) {
		recycle = false;

		/*
		 * XXX This ugly block can be largely eliminated if
		 * locking is pushed down into the file systems.
		 *
		 * Defer vnode release to vdrain_thread if caller
		 * requests it explicitly or is the pagedaemon.
		 */
		if ((curlwp == uvm.pagedaemon_lwp) ||
		    (flags & VRELEL_ASYNC_RELE) != 0) {
			defer = true;
		} else if (curlwp == vdrain_lwp) {
			/*
			 * We have to try harder.
			 */
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			KASSERTMSG((error == 0), "vn_lock failed: %d", error);
			mutex_enter(vp->v_interlock);
			defer = false;
		} else {
			/* If we can't acquire the lock, then defer. */
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp,
			    LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
			defer = (error != 0);
			mutex_enter(vp->v_interlock);
		}

		KASSERT(mutex_owned(vp->v_interlock));
		KASSERT(! (curlwp == vdrain_lwp && defer));

		if (defer) {
			/*
			 * Defer reclaim to the kthread; it's not safe to
			 * clean it here.  We donate it our last reference.
			 */
			lru_requeue(vp, &lru_vrele_list);
			mutex_exit(vp->v_interlock);
			return;
		}

		/*
		 * If the node got another reference while we
		 * released the interlock, don't try to inactivate it yet.
		 */
		if (__predict_false(vtryrele(vp))) {
			VOP_UNLOCK(vp);
			mutex_exit(vp->v_interlock);
			return;
		}
		VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
		mutex_exit(vp->v_interlock);

		/*
		 * The vnode must not gain another reference while being
		 * deactivated.  If VOP_INACTIVE() indicates that
		 * the described file has been deleted, then recycle
		 * the vnode.
		 *
		 * Note that VOP_INACTIVE() will drop the vnode lock.
		 */
		VOP_INACTIVE(vp, &recycle);
		if (recycle) {
			/* vcache_reclaim() below will drop the lock. */
			if (vn_lock(vp, LK_EXCLUSIVE) != 0)
				recycle = false;
		}
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
		if (!recycle) {
			if (vtryrele(vp)) {
				mutex_exit(vp->v_interlock);
				return;
			}
		}

		/* Take care of space accounting. */
		if (vp->v_iflag & VI_EXECMAP) {
			atomic_add_int(&uvmexp.execpages,
			    -vp->v_uobj.uo_npages);
			atomic_add_int(&uvmexp.filepages,
			    vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			VSTATE_ASSERT(vp, VS_ACTIVE);
			vcache_reclaim(vp);
		}
		KASSERT(vp->v_usecount > 0);
	}

	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(vp->v_interlock);
		return;
	}

	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		KASSERT(vp->v_holdcnt == 0);
		KASSERT(vp->v_writecount == 0);
		mutex_exit(vp->v_interlock);
		vfs_insmntque(vp, NULL);
		if (vp->v_type == VBLK || vp->v_type == VCHR) {
			spec_node_destroy(vp);
		}
		vcache_free(VNODE_TO_VIMPL(vp));
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		lru_requeue(vp, lru_which(vp));
		mutex_exit(vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0);
}

/*
 * Asynchronous vnode release, vnode is released in different context.
 */
void
vrele_async(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC_RELE);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 */
void
vref(vnode_t *vp)
{

	KASSERT(vp->v_usecount != 0);

	atomic_inc_uint(&vp->v_usecount);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Recycle an unused vnode if caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{
	int error __diagused;

	mutex_enter(vp->v_interlock);

	/* Make sure we hold the last reference. */
	VSTATE_WAIT_STABLE(vp);
	if (vp->v_usecount != 1) {
		mutex_exit(vp->v_interlock);
		return false;
	}

	/* If the vnode is already clean we're done. */
	if (VSTATE_GET(vp) != VS_ACTIVE) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrelel(vp, 0);
		return true;
	}

	/* Prevent further references until the vnode is locked. */
	VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
	mutex_exit(vp->v_interlock);

	error = vn_lock(vp, LK_EXCLUSIVE);
	KASSERT(error == 0);

	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);

	vcache_reclaim(vp);
	vrelel(vp, 0);

	return true;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vp->v_usecount > 0);

	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		mutex_exit(vp->v_interlock);
		return;
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		mutex_exit(vp->v_interlock);
		vgone(vp);
		return;
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);
	}

	while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
		vgone(vq);
	}
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{

	if (vn_lock(vp, LK_EXCLUSIVE) != 0) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrele(vp);
		return;
	}

	mutex_enter(vp->v_interlock);
	vcache_reclaim(vp);
	vrelel(vp, 0);
}

static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
	uint32_t hash = HASH32_BUF_INIT;

	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
	return hash;
}

static void
vcache_init(void)
{

	vcache.pool = pool_cache_init(sizeof(vnode_impl_t), 0, 0, 0,
	    "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vcache.pool != NULL);
	mutex_init(&vcache.lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vcache.cv, "vcache");
	vcache.hashsize = desiredvnodes;
	vcache.hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
	    &vcache.hashmask);
}

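/*
 * Resize the vnode cache hash table to match desiredvnodes and rehash
 * all cached vnodes into the new table; the old table is then freed.
 */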
static void
vcache_reinit(void)
{
	int i;
	uint32_t hash;
	u_long oldmask, newmask;
	struct hashhead *oldtab, *newtab;
	vnode_impl_t *node;

	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
	mutex_enter(&vcache.lock);
	oldtab = vcache.hashtab;
	oldmask = vcache.hashmask;
	vcache.hashsize = desiredvnodes;
	vcache.hashtab = newtab;
	vcache.hashmask = newmask;
	for (i = 0; i <= oldmask; i++) {
		while ((node = SLIST_FIRST(&oldtab[i])) != NULL) {
			SLIST_REMOVE(&oldtab[i], node, vnode_impl, vi_hash);
			hash = vcache_hash(&node->vi_key);
			SLIST_INSERT_HEAD(&newtab[hash & vcache.hashmask],
			    node, vi_hash);
		}
	}
	mutex_exit(&vcache.lock);
	hashdone(oldtab, HASH_SLIST, oldmask);
}

static inline vnode_impl_t *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
	struct hashhead *hashp;
	vnode_impl_t *node;

	KASSERT(mutex_owned(&vcache.lock));

	hashp = &vcache.hashtab[hash & vcache.hashmask];
	SLIST_FOREACH(node, hashp, vi_hash) {
		if (key->vk_mount != node->vi_key.vk_mount)
			continue;
		if (key->vk_key_len != node->vi_key.vk_key_len)
			continue;
		if (memcmp(key->vk_key, node->vi_key.vk_key, key->vk_key_len))
			continue;
		return node;
	}
	return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static vnode_impl_t *
vcache_alloc(void)
{
	vnode_impl_t *node;
	vnode_t *vp;

	node = pool_cache_get(vcache.pool, PR_WAITOK);
	memset(node, 0, sizeof(*node));

	/* SLIST_INIT(&node->vi_hash); */

	vp = VIMPL_TO_VNODE(node);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	cv_init(&vp->v_cv, "vnode");
	/* LIST_INIT(&vp->v_nclist); */
	/* LIST_INIT(&vp->v_dnclist); */

	rw_init(&vp->v_lock);
	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	node->vi_state = VS_LOADING;

	lru_requeue(vp, &lru_free_list);

	return node;
}

/*
 * Free an unused, unreferenced vcache node.
 */
static void
vcache_free(vnode_impl_t *node)
{
	vnode_t *vp;

	vp = VIMPL_TO_VNODE(node);

	KASSERT(vp->v_usecount == 0);

	lru_requeue(vp, NULL);
	rw_destroy(&vp->v_lock);
	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	pool_cache_put(vcache.pool, node);
}

/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
	int error;
	uint32_t hash;
	const void *new_key;
	struct vnode *vp;
	struct vcache_key vcache_key;
	vnode_impl_t *node, *new_node;

	new_key = NULL;
	*vpp = NULL;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

again:
	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&vcache_key, hash);

	/* If found, take a reference or retry. */
	if (__predict_true(node != NULL)) {
		/*
		 * If the vnode is loading we cannot take the v_interlock
		 * here as it might change during load (see uvm_obj_setlock()).
		 * As changing state from VS_LOADING requires both vcache.lock
		 * and v_interlock it is safe to test with vcache.lock held.
		 *
		 * Wait for vnodes changing state from VS_LOADING and retry.
		 */
		if (__predict_false(node->vi_state == VS_LOADING)) {
			cv_wait(&vcache.cv, &vcache.lock);
			mutex_exit(&vcache.lock);
			goto again;
		}
		vp = VIMPL_TO_VNODE(node);
		mutex_enter(vp->v_interlock);
		mutex_exit(&vcache.lock);
		error = vget(vp, 0, true /* wait */);
		if (error == ENOENT)
			goto again;
		if (error == 0)
			*vpp = vp;
		KASSERT((error != 0) == (*vpp == NULL));
		return error;
	}
	mutex_exit(&vcache.lock);

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp, NULL);
	if (error)
		return error;
	new_node = vcache_alloc();
	new_node->vi_key = vcache_key;
	vp = VIMPL_TO_VNODE(new_node);
	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&vcache_key, hash);
	if (node == NULL) {
		SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
		    new_node, vi_hash);
		node = new_node;
	}

	/* If another thread beat us inserting this node, retry. */
	if (node != new_node) {
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		goto again;
	}
	mutex_exit(&vcache.lock);

	/* Load the fs node.  Exclusive as new_node is VS_LOADING. */
	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
	if (error) {
		mutex_enter(&vcache.lock);
		SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
		    new_node, vnode_impl, vi_hash);
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_key != NULL);
	KASSERT(memcmp(key, new_key, key_len) == 0);
	KASSERT(vp->v_op != NULL);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_unbusy(mp, true, NULL);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache.lock);
	new_node->vi_key.vk_key = new_key;
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache.lock);
	*vpp = vp;
	return 0;
}
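
/*
 * Example (sketch): a disk based file system typically uses its inode
 * number as the cache key, so its vget-style entry point reduces to
 * (mp and ino are illustrative names):
 *
 *	ino_t ino;
 *	struct vnode *vp;
 *	int error;
 *
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *
 * On success vp is referenced and unlocked, with the fs node either
 * found in the cache or loaded via VFS_LOADVNODE().
 */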

/*
 * Create a new vnode / fs node pair and return it referenced through vpp.
 */
int
vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
    kauth_cred_t cred, struct vnode **vpp)
{
	int error;
	uint32_t hash;
	struct vnode *ovp, *vp;
	vnode_impl_t *new_node;
	vnode_impl_t *old_node __diagused;

	*vpp = NULL;

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp, NULL);
	if (error)
		return error;
	new_node = vcache_alloc();
	new_node->vi_key.vk_mount = mp;
	vp = VIMPL_TO_VNODE(new_node);

	/* Create and load the fs node. */
	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred,
	    &new_node->vi_key.vk_key_len, &new_node->vi_key.vk_key);
	if (error) {
		mutex_enter(&vcache.lock);
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_node->vi_key.vk_key != NULL);
	KASSERT(vp->v_op != NULL);
	hash = vcache_hash(&new_node->vi_key);

	/* Wait for previous instance to be reclaimed, then insert new node. */
	mutex_enter(&vcache.lock);
	while ((old_node = vcache_hash_lookup(&new_node->vi_key, hash))) {
		ovp = VIMPL_TO_VNODE(old_node);
		mutex_enter(ovp->v_interlock);
		mutex_exit(&vcache.lock);
		error = vget(ovp, 0, true /* wait */);
		KASSERT(error == ENOENT);
		mutex_enter(&vcache.lock);
	}
	SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
	    new_node, vi_hash);
	mutex_exit(&vcache.lock);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_unbusy(mp, true, NULL);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache.lock);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
	mutex_exit(&vcache.lock);
	mutex_exit(vp->v_interlock);
	*vpp = vp;
	return 0;
}
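
/*
 * Example (sketch): file creation paths (VOP_CREATE() et al.) allocate
 * the new file with vcache_new(); the key is produced by the file
 * system's VFS_NEWVNODE() implementation:
 *
 *	error = vcache_new(dvp->v_mount, dvp, vap, cred, &vp);
 *
 * On success vp is referenced, unlocked and already in the cache.
 */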

/*
 * Prepare key change: lock old and new cache node.
 * Return an error if the new node already exists.
 */
int
vcache_rekey_enter(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *node, *new_node;
	struct vnode *tvp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	new_node = vcache_alloc();
	new_node->vi_key = new_vcache_key;
	tvp = VIMPL_TO_VNODE(new_node);

	/* Insert locked new node used as placeholder. */
	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&new_vcache_key, new_hash);
	if (node != NULL) {
		mutex_enter(tvp->v_interlock);
		VSTATE_CHANGE(tvp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(tvp, 0);
		return EEXIST;
	}
	SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
	    new_node, vi_hash);

	/* Lock old node. */
	node = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(node != NULL);
	KASSERT(VIMPL_TO_VNODE(node) == vp);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
	node->vi_key = old_vcache_key;
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache.lock);
	return 0;
}

/*
 * Key change complete: remove old node and unlock new node.
 */
void
vcache_rekey_exit(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *old_node, *new_node;
	struct vnode *tvp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	mutex_enter(&vcache.lock);

	/* Lookup old and new node. */
	old_node = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(old_node != NULL);
	KASSERT(VIMPL_TO_VNODE(old_node) == vp);
	mutex_enter(vp->v_interlock);
	VSTATE_ASSERT(vp, VS_BLOCKED);

	new_node = vcache_hash_lookup(&new_vcache_key, new_hash);
	KASSERT(new_node != NULL);
	KASSERT(new_node->vi_key.vk_key_len == new_key_len);
	tvp = VIMPL_TO_VNODE(new_node);
	mutex_enter(tvp->v_interlock);
	VSTATE_ASSERT(tvp, VS_LOADING);

	/* Rekey old node and put it onto its new hashlist. */
	old_node->vi_key = new_vcache_key;
	if (old_hash != new_hash) {
		SLIST_REMOVE(&vcache.hashtab[old_hash & vcache.hashmask],
		    old_node, vnode_impl, vi_hash);
		SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
		    old_node, vi_hash);
	}
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
	mutex_exit(vp->v_interlock);

	/* Remove new node used as placeholder. */
	SLIST_REMOVE(&vcache.hashtab[new_hash & vcache.hashmask],
	    new_node, vnode_impl, vi_hash);
	VSTATE_CHANGE(tvp, VS_LOADING, VS_RECLAIMED);
	mutex_exit(&vcache.lock);
	vrelel(tvp, 0);
}

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with vnode locked and will return unlocked.
 * Must be called with the interlock held, and will return with it held.
 */
static void
vcache_reclaim(vnode_t *vp)
{
	lwp_t *l = curlwp;
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);
	uint32_t hash;
	uint8_t temp_buf[64], *temp_key;
	size_t temp_key_len;
	bool recycle, active;
	int error;

	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount != 0);

	active = (vp->v_usecount > 1);
	temp_key_len = node->vi_key.vk_key_len;
	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	VSTATE_CHANGE(vp, VS_ACTIVE, VS_RECLAIMING);
	if (vp->v_iflag & VI_EXECMAP) {
		atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
		atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	mutex_exit(vp->v_interlock);

	/* Replace the vnode key with a temporary copy. */
	if (node->vi_key.vk_key_len > sizeof(temp_buf)) {
		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
	} else {
		temp_key = temp_buf;
	}
	mutex_enter(&vcache.lock);
	memcpy(temp_key, node->vi_key.vk_key, temp_key_len);
	node->vi_key.vk_key = temp_key;
	mutex_exit(&vcache.lock);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.
	 */
	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
	if (error != 0) {
		if (wapbl_vphaswapbl(vp))
			WAPBL_DISCARD(wapbl_vptomp(vp));
		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
	}
	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
		spec_node_revoke(vp);
	}

	/*
	 * Disassociate the underlying file system from the vnode.
	 * Note that the VOP_INACTIVE will unlock the vnode.
	 */
	VOP_INACTIVE(vp, &recycle);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	KASSERT(vp->v_data == NULL);
	KASSERT(vp->v_uobj.uo_npages == 0);

	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}

	/* Purge name cache. */
	cache_purge(vp);

	/* Move to dead mount. */
	vp->v_vflag &= ~VV_ROOT;
	atomic_inc_uint(&dead_rootmount->mnt_refcnt);
	vfs_insmntque(vp, dead_rootmount);

	/* Remove from vnode cache. */
	hash = vcache_hash(&node->vi_key);
	mutex_enter(&vcache.lock);
	KASSERT(node == vcache_hash_lookup(&node->vi_key, hash));
	SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
	    node, vnode_impl, vi_hash);
	mutex_exit(&vcache.lock);
	if (temp_key != temp_buf)
		kmem_free(temp_key, temp_key_len);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_vflag |= VV_LOCKSWORK;
	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
	vp->v_tag = VT_NON;
	KNOTE(&vp->v_klist, NOTE_REVOKE);

	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(struct buf *bp)
{
	vnode_t *vp;

	if ((vp = bp->b_vp) == NULL)
		return;

	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));

	if (--vp->v_numoutput < 0)
		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
	if (vp->v_numoutput == 0)
		cv_broadcast(&vp->v_cv);
}

/*
 * Test a vnode for being or becoming dead.  Returns one of:
 * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
 * ENOENT: vnode is dead.
 * 0:      otherwise.
 *
 * Whenever this function returns a non-zero value all future
 * calls will also return a non-zero value.
 */
int
vdead_check(struct vnode *vp, int flags)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (! ISSET(flags, VDEAD_NOWAIT))
		VSTATE_WAIT_STABLE(vp);

	if (VSTATE_GET(vp) == VS_RECLAIMING) {
		KASSERT(ISSET(flags, VDEAD_NOWAIT));
		return EBUSY;
	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
		return ENOENT;
	}

	return 0;
}
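
/*
 * Example (sketch): probing for a dying vnode without blocking, as a
 * revoke-aware caller might do while holding v_interlock:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		// EBUSY: becoming dead, ENOENT: dead -- back off
 */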
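/*
 * Bring the number of vnodes below desiredvnodes: kick the vdrain
 * thread and wait for two vdrain generations (ensuring at least one
 * complete pass), then resize the vnode cache hash table to match
 * desiredvnodes.  Returns EBUSY if the vnode count is still at or
 * above desiredvnodes.
 */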
int
vfs_drainvnodes(void)
{
	int i, gen;

	mutex_enter(&vdrain_lock);
	for (i = 0; i < 2; i++) {
		gen = vdrain_gen;
		while (gen == vdrain_gen) {
			cv_broadcast(&vdrain_cv);
			cv_wait(&vdrain_gen_cv, &vdrain_lock);
		}
	}
	mutex_exit(&vdrain_lock);

	if (numvnodes >= desiredvnodes)
		return EBUSY;

	if (vcache.hashsize != desiredvnodes)
		vcache_reinit();

	return 0;
}

void
vnpanic(vnode_t *vp, const char *fmt, ...)
{
	va_list ap;

#ifdef DIAGNOSTIC
	vprint(NULL, vp);
#endif
	va_start(ap, fmt);
	vpanic(fmt, ap);
	va_end(ap);
}