/*	$NetBSD: vfs_vnode.c,v 1.80 2017/03/30 09:15:51 hannken Exp $	*/

/*-
 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vcache_vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was another, traditional way.  Currently, only the draining thread
 *	recycles vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In such a case, VOP_INACTIVE(9) is called to
 *	inform the file system that the vnode is inactive.  Via this call,
 *	the file system indicates whether the vnode can be recycled
 *	(usually, it checks its own references, e.g. the link count or
 *	whether the file was removed).
 *
 *	Depending on this indication, the vnode can be put onto a free list
 *	(cache), or cleaned via vcache_reclaim, which calls VOP_RECLAIM(9)
 *	to disassociate the underlying file system from the vnode, after
 *	which the vnode is finally destroyed.
 *
 * Vnode state
 *
 *	A vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating with the underlying file
 *			system and is not yet ready to use.
 *	- ACTIVE	Vnode has an associated underlying file system and
 *			is ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from the underlying file
 *			system and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> ACTIVE
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	ACTIVE -> RECLAIMING
 *			Vnode starts disassociation from the underlying file
 *			system in vcache_reclaim().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from the underlying
 *			file system in vcache_reclaim().
 *	ACTIVE -> BLOCKED
 *			Either vcache_rekey*() is changing the vnode key or
 *			vrelel() is about to call VOP_INACTIVE().
 *	BLOCKED -> ACTIVE
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system, or
 *			vcache_rekey*() drops a vnode used as a placeholder.
 *
 *	Of these states LOADING, BLOCKED and RECLAIMING are intermediate,
 *	and it is possible to wait for a state change.
 *
 *	State is protected with v_interlock, with one exception:
 *	to change from LOADING both v_interlock and vcache_lock must be
 *	held, so it is possible to check "state == LOADING" without holding
 *	v_interlock.  See vcache_get() for details.
 *
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained by the
 *	vref(9), vrele(9) and vput(9) routines.  Common points holding
 *	references are, e.g., open files, current working directories,
 *	mount points, etc.
 *
 * Note on v_usecount and its locking
 *
 *	At nearly all points where it is known that v_usecount could be
 *	zero, vnode_t::v_interlock will be held.  To change v_usecount away
 *	from zero, the interlock must be held.  To change from a non-zero
 *	value to zero, again the interlock must be held.
 *
 *	Changing the usecount from a non-zero value to a non-zero value can
 *	safely be done using atomic operations, without the interlock held.
 */
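
/*
 * The fragment below is an illustrative, uncompiled sketch of this
 * life-cycle from a consumer's point of view.  The function name and the
 * 32-bit inode-number key are hypothetical (the key layout is each file
 * system's choice); only vcache_get(), vn_lock(), vrele() and vput() are
 * the real interfaces.
 */
#if 0
static int
example_lookup_and_use(struct mount *mp, uint32_t ino)
{
	struct vnode *vp;
	int error;

	/* Allocation: returns a referenced but unlocked vnode. */
	error = vcache_get(mp, &ino, sizeof(ino), &vp);
	if (error != 0)
		return error;

	/* Lock it before issuing vnode operations. */
	error = vn_lock(vp, LK_EXCLUSIVE);
	if (error != 0) {
		vrele(vp);
		return error;
	}

	/* ... VOP_*() calls go here ... */

	/* Unlock and drop the reference; the last vrele() inactivates. */
	vput(vp);
	return 0;
}
#endif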

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.80 2017/03/30 09:15:51 hannken Exp $");

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode_impl.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC_RELE	0x0001	/* Always defer to vrele thread. */
#define	VRELEL_FORCE_RELE	0x0002	/* Must always succeed. */

u_int			numvnodes		__cacheline_aligned;

/*
 * There are three lru lists: one holds vnodes waiting for async release,
 * one is for vnodes which have no buffer/page references and
 * one for those which do (i.e. v_holdcnt is non-zero).
 */
static vnodelst_t	lru_vrele_list		__cacheline_aligned;
static vnodelst_t	lru_free_list		__cacheline_aligned;
static vnodelst_t	lru_hold_list		__cacheline_aligned;
static kmutex_t		vdrain_lock		__cacheline_aligned;
static kcondvar_t	vdrain_cv		__cacheline_aligned;
static int		vdrain_gen;
static kcondvar_t	vdrain_gen_cv;
static bool		vdrain_retry;
static lwp_t *		vdrain_lwp;
SLIST_HEAD(hashhead, vnode_impl);
static kmutex_t		vcache_lock		__cacheline_aligned;
static kcondvar_t	vcache_cv		__cacheline_aligned;
static u_int		vcache_hashsize;
static u_long		vcache_hashmask;
static struct hashhead	*vcache_hashtab		__cacheline_aligned;
static pool_cache_t	vcache_pool;
static void		lru_requeue(vnode_t *, vnodelst_t *);
static vnodelst_t *	lru_which(vnode_t *);
static vnode_impl_t *	vcache_alloc(void);
static void		vcache_dealloc(vnode_impl_t *);
static void		vcache_free(vnode_impl_t *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vcache_reclaim(vnode_t *);
static void		vrelel(vnode_t *, int);
static void		vdrain_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount	*dead_rootmount;
extern int		(**dead_vnodeop_p)(void *);
extern struct vfsops	dead_vfsops;

/* Vnode state operations and diagnostics. */

#if defined(DIAGNOSTIC)

#define VSTATE_GET(vp) \
	vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
	vstate_assert_wait_stable((vp), __func__, __LINE__)
#define VSTATE_ASSERT(vp, state) \
	vstate_assert((vp), (state), __func__, __LINE__)

static void
vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if (__predict_true(vip->vi_state == state))
		return;
	vnpanic(vp, "state is %s, expected %s at %s:%d",
	    vstate_name(vip->vi_state), vstate_name(state), func, line);
}

static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (vip->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	return vip->vi_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (vip->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	while (vip->vi_state != VS_ACTIVE && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (vip->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);
}

static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);

	if (from == VS_MARKER)
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (to == VS_MARKER)
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (vip->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(vip->vi_state), vstate_name(from), func, line);
	if ((from == VS_BLOCKED || to == VS_BLOCKED) && vp->v_usecount != 1)
		vnpanic(vp, "%s to %s with usecount %d at %s:%d",
		    vstate_name(from), vstate_name(to), vp->v_usecount,
		    func, line);

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_ACTIVE || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
	(VNODE_TO_VIMPL((vp))->vi_state)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
	vstate_wait_stable((vp))
#define VSTATE_ASSERT(vp, state)

static void
vstate_wait_stable(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	while (vip->vi_state != VS_ACTIVE && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_ACTIVE || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */

void
vfs_vnode_sysinit(void)
{
	int error __diagused;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag = IMNT_MPSAFE;

	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&lru_free_list);
	TAILQ_INIT(&lru_hold_list);
	TAILQ_INIT(&lru_vrele_list);

	vcache_init();

	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vdrain_gen_cv, "vdrainwt");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, &vdrain_lwp, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	memset(vip, 0, sizeof(*vip));
	vp = VIMPL_TO_VNODE(vip);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	vip->vi_state = VS_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	vnode_impl_t *vip;

	vip = VNODE_TO_VIMPL(vp);
	KASSERT(vip->vi_state == VS_MARKER);
	uvm_obj_destroy(&vp->v_uobj, true);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
}

/*
 * Return the lru list this node should be on.
 */
static vnodelst_t *
lru_which(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt > 0)
		return &lru_hold_list;
	else
		return &lru_free_list;
}

/*
 * Put the vnode at the end of the given list.
 * Both the current and the new list may be NULL (used on vnode alloc/free).
 * Adjust numvnodes and signal the vdrain thread if there is work.
 */
static void
lru_requeue(vnode_t *vp, vnodelst_t *listhd)
{
	vnode_impl_t *vip;

	mutex_enter(&vdrain_lock);
	vip = VNODE_TO_VIMPL(vp);
	if (vip->vi_lrulisthd != NULL)
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		numvnodes++;
	vip->vi_lrulisthd = listhd;
	if (vip->vi_lrulisthd != NULL)
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		numvnodes--;
	if (numvnodes > desiredvnodes || listhd == &lru_vrele_list)
		cv_broadcast(&vdrain_cv);
	mutex_exit(&vdrain_lock);
}

/*
 * Release deferred vrele vnodes for this mount.
 * Called with file system suspended.
 */
void
vrele_flush(struct mount *mp)
{
	vnode_impl_t *vip, *marker;

	KASSERT(fstrans_is_owner(mp));

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);
	TAILQ_INSERT_HEAD(&lru_vrele_list, marker, vi_lrulist);

	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
		TAILQ_REMOVE(&lru_vrele_list, marker, vi_lrulist);
		TAILQ_INSERT_AFTER(&lru_vrele_list, vip, marker, vi_lrulist);
		if (vnis_marker(VIMPL_TO_VNODE(vip)))
			continue;

		KASSERT(vip->vi_lrulisthd == &lru_vrele_list);
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
		vip->vi_lrulisthd = &lru_hold_list;
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
		mutex_exit(&vdrain_lock);

		mutex_enter(VIMPL_TO_VNODE(vip)->v_interlock);
		vrelel(VIMPL_TO_VNODE(vip), VRELEL_FORCE_RELE);

		mutex_enter(&vdrain_lock);
	}

	TAILQ_REMOVE(&lru_vrele_list, marker, vi_lrulist);
	mutex_exit(&vdrain_lock);

	vnfree_marker(VIMPL_TO_VNODE(marker));
}

/*
 * Reclaim a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_remove(vnode_t *vp)
{
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	/* Probe usecount (unlocked). */
	if (vp->v_usecount > 0)
		return;
	/* Try v_interlock -- we lock the wrong direction! */
	if (!mutex_tryenter(vp->v_interlock))
		return;
	/* Probe usecount and state. */
	if (vp->v_usecount > 0 || VSTATE_GET(vp) != VS_ACTIVE) {
		mutex_exit(vp->v_interlock);
		return;
	}
	mp = vp->v_mount;
	if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
		mutex_exit(vp->v_interlock);
		return;
	}
	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	if (vcache_vget(vp) == 0) {
		if (!vrecycle(vp)) {
			mutex_enter(vp->v_interlock);
			vrelel(vp, VRELEL_FORCE_RELE);
		}
	}
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Release a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_vrele(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	mp = vp->v_mount;
	if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0)
		return;

	/*
	 * First remove the vnode from the vrele list.
	 * Put it on the hold list: the last vrele()
	 * will put it back onto the right list before
	 * its v_usecount reaches zero.
	 */
	KASSERT(vip->vi_lrulisthd == &lru_vrele_list);
	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	vip->vi_lrulisthd = &lru_hold_list;
	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);

	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_FORCE_RELE);
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes
 * and release vnodes from asynchronous vrele.
 */
static void
vdrain_thread(void *cookie)
{
	vnodelst_t *listhd[] = {
	    &lru_vrele_list, &lru_free_list, &lru_hold_list
	};
	int i;
	u_int target;
	vnode_impl_t *vip, *marker;

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);

	for (;;) {
		vdrain_retry = false;
		target = desiredvnodes - desiredvnodes/10;

		for (i = 0; i < __arraycount(listhd); i++) {
			TAILQ_INSERT_HEAD(listhd[i], marker, vi_lrulist);
			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
				TAILQ_REMOVE(listhd[i], marker, vi_lrulist);
				TAILQ_INSERT_AFTER(listhd[i], vip, marker,
				    vi_lrulist);
				if (vnis_marker(VIMPL_TO_VNODE(vip)))
					continue;
				if (listhd[i] == &lru_vrele_list)
					vdrain_vrele(VIMPL_TO_VNODE(vip));
				else if (numvnodes < target)
					break;
				else
					vdrain_remove(VIMPL_TO_VNODE(vip));
			}
			TAILQ_REMOVE(listhd[i], marker, vi_lrulist);
		}

		if (vdrain_retry) {
			mutex_exit(&vdrain_lock);
			yield();
			mutex_enter(&vdrain_lock);
		} else {
			vdrain_gen++;
			cv_broadcast(&vdrain_gen_cv);
			cv_wait(&vdrain_cv, &vdrain_lock);
		}
	}
}

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Try to drop a reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static inline bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = vp->v_usecount;; use = next) {
		if (use == 1) {
			return false;
		}
		KASSERT(use > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * Vnode release.  If the reference count drops to zero, call the
 * inactive routine and either return the vnode to the freelist or
 * free it to the pool.
 */
static void
vrelel(vnode_t *vp, int flags)
{
	const bool async = ((flags & VRELEL_ASYNC_RELE) != 0);
	const bool force = ((flags & VRELEL_FORCE_RELE) != 0);
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VS_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count
	 * and unlock.
	 */
	if (vtryrele(vp)) {
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * First try to get the vnode locked for VOP_INACTIVE().
	 * Defer vnode release to vdrain_thread if the caller requests
	 * it explicitly, is the pagedaemon, or if the lock attempt failed.
	 */
	if ((curlwp == uvm.pagedaemon_lwp) || async) {
		defer = true;
	} else {
		mutex_exit(vp->v_interlock);
		error = vn_lock(vp,
		    LK_EXCLUSIVE | LK_RETRY | (force ? 0 : LK_NOWAIT));
		defer = (error != 0);
		mutex_enter(vp->v_interlock);
	}
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(! (force && defer));
	if (defer) {
		/*
		 * Defer reclaim to the kthread; it's not safe to
		 * clean it here.  We donate it our last reference.
		 */
		lru_requeue(vp, &lru_vrele_list);
		mutex_exit(vp->v_interlock);
		return;
	}

	/*
	 * If the node got another reference while we
	 * released the interlock, don't try to inactivate it yet.
	 */
	if (__predict_false(vtryrele(vp))) {
		VOP_UNLOCK(vp);
		mutex_exit(vp->v_interlock);
		return;
	}

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		VOP_UNLOCK(vp);
	} else {
		VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
		mutex_exit(vp->v_interlock);

		/*
		 * The vnode must not gain another reference while being
		 * deactivated.  If VOP_INACTIVE() indicates that
		 * the described file has been deleted, then recycle
		 * the vnode.
		 *
		 * Note that VOP_INACTIVE() will drop the vnode lock.
		 */
		recycle = false;
		VOP_INACTIVE(vp, &recycle);
		if (recycle) {
			/* vcache_reclaim() below will drop the lock. */
			if (vn_lock(vp, LK_EXCLUSIVE) != 0)
				recycle = false;
		}
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
		if (!recycle) {
			if (vtryrele(vp)) {
				mutex_exit(vp->v_interlock);
				return;
			}
		}

		/* Take care of space accounting. */
		if (vp->v_iflag & VI_EXECMAP) {
			atomic_add_int(&uvmexp.execpages,
			    -vp->v_uobj.uo_npages);
			atomic_add_int(&uvmexp.filepages,
			    vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			VSTATE_ASSERT(vp, VS_ACTIVE);
			vcache_reclaim(vp);
		}
		KASSERT(vp->v_usecount > 0);
	}

	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(vp->v_interlock);
		return;
	}

	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		vcache_free(VNODE_TO_VIMPL(vp));
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		lru_requeue(vp, lru_which(vp));
		mutex_exit(vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0);
}

/*
 * Asynchronous vnode release: the vnode is released in a different
 * context.
 */
void
vrele_async(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC_RELE);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 */
void
vref(vnode_t *vp)
{

	KASSERT(vp->v_usecount != 0);

	atomic_inc_uint(&vp->v_usecount);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Recycle an unused vnode if the caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{
	int error __diagused;

	mutex_enter(vp->v_interlock);

	/* Make sure we hold the last reference. */
	VSTATE_WAIT_STABLE(vp);
	if (vp->v_usecount != 1) {
		mutex_exit(vp->v_interlock);
		return false;
	}

	/* If the vnode is already clean we're done. */
	if (VSTATE_GET(vp) != VS_ACTIVE) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrelel(vp, 0);
		return true;
	}

	/* Prevent further references until the vnode is locked. */
	VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
	mutex_exit(vp->v_interlock);

	/*
	 * On a leaf file system this lock will always succeed as we hold
	 * the last reference and prevent further references.
	 * On layered file systems waiting for the lock would open a can of
	 * deadlocks as the lower vnodes may have other active references.
	 */
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);

	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);

	if (error) {
		mutex_exit(vp->v_interlock);
		return false;
	}

	KASSERT(vp->v_usecount == 1);
	vcache_reclaim(vp);
	vrelel(vp, 0);

	return true;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vp->v_usecount > 0);

	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		mutex_exit(vp->v_interlock);
		return;
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		mutex_exit(vp->v_interlock);
		vgone(vp);
		return;
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);
	}

	while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
		vgone(vq);
	}
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_ACTIVE)
		vcache_reclaim(vp);
	VSTATE_ASSERT(vp, VS_RECLAIMED);
	vrelel(vp, 0);
}

static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
	uint32_t hash = HASH32_BUF_INIT;

	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
	return hash;
}

static void
vcache_init(void)
{

	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), 0, 0, 0,
	    "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vcache_pool != NULL);
	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vcache_cv, "vcache");
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
	    &vcache_hashmask);
}

static void
vcache_reinit(void)
{
	int i;
	uint32_t hash;
	u_long oldmask, newmask;
	struct hashhead *oldtab, *newtab;
	vnode_impl_t *vip;

	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
	mutex_enter(&vcache_lock);
	oldtab = vcache_hashtab;
	oldmask = vcache_hashmask;
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = newtab;
	vcache_hashmask = newmask;
	for (i = 0; i <= oldmask; i++) {
		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
			hash = vcache_hash(&vip->vi_key);
			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
			    vip, vi_hash);
		}
	}
	mutex_exit(&vcache_lock);
	hashdone(oldtab, HASH_SLIST, oldmask);
}

static inline vnode_impl_t *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
	struct hashhead *hashp;
	vnode_impl_t *vip;

	KASSERT(mutex_owned(&vcache_lock));

	hashp = &vcache_hashtab[hash & vcache_hashmask];
	SLIST_FOREACH(vip, hashp, vi_hash) {
		if (key->vk_mount != vip->vi_key.vk_mount)
			continue;
		if (key->vk_key_len != vip->vi_key.vk_key_len)
			continue;
		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
			continue;
		return vip;
	}
	return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static vnode_impl_t *
vcache_alloc(void)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	memset(vip, 0, sizeof(*vip));

	rw_init(&vip->vi_lock);
	/* SLIST_INIT(&vip->vi_hash); */
	/* LIST_INIT(&vip->vi_nclist); */
	/* LIST_INIT(&vip->vi_dnclist); */

	vp = VIMPL_TO_VNODE(vip);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	cv_init(&vp->v_cv, "vnode");

	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	vip->vi_state = VS_LOADING;

	lru_requeue(vp, &lru_free_list);

	return vip;
}

/*
 * Deallocate a vcache node in state VS_LOADING.
 *
 * vcache_lock held on entry and released on return.
 */
static void
vcache_dealloc(vnode_impl_t *vip)
{
	vnode_t *vp;

	KASSERT(mutex_owned(&vcache_lock));

	vp = VIMPL_TO_VNODE(vip);
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
	mutex_exit(&vcache_lock);
	vrelel(vp, 0);
}

/*
 * Free an unused, unreferenced vcache node.
 * v_interlock locked on entry.
 */
static void
vcache_free(vnode_impl_t *vip)
{
	vnode_t *vp;

	vp = VIMPL_TO_VNODE(vip);
	KASSERT(mutex_owned(vp->v_interlock));

	KASSERT(vp->v_usecount == 0);
	KASSERT(vp->v_holdcnt == 0);
	KASSERT(vp->v_writecount == 0);
	lru_requeue(vp, NULL);
	mutex_exit(vp->v_interlock);

	vfs_insmntque(vp, NULL);
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		spec_node_destroy(vp);

	rw_destroy(&vip->vi_lock);
	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success, ENOENT if the vnode has been reclaimed and
 * EBUSY if the vnode state is unstable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_tryvget(vnode_t *vp)
{
	int error = 0;

	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED))
		error = ENOENT;
	else if (__predict_false(VSTATE_GET(vp) != VS_ACTIVE))
		error = EBUSY;
	else if (vp->v_usecount == 0)
		vp->v_usecount = 1;
	else
		atomic_inc_uint(&vp->v_usecount);

	mutex_exit(vp->v_interlock);

	return error;
}
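
/*
 * Illustrative, uncompiled sketch: a caller that must not sleep probes
 * with vcache_tryvget() and may fall back to the blocking vcache_vget()
 * once sleeping is allowed.  The surrounding retry logic is hypothetical;
 * only the two interfaces and their error codes are real.
 */
#if 0
	mutex_enter(vp->v_interlock);
	error = vcache_tryvget(vp);		/* drops v_interlock */
	if (error == EBUSY) {
		/* State was unstable; wait for it once we may sleep. */
		mutex_enter(vp->v_interlock);
		error = vcache_vget(vp);	/* drops v_interlock */
	}
	if (error == ENOENT) {
		/* The vnode was reclaimed; look it up again. */
	}
#endif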

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success and ENOENT if the vnode has been reclaimed.
 * Will wait for the vnode state to be stable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_vget(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	/* Increment hold count to prevent vnode from disappearing. */
	vp->v_holdcnt++;
	VSTATE_WAIT_STABLE(vp);
	vp->v_holdcnt--;

	/* If this was the last reference to a reclaimed vnode free it now. */
	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
		if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
			vcache_free(VNODE_TO_VIMPL(vp));
		else
			mutex_exit(vp->v_interlock);
		return ENOENT;
	}
	VSTATE_ASSERT(vp, VS_ACTIVE);
	if (vp->v_usecount == 0)
		vp->v_usecount = 1;
	else
		atomic_inc_uint(&vp->v_usecount);

	mutex_exit(vp->v_interlock);

	return 0;
}

/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
	int error;
	uint32_t hash;
	const void *new_key;
	struct vnode *vp;
	struct vcache_key vcache_key;
	vnode_impl_t *vip, *new_vip;

	new_key = NULL;
	*vpp = NULL;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

again:
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);

	/* If found, take a reference or retry. */
	if (__predict_true(vip != NULL)) {
		/*
		 * If the vnode is loading we cannot take the v_interlock
		 * here as it might change during load (see uvm_obj_setlock()).
		 * As changing state from VS_LOADING requires both vcache_lock
		 * and v_interlock it is safe to test with vcache_lock held.
		 *
		 * Wait for vnodes changing state from VS_LOADING and retry.
		 */
		if (__predict_false(vip->vi_state == VS_LOADING)) {
			cv_wait(&vcache_cv, &vcache_lock);
			mutex_exit(&vcache_lock);
			goto again;
		}
		vp = VIMPL_TO_VNODE(vip);
		mutex_enter(vp->v_interlock);
		mutex_exit(&vcache_lock);
		error = vcache_vget(vp);
		if (error == ENOENT)
			goto again;
		if (error == 0)
			*vpp = vp;
		KASSERT((error != 0) == (*vpp == NULL));
		return error;
	}
	mutex_exit(&vcache_lock);

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp, NULL);
	if (error)
		return error;
	new_vip = vcache_alloc();
	new_vip->vi_key = vcache_key;
	vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);
	if (vip == NULL) {
		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vi_hash);
		vip = new_vip;
	}

	/* If another thread beat us inserting this node, retry. */
	if (vip != new_vip) {
		vcache_dealloc(new_vip);
		vfs_unbusy(mp, false, NULL);
		goto again;
	}
	mutex_exit(&vcache_lock);

	/* Load the fs node.  Exclusive as new_node is VS_LOADING. */
	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
	if (error) {
		mutex_enter(&vcache_lock);
		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vnode_impl, vi_hash);
		vcache_dealloc(new_vip);
		vfs_unbusy(mp, false, NULL);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_key != NULL);
	KASSERT(memcmp(key, new_key, key_len) == 0);
	KASSERT(vp->v_op != NULL);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_unbusy(mp, true, NULL);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	new_vip->vi_key.vk_key = new_key;
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache_lock);
	*vpp = vp;
	return 0;
}
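
/*
 * Illustrative, uncompiled sketch: a file system's VFS_VGET() typically
 * wraps vcache_get() with its native key and then locks the result.  The
 * function name and the 32-bit key layout are hypothetical; ufs_vget()
 * is one real user of this pattern.
 */
#if 0
int
example_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
{
	uint32_t key = ino;
	int error;

	error = vcache_get(mp, &key, sizeof(key), vpp);
	if (error)
		return error;
	/* vcache_get() returns the vnode referenced but unlocked. */
	error = vn_lock(*vpp, LK_EXCLUSIVE);
	if (error) {
		vrele(*vpp);
		*vpp = NULL;
	}
	return error;
}
#endif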

/*
 * Create a new vnode / fs node pair and return it referenced through vpp.
 */
int
vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
    kauth_cred_t cred, struct vnode **vpp)
{
	int error;
	uint32_t hash;
	struct vnode *vp, *ovp;
	vnode_impl_t *vip, *ovip;

	*vpp = NULL;

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp, NULL);
	if (error)
		return error;
	vip = vcache_alloc();
	vip->vi_key.vk_mount = mp;
	vp = VIMPL_TO_VNODE(vip);

	/* Create and load the fs node. */
	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred,
	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
	if (error) {
		mutex_enter(&vcache_lock);
		vcache_dealloc(vip);
		vfs_unbusy(mp, false, NULL);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(vip->vi_key.vk_key != NULL);
	KASSERT(vp->v_op != NULL);
	hash = vcache_hash(&vip->vi_key);

	/* Wait for previous instance to be reclaimed, then insert new node. */
	mutex_enter(&vcache_lock);
	while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
		ovp = VIMPL_TO_VNODE(ovip);
		mutex_enter(ovp->v_interlock);
		mutex_exit(&vcache_lock);
		error = vcache_vget(ovp);
		KASSERT(error == ENOENT);
		mutex_enter(&vcache_lock);
	}
	SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
	    vip, vi_hash);
	mutex_exit(&vcache_lock);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_unbusy(mp, true, NULL);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
	mutex_exit(&vcache_lock);
	mutex_exit(vp->v_interlock);
	*vpp = vp;
	return 0;
}

/*
 * Prepare key change: update the old cache node's key and lock the new
 * cache node.  Return an error if the new node already exists.
 */
int
vcache_rekey_enter(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *vip, *new_vip;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	new_vip = vcache_alloc();
	new_vip->vi_key = new_vcache_key;

	/* Insert locked new node used as placeholder. */
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
	if (vip != NULL) {
		vcache_dealloc(new_vip);
		return EEXIST;
	}
	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_vip, vi_hash);

	/* Replace the old node's key with the temporary copy. */
	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(vip != NULL);
	KASSERT(VIMPL_TO_VNODE(vip) == vp);
	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
	vip->vi_key = old_vcache_key;
	mutex_exit(&vcache_lock);
	return 0;
}

/*
 * Key change complete: update the old node and remove the placeholder.
 */
void
vcache_rekey_exit(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *vip, *new_vip;
	struct vnode *new_vp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	mutex_enter(&vcache_lock);

	/* Lookup old and new node. */
	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(vip != NULL);
	KASSERT(VIMPL_TO_VNODE(vip) == vp);

	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
	KASSERT(new_vip != NULL);
	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
	new_vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(new_vp->v_interlock);
	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
	mutex_exit(new_vp->v_interlock);

	/* Rekey old node and put it onto its new hashlist. */
	vip->vi_key = new_vcache_key;
	if (old_hash != new_hash) {
		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
		    vip, vnode_impl, vi_hash);
		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
		    vip, vi_hash);
	}

	/* Remove new node used as placeholder. */
	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_vip, vnode_impl, vi_hash);
	vcache_dealloc(new_vip);
}
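
/*
 * Illustrative, uncompiled sketch of the rekey protocol: a file system
 * changing a node's identity brackets its own update with the enter/exit
 * pair.  The key variables here are hypothetical.
 */
#if 0
	error = vcache_rekey_enter(mp, vp, &old_ino, sizeof(old_ino),
	    &new_ino, sizeof(new_ino));
	if (error)
		return error;	/* EEXIST: target key already cached */
	/* ... update the fs node so it is found under new_ino ... */
	vcache_rekey_exit(mp, vp, &old_ino, sizeof(old_ino),
	    &new_ino, sizeof(new_ino));
#endif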

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with vnode locked and will return unlocked.
 * Must be called with the interlock held, and will return with it held.
 */
static void
vcache_reclaim(vnode_t *vp)
{
	lwp_t *l = curlwp;
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp = vp->v_mount;
	uint32_t hash;
	uint8_t temp_buf[64], *temp_key;
	size_t temp_key_len;
	bool recycle, active;
	int error;

	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount != 0);

	active = (vp->v_usecount > 1);
	temp_key_len = vip->vi_key.vk_key_len;
	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	VSTATE_CHANGE(vp, VS_ACTIVE, VS_RECLAIMING);
	if (vp->v_iflag & VI_EXECMAP) {
		atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
		atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	mutex_exit(vp->v_interlock);

	/* Replace the vnode key with a temporary copy. */
	if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
	} else {
		temp_key = temp_buf;
	}
	mutex_enter(&vcache_lock);
	memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
	vip->vi_key.vk_key = temp_key;
	mutex_exit(&vcache_lock);

	fstrans_start(mp, FSTRANS_SHARED);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.
	 */
	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
	if (error != 0) {
		if (wapbl_vphaswapbl(vp))
			WAPBL_DISCARD(wapbl_vptomp(vp));
		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
	}
	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
		spec_node_revoke(vp);
	}

	/*
	 * Disassociate the underlying file system from the vnode.
	 * Note that the VOP_INACTIVE will unlock the vnode.
	 */
	VOP_INACTIVE(vp, &recycle);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	KASSERT(vp->v_data == NULL);
	KASSERT(vp->v_uobj.uo_npages == 0);

	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}

	/* Purge name cache. */
	cache_purge(vp);

	/* Remove from vnode cache. */
	hash = vcache_hash(&vip->vi_key);
	mutex_enter(&vcache_lock);
	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
	    vip, vnode_impl, vi_hash);
	mutex_exit(&vcache_lock);
	if (temp_key != temp_buf)
		kmem_free(temp_key, temp_key_len);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_vflag |= VV_LOCKSWORK;
	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
	vp->v_tag = VT_NON;
	KNOTE(&vp->v_klist, NOTE_REVOKE);
	mutex_exit(vp->v_interlock);

	/*
	 * Move to dead mount.  Must be after changing the operations
	 * vector as vnode operations enter the mount before using the
	 * operations vector.  See sys/kern/vnode_if.c.
	 */
	vp->v_vflag &= ~VV_ROOT;
	atomic_inc_uint(&dead_rootmount->mnt_refcnt);
	vfs_insmntque(vp, dead_rootmount);

	mutex_enter(vp->v_interlock);
	fstrans_done(mp);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(struct buf *bp)
{
	vnode_t *vp;

	if ((vp = bp->b_vp) == NULL)
		return;

	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));

	if (--vp->v_numoutput < 0)
		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
	if (vp->v_numoutput == 0)
		cv_broadcast(&vp->v_cv);
}

/*
 * Test a vnode for being or becoming dead.  Returns one of:
 * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
 * ENOENT: vnode is dead.
 * 0:      otherwise.
 *
 * Whenever this function returns a non-zero value all future
 * calls will also return a non-zero value.
 */
int
vdead_check(struct vnode *vp, int flags)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (! ISSET(flags, VDEAD_NOWAIT))
		VSTATE_WAIT_STABLE(vp);

	if (VSTATE_GET(vp) == VS_RECLAIMING) {
		KASSERT(ISSET(flags, VDEAD_NOWAIT));
		return EBUSY;
	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
		return ENOENT;
	}

	return 0;
}
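
/*
 * Illustrative, uncompiled sketch: callers holding v_interlock that must
 * not sleep probe with VDEAD_NOWAIT and treat EBUSY as "try again later".
 */
#if 0
	mutex_enter(vp->v_interlock);
	switch (vdead_check(vp, VDEAD_NOWAIT)) {
	case 0:
		/* Alive: safe to use while the interlock is held. */
		break;
	case EBUSY:
		/* Becoming dead: back off and retry later. */
		break;
	case ENOENT:
		/* Dead: forget this vnode. */
		break;
	}
	mutex_exit(vp->v_interlock);
#endif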

int
vfs_drainvnodes(void)
{
	int i, gen;

	mutex_enter(&vdrain_lock);
	for (i = 0; i < 2; i++) {
		gen = vdrain_gen;
		while (gen == vdrain_gen) {
			cv_broadcast(&vdrain_cv);
			cv_wait(&vdrain_gen_cv, &vdrain_lock);
		}
	}
	mutex_exit(&vdrain_lock);

	if (numvnodes >= desiredvnodes)
		return EBUSY;

	if (vcache_hashsize != desiredvnodes)
		vcache_reinit();

	return 0;
}

void
vnpanic(vnode_t *vp, const char *fmt, ...)
{
	va_list ap;

#ifdef DIAGNOSTIC
	vprint(NULL, vp);
#endif
	va_start(ap, fmt);
	vpanic(fmt, ap);
	va_end(ap);
}