/*	$NetBSD: vfs_vnode.c,v 1.94 2017/06/04 07:58:29 hannken Exp $	*/

/*-
 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vcache_vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was another, traditional way.  Currently, only the draining thread
 *	recycles vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
 *	the file system that the vnode is inactive.  Via this call, the
 *	file system indicates whether the vnode can be recycled (usually,
 *	it checks its own references, e.g. count of links, whether the
 *	file was removed).
 *
 *	Depending on that indication, the vnode can be put onto a free
 *	list (cache), or cleaned via vcache_reclaim, which calls
 *	VOP_RECLAIM(9) to disassociate the underlying file system from
 *	the vnode; the vnode is then destroyed.
 *
 * Vnode state
 *
 *	Vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating underlying file system and not
 *			yet ready to use.
 *	- LOADED	Vnode has associated underlying file system and is
 *			ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from underlying file system
 *			and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> LOADED
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	LOADED -> RECLAIMING
 *			Vnode starts disassociation from underlying file
 *			system in vcache_reclaim().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from underlying file
 *			system in vcache_reclaim().
 *	LOADED -> BLOCKED
 *			Either vcache_rekey*() is changing the vnode key or
 *			vrelel() is about to call VOP_INACTIVE().
 *	BLOCKED -> LOADED
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system or vcache_rekey*()
 *			drops a vnode used as placeholder.
 *
 *	Of these states LOADING, BLOCKED and RECLAIMING are intermediate
 *	and it is possible to wait for a state change.
 *
 *	State is protected with v_interlock with one exception:
 *	to change from LOADING both v_interlock and vcache_lock must be
 *	held, so it is possible to check for "state == LOADING" while
 *	holding only vcache_lock.  See vcache_get() for details.
 *
 * Reference counting
 *
 *	Vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained using
 *	the vref(9) and vrele(9) routines, as well as vput(9).  Common
 *	points holding references are e.g. file openings, current working
 *	directory, mount points, etc.
 *
 * Note on v_usecount and its locking
 *
 *	At nearly all points where it is known that v_usecount could be
 *	zero, vnode_t::v_interlock will be held.  To change v_usecount away
 *	from zero, the interlock must be held.  To change from a non-zero
 *	value to zero, again the interlock must be held.
 *
 *	Changing the usecount from a non-zero value to a non-zero value can
 *	safely be done using atomic operations, without the interlock held.
 *
 */
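
/*
 * Illustrative sketch (not from the original sources): a typical
 * file-system consumer obtains a referenced vnode from the cache and
 * drops the reference when done.  The inode-number key and the
 * "example_inum" name below are hypothetical; real file systems use
 * their own key layout.
 *
 *	struct vnode *vp;
 *	ino_t example_inum = 2;
 *	int error;
 *
 *	error = vcache_get(mp, &example_inum, sizeof(example_inum), &vp);
 *	if (error)
 *		return error;
 *	vn_lock(vp, LK_SHARED | LK_RETRY);
 *	... operate on the locked, referenced vnode ...
 *	vput(vp);	- unlock and drop the reference in one call
 */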

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.94 2017/06/04 07:58:29 hannken Exp $");

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode_impl.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC_RELE	0x0001	/* Always defer to vrele thread. */
#define	VRELEL_FORCE_RELE	0x0002	/* Must always succeed. */

u_int			numvnodes		__cacheline_aligned;

/*
 * There are three lru lists: one holds vnodes waiting for async release,
 * one is for vnodes which have no buffer/page references and
 * one for those which do (i.e. v_holdcnt is non-zero).
 */
static vnodelst_t	lru_vrele_list		__cacheline_aligned;
static vnodelst_t	lru_free_list		__cacheline_aligned;
static vnodelst_t	lru_hold_list		__cacheline_aligned;
static kmutex_t		vdrain_lock		__cacheline_aligned;
static kcondvar_t	vdrain_cv		__cacheline_aligned;
static int		vdrain_gen;
static kcondvar_t	vdrain_gen_cv;
static bool		vdrain_retry;
static lwp_t *		vdrain_lwp;
SLIST_HEAD(hashhead, vnode_impl);
static kmutex_t		vcache_lock		__cacheline_aligned;
static kcondvar_t	vcache_cv		__cacheline_aligned;
static u_int		vcache_hashsize;
static u_long		vcache_hashmask;
static struct hashhead	*vcache_hashtab		__cacheline_aligned;
static pool_cache_t	vcache_pool;
static void		lru_requeue(vnode_t *, vnodelst_t *);
static vnodelst_t *	lru_which(vnode_t *);
static vnode_impl_t *	vcache_alloc(void);
static void		vcache_dealloc(vnode_impl_t *);
static void		vcache_free(vnode_impl_t *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vcache_reclaim(vnode_t *);
static void		vrelel(vnode_t *, int);
static void		vdrain_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount	*dead_rootmount;
extern int		(**dead_vnodeop_p)(void *);
extern struct vfsops	dead_vfsops;

/* Vnode state operations and diagnostics. */

#if defined(DIAGNOSTIC)

#define VSTATE_VALID(state) \
	((state) != VS_ACTIVE && (state) != VS_MARKER)
#define VSTATE_GET(vp) \
	vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
	vstate_assert_wait_stable((vp), __func__, __LINE__)

void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if (state == VS_ACTIVE && vp->v_usecount > 0 &&
	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
		return;
	if (vip->vi_state == state)
		return;
	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
	    vstate_name(vip->vi_state), vp->v_usecount,
	    vstate_name(state), func, line);
}

static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	return vip->vi_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);
}

static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);

	if (! VSTATE_VALID(from))
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (! VSTATE_VALID(to))
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (vip->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(vip->vi_state), vstate_name(from), func, line);
	if ((from == VS_BLOCKED || to == VS_BLOCKED) && vp->v_usecount != 1)
		vnpanic(vp, "%s to %s with usecount %d at %s:%d",
		    vstate_name(from), vstate_name(to), vp->v_usecount,
		    func, line);

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
	(VNODE_TO_VIMPL((vp))->vi_state)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
	vstate_wait_stable((vp))
void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line)
{

}

static void
vstate_wait_stable(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */

void
vfs_vnode_sysinit(void)
{
	int error __diagused;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag = IMNT_MPSAFE;

	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&lru_free_list);
	TAILQ_INIT(&lru_hold_list);
	TAILQ_INIT(&lru_vrele_list);

	vcache_init();

	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vdrain_gen_cv, "vdrainwt");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, &vdrain_lwp, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	memset(vip, 0, sizeof(*vip));
	vp = VIMPL_TO_VNODE(vip);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	vip->vi_state = VS_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	vnode_impl_t *vip;

	vip = VNODE_TO_VIMPL(vp);
	KASSERT(vip->vi_state == VS_MARKER);
	uvm_obj_destroy(&vp->v_uobj, true);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
}

/*
 * Set vnode to share another vnode's lock.
 */
void
vshare_lock(vnode_t *vp, vnode_t *src_vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	vnode_impl_t *src_vip = VNODE_TO_VIMPL(src_vp);
	krwlock_t *oldlock = vip->vi_lock;

	rw_obj_hold(src_vip->vi_lock);
	vip->vi_lock = src_vip->vi_lock;
	rw_obj_free(oldlock);
}

/*
 * Return the lru list this node should be on.
 */
static vnodelst_t *
lru_which(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt > 0)
		return &lru_hold_list;
	else
		return &lru_free_list;
}

/*
 * Put vnode at the end of the given list.
 * Both the current and the new list may be NULL, used on vnode alloc/free.
 * Adjust numvnodes and signal the vdrain thread if there is work.
 */
static void
lru_requeue(vnode_t *vp, vnodelst_t *listhd)
{
	vnode_impl_t *vip;

	mutex_enter(&vdrain_lock);
	vip = VNODE_TO_VIMPL(vp);
	if (vip->vi_lrulisthd != NULL)
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		numvnodes++;
	vip->vi_lrulisthd = listhd;
	if (vip->vi_lrulisthd != NULL)
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		numvnodes--;
	if (numvnodes > desiredvnodes || listhd == &lru_vrele_list)
		cv_broadcast(&vdrain_cv);
	mutex_exit(&vdrain_lock);
}

/*
 * Release deferred vrele vnodes for this mount.
 * Called with file system suspended.
 */
void
vrele_flush(struct mount *mp)
{
	vnode_impl_t *vip, *marker;

	KASSERT(fstrans_is_owner(mp));

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);
	TAILQ_INSERT_HEAD(&lru_vrele_list, marker, vi_lrulist);

	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
		TAILQ_REMOVE(&lru_vrele_list, marker, vi_lrulist);
		TAILQ_INSERT_AFTER(&lru_vrele_list, vip, marker, vi_lrulist);
		if (vnis_marker(VIMPL_TO_VNODE(vip)))
			continue;

		KASSERT(vip->vi_lrulisthd == &lru_vrele_list);
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
		vip->vi_lrulisthd = &lru_hold_list;
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
		mutex_exit(&vdrain_lock);

		mutex_enter(VIMPL_TO_VNODE(vip)->v_interlock);
		vrelel(VIMPL_TO_VNODE(vip), VRELEL_FORCE_RELE);

		mutex_enter(&vdrain_lock);
	}

	TAILQ_REMOVE(&lru_vrele_list, marker, vi_lrulist);
	mutex_exit(&vdrain_lock);

	vnfree_marker(VIMPL_TO_VNODE(marker));
}

/*
 * Reclaim a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_remove(vnode_t *vp)
{
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	/* Probe usecount (unlocked). */
	if (vp->v_usecount > 0)
		return;
	/* Try v_interlock -- we lock the wrong direction! */
	if (!mutex_tryenter(vp->v_interlock))
		return;
	/* Probe usecount and state. */
	if (vp->v_usecount > 0 || VSTATE_GET(vp) != VS_LOADED) {
		mutex_exit(vp->v_interlock);
		return;
	}
	mp = vp->v_mount;
	if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
		mutex_exit(vp->v_interlock);
		return;
	}
	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	if (vcache_vget(vp) == 0) {
		if (!vrecycle(vp)) {
			mutex_enter(vp->v_interlock);
			vrelel(vp, VRELEL_FORCE_RELE);
		}
	}
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Release a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_vrele(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	mp = vp->v_mount;
	if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0)
		return;

	/*
	 * First remove the vnode from the vrele list.
	 * Put it onto the hold list; the last vrele()
	 * will put it back onto the right list before
	 * its v_usecount reaches zero.
	 */
	KASSERT(vip->vi_lrulisthd == &lru_vrele_list);
	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	vip->vi_lrulisthd = &lru_hold_list;
	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);

	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_FORCE_RELE);
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes
 * and release vnodes from asynchronous vrele.
 */
static void
vdrain_thread(void *cookie)
{
	vnodelst_t *listhd[] = {
	    &lru_vrele_list, &lru_free_list, &lru_hold_list
	};
	int i;
	u_int target;
	vnode_impl_t *vip, *marker;

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);

	for (;;) {
		vdrain_retry = false;
		target = desiredvnodes - desiredvnodes/10;

		for (i = 0; i < __arraycount(listhd); i++) {
			TAILQ_INSERT_HEAD(listhd[i], marker, vi_lrulist);
			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
				TAILQ_REMOVE(listhd[i], marker, vi_lrulist);
				TAILQ_INSERT_AFTER(listhd[i], vip, marker,
				    vi_lrulist);
				if (vnis_marker(VIMPL_TO_VNODE(vip)))
					continue;
				if (listhd[i] == &lru_vrele_list)
					vdrain_vrele(VIMPL_TO_VNODE(vip));
				else if (numvnodes < target)
					break;
				else
					vdrain_remove(VIMPL_TO_VNODE(vip));
			}
			TAILQ_REMOVE(listhd[i], marker, vi_lrulist);
		}

		if (vdrain_retry) {
			mutex_exit(&vdrain_lock);
			yield();
			mutex_enter(&vdrain_lock);
		} else {
			vdrain_gen++;
			cv_broadcast(&vdrain_gen_cv);
			cv_wait(&vdrain_cv, &vdrain_lock);
		}
	}
}

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Try to drop a reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static inline bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = vp->v_usecount;; use = next) {
		if (use == 1) {
			return false;
		}
		KASSERT(use > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * Vnode release.  If the reference count drops to zero, call the
 * inactive routine and either return the vnode to the freelist or
 * free it to the pool.
 */
static void
vrelel(vnode_t *vp, int flags)
{
	const bool async = ((flags & VRELEL_ASYNC_RELE) != 0);
	const bool force = ((flags & VRELEL_FORCE_RELE) != 0);
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VS_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count
	 * and unlock.
	 */
	if (vtryrele(vp)) {
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * First try to get the vnode locked for VOP_INACTIVE().
	 * Defer the release to the vdrain thread if the caller requests
	 * it explicitly, if we are the pagedaemon, or if the lock
	 * attempt failed.
	 */
	if ((curlwp == uvm.pagedaemon_lwp) || async) {
		defer = true;
	} else {
		mutex_exit(vp->v_interlock);
		error = vn_lock(vp,
		    LK_EXCLUSIVE | LK_RETRY | (force ? 0 : LK_NOWAIT));
		defer = (error != 0);
		mutex_enter(vp->v_interlock);
	}
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(! (force && defer));
	if (defer) {
		/*
		 * Defer reclaim to the kthread; it's not safe to
		 * clean it here.  We donate it our last reference.
		 */
		lru_requeue(vp, &lru_vrele_list);
		mutex_exit(vp->v_interlock);
		return;
	}

	/*
	 * If the node got another reference while we
	 * released the interlock, don't try to inactivate it yet.
	 */
	if (__predict_false(vtryrele(vp))) {
		VOP_UNLOCK(vp);
		mutex_exit(vp->v_interlock);
		return;
	}

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		VOP_UNLOCK(vp);
	} else {
		VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
		mutex_exit(vp->v_interlock);

		/*
		 * The vnode must not gain another reference while being
		 * deactivated.  If VOP_INACTIVE() indicates that
		 * the described file has been deleted, then recycle
		 * the vnode.
		 *
		 * Note that VOP_INACTIVE() will not drop the vnode lock.
		 */
		recycle = false;
		VOP_INACTIVE(vp, &recycle);
		if (!recycle)
			VOP_UNLOCK(vp);
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);
		if (!recycle) {
			if (vtryrele(vp)) {
				mutex_exit(vp->v_interlock);
				return;
			}
		}

		/* Take care of space accounting. */
		if (vp->v_iflag & VI_EXECMAP) {
			atomic_add_int(&uvmexp.execpages,
			    -vp->v_uobj.uo_npages);
			atomic_add_int(&uvmexp.filepages,
			    vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			VSTATE_ASSERT(vp, VS_LOADED);
			/* vcache_reclaim drops the lock. */
			vcache_reclaim(vp);
		}
		KASSERT(vp->v_usecount > 0);
	}

	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(vp->v_interlock);
		return;
	}

	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		vcache_free(VNODE_TO_VIMPL(vp));
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		lru_requeue(vp, lru_which(vp));
		mutex_exit(vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0);
}

/*
 * Asynchronous vnode release: the vnode is released in a different context.
 */
void
vrele_async(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC_RELE);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 */
void
vref(vnode_t *vp)
{

	KASSERT(vp->v_usecount != 0);

	atomic_inc_uint(&vp->v_usecount);
}
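
/*
 * Illustrative sketch (not from the original sources): duplicating an
 * existing reference when storing a vnode in a longer-lived structure.
 * The "example_softc" name is hypothetical.
 *
 *	example_softc->sc_vp = vp;
 *	vref(vp);	- valid only while another reference is
 *			  already held, per the comment above
 *	...
 *	vrele(example_softc->sc_vp);
 */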

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Recycle an unused vnode if caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{
	int error __diagused;

	mutex_enter(vp->v_interlock);

	/* Make sure we hold the last reference. */
	VSTATE_WAIT_STABLE(vp);
	if (vp->v_usecount != 1) {
		mutex_exit(vp->v_interlock);
		return false;
	}

	/* If the vnode is already clean we're done. */
	if (VSTATE_GET(vp) != VS_LOADED) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrelel(vp, 0);
		return true;
	}

	/* Prevent further references until the vnode is locked. */
	VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
	mutex_exit(vp->v_interlock);

	/*
	 * On a leaf file system this lock will always succeed as we hold
	 * the last reference and prevent further references.
	 * On layered file systems waiting for the lock would open a can of
	 * deadlocks as the lower vnodes may have other active references.
	 */
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);

	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);

	if (error) {
		mutex_exit(vp->v_interlock);
		return false;
	}

	KASSERT(vp->v_usecount == 1);
	vcache_reclaim(vp);
	vrelel(vp, 0);

	return true;
}

/*
 * Helper for vrevoke() to propagate suspension from lastmp
 * to thismp.  Both args may be NULL.
 * Returns the currently suspended file system or NULL.
 */
static struct mount *
vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
{
	int error;

	if (lastmp == thismp)
		return thismp;

	if (lastmp != NULL)
		vfs_resume(lastmp);

	if (thismp == NULL)
		return NULL;

	do {
		error = vfs_suspend(thismp, 0);
	} while (error == EINTR || error == ERESTART);

	if (error == 0)
		return thismp;

	KASSERT(error == EOPNOTSUPP);
	return NULL;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	struct mount *mp;
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vp->v_usecount > 0);

	mp = vrevoke_suspend_next(NULL, vp->v_mount);

	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		mutex_exit(vp->v_interlock);
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		mutex_exit(vp->v_interlock);
		vgone(vp);
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);

		while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
			mp = vrevoke_suspend_next(mp, vq->v_mount);
			vgone(vq);
		}
	}
	vrevoke_suspend_next(mp, NULL);
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{

	KASSERT((vp->v_mount->mnt_iflag & IMNT_HAS_TRANS) == 0 ||
	    fstrans_is_owner(vp->v_mount));

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_LOADED)
		vcache_reclaim(vp);
	VSTATE_ASSERT(vp, VS_RECLAIMED);
	vrelel(vp, 0);
}

static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
	uint32_t hash = HASH32_BUF_INIT;

	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
	return hash;
}

static void
vcache_init(void)
{

	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), 0, 0, 0,
	    "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vcache_pool != NULL);
	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vcache_cv, "vcache");
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
	    &vcache_hashmask);
}

static void
vcache_reinit(void)
{
	int i;
	uint32_t hash;
	u_long oldmask, newmask;
	struct hashhead *oldtab, *newtab;
	vnode_impl_t *vip;

	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
	mutex_enter(&vcache_lock);
	oldtab = vcache_hashtab;
	oldmask = vcache_hashmask;
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = newtab;
	vcache_hashmask = newmask;
	for (i = 0; i <= oldmask; i++) {
		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
			hash = vcache_hash(&vip->vi_key);
			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
			    vip, vi_hash);
		}
	}
	mutex_exit(&vcache_lock);
	hashdone(oldtab, HASH_SLIST, oldmask);
}

static inline vnode_impl_t *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
	struct hashhead *hashp;
	vnode_impl_t *vip;

	KASSERT(mutex_owned(&vcache_lock));

	hashp = &vcache_hashtab[hash & vcache_hashmask];
	SLIST_FOREACH(vip, hashp, vi_hash) {
		if (key->vk_mount != vip->vi_key.vk_mount)
			continue;
		if (key->vk_key_len != vip->vi_key.vk_key_len)
			continue;
		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
			continue;
		return vip;
	}
	return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static vnode_impl_t *
vcache_alloc(void)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	memset(vip, 0, sizeof(*vip));

	vip->vi_lock = rw_obj_alloc();
	/* SLIST_INIT(&vip->vi_hash); */
	/* LIST_INIT(&vip->vi_nclist); */
	/* LIST_INIT(&vip->vi_dnclist); */

	vp = VIMPL_TO_VNODE(vip);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	cv_init(&vp->v_cv, "vnode");

	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	vip->vi_state = VS_LOADING;

	lru_requeue(vp, &lru_free_list);

	return vip;
}

/*
 * Deallocate a vcache node in state VS_LOADING.
 *
 * vcache_lock held on entry and released on return.
 */
static void
vcache_dealloc(vnode_impl_t *vip)
{
	vnode_t *vp;

	KASSERT(mutex_owned(&vcache_lock));

	vp = VIMPL_TO_VNODE(vip);
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
	mutex_exit(&vcache_lock);
	vrelel(vp, 0);
}

/*
 * Free an unused, unreferenced vcache node.
 * v_interlock locked on entry.
 */
static void
vcache_free(vnode_impl_t *vip)
{
	vnode_t *vp;

	vp = VIMPL_TO_VNODE(vip);
	KASSERT(mutex_owned(vp->v_interlock));

	KASSERT(vp->v_usecount == 0);
	KASSERT(vp->v_holdcnt == 0);
	KASSERT(vp->v_writecount == 0);
	lru_requeue(vp, NULL);
	mutex_exit(vp->v_interlock);

	vfs_insmntque(vp, NULL);
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		spec_node_destroy(vp);

	rw_obj_free(vip->vi_lock);
	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success, ENOENT if the vnode has been reclaimed and
 * EBUSY if the vnode state is unstable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_tryvget(vnode_t *vp)
{
	int error = 0;

	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED))
		error = ENOENT;
	else if (__predict_false(VSTATE_GET(vp) != VS_LOADED))
		error = EBUSY;
	else if (vp->v_usecount == 0)
		vp->v_usecount = 1;
	else
		atomic_inc_uint(&vp->v_usecount);

	mutex_exit(vp->v_interlock);

	return error;
}
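
/*
 * Illustrative sketch (not from the original sources): a caller holding
 * a raw, unreferenced vnode pointer, e.g. obtained from the name cache,
 * may try to grab a reference without sleeping and fall back to a full
 * lookup on failure:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vcache_tryvget(vp);	- interlock is dropped on return
 *	if (error != 0)
 *		... vp is dead (ENOENT) or unstable (EBUSY); redo the
 *		    lookup instead of using vp ...
 */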

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success and ENOENT if the vnode has been reclaimed.
 * Will wait for the vnode state to be stable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_vget(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	/* Increment hold count to prevent vnode from disappearing. */
	vp->v_holdcnt++;
	VSTATE_WAIT_STABLE(vp);
	vp->v_holdcnt--;

	/* If this was the last reference to a reclaimed vnode free it now. */
	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
		if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
			vcache_free(VNODE_TO_VIMPL(vp));
		else
			mutex_exit(vp->v_interlock);
		return ENOENT;
	}
	VSTATE_ASSERT(vp, VS_LOADED);
	if (vp->v_usecount == 0)
		vp->v_usecount = 1;
	else
		atomic_inc_uint(&vp->v_usecount);

	mutex_exit(vp->v_interlock);

	return 0;
}

/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
	int error;
	uint32_t hash;
	const void *new_key;
	struct vnode *vp;
	struct vcache_key vcache_key;
	vnode_impl_t *vip, *new_vip;

	new_key = NULL;
	*vpp = NULL;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

again:
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);

	/* If found, take a reference or retry. */
	if (__predict_true(vip != NULL)) {
		/*
		 * If the vnode is loading we cannot take the v_interlock
		 * here as it might change during load (see uvm_obj_setlock()).
		 * As changing state from VS_LOADING requires both vcache_lock
		 * and v_interlock it is safe to test with vcache_lock held.
		 *
		 * Wait for vnodes changing state from VS_LOADING and retry.
		 */
		if (__predict_false(vip->vi_state == VS_LOADING)) {
			cv_wait(&vcache_cv, &vcache_lock);
			mutex_exit(&vcache_lock);
			goto again;
		}
		vp = VIMPL_TO_VNODE(vip);
		mutex_enter(vp->v_interlock);
		mutex_exit(&vcache_lock);
		error = vcache_vget(vp);
		if (error == ENOENT)
			goto again;
		if (error == 0)
			*vpp = vp;
		KASSERT((error != 0) == (*vpp == NULL));
		return error;
	}
	mutex_exit(&vcache_lock);

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp);
	if (error)
		return error;
	new_vip = vcache_alloc();
	new_vip->vi_key = vcache_key;
	vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);
	if (vip == NULL) {
		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vi_hash);
		vip = new_vip;
	}

	/* If another thread beat us inserting this node, retry. */
	if (vip != new_vip) {
		vcache_dealloc(new_vip);
		vfs_unbusy(mp);
		goto again;
	}
	mutex_exit(&vcache_lock);

	/* Load the fs node.  Exclusive as new_node is VS_LOADING. */
	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
	if (error) {
		mutex_enter(&vcache_lock);
		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vnode_impl, vi_hash);
		vcache_dealloc(new_vip);
		vfs_unbusy(mp);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_key != NULL);
	KASSERT(memcmp(key, new_key, key_len) == 0);
	KASSERT(vp->v_op != NULL);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_ref(mp);
	vfs_unbusy(mp);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	new_vip->vi_key.vk_key = new_key;
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache_lock);
	*vpp = vp;
	return 0;
}

/*
 * Create a new vnode / fs node pair and return it referenced through vpp.
 */
int
vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
    kauth_cred_t cred, struct vnode **vpp)
{
	int error;
	uint32_t hash;
	struct vnode *vp, *ovp;
	vnode_impl_t *vip, *ovip;

	*vpp = NULL;

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp);
	if (error)
		return error;
	vip = vcache_alloc();
	vip->vi_key.vk_mount = mp;
	vp = VIMPL_TO_VNODE(vip);

	/* Create and load the fs node. */
	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred,
	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
	if (error) {
		mutex_enter(&vcache_lock);
		vcache_dealloc(vip);
		vfs_unbusy(mp);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(vip->vi_key.vk_key != NULL);
	KASSERT(vp->v_op != NULL);
	hash = vcache_hash(&vip->vi_key);

	/* Wait for previous instance to be reclaimed, then insert new node. */
	mutex_enter(&vcache_lock);
	while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
		ovp = VIMPL_TO_VNODE(ovip);
		mutex_enter(ovp->v_interlock);
		mutex_exit(&vcache_lock);
		error = vcache_vget(ovp);
		KASSERT(error == ENOENT);
		mutex_enter(&vcache_lock);
	}
	SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
	    vip, vi_hash);
	mutex_exit(&vcache_lock);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_ref(mp);
	vfs_unbusy(mp);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
	mutex_exit(&vcache_lock);
	mutex_exit(vp->v_interlock);
	*vpp = vp;
	return 0;
}

/*
 * Prepare key change: update the old cache node's key and lock the new
 * cache node.  Return an error if the new node already exists.
 */
int
vcache_rekey_enter(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *vip, *new_vip;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	new_vip = vcache_alloc();
	new_vip->vi_key = new_vcache_key;

	/* Insert locked new node used as placeholder. */
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
	if (vip != NULL) {
		vcache_dealloc(new_vip);
		return EEXIST;
	}
	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_vip, vi_hash);

	/* Replace the old node's key with the temporary copy. */
	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(vip != NULL);
	KASSERT(VIMPL_TO_VNODE(vip) == vp);
	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
	vip->vi_key = old_vcache_key;
	mutex_exit(&vcache_lock);
	return 0;
}

/*
 * Key change complete: update old node and remove placeholder.
 */
void
vcache_rekey_exit(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *vip, *new_vip;
	struct vnode *new_vp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	mutex_enter(&vcache_lock);

	/* Lookup old and new node. */
	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(vip != NULL);
	KASSERT(VIMPL_TO_VNODE(vip) == vp);

	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
	KASSERT(new_vip != NULL);
	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
	new_vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(new_vp->v_interlock);
	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
	mutex_exit(new_vp->v_interlock);

	/* Rekey old node and put it onto its new hash list. */
	vip->vi_key = new_vcache_key;
	if (old_hash != new_hash) {
		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
		    vip, vnode_impl, vi_hash);
		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
		    vip, vi_hash);
	}

	/* Remove new node used as placeholder. */
	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_vip, vnode_impl, vi_hash);
	vcache_dealloc(new_vip);
}

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with vnode locked and will return unlocked.
 * Must be called with the interlock held, and will return with it held.
 */
static void
vcache_reclaim(vnode_t *vp)
{
	lwp_t *l = curlwp;
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp = vp->v_mount;
	uint32_t hash;
	uint8_t temp_buf[64], *temp_key;
	size_t temp_key_len;
	bool recycle, active;
	int error;

	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount != 0);

	active = (vp->v_usecount > 1);
	temp_key_len = vip->vi_key.vk_key_len;
	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	VSTATE_CHANGE(vp, VS_LOADED, VS_RECLAIMING);
	if (vp->v_iflag & VI_EXECMAP) {
		atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
		atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	mutex_exit(vp->v_interlock);

	/* Replace the vnode key with a temporary copy. */
	if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
	} else {
		temp_key = temp_buf;
	}
	mutex_enter(&vcache_lock);
	memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
	vip->vi_key.vk_key = temp_key;
	mutex_exit(&vcache_lock);

	fstrans_start(mp, FSTRANS_SHARED);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.
	 */
	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
	if (error != 0) {
		if (wapbl_vphaswapbl(vp))
			WAPBL_DISCARD(wapbl_vptomp(vp));
		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
	}
	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
		spec_node_revoke(vp);
	}

	/*
	 * Disassociate the underlying file system from the vnode.
	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
	 * would no longer function.
	 */
	VOP_INACTIVE(vp, &recycle);
	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	KASSERT(vp->v_data == NULL);
	KASSERT(vp->v_uobj.uo_npages == 0);

	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}

	/* Purge name cache. */
	cache_purge(vp);

	/* Remove from vnode cache. */
	hash = vcache_hash(&vip->vi_key);
	mutex_enter(&vcache_lock);
	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
	    vip, vnode_impl, vi_hash);
	mutex_exit(&vcache_lock);
	if (temp_key != temp_buf)
		kmem_free(temp_key, temp_key_len);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_vflag |= VV_LOCKSWORK;
	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
	vp->v_tag = VT_NON;
	KNOTE(&vp->v_klist, NOTE_REVOKE);
	mutex_exit(vp->v_interlock);

	/*
	 * Move to dead mount.  Must be after changing the operations
	 * vector as vnode operations enter the mount before using the
	 * operations vector.  See sys/kern/vnode_if.c.
	 */
	vp->v_vflag &= ~VV_ROOT;
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);

	mutex_enter(vp->v_interlock);
	fstrans_done(mp);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(struct buf *bp)
{
	vnode_t *vp;

	if ((vp = bp->b_vp) == NULL)
		return;

	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));

	if (--vp->v_numoutput < 0)
		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
	if (vp->v_numoutput == 0)
		cv_broadcast(&vp->v_cv);
}

/*
 * Test a vnode for being or becoming dead.  Returns one of:
 * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
 * ENOENT: vnode is dead.
 * 0:      otherwise.
 *
 * Whenever this function returns a non-zero value all future
 * calls will also return a non-zero value.
 */
int
vdead_check(struct vnode *vp, int flags)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (! ISSET(flags, VDEAD_NOWAIT))
		VSTATE_WAIT_STABLE(vp);

	if (VSTATE_GET(vp) == VS_RECLAIMING) {
		KASSERT(ISSET(flags, VDEAD_NOWAIT));
		return EBUSY;
	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
		return ENOENT;
	}

	return 0;
}
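
/*
 * Illustrative sketch (not from the original sources): a caller that
 * must not sleep can probe for a dying vnode under the interlock before
 * starting new work.  The error mapping below is hypothetical.
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		return (error == EBUSY ? EAGAIN : ENXIO);
 */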

int
vfs_drainvnodes(void)
{
	int i, gen;

	mutex_enter(&vdrain_lock);
	for (i = 0; i < 2; i++) {
		gen = vdrain_gen;
		while (gen == vdrain_gen) {
			cv_broadcast(&vdrain_cv);
			cv_wait(&vdrain_gen_cv, &vdrain_lock);
		}
	}
	mutex_exit(&vdrain_lock);

	if (numvnodes >= desiredvnodes)
		return EBUSY;

	if (vcache_hashsize != desiredvnodes)
		vcache_reinit();

	return 0;
}

void
vnpanic(vnode_t *vp, const char *fmt, ...)
{
	va_list ap;

#ifdef DIAGNOSTIC
	vprint(NULL, vp);
#endif
	va_start(ap, fmt);
	vpanic(fmt, ap);
	va_end(ap);
}