/*	$NetBSD: vfs_vnode.c,v 1.53 2016/07/07 06:55:43 msaitoh Exp $	*/

/*-
 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */
/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was traditionally another way.  Currently, only the draining thread
 *	recycles vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In such a case, VOP_INACTIVE(9) is called to
 *	inform the file system that the vnode is inactive.  Via this call,
 *	the file system indicates whether the vnode can be recycled
 *	(usually, it checks its own references, e.g. the link count and
 *	whether the file was removed).
 *
 *	Depending on that indication, the vnode can be put onto a free list
 *	(the cache), or cleaned via vclean(9), which calls VOP_RECLAIM(9)
 *	to disassociate the underlying file system from the vnode, and
 *	finally destroyed.
 *
 * Vnode state
 *
 *	A vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating the underlying file system and
 *			is not yet ready to use.
 *	- ACTIVE	Vnode has an associated underlying file system and is
 *			ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from the underlying file
 *			system and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> ACTIVE
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	ACTIVE -> RECLAIMING
 *			Vnode starts disassociation from the underlying file
 *			system in vclean().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from the underlying file
 *			system in vclean().
 *	ACTIVE -> BLOCKED
 *			Either vcache_rekey*() is changing the vnode key or
 *			vrelel() is about to call VOP_INACTIVE().
 *	BLOCKED -> ACTIVE
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system, or
 *			vcache_rekey*() drops a vnode used as a placeholder.
 *
 *	Of these states, LOADING, BLOCKED and RECLAIMING are intermediate,
 *	and it is possible to wait for a state change.
 *
 *	State is protected with v_interlock, with one exception:
 *	to change from LOADING both v_interlock and vcache.lock must be held,
 *	so it is possible to check "state == LOADING" without holding
 *	v_interlock.  See vcache_get() for details.
 *
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained using
 *	the vref(9), vrele(9) and vput(9) routines.  Common points holding
 *	references are e.g. open files, current working directories and
 *	mount points.
 *
 * Note on v_usecount and its locking
 *
 *	At nearly all points where it is known that v_usecount could be
 *	zero, vnode_t::v_interlock will be held.  To change v_usecount
 *	away from zero, the interlock must be held.  To change from a
 *	non-zero value to zero, again the interlock must be held.
 *
 *	Changing the usecount from a non-zero value to a non-zero value can
 *	safely be done using atomic operations, without the interlock held.
 */
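
/*
 * Usage sketch (illustrative only): a file system typically obtains a
 * referenced vnode from the cache and releases it again as below, where
 * "mp" and "key" are placeholders for the mount point and the
 * fs-specific key (for example an inode number):
 *
 *	vnode_t *vp;
 *	int error;
 *
 *	error = vcache_get(mp, &key, sizeof(key), &vp);
 *	if (error != 0)
 *		return error;
 *	... use vp; take extra references with vref(vp) ...
 *	vrele(vp);	(may call VOP_INACTIVE() and recycle the vnode)
 */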

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.53 2016/07/07 06:55:43 msaitoh Exp $");

#define _VFS_VNODE_PRIVATE

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC_RELE	0x0001	/* Always defer to vrele thread. */

enum vcache_state {
	VN_MARKER,	/* Stable, used as marker. Will not change. */
	VN_LOADING,	/* Intermediate, initialising the fs node. */
	VN_ACTIVE,	/* Stable, valid fs node attached. */
	VN_BLOCKED,	/* Intermediate, active, no new references allowed. */
	VN_RECLAIMING,	/* Intermediate, detaching the fs node. */
	VN_RECLAIMED	/* Stable, no fs node attached. */
};
struct vcache_key {
	struct mount *vk_mount;
	const void *vk_key;
	size_t vk_key_len;
};
struct vcache_node {
	struct vnode vn_vnode;
	enum vcache_state vn_state;
	SLIST_ENTRY(vcache_node) vn_hash;
	struct vcache_key vn_key;
};

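/*
 * A vnode and its vcache node overlap: vn_vnode is the first member of
 * struct vcache_node, which is what makes the two casts below safe.
 */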
#define VN_TO_VP(node)	((vnode_t *)(node))
#define VP_TO_VN(vp)	((struct vcache_node *)(vp))

u_int			numvnodes		__cacheline_aligned;

/*
 * There are two free lists: one is for vnodes which have no buffer/page
 * references and one for those which do (i.e. v_holdcnt is non-zero).
 * The vnode recycling mechanism first attempts the former list.
 */
static kmutex_t		vnode_free_list_lock	__cacheline_aligned;
static vnodelst_t	vnode_free_list		__cacheline_aligned;
static vnodelst_t	vnode_hold_list		__cacheline_aligned;
static kcondvar_t	vdrain_cv		__cacheline_aligned;

static vnodelst_t	vrele_list		__cacheline_aligned;
static kmutex_t		vrele_lock		__cacheline_aligned;
static kcondvar_t	vrele_cv		__cacheline_aligned;
static lwp_t *		vrele_lwp		__cacheline_aligned;
static int		vrele_pending		__cacheline_aligned;
static int		vrele_gen		__cacheline_aligned;

SLIST_HEAD(hashhead, vcache_node);
static struct {
	kmutex_t	lock;
	kcondvar_t	cv;
	u_long		hashmask;
	struct hashhead	*hashtab;
	pool_cache_t	pool;
}			vcache			__cacheline_aligned;

static int		cleanvnode(void);
static struct vcache_node *vcache_alloc(void);
static void		vcache_free(struct vcache_node *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vclean(vnode_t *);
static void		vrelel(vnode_t *, int);
static void		vdrain_thread(void *);
static void		vrele_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount	*dead_rootmount;
extern int		(**dead_vnodeop_p)(void *);
extern struct vfsops	dead_vfsops;

/* Vnode state operations and diagnostics. */

static const char *
vstate_name(enum vcache_state state)
{

	switch (state) {
	case VN_MARKER:
		return "MARKER";
	case VN_LOADING:
		return "LOADING";
	case VN_ACTIVE:
		return "ACTIVE";
	case VN_BLOCKED:
		return "BLOCKED";
	case VN_RECLAIMING:
		return "RECLAIMING";
	case VN_RECLAIMED:
		return "RECLAIMED";
	default:
		return "ILLEGAL";
	}
}

#if defined(DIAGNOSTIC)

#define VSTATE_GET(vp) \
	vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
	vstate_assert_wait_stable((vp), __func__, __LINE__)
#define VSTATE_ASSERT(vp, state) \
	vstate_assert((vp), (state), __func__, __LINE__)

static void
vstate_assert(vnode_t *vp, enum vcache_state state, const char *func, int line)
{
	struct vcache_node *node = VP_TO_VN(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if (__predict_true(node->vn_state == state))
		return;
	vnpanic(vp, "state is %s, expected %s at %s:%d",
	    vstate_name(node->vn_state), vstate_name(state), func, line);
}

static enum vcache_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	struct vcache_node *node = VP_TO_VN(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (node->vn_state == VN_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vn_state), func, line);

	return node->vn_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	struct vcache_node *node = VP_TO_VN(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (node->vn_state == VN_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vn_state), func, line);

	while (node->vn_state != VN_ACTIVE && node->vn_state != VN_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (node->vn_state == VN_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vn_state), func, line);
}

static void
vstate_assert_change(vnode_t *vp, enum vcache_state from, enum vcache_state to,
    const char *func, int line)
{
	struct vcache_node *node = VP_TO_VN(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VN_LOADING)
		KASSERTMSG(mutex_owned(&vcache.lock), "at %s:%d", func, line);

	if (from == VN_MARKER)
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (to == VN_MARKER)
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (node->vn_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(node->vn_state), vstate_name(from), func, line);

	node->vn_state = to;
	if (from == VN_LOADING)
		cv_broadcast(&vcache.cv);
	if (to == VN_ACTIVE || to == VN_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
	(VP_TO_VN((vp))->vn_state)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
	vstate_wait_stable((vp))
#define VSTATE_ASSERT(vp, state)

static void
vstate_wait_stable(vnode_t *vp)
{
	struct vcache_node *node = VP_TO_VN(vp);

	while (node->vn_state != VN_ACTIVE && node->vn_state != VN_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vcache_state from, enum vcache_state to)
{
	struct vcache_node *node = VP_TO_VN(vp);

	node->vn_state = to;
	if (from == VN_LOADING)
		cv_broadcast(&vcache.cv);
	if (to == VN_ACTIVE || to == VN_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */
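
/*
 * Illustrative pattern (a sketch; it mirrors vrevoke() below): vnode
 * state is inspected and changed with v_interlock held, waiting for a
 * stable state first:
 *
 *	mutex_enter(vp->v_interlock);
 *	VSTATE_WAIT_STABLE(vp);
 *	if (VSTATE_GET(vp) == VN_RECLAIMED)
 *		... the vnode is dead ...
 *	mutex_exit(vp->v_interlock);
 */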

void
vfs_vnode_sysinit(void)
{
	int error __diagused;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag = IMNT_MPSAFE;

	mutex_init(&vnode_free_list_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vrele_list);

	vcache_init();

	mutex_init(&vrele_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vrele_cv, "vrele");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, NULL, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vrele_thread,
	    NULL, &vrele_lwp, "vrele");
	KASSERTMSG((error == 0), "kthread_create(vrele) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	struct vcache_node *node;
	vnode_t *vp;

	node = pool_cache_get(vcache.pool, PR_WAITOK);
	memset(node, 0, sizeof(*node));
	vp = VN_TO_VP(node);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	node->vn_state = VN_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	struct vcache_node *node;

	node = VP_TO_VN(vp);
	KASSERT(node->vn_state == VN_MARKER);
	uvm_obj_destroy(&vp->v_uobj, true);
	pool_cache_put(vcache.pool, node);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VP_TO_VN(vp)->vn_state == VN_MARKER);
}

/*
 * cleanvnode: grab a vnode from the freelist, clean and free it.
 *
 * => Releases vnode_free_list_lock.
 */
static int
cleanvnode(void)
{
	vnode_t *vp;
	vnodelst_t *listhd;
	struct mount *mp;

	KASSERT(mutex_owned(&vnode_free_list_lock));

	listhd = &vnode_free_list;
try_nextlist:
	TAILQ_FOREACH(vp, listhd, v_freelist) {
		/*
		 * It's safe to test v_usecount and v_iflag
		 * without holding the interlock here, since
		 * these vnodes should never appear on the
		 * lists.
		 */
		KASSERT(vp->v_usecount == 0);
		KASSERT(vp->v_freelisthd == listhd);

		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
			continue;
		if (!mutex_tryenter(vp->v_interlock)) {
			VOP_UNLOCK(vp);
			continue;
		}
		mp = vp->v_mount;
		if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
			mutex_exit(vp->v_interlock);
			VOP_UNLOCK(vp);
			continue;
		}
		break;
	}

	if (vp == NULL) {
		if (listhd == &vnode_free_list) {
			listhd = &vnode_hold_list;
			goto try_nextlist;
		}
		mutex_exit(&vnode_free_list_lock);
		return EBUSY;
	}

	/* Remove it from the freelist. */
	TAILQ_REMOVE(listhd, vp, v_freelist);
	vp->v_freelisthd = NULL;
	mutex_exit(&vnode_free_list_lock);

	KASSERT(vp->v_usecount == 0);

	/*
	 * The vnode is still associated with a file system, so we must
	 * clean it out before freeing it.  We need to add a reference
	 * before doing this.
	 */
	vp->v_usecount = 1;
	vclean(vp);
	vrelel(vp, 0);
	fstrans_done(mp);

	return 0;
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes.
 */
static void
vdrain_thread(void *cookie)
{
	int error;

	mutex_enter(&vnode_free_list_lock);

	for (;;) {
		cv_timedwait(&vdrain_cv, &vnode_free_list_lock, hz);
		while (numvnodes > desiredvnodes) {
			error = cleanvnode();
			if (error)
				kpause("vndsbusy", false, hz, NULL);
			mutex_enter(&vnode_free_list_lock);
			if (error)
				break;
		}
	}
}

/*
 * Remove a vnode from its freelist.
 */
void
vremfree(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount == 0);

	/*
	 * Note that the reference count must not change until
	 * the vnode is removed.
	 */
	mutex_enter(&vnode_free_list_lock);
	if (vp->v_holdcnt > 0) {
		KASSERT(vp->v_freelisthd == &vnode_hold_list);
	} else {
		KASSERT(vp->v_freelisthd == &vnode_free_list);
	}
	TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
	vp->v_freelisthd = NULL;
	mutex_exit(&vnode_free_list_lock);
}

/*
 * vget: get a particular vnode from the free list, increment its reference
 * count and return it.
 *
 * => Must be called with v_interlock held.
 *
 * If state is VN_RECLAIMING, the vnode may be eliminated in vgone()/vclean().
 * In that case, we cannot grab the vnode, so the process is awakened when
 * the transition is completed, and an error returned to indicate that the
 * vnode is no longer usable.
 *
 * If state is VN_LOADING or VN_BLOCKED, wait until the vnode enters a
 * stable state (VN_ACTIVE or VN_RECLAIMED).
 */
int
vget(vnode_t *vp, int flags, bool waitok)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((flags & ~LK_NOWAIT) == 0);
	KASSERT(waitok == ((flags & LK_NOWAIT) == 0));

	/*
	 * Before adding a reference, we must remove the vnode
	 * from its freelist.
	 */
	if (vp->v_usecount == 0) {
		vremfree(vp);
		vp->v_usecount = 1;
	} else {
		atomic_inc_uint(&vp->v_usecount);
	}

	/*
	 * If the vnode is in the process of changing state we wait
	 * for the change to complete and take care not to return
	 * a clean vnode.
	 */
	if (! ISSET(flags, LK_NOWAIT))
		VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VN_RECLAIMED) {
		vrelel(vp, 0);
		return ENOENT;
	} else if (VSTATE_GET(vp) != VN_ACTIVE) {
		KASSERT(ISSET(flags, LK_NOWAIT));
		vrelel(vp, 0);
		return EBUSY;
	}

	/*
	 * Ok, we got it in good shape.
	 */
	VSTATE_ASSERT(vp, VN_ACTIVE);
	mutex_exit(vp->v_interlock);

	return 0;
}

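/*
 * A minimal sketch of a vget() call site (compare vcache_get() below);
 * note that vget() consumes v_interlock on all paths:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vget(vp, 0, true);
 *	if (error == 0)
 *		... vp is referenced and usable ...
 */
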
/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Try to drop a reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static inline bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = vp->v_usecount;; use = next) {
		if (use == 1) {
			return false;
		}
		KASSERT(use > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * Vnode release.  If the reference count drops to zero, call the
 * inactive routine and either return the vnode to the freelist or
 * free it to the pool.
 */
static void
vrelel(vnode_t *vp, int flags)
{
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_freelisthd == NULL);

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VN_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count
	 * and unlock.
	 */
	if (vtryrele(vp)) {
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
	if (VSTATE_GET(vp) != VN_RECLAIMED) {
		recycle = false;

		/*
		 * XXX This ugly block can be largely eliminated if
		 * locking is pushed down into the file systems.
		 *
		 * Defer vnode release to vrele_thread if caller
		 * requests it explicitly or is the pagedaemon.
		 */
		if ((curlwp == uvm.pagedaemon_lwp) ||
		    (flags & VRELEL_ASYNC_RELE) != 0) {
			defer = true;
		} else if (curlwp == vrele_lwp) {
			/*
			 * We have to try harder.
			 */
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			KASSERTMSG((error == 0), "vn_lock failed: %d", error);
			mutex_enter(vp->v_interlock);
			defer = false;
		} else {
			/* If we can't acquire the lock, then defer. */
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp,
			    LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
			defer = (error != 0);
			mutex_enter(vp->v_interlock);
		}

		KASSERT(mutex_owned(vp->v_interlock));
		KASSERT(! (curlwp == vrele_lwp && defer));

		if (defer) {
			/*
			 * Defer reclaim to the kthread; it's not safe to
			 * clean it here.  We donate it our last reference.
			 */
			mutex_enter(&vrele_lock);
			TAILQ_INSERT_TAIL(&vrele_list, vp, v_freelist);
			if (++vrele_pending > (desiredvnodes >> 8))
				cv_signal(&vrele_cv);
			mutex_exit(&vrele_lock);
			mutex_exit(vp->v_interlock);
			return;
		}

		/*
		 * If the node got another reference while we
		 * released the interlock, don't try to inactivate it yet.
		 */
		if (__predict_false(vtryrele(vp))) {
			VOP_UNLOCK(vp);
			mutex_exit(vp->v_interlock);
			return;
		}
		VSTATE_CHANGE(vp, VN_ACTIVE, VN_BLOCKED);
		mutex_exit(vp->v_interlock);

		/*
		 * The vnode must not gain another reference while being
		 * deactivated.  If VOP_INACTIVE() indicates that
		 * the described file has been deleted, then recycle
		 * the vnode.
		 *
		 * Note that VOP_INACTIVE() will drop the vnode lock.
		 */
		VOP_INACTIVE(vp, &recycle);
		if (recycle) {
			/* vclean() below will drop the lock. */
			if (vn_lock(vp, LK_EXCLUSIVE) != 0)
				recycle = false;
		}
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VN_BLOCKED, VN_ACTIVE);
		if (!recycle) {
			if (vtryrele(vp)) {
				mutex_exit(vp->v_interlock);
				return;
			}
		}

		/* Take care of space accounting. */
		if (vp->v_iflag & VI_EXECMAP) {
			atomic_add_int(&uvmexp.execpages,
			    -vp->v_uobj.uo_npages);
			atomic_add_int(&uvmexp.filepages,
			    vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			VSTATE_ASSERT(vp, VN_ACTIVE);
			vclean(vp);
		}
		KASSERT(vp->v_usecount > 0);
	}

	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(vp->v_interlock);
		return;
	}

	if (VSTATE_GET(vp) == VN_RECLAIMED) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		KASSERT(vp->v_holdcnt == 0);
		KASSERT(vp->v_writecount == 0);
		mutex_exit(vp->v_interlock);
		vfs_insmntque(vp, NULL);
		if (vp->v_type == VBLK || vp->v_type == VCHR) {
			spec_node_destroy(vp);
		}
		vcache_free(VP_TO_VN(vp));
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		mutex_enter(&vnode_free_list_lock);
		if (vp->v_holdcnt > 0) {
			vp->v_freelisthd = &vnode_hold_list;
		} else {
			vp->v_freelisthd = &vnode_free_list;
		}
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
		mutex_exit(vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0);
}

/*
 * Asynchronous vnode release; the vnode is released in a different context.
 */
void
vrele_async(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC_RELE);
}

static void
vrele_thread(void *cookie)
{
	vnodelst_t skip_list;
	vnode_t *vp;
	struct mount *mp;

	TAILQ_INIT(&skip_list);

	mutex_enter(&vrele_lock);
	for (;;) {
		while (TAILQ_EMPTY(&vrele_list)) {
			vrele_gen++;
			cv_broadcast(&vrele_cv);
			cv_timedwait(&vrele_cv, &vrele_lock, hz);
			TAILQ_CONCAT(&vrele_list, &skip_list, v_freelist);
		}
		vp = TAILQ_FIRST(&vrele_list);
		mp = vp->v_mount;
		TAILQ_REMOVE(&vrele_list, vp, v_freelist);
		if (fstrans_start_nowait(mp, FSTRANS_LAZY) != 0) {
			TAILQ_INSERT_TAIL(&skip_list, vp, v_freelist);
			continue;
		}
		vrele_pending--;
		mutex_exit(&vrele_lock);

		/*
		 * If not the last reference, then ignore the vnode
		 * and look for more work.
		 */
		mutex_enter(vp->v_interlock);
		vrelel(vp, 0);
		fstrans_done(mp);
		mutex_enter(&vrele_lock);
	}
}

void
vrele_flush(void)
{
	int gen;

	mutex_enter(&vrele_lock);
	gen = vrele_gen;
	while (vrele_pending && gen == vrele_gen) {
		cv_broadcast(&vrele_cv);
		cv_wait(&vrele_cv, &vrele_lock);
	}
	mutex_exit(&vrele_lock);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 */
void
vref(vnode_t *vp)
{

	KASSERT(vp->v_usecount != 0);

	atomic_inc_uint(&vp->v_usecount);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0) {
		mutex_enter(&vnode_free_list_lock);
		KASSERT(vp->v_freelisthd == &vnode_free_list);
		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
		vp->v_freelisthd = &vnode_hold_list;
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
	}
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		mutex_enter(&vnode_free_list_lock);
		KASSERT(vp->v_freelisthd == &vnode_hold_list);
		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
		vp->v_freelisthd = &vnode_free_list;
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
	}
}

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with vnode locked and will return unlocked.
 * Must be called with the interlock held, and will return with it held.
 */
static void
vclean(vnode_t *vp)
{
	lwp_t *l = curlwp;
	bool recycle, active;
	int error;

	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount != 0);

	active = (vp->v_usecount > 1);
	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	VSTATE_CHANGE(vp, VN_ACTIVE, VN_RECLAIMING);
	if (vp->v_iflag & VI_EXECMAP) {
		atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
		atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	mutex_exit(vp->v_interlock);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.  Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
	if (error != 0) {
		if (wapbl_vphaswapbl(vp))
			WAPBL_DISCARD(wapbl_vptomp(vp));
		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
	}
	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
		spec_node_revoke(vp);
	}
	if (active) {
		VOP_INACTIVE(vp, &recycle);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VN_RECLAIMED, then call the new lock operation.
		 */
		VOP_UNLOCK(vp);
	}

	/* Disassociate the underlying file system from the vnode. */
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	KASSERT(vp->v_data == NULL);
	KASSERT(vp->v_uobj.uo_npages == 0);

	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}

	/* Purge name cache. */
	cache_purge(vp);

	/* Move to dead mount. */
	vp->v_vflag &= ~VV_ROOT;
	atomic_inc_uint(&dead_rootmount->mnt_refcnt);
	vfs_insmntque(vp, dead_rootmount);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_vflag |= VV_LOCKSWORK;
	VSTATE_CHANGE(vp, VN_RECLAIMING, VN_RECLAIMED);
	vp->v_tag = VT_NON;
	KNOTE(&vp->v_klist, NOTE_REVOKE);

	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}

/*
 * Recycle an unused vnode if caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{

	if (vn_lock(vp, LK_EXCLUSIVE) != 0)
		return false;

	mutex_enter(vp->v_interlock);

	if (vp->v_usecount != 1) {
		mutex_exit(vp->v_interlock);
		VOP_UNLOCK(vp);
		return false;
	}
	vclean(vp);
	vrelel(vp, 0);
	return true;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vp->v_usecount > 0);

	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VN_RECLAIMED) {
		mutex_exit(vp->v_interlock);
		return;
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		mutex_exit(vp->v_interlock);
		vgone(vp);
		return;
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);
	}

	while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
		vgone(vq);
	}
}
/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{

	if (vn_lock(vp, LK_EXCLUSIVE) != 0) {
		VSTATE_ASSERT(vp, VN_RECLAIMED);
		vrele(vp);
		return;
	}

	mutex_enter(vp->v_interlock);
	vclean(vp);
	vrelel(vp, 0);
}

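/*
 * Hash the cache key.  The key is the pair (mount, fs-specific key);
 * the fs-specific part is opaque to this layer and would typically be,
 * e.g., an inode number for a disk file system.
 */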
static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
	uint32_t hash = HASH32_BUF_INIT;

	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
	return hash;
}

static void
vcache_init(void)
{

	vcache.pool = pool_cache_init(sizeof(struct vcache_node), 0, 0, 0,
	    "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vcache.pool != NULL);
	mutex_init(&vcache.lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vcache.cv, "vcache");
	vcache.hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
	    &vcache.hashmask);
}

static void
vcache_reinit(void)
{
	int i;
	uint32_t hash;
	u_long oldmask, newmask;
	struct hashhead *oldtab, *newtab;
	struct vcache_node *node;

	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
	mutex_enter(&vcache.lock);
	oldtab = vcache.hashtab;
	oldmask = vcache.hashmask;
	vcache.hashtab = newtab;
	vcache.hashmask = newmask;
	for (i = 0; i <= oldmask; i++) {
		while ((node = SLIST_FIRST(&oldtab[i])) != NULL) {
			SLIST_REMOVE(&oldtab[i], node, vcache_node, vn_hash);
			hash = vcache_hash(&node->vn_key);
			SLIST_INSERT_HEAD(&newtab[hash & vcache.hashmask],
			    node, vn_hash);
		}
	}
	mutex_exit(&vcache.lock);
	hashdone(oldtab, HASH_SLIST, oldmask);
}

static inline struct vcache_node *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
	struct hashhead *hashp;
	struct vcache_node *node;

	KASSERT(mutex_owned(&vcache.lock));

	hashp = &vcache.hashtab[hash & vcache.hashmask];
	SLIST_FOREACH(node, hashp, vn_hash) {
		if (key->vk_mount != node->vn_key.vk_mount)
			continue;
		if (key->vk_key_len != node->vn_key.vk_key_len)
			continue;
		if (memcmp(key->vk_key, node->vn_key.vk_key, key->vk_key_len))
			continue;
		return node;
	}
	return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static struct vcache_node *
vcache_alloc(void)
{
	struct vcache_node *node;
	vnode_t *vp;

	node = pool_cache_get(vcache.pool, PR_WAITOK);
	memset(node, 0, sizeof(*node));

	/* SLIST_INIT(&node->vn_hash); */

	vp = VN_TO_VP(node);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	cv_init(&vp->v_cv, "vnode");
	/* LIST_INIT(&vp->v_nclist); */
	/* LIST_INIT(&vp->v_dnclist); */

	mutex_enter(&vnode_free_list_lock);
	numvnodes++;
	if (numvnodes > desiredvnodes + desiredvnodes / 10)
		cv_signal(&vdrain_cv);
	mutex_exit(&vnode_free_list_lock);

	rw_init(&vp->v_lock);
	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	node->vn_state = VN_LOADING;

	return node;
}

/*
 * Free an unused, unreferenced vcache node.
 */
static void
vcache_free(struct vcache_node *node)
{
	vnode_t *vp;

	vp = VN_TO_VP(node);

	KASSERT(vp->v_usecount == 0);

	rw_destroy(&vp->v_lock);
	mutex_enter(&vnode_free_list_lock);
	numvnodes--;
	mutex_exit(&vnode_free_list_lock);

	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	pool_cache_put(vcache.pool, node);
}

/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
	int error;
	uint32_t hash;
	const void *new_key;
	struct vnode *vp;
	struct vcache_key vcache_key;
	struct vcache_node *node, *new_node;

	new_key = NULL;
	*vpp = NULL;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

again:
	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&vcache_key, hash);

	/* If found, take a reference or retry. */
	if (__predict_true(node != NULL)) {
		/*
		 * If the vnode is loading we cannot take the v_interlock
		 * here as it might change during load (see uvm_obj_setlock()).
		 * As changing state from VN_LOADING requires both vcache.lock
		 * and v_interlock it is safe to test with vcache.lock held.
		 *
		 * Wait for vnodes changing state from VN_LOADING and retry.
		 */
		if (__predict_false(node->vn_state == VN_LOADING)) {
			cv_wait(&vcache.cv, &vcache.lock);
			mutex_exit(&vcache.lock);
			goto again;
		}
		vp = VN_TO_VP(node);
		mutex_enter(vp->v_interlock);
		mutex_exit(&vcache.lock);
		error = vget(vp, 0, true /* wait */);
		if (error == ENOENT)
			goto again;
		if (error == 0)
			*vpp = vp;
		KASSERT((error != 0) == (*vpp == NULL));
		return error;
	}
	mutex_exit(&vcache.lock);

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp, NULL);
	if (error)
		return error;
	new_node = vcache_alloc();
	new_node->vn_key = vcache_key;
	vp = VN_TO_VP(new_node);
	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&vcache_key, hash);
	if (node == NULL) {
		SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
		    new_node, vn_hash);
		node = new_node;
	}

	/* If another thread beat us inserting this node, retry. */
	if (node != new_node) {
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VN_LOADING, VN_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		goto again;
	}
	mutex_exit(&vcache.lock);

	/* Load the fs node.  Exclusive as new_node is VN_LOADING. */
	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
	if (error) {
		mutex_enter(&vcache.lock);
		SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
		    new_node, vcache_node, vn_hash);
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VN_LOADING, VN_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_key != NULL);
	KASSERT(memcmp(key, new_key, key_len) == 0);
	KASSERT(vp->v_op != NULL);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_unbusy(mp, true, NULL);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache.lock);
	new_node->vn_key.vk_key = new_key;
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VN_LOADING, VN_ACTIVE);
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache.lock);
	*vpp = vp;
	return 0;
}
1397 1.36 hannken
1398 1.36 hannken /*
1399 1.40 hannken * Create a new vnode / fs node pair and return it referenced through vpp.
1400 1.40 hannken */
1401 1.40 hannken int
1402 1.40 hannken vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
1403 1.40 hannken kauth_cred_t cred, struct vnode **vpp)
1404 1.40 hannken {
1405 1.40 hannken int error;
1406 1.40 hannken uint32_t hash;
1407 1.52 hannken struct vnode *ovp, *vp;
1408 1.40 hannken struct vcache_node *new_node;
1409 1.40 hannken struct vcache_node *old_node __diagused;
1410 1.40 hannken
1411 1.40 hannken *vpp = NULL;
1412 1.40 hannken
1413 1.40 hannken /* Allocate and initialize a new vcache / vnode pair. */
1414 1.40 hannken error = vfs_busy(mp, NULL);
1415 1.40 hannken if (error)
1416 1.40 hannken return error;
1417 1.50 hannken new_node = vcache_alloc();
1418 1.40 hannken new_node->vn_key.vk_mount = mp;
1419 1.50 hannken vp = VN_TO_VP(new_node);
1420 1.40 hannken
1421 1.40 hannken /* Create and load the fs node. */
1422 1.40 hannken error = VFS_NEWVNODE(mp, dvp, vp, vap, cred,
1423 1.40 hannken &new_node->vn_key.vk_key_len, &new_node->vn_key.vk_key);
1424 1.40 hannken if (error) {
1425 1.52 hannken mutex_enter(&vcache.lock);
1426 1.52 hannken mutex_enter(vp->v_interlock);
1427 1.52 hannken VSTATE_CHANGE(vp, VN_LOADING, VN_RECLAIMED);
1428 1.52 hannken mutex_exit(&vcache.lock);
1429 1.52 hannken vrelel(vp, 0);
1430 1.40 hannken vfs_unbusy(mp, false, NULL);
1431 1.40 hannken KASSERT(*vpp == NULL);
1432 1.40 hannken return error;
1433 1.40 hannken }
1434 1.40 hannken KASSERT(new_node->vn_key.vk_key != NULL);
1435 1.40 hannken KASSERT(vp->v_op != NULL);
1436 1.40 hannken hash = vcache_hash(&new_node->vn_key);
1437 1.40 hannken
1438 1.40 hannken /* Wait for previous instance to be reclaimed, then insert new node. */
1439 1.40 hannken mutex_enter(&vcache.lock);
1440 1.40 hannken while ((old_node = vcache_hash_lookup(&new_node->vn_key, hash))) {
1441 1.52 hannken ovp = VN_TO_VP(old_node);
1442 1.52 hannken mutex_enter(ovp->v_interlock);
1443 1.40 hannken mutex_exit(&vcache.lock);
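                /*
                 * The fs just allocated this identity, so any node found
                 * under the key is a previous incarnation on its way out;
                 * a waiting vget() can only finish with ENOENT.
                 */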
1444 1.52 hannken error = vget(ovp, 0, true /* wait */);
1445 1.52 hannken KASSERT(error == ENOENT);
1446 1.40 hannken mutex_enter(&vcache.lock);
1447 1.40 hannken }
1448 1.40 hannken SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
1449 1.40 hannken new_node, vn_hash);
1450 1.40 hannken mutex_exit(&vcache.lock);
1451 1.40 hannken vfs_insmntque(vp, mp);
1452 1.40 hannken if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1453 1.40 hannken vp->v_vflag |= VV_MPSAFE;
1454 1.40 hannken vfs_unbusy(mp, true, NULL);
1455 1.40 hannken
1456 1.40 hannken /* Finished loading, finalize node. */
1457 1.40 hannken mutex_enter(&vcache.lock);
1458 1.52 hannken mutex_enter(vp->v_interlock);
1459 1.52 hannken VSTATE_CHANGE(vp, VN_LOADING, VN_ACTIVE);
1460 1.40 hannken mutex_exit(vp->v_interlock);
1461 1.40 hannken mutex_exit(&vcache.lock);
1462 1.40 hannken *vpp = vp;
1463 1.40 hannken return 0;
1464 1.40 hannken }
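
/*
 * Usage sketch (illustrative only; names are hypothetical): allocating
 * a fresh fs node from a create-style operation.  vcache_new() drives
 * VFS_NEWVNODE() and hands back a referenced vnode in state VN_ACTIVE.
 */
int
myfs_make_node(struct vnode *dvp, struct vattr *vap, kauth_cred_t cred,
    struct vnode **vpp)
{
        int error;

        error = vcache_new(dvp->v_mount, dvp, vap, cred, vpp);
        if (error)
                return error;

        /* Referenced but unlocked, as with vcache_get(). */
        error = vn_lock(*vpp, LK_EXCLUSIVE);
        if (error) {
                vrele(*vpp);
                *vpp = NULL;
        }
        return error;
}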
1465 1.40 hannken
1466 1.40 hannken /*
1467 1.37 hannken * Prepare key change: lock old and new cache node.
1468 1.37 hannken * Return an error if the new node already exists.
1469 1.37 hannken */
1470 1.37 hannken int
1471 1.37 hannken vcache_rekey_enter(struct mount *mp, struct vnode *vp,
1472 1.37 hannken const void *old_key, size_t old_key_len,
1473 1.37 hannken const void *new_key, size_t new_key_len)
1474 1.37 hannken {
1475 1.37 hannken uint32_t old_hash, new_hash;
1476 1.37 hannken struct vcache_key old_vcache_key, new_vcache_key;
1477 1.37 hannken struct vcache_node *node, *new_node;
1478 1.52 hannken struct vnode *tvp;
1479 1.37 hannken
1480 1.37 hannken old_vcache_key.vk_mount = mp;
1481 1.37 hannken old_vcache_key.vk_key = old_key;
1482 1.37 hannken old_vcache_key.vk_key_len = old_key_len;
1483 1.37 hannken old_hash = vcache_hash(&old_vcache_key);
1484 1.37 hannken
1485 1.37 hannken new_vcache_key.vk_mount = mp;
1486 1.37 hannken new_vcache_key.vk_key = new_key;
1487 1.37 hannken new_vcache_key.vk_key_len = new_key_len;
1488 1.37 hannken new_hash = vcache_hash(&new_vcache_key);
1489 1.37 hannken
1490 1.50 hannken new_node = vcache_alloc();
1491 1.37 hannken new_node->vn_key = new_vcache_key;
1492 1.52 hannken tvp = VN_TO_VP(new_node);
1493 1.37 hannken
1494 1.52 hannken /* Insert the new node to act as a locked placeholder. */
1495 1.37 hannken mutex_enter(&vcache.lock);
1496 1.37 hannken node = vcache_hash_lookup(&new_vcache_key, new_hash);
1497 1.37 hannken if (node != NULL) {
1498 1.52 hannken mutex_enter(tvp->v_interlock);
1499 1.52 hannken VSTATE_CHANGE(tvp, VN_LOADING, VN_RECLAIMED);
1500 1.37 hannken mutex_exit(&vcache.lock);
1501 1.52 hannken vrelel(tvp, 0);
1502 1.37 hannken return EEXIST;
1503 1.37 hannken }
1504 1.37 hannken SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
1505 1.37 hannken new_node, vn_hash);
1506 1.49 hannken
1507 1.49 hannken /* Lock old node. */
1508 1.37 hannken node = vcache_hash_lookup(&old_vcache_key, old_hash);
1509 1.37 hannken KASSERT(node != NULL);
1510 1.52 hannken KASSERT(VN_TO_VP(node) == vp);
1511 1.52 hannken mutex_enter(vp->v_interlock);
1512 1.52 hannken VSTATE_CHANGE(vp, VN_ACTIVE, VN_BLOCKED);
1513 1.37 hannken node->vn_key = old_vcache_key;
1514 1.52 hannken mutex_exit(vp->v_interlock);
1515 1.37 hannken mutex_exit(&vcache.lock);
1516 1.37 hannken return 0;
1517 1.37 hannken }
1518 1.37 hannken
1519 1.37 hannken /*
1520 1.37 hannken * Key change complete: remove old node and unlock new node.
1521 1.37 hannken */
1522 1.37 hannken void
1523 1.37 hannken vcache_rekey_exit(struct mount *mp, struct vnode *vp,
1524 1.37 hannken const void *old_key, size_t old_key_len,
1525 1.37 hannken const void *new_key, size_t new_key_len)
1526 1.37 hannken {
1527 1.37 hannken uint32_t old_hash, new_hash;
1528 1.37 hannken struct vcache_key old_vcache_key, new_vcache_key;
1529 1.49 hannken struct vcache_node *old_node, *new_node;
1530 1.52 hannken struct vnode *tvp;
1531 1.37 hannken
1532 1.37 hannken old_vcache_key.vk_mount = mp;
1533 1.37 hannken old_vcache_key.vk_key = old_key;
1534 1.37 hannken old_vcache_key.vk_key_len = old_key_len;
1535 1.37 hannken old_hash = vcache_hash(&old_vcache_key);
1536 1.37 hannken
1537 1.37 hannken new_vcache_key.vk_mount = mp;
1538 1.37 hannken new_vcache_key.vk_key = new_key;
1539 1.37 hannken new_vcache_key.vk_key_len = new_key_len;
1540 1.37 hannken new_hash = vcache_hash(&new_vcache_key);
1541 1.37 hannken
1542 1.37 hannken mutex_enter(&vcache.lock);
1543 1.49 hannken
1544 1.49 hannken /* Look up the old and new nodes. */
1545 1.49 hannken old_node = vcache_hash_lookup(&old_vcache_key, old_hash);
1546 1.49 hannken KASSERT(old_node != NULL);
1547 1.52 hannken KASSERT(VN_TO_VP(old_node) == vp);
1548 1.52 hannken mutex_enter(vp->v_interlock);
1549 1.52 hannken VSTATE_ASSERT(vp, VN_BLOCKED);
1550 1.52 hannken
1551 1.49 hannken new_node = vcache_hash_lookup(&new_vcache_key, new_hash);
1552 1.52 hannken KASSERT(new_node != NULL);
1553 1.49 hannken KASSERT(new_node->vn_key.vk_key_len == new_key_len);
1554 1.52 hannken tvp = VN_TO_VP(new_node);
1555 1.52 hannken mutex_enter(tvp->v_interlock);
1556 1.52 hannken VSTATE_ASSERT(tvp, VN_LOADING);
1557 1.49 hannken
1558 1.49 hannken /* Rekey old node and put it onto its new hashlist. */
1559 1.49 hannken old_node->vn_key = new_vcache_key;
1560 1.49 hannken if (old_hash != new_hash) {
1561 1.49 hannken SLIST_REMOVE(&vcache.hashtab[old_hash & vcache.hashmask],
1562 1.49 hannken old_node, vcache_node, vn_hash);
1563 1.49 hannken SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
1564 1.49 hannken old_node, vn_hash);
1565 1.49 hannken }
1566 1.52 hannken VSTATE_CHANGE(vp, VN_BLOCKED, VN_ACTIVE);
1567 1.52 hannken mutex_exit(vp->v_interlock);
1568 1.49 hannken
1569 1.49 hannken /* Remove the new node that served as the placeholder. */
1570 1.49 hannken SLIST_REMOVE(&vcache.hashtab[new_hash & vcache.hashmask],
1571 1.49 hannken new_node, vcache_node, vn_hash);
1572 1.52 hannken VSTATE_CHANGE(tvp, VN_LOADING, VN_RECLAIMED);
1573 1.37 hannken mutex_exit(&vcache.lock);
1574 1.52 hannken vrelel(tvp, 0);
1575 1.37 hannken }
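
/*
 * Usage sketch (illustrative only): the two rekey calls bracket the
 * fs-specific identity change.  Between them the vnode is VN_BLOCKED
 * and a placeholder occupies the new key, so a concurrent vcache_get()
 * on either key waits instead of seeing a half-renamed node.
 */
int
myfs_change_key(struct mount *mp, struct vnode *vp,
    const void *okey, size_t olen, const void *nkey, size_t nlen)
{
        int error;

        error = vcache_rekey_enter(mp, vp, okey, olen, nkey, nlen);
        if (error)
                return error;           /* EEXIST: new key is in use. */

        /* ... switch the fs node over to its new identity here ... */

        vcache_rekey_exit(mp, vp, okey, olen, nkey, nlen);
        return 0;
}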
1576 1.37 hannken
1577 1.37 hannken /*
1578 1.36 hannken * Remove a vnode / fs node pair from the cache.
1579 1.36 hannken */
1580 1.36 hannken void
1581 1.36 hannken vcache_remove(struct mount *mp, const void *key, size_t key_len)
1582 1.36 hannken {
1583 1.36 hannken uint32_t hash;
1584 1.36 hannken struct vcache_key vcache_key;
1585 1.36 hannken struct vcache_node *node;
1586 1.36 hannken
1587 1.36 hannken vcache_key.vk_mount = mp;
1588 1.36 hannken vcache_key.vk_key = key;
1589 1.36 hannken vcache_key.vk_key_len = key_len;
1590 1.36 hannken hash = vcache_hash(&vcache_key);
1591 1.36 hannken
1592 1.36 hannken mutex_enter(&vcache.lock);
1593 1.36 hannken node = vcache_hash_lookup(&vcache_key, hash);
1594 1.36 hannken KASSERT(node != NULL);
1595 1.36 hannken SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
1596 1.36 hannken node, vcache_node, vn_hash);
1597 1.36 hannken mutex_exit(&vcache.lock);
1598 1.50 hannken }
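
/*
 * Usage sketch (illustrative only; "struct myfs_node" and its "ino"
 * member are hypothetical): a reclaim-style operation dropping its
 * cache entry.  vcache_remove() only unhooks the node from the hash
 * table; releasing the fs-private data stays with the caller.
 */
int
myfs_reclaim(struct vnode *vp)
{
        struct myfs_node *np = vp->v_data;

        vcache_remove(vp->v_mount, &np->ino, sizeof(np->ino));
        vp->v_data = NULL;
        kmem_free(np, sizeof(*np));
        return 0;
}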
1599 1.50 hannken
1600 1.50 hannken /*
1601 1.50 hannken * Print a vcache node.
1602 1.50 hannken */
1603 1.50 hannken void
1604 1.50 hannken vcache_print(vnode_t *vp, const char *prefix, void (*pr)(const char *, ...))
1605 1.50 hannken {
1606 1.50 hannken int n;
1607 1.50 hannken const uint8_t *cp;
1608 1.50 hannken struct vcache_node *node;
1609 1.50 hannken
1610 1.50 hannken node = VP_TO_VN(vp);
1611 1.50 hannken n = node->vn_key.vk_key_len;
1612 1.50 hannken cp = node->vn_key.vk_key;
1613 1.50 hannken
1614 1.51 hannken (*pr)("%sstate %s, key(%d)", prefix, vstate_name(node->vn_state), n);
1615 1.50 hannken
1616 1.50 hannken while (n-- > 0)
1617 1.50 hannken (*pr)(" %02x", *cp++);
1618 1.50 hannken (*pr)("\n");
1619 1.36 hannken }
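
/*
 * For example, with prefix "  " a node in state VN_ACTIVE whose key is
 * the two bytes 0x12 0x34 would print as (assuming vstate_name()
 * renders VN_ACTIVE as "ACTIVE"):
 *
 *        state ACTIVE, key(2) 12 34
 */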
1620 1.36 hannken
1621 1.1 rmind /*
1622 1.1 rmind * Update the outstanding I/O count and wake up waiters once it drains to zero.
1623 1.1 rmind */
1624 1.1 rmind void
1625 1.1 rmind vwakeup(struct buf *bp)
1626 1.1 rmind {
1627 1.1 rmind vnode_t *vp;
1628 1.1 rmind
1629 1.1 rmind if ((vp = bp->b_vp) == NULL)
1630 1.1 rmind return;
1631 1.1 rmind
1632 1.9 rmind KASSERT(bp->b_objlock == vp->v_interlock);
1633 1.1 rmind KASSERT(mutex_owned(bp->b_objlock));
1634 1.1 rmind
1635 1.1 rmind if (--vp->v_numoutput < 0)
1636 1.11 christos vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
1637 1.1 rmind if (vp->v_numoutput == 0)
1638 1.1 rmind cv_broadcast(&vp->v_cv);
1639 1.1 rmind }
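
/*
 * Sketch of the waiting side (illustrative only): writers bump
 * v_numoutput under v_interlock before issuing I/O, and a drainer
 * sleeps on v_cv until the decrements in vwakeup() reach zero.
 */
static void
myfs_wait_output(struct vnode *vp)
{

        mutex_enter(vp->v_interlock);
        while (vp->v_numoutput > 0)
                cv_wait(&vp->v_cv, vp->v_interlock);
        mutex_exit(vp->v_interlock);
}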
1640 1.1 rmind
1641 1.1 rmind /*
1642 1.35 hannken * Test a vnode for being or becoming dead. Returns one of:
1643 1.35 hannken * EBUSY: vnode is becoming dead; only returned when VDEAD_NOWAIT is set in "flags".
1644 1.35 hannken * ENOENT: vnode is dead.
1645 1.35 hannken * 0: otherwise.
1646 1.35 hannken *
1647 1.35 hannken * Whenever this function returns a non-zero value, all future
1648 1.35 hannken * calls will also return a non-zero value.
1649 1.35 hannken */
1650 1.35 hannken int
1651 1.35 hannken vdead_check(struct vnode *vp, int flags)
1652 1.35 hannken {
1653 1.35 hannken
1654 1.35 hannken KASSERT(mutex_owned(vp->v_interlock));
1655 1.35 hannken
1656 1.52 hannken if (! ISSET(flags, VDEAD_NOWAIT))
1657 1.52 hannken VSTATE_WAIT_STABLE(vp);
1658 1.1 rmind
1659 1.52 hannken if (VSTATE_GET(vp) == VN_RECLAIMING) {
1660 1.52 hannken KASSERT(ISSET(flags, VDEAD_NOWAIT));
1661 1.52 hannken return EBUSY;
1662 1.52 hannken } else if (VSTATE_GET(vp) == VN_RECLAIMED) {
1663 1.52 hannken return ENOENT;
1664 1.52 hannken }
1665 1.1 rmind
1666 1.52 hannken return 0;
1667 1.1 rmind }
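
/*
 * Usage sketch (illustrative only): a non-blocking probe.  The
 * interlock must be held, as the KASSERT above enforces; with
 * VDEAD_NOWAIT both EBUSY and ENOENT mean "stay away".
 */
static bool
myfs_vnode_usable(struct vnode *vp)
{
        int error;

        mutex_enter(vp->v_interlock);
        error = vdead_check(vp, VDEAD_NOWAIT);
        mutex_exit(vp->v_interlock);

        return error == 0;
}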
1668 1.1 rmind
1669 1.1 rmind int
1670 1.3 rmind vfs_drainvnodes(long target)
1671 1.1 rmind {
1672 1.12 hannken int error;
1673 1.12 hannken
1674 1.12 hannken mutex_enter(&vnode_free_list_lock);
1675 1.1 rmind
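        /*
         * cleanvnode() consumes vnode_free_list_lock: the lock is
         * released on both success and failure, hence the re-enter at
         * the bottom of the loop and the bare error return.
         */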
1676 1.1 rmind while (numvnodes > target) {
1677 1.12 hannken error = cleanvnode();
1678 1.12 hannken if (error != 0)
1679 1.12 hannken return error;
1680 1.1 rmind mutex_enter(&vnode_free_list_lock);
1681 1.1 rmind }
1682 1.12 hannken
1683 1.12 hannken mutex_exit(&vnode_free_list_lock);
1684 1.12 hannken
1685 1.36 hannken vcache_reinit();
1686 1.36 hannken
1687 1.1 rmind return 0;
1688 1.1 rmind }
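
/*
 * Usage sketch (illustrative only; the handler shown is schematic and
 * "maxvnodes" stands in for whatever variable backs the limit): a
 * kern.maxvnodes-style handler lowering the limit would call
 * vfs_drainvnodes() with the new target and propagate its error if
 * the vnode count cannot be brought down.
 */
static int
myfs_set_maxvnodes(long new_max, long *maxvnodes)
{
        int error;

        if (new_max < *maxvnodes) {
                error = vfs_drainvnodes(new_max);
                if (error)
                        return error;
        }
        *maxvnodes = new_max;
        return 0;
}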
1689 1.1 rmind
1690 1.1 rmind void
1691 1.11 christos vnpanic(vnode_t *vp, const char *fmt, ...)
1692 1.1 rmind {
1693 1.11 christos va_list ap;
1694 1.11 christos
1695 1.1 rmind #ifdef DIAGNOSTIC
1696 1.1 rmind vprint(NULL, vp);
1697 1.1 rmind #endif
1698 1.11 christos va_start(ap, fmt);
1699 1.11 christos vpanic(fmt, ap);
1700 1.11 christos va_end(ap);
1701 1.1 rmind }
1702