/*	$NetBSD: vfs_vnode.c,v 1.56 2016/08/20 12:37:08 hannken Exp $	*/

/*-
 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was another, traditional way.  Currently, only the draining thread
 *	recycles vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In such a case, VOP_INACTIVE(9) is called to
 *	inform the file system that the vnode is inactive.  Via this call,
 *	the file system indicates whether the vnode can be recycled
 *	(usually it checks its own references, e.g. the link count and
 *	whether the file was removed).
 *
 *	Depending on this indication, the vnode can be put onto a free list
 *	(cache), or cleaned via vcache_reclaim, which calls VOP_RECLAIM(9)
 *	to disassociate the underlying file system from the vnode, after
 *	which it is finally destroyed.
 *
 * Vnode state
 *
 *	A vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating with the underlying file system
 *			and is not yet ready to use.
 *	- ACTIVE	Vnode has an associated underlying file system and is
 *			ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from the underlying file
 *			system and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> ACTIVE
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	ACTIVE -> RECLAIMING
 *			Vnode starts disassociation from the underlying file
 *			system in vcache_reclaim().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from the underlying file
 *			system in vcache_reclaim().
 *	ACTIVE -> BLOCKED
 *			Either vcache_rekey*() is changing the vnode key or
 *			vrelel() is about to call VOP_INACTIVE().
 *	BLOCKED -> ACTIVE
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system, or
 *			vcache_rekey*() drops a vnode used as a placeholder.
 *
 *	Of these states LOADING, BLOCKED and RECLAIMING are intermediate
 *	and it is possible to wait for a state change.
 *
 *	State is protected with v_interlock with one exception:
 *	to change from LOADING both v_interlock and vcache.lock must be held,
 *	so it is possible to check "state == LOADING" without holding
 *	v_interlock.  See vcache_get() for details.
 *
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained by the
 *	vref(9) and vrele(9) routines, as well as vput(9).  Common points
 *	holding references are, e.g., file openings, the current working
 *	directory, mount points, etc.
 *
 * Note on v_usecount and its locking
 *
 *	At nearly all points where it is known that v_usecount could be
 *	zero, vnode_t::v_interlock will be held.  To change v_usecount away
 *	from zero, the interlock must be held.  To change from a non-zero
 *	value to zero, again the interlock must be held.
 *
 *	Changing the usecount from a non-zero value to a non-zero value can
 *	safely be done using atomic operations, without the interlock held.
 *
 */
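
/*
 * Illustrative sketch (not part of the implementation): how a file system
 * would typically obtain and release a vnode through the interfaces
 * described above.  The key layout ("struct my_key") and the ino variable
 * are hypothetical; a real file system passes its own key, e.g. an inode
 * number, to vcache_get(9).
 *
 *	struct my_key { ino_t ino; };			// hypothetical key
 *	struct my_key k = { .ino = ino };
 *	struct vnode *vp;
 *	int error;
 *
 *	error = vcache_get(mp, &k, sizeof(k), &vp);	// referenced, unlocked
 *	if (error)
 *		return error;
 *	error = vn_lock(vp, LK_EXCLUSIVE);		// lock for VOP_* use
 *	if (error) {
 *		vrele(vp);				// drop the reference
 *		return error;
 *	}
 *	... use vp ...
 *	vput(vp);					// unlock and release
 */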

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.56 2016/08/20 12:37:08 hannken Exp $");

#define _VFS_VNODE_PRIVATE

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC_RELE	0x0001	/* Always defer to vrele thread. */

enum vcache_state {
	VN_MARKER,	/* Stable, used as marker. Will not change. */
	VN_LOADING,	/* Intermediate, initialising the fs node. */
	VN_ACTIVE,	/* Stable, valid fs node attached. */
	VN_BLOCKED,	/* Intermediate, active, no new references allowed. */
	VN_RECLAIMING,	/* Intermediate, detaching the fs node. */
	VN_RECLAIMED	/* Stable, no fs node attached. */
};
struct vcache_key {
	struct mount *vk_mount;
	const void *vk_key;
	size_t vk_key_len;
};
struct vcache_node {
	struct vnode vn_vnode;
	enum vcache_state vn_state;
	SLIST_ENTRY(vcache_node) vn_hash;
	struct vcache_key vn_key;
};

#define VN_TO_VP(node)	((vnode_t *)(node))
#define VP_TO_VN(vp)	((struct vcache_node *)(vp))

u_int			numvnodes		__cacheline_aligned;

/*
 * There are two free lists: one is for vnodes which have no buffer/page
 * references and one for those which do (i.e. v_holdcnt is non-zero).
 * The vnode recycling mechanism first attempts to look into the former list.
 */
static kmutex_t		vnode_free_list_lock	__cacheline_aligned;
static vnodelst_t	vnode_free_list		__cacheline_aligned;
static vnodelst_t	vnode_hold_list		__cacheline_aligned;
static kcondvar_t	vdrain_cv		__cacheline_aligned;

static vnodelst_t	vrele_list		__cacheline_aligned;
static kmutex_t		vrele_lock		__cacheline_aligned;
static kcondvar_t	vrele_cv		__cacheline_aligned;
static lwp_t *		vrele_lwp		__cacheline_aligned;
static int		vrele_pending		__cacheline_aligned;
static int		vrele_gen		__cacheline_aligned;

SLIST_HEAD(hashhead, vcache_node);
static struct {
	kmutex_t	lock;
	kcondvar_t	cv;
	u_long		hashmask;
	struct hashhead	*hashtab;
	pool_cache_t	pool;
}			vcache			__cacheline_aligned;

static int		cleanvnode(void);
static struct vcache_node *vcache_alloc(void);
static void		vcache_free(struct vcache_node *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vcache_reclaim(vnode_t *);
static void		vrelel(vnode_t *, int);
static void		vdrain_thread(void *);
static void		vrele_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount	*dead_rootmount;
extern int		(**dead_vnodeop_p)(void *);
extern struct vfsops	dead_vfsops;
257 1.1 rmind
258 1.51 hannken /* Vnode state operations and diagnostics. */
259 1.51 hannken
260 1.51 hannken static const char *
261 1.51 hannken vstate_name(enum vcache_state state)
262 1.51 hannken {
263 1.51 hannken
264 1.51 hannken switch (state) {
265 1.51 hannken case VN_MARKER:
266 1.51 hannken return "MARKER";
267 1.51 hannken case VN_LOADING:
268 1.51 hannken return "LOADING";
269 1.51 hannken case VN_ACTIVE:
270 1.51 hannken return "ACTIVE";
271 1.51 hannken case VN_BLOCKED:
272 1.51 hannken return "BLOCKED";
273 1.51 hannken case VN_RECLAIMING:
274 1.51 hannken return "RECLAIMING";
275 1.51 hannken case VN_RECLAIMED:
276 1.51 hannken return "RECLAIMED";
277 1.51 hannken default:
278 1.51 hannken return "ILLEGAL";
279 1.51 hannken }
280 1.51 hannken }
281 1.51 hannken
282 1.51 hannken #if defined(DIAGNOSTIC)
283 1.51 hannken
284 1.51 hannken #define VSTATE_GET(vp) \
285 1.51 hannken vstate_assert_get((vp), __func__, __LINE__)
286 1.51 hannken #define VSTATE_CHANGE(vp, from, to) \
287 1.51 hannken vstate_assert_change((vp), (from), (to), __func__, __LINE__)
288 1.51 hannken #define VSTATE_WAIT_STABLE(vp) \
289 1.51 hannken vstate_assert_wait_stable((vp), __func__, __LINE__)
290 1.51 hannken #define VSTATE_ASSERT(vp, state) \
291 1.51 hannken vstate_assert((vp), (state), __func__, __LINE__)
292 1.51 hannken
293 1.52 hannken static void
294 1.51 hannken vstate_assert(vnode_t *vp, enum vcache_state state, const char *func, int line)
295 1.51 hannken {
296 1.51 hannken struct vcache_node *node = VP_TO_VN(vp);
297 1.51 hannken
298 1.51 hannken KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
299 1.51 hannken
300 1.51 hannken if (__predict_true(node->vn_state == state))
301 1.51 hannken return;
302 1.51 hannken vnpanic(vp, "state is %s, expected %s at %s:%d",
303 1.51 hannken vstate_name(node->vn_state), vstate_name(state), func, line);
304 1.51 hannken }
305 1.51 hannken
306 1.52 hannken static enum vcache_state
307 1.51 hannken vstate_assert_get(vnode_t *vp, const char *func, int line)
308 1.51 hannken {
309 1.51 hannken struct vcache_node *node = VP_TO_VN(vp);
310 1.51 hannken
311 1.51 hannken KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
312 1.51 hannken if (node->vn_state == VN_MARKER)
313 1.51 hannken vnpanic(vp, "state is %s at %s:%d",
314 1.51 hannken vstate_name(node->vn_state), func, line);
315 1.51 hannken
316 1.51 hannken return node->vn_state;
317 1.51 hannken }
318 1.51 hannken
319 1.52 hannken static void
320 1.51 hannken vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
321 1.51 hannken {
322 1.51 hannken struct vcache_node *node = VP_TO_VN(vp);
323 1.51 hannken
324 1.51 hannken KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
325 1.51 hannken if (node->vn_state == VN_MARKER)
326 1.51 hannken vnpanic(vp, "state is %s at %s:%d",
327 1.51 hannken vstate_name(node->vn_state), func, line);
328 1.51 hannken
329 1.51 hannken while (node->vn_state != VN_ACTIVE && node->vn_state != VN_RECLAIMED)
330 1.51 hannken cv_wait(&vp->v_cv, vp->v_interlock);
331 1.51 hannken
332 1.51 hannken if (node->vn_state == VN_MARKER)
333 1.51 hannken vnpanic(vp, "state is %s at %s:%d",
334 1.51 hannken vstate_name(node->vn_state), func, line);
335 1.51 hannken }
336 1.51 hannken
337 1.52 hannken static void
338 1.51 hannken vstate_assert_change(vnode_t *vp, enum vcache_state from, enum vcache_state to,
339 1.51 hannken const char *func, int line)
340 1.51 hannken {
341 1.51 hannken struct vcache_node *node = VP_TO_VN(vp);
342 1.51 hannken
343 1.51 hannken KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
344 1.51 hannken if (from == VN_LOADING)
345 1.51 hannken KASSERTMSG(mutex_owned(&vcache.lock), "at %s:%d", func, line);
346 1.51 hannken
347 1.51 hannken if (from == VN_MARKER)
348 1.51 hannken vnpanic(vp, "from is %s at %s:%d",
349 1.51 hannken vstate_name(from), func, line);
350 1.51 hannken if (to == VN_MARKER)
351 1.51 hannken vnpanic(vp, "to is %s at %s:%d",
352 1.51 hannken vstate_name(to), func, line);
353 1.51 hannken if (node->vn_state != from)
354 1.51 hannken vnpanic(vp, "from is %s, expected %s at %s:%d\n",
355 1.51 hannken vstate_name(node->vn_state), vstate_name(from), func, line);
356 1.51 hannken
357 1.51 hannken node->vn_state = to;
358 1.51 hannken if (from == VN_LOADING)
359 1.51 hannken cv_broadcast(&vcache.cv);
360 1.51 hannken if (to == VN_ACTIVE || to == VN_RECLAIMED)
361 1.51 hannken cv_broadcast(&vp->v_cv);
362 1.51 hannken }
363 1.51 hannken
364 1.51 hannken #else /* defined(DIAGNOSTIC) */
365 1.51 hannken
366 1.51 hannken #define VSTATE_GET(vp) \
367 1.51 hannken (VP_TO_VN((vp))->vn_state)
368 1.51 hannken #define VSTATE_CHANGE(vp, from, to) \
369 1.51 hannken vstate_change((vp), (from), (to))
370 1.51 hannken #define VSTATE_WAIT_STABLE(vp) \
371 1.51 hannken vstate_wait_stable((vp))
372 1.51 hannken #define VSTATE_ASSERT(vp, state)
373 1.51 hannken
374 1.52 hannken static void
375 1.51 hannken vstate_wait_stable(vnode_t *vp)
376 1.51 hannken {
377 1.51 hannken struct vcache_node *node = VP_TO_VN(vp);
378 1.51 hannken
379 1.51 hannken while (node->vn_state != VN_ACTIVE && node->vn_state != VN_RECLAIMED)
380 1.51 hannken cv_wait(&vp->v_cv, vp->v_interlock);
381 1.51 hannken }
382 1.51 hannken
383 1.52 hannken static void
384 1.51 hannken vstate_change(vnode_t *vp, enum vcache_state from, enum vcache_state to)
385 1.51 hannken {
386 1.51 hannken struct vcache_node *node = VP_TO_VN(vp);
387 1.51 hannken
388 1.51 hannken node->vn_state = to;
389 1.51 hannken if (from == VN_LOADING)
390 1.51 hannken cv_broadcast(&vcache.cv);
391 1.51 hannken if (to == VN_ACTIVE || to == VN_RECLAIMED)
392 1.51 hannken cv_broadcast(&vp->v_cv);
393 1.51 hannken }
394 1.51 hannken
395 1.51 hannken #endif /* defined(DIAGNOSTIC) */
396 1.51 hannken
void
vfs_vnode_sysinit(void)
{
	int error __diagused;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag = IMNT_MPSAFE;

	mutex_init(&vnode_free_list_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vrele_list);

	vcache_init();

	mutex_init(&vrele_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vrele_cv, "vrele");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, NULL, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vrele_thread,
	    NULL, &vrele_lwp, "vrele");
	KASSERTMSG((error == 0), "kthread_create(vrele) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	struct vcache_node *node;
	vnode_t *vp;

	node = pool_cache_get(vcache.pool, PR_WAITOK);
	memset(node, 0, sizeof(*node));
	vp = VN_TO_VP(node);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	node->vn_state = VN_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	struct vcache_node *node;

	node = VP_TO_VN(vp);
	KASSERT(node->vn_state == VN_MARKER);
	uvm_obj_destroy(&vp->v_uobj, true);
	pool_cache_put(vcache.pool, node);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VP_TO_VN(vp)->vn_state == VN_MARKER);
}

/*
 * cleanvnode: grab a vnode from the freelist, clean and free it.
 *
 * => Releases vnode_free_list_lock.
 */
static int
cleanvnode(void)
{
	vnode_t *vp;
	vnodelst_t *listhd;
	struct mount *mp;

	KASSERT(mutex_owned(&vnode_free_list_lock));

	listhd = &vnode_free_list;
try_nextlist:
	TAILQ_FOREACH(vp, listhd, v_freelist) {
		/*
		 * It's safe to test v_usecount and v_iflag
		 * without holding the interlock here, since
		 * these vnodes should never appear on the
		 * lists.
		 */
		KASSERT(vp->v_usecount == 0);
		KASSERT(vp->v_freelisthd == listhd);

		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
			continue;
		if (!mutex_tryenter(vp->v_interlock)) {
			VOP_UNLOCK(vp);
			continue;
		}
		mp = vp->v_mount;
		if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
			mutex_exit(vp->v_interlock);
			VOP_UNLOCK(vp);
			continue;
		}
		break;
	}

	if (vp == NULL) {
		if (listhd == &vnode_free_list) {
			listhd = &vnode_hold_list;
			goto try_nextlist;
		}
		mutex_exit(&vnode_free_list_lock);
		return EBUSY;
	}

	/* Remove it from the freelist. */
	TAILQ_REMOVE(listhd, vp, v_freelist);
	vp->v_freelisthd = NULL;
	mutex_exit(&vnode_free_list_lock);

	KASSERT(vp->v_usecount == 0);

	/*
	 * The vnode is still associated with a file system, so we must
	 * clean it out before freeing it.  We need to add a reference
	 * before doing this.
	 */
	vp->v_usecount = 1;
	vcache_reclaim(vp);
	vrelel(vp, 0);
	fstrans_done(mp);

	return 0;
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes.
 */
static void
vdrain_thread(void *cookie)
{
	int error;

	mutex_enter(&vnode_free_list_lock);

	for (;;) {
		cv_timedwait(&vdrain_cv, &vnode_free_list_lock, hz);
		while (numvnodes > desiredvnodes) {
			error = cleanvnode();
			if (error)
				kpause("vndsbusy", false, hz, NULL);
			mutex_enter(&vnode_free_list_lock);
			if (error)
				break;
		}
	}
}

/*
 * Remove a vnode from its freelist.
 */
void
vremfree(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount == 0);

	/*
	 * Note that the reference count must not change until
	 * the vnode is removed.
	 */
	mutex_enter(&vnode_free_list_lock);
	if (vp->v_holdcnt > 0) {
		KASSERT(vp->v_freelisthd == &vnode_hold_list);
	} else {
		KASSERT(vp->v_freelisthd == &vnode_free_list);
	}
	TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
	vp->v_freelisthd = NULL;
	mutex_exit(&vnode_free_list_lock);
}

/*
 * vget: get a particular vnode from the free list, increment its reference
 * count and return it.
 *
 * => Must be called with v_interlock held.
 *
 * If state is VN_RECLAIMING, the vnode may be eliminated in vcache_reclaim().
 * In that case, we cannot grab the vnode, so the process is awakened when
 * the transition is completed, and an error returned to indicate that the
 * vnode is no longer usable.
 *
 * If state is VN_LOADING or VN_BLOCKED, wait until the vnode enters a
 * stable state (VN_ACTIVE or VN_RECLAIMED).
 */
int
vget(vnode_t *vp, int flags, bool waitok)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((flags & ~LK_NOWAIT) == 0);
	KASSERT(waitok == ((flags & LK_NOWAIT) == 0));

	/*
	 * Before adding a reference, we must remove the vnode
	 * from its freelist.
	 */
	if (vp->v_usecount == 0) {
		vremfree(vp);
		vp->v_usecount = 1;
	} else {
		atomic_inc_uint(&vp->v_usecount);
	}

	/*
	 * If the vnode is in the process of changing state we wait
	 * for the change to complete and take care not to return
	 * a clean vnode.
	 */
	if (! ISSET(flags, LK_NOWAIT))
		VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VN_RECLAIMED) {
		vrelel(vp, 0);
		return ENOENT;
	} else if (VSTATE_GET(vp) != VN_ACTIVE) {
		KASSERT(ISSET(flags, LK_NOWAIT));
		vrelel(vp, 0);
		return EBUSY;
	}

	/*
	 * Ok, we got it in good shape.
	 */
	VSTATE_ASSERT(vp, VN_ACTIVE);
	mutex_exit(vp->v_interlock);

	return 0;
}

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Try to drop a reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static inline bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = vp->v_usecount;; use = next) {
		if (use == 1) {
			return false;
		}
		KASSERT(use > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}
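
/*
 * Illustrative sketch (not part of the implementation) of the v_usecount
 * locking rules described at the top of this file: a non-zero -> non-zero
 * change may use a bare atomic operation, as vref() and vtryrele() do,
 * while any transition to or from zero must hold v_interlock, as vget()
 * and vrelel() do.
 *
 *	atomic_inc_uint(&vp->v_usecount);	// ok: caller already holds a
 *						// reference, count stays > 0
 *
 *	mutex_enter(vp->v_interlock);		// needed: count may be zero
 *	if (vp->v_usecount == 0) {
 *		vremfree(vp);
 *		vp->v_usecount = 1;
 *	} else
 *		atomic_inc_uint(&vp->v_usecount);
 *	mutex_exit(vp->v_interlock);
 */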

/*
 * Vnode release.  If the reference count drops to zero, call the inactive
 * routine and either return the vnode to the freelist or free it to the pool.
 */
static void
vrelel(vnode_t *vp, int flags)
{
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_freelisthd == NULL);

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VN_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count
	 * and unlock.
	 */
	if (vtryrele(vp)) {
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
	if (VSTATE_GET(vp) != VN_RECLAIMED) {
		recycle = false;

		/*
		 * XXX This ugly block can be largely eliminated if
		 * locking is pushed down into the file systems.
		 *
		 * Defer vnode release to vrele_thread if caller
		 * requests it explicitly or is the pagedaemon.
		 */
		if ((curlwp == uvm.pagedaemon_lwp) ||
		    (flags & VRELEL_ASYNC_RELE) != 0) {
			defer = true;
		} else if (curlwp == vrele_lwp) {
			/*
			 * We have to try harder.
			 */
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			KASSERTMSG((error == 0), "vn_lock failed: %d", error);
			mutex_enter(vp->v_interlock);
			defer = false;
		} else {
			/* If we can't acquire the lock, then defer. */
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp,
			    LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
			defer = (error != 0);
			mutex_enter(vp->v_interlock);
		}

		KASSERT(mutex_owned(vp->v_interlock));
		KASSERT(! (curlwp == vrele_lwp && defer));

		if (defer) {
			/*
			 * Defer reclaim to the kthread; it's not safe to
			 * clean it here.  We donate it our last reference.
			 */
			mutex_enter(&vrele_lock);
			TAILQ_INSERT_TAIL(&vrele_list, vp, v_freelist);
			if (++vrele_pending > (desiredvnodes >> 8))
				cv_signal(&vrele_cv);
			mutex_exit(&vrele_lock);
			mutex_exit(vp->v_interlock);
			return;
		}

		/*
		 * If the node got another reference while we
		 * released the interlock, don't try to inactivate it yet.
		 */
		if (__predict_false(vtryrele(vp))) {
			VOP_UNLOCK(vp);
			mutex_exit(vp->v_interlock);
			return;
		}
		VSTATE_CHANGE(vp, VN_ACTIVE, VN_BLOCKED);
		mutex_exit(vp->v_interlock);

		/*
		 * The vnode must not gain another reference while being
		 * deactivated.  If VOP_INACTIVE() indicates that
		 * the described file has been deleted, then recycle
		 * the vnode.
		 *
		 * Note that VOP_INACTIVE() will drop the vnode lock.
		 */
		VOP_INACTIVE(vp, &recycle);
		if (recycle) {
			/* vcache_reclaim() below will drop the lock. */
			if (vn_lock(vp, LK_EXCLUSIVE) != 0)
				recycle = false;
		}
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VN_BLOCKED, VN_ACTIVE);
		if (!recycle) {
			if (vtryrele(vp)) {
				mutex_exit(vp->v_interlock);
				return;
			}
		}

		/* Take care of space accounting. */
		if (vp->v_iflag & VI_EXECMAP) {
			atomic_add_int(&uvmexp.execpages,
			    -vp->v_uobj.uo_npages);
			atomic_add_int(&uvmexp.filepages,
			    vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			VSTATE_ASSERT(vp, VN_ACTIVE);
			vcache_reclaim(vp);
		}
		KASSERT(vp->v_usecount > 0);
	}

	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(vp->v_interlock);
		return;
	}

	if (VSTATE_GET(vp) == VN_RECLAIMED) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		KASSERT(vp->v_holdcnt == 0);
		KASSERT(vp->v_writecount == 0);
		mutex_exit(vp->v_interlock);
		vfs_insmntque(vp, NULL);
		if (vp->v_type == VBLK || vp->v_type == VCHR) {
			spec_node_destroy(vp);
		}
		vcache_free(VP_TO_VN(vp));
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		mutex_enter(&vnode_free_list_lock);
		if (vp->v_holdcnt > 0) {
			vp->v_freelisthd = &vnode_hold_list;
		} else {
			vp->v_freelisthd = &vnode_free_list;
		}
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
		mutex_exit(vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0);
}

/*
 * Asynchronous vnode release; the vnode is released in a different context.
 */
void
vrele_async(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC_RELE);
}

static void
vrele_thread(void *cookie)
{
	vnodelst_t skip_list;
	vnode_t *vp;
	struct mount *mp;

	TAILQ_INIT(&skip_list);

	mutex_enter(&vrele_lock);
	for (;;) {
		while (TAILQ_EMPTY(&vrele_list)) {
			vrele_gen++;
			cv_broadcast(&vrele_cv);
			cv_timedwait(&vrele_cv, &vrele_lock, hz);
			TAILQ_CONCAT(&vrele_list, &skip_list, v_freelist);
		}
		vp = TAILQ_FIRST(&vrele_list);
		mp = vp->v_mount;
		TAILQ_REMOVE(&vrele_list, vp, v_freelist);
		if (fstrans_start_nowait(mp, FSTRANS_LAZY) != 0) {
			TAILQ_INSERT_TAIL(&skip_list, vp, v_freelist);
			continue;
		}
		vrele_pending--;
		mutex_exit(&vrele_lock);

		/*
		 * If not the last reference, then ignore the vnode
		 * and look for more work.
		 */
		mutex_enter(vp->v_interlock);
		vrelel(vp, 0);
		fstrans_done(mp);
		mutex_enter(&vrele_lock);
	}
}

void
vrele_flush(void)
{
	int gen;

	mutex_enter(&vrele_lock);
	gen = vrele_gen;
	while (vrele_pending && gen == vrele_gen) {
		cv_broadcast(&vrele_cv);
		cv_wait(&vrele_cv, &vrele_lock);
	}
	mutex_exit(&vrele_lock);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 */
void
vref(vnode_t *vp)
{

	KASSERT(vp->v_usecount != 0);

	atomic_inc_uint(&vp->v_usecount);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0) {
		mutex_enter(&vnode_free_list_lock);
		KASSERT(vp->v_freelisthd == &vnode_free_list);
		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
		vp->v_freelisthd = &vnode_hold_list;
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
	}
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		mutex_enter(&vnode_free_list_lock);
		KASSERT(vp->v_freelisthd == &vnode_hold_list);
		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
		vp->v_freelisthd = &vnode_free_list;
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
	}
}

/*
 * Recycle an unused vnode if the caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{

	if (vn_lock(vp, LK_EXCLUSIVE) != 0)
		return false;

	mutex_enter(vp->v_interlock);

	if (vp->v_usecount != 1) {
		mutex_exit(vp->v_interlock);
		VOP_UNLOCK(vp);
		return false;
	}
	vcache_reclaim(vp);
	vrelel(vp, 0);
	return true;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vp->v_usecount > 0);

	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VN_RECLAIMED) {
		mutex_exit(vp->v_interlock);
		return;
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		mutex_exit(vp->v_interlock);
		vgone(vp);
		return;
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);
	}

	while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
		vgone(vq);
	}
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{

	if (vn_lock(vp, LK_EXCLUSIVE) != 0) {
		VSTATE_ASSERT(vp, VN_RECLAIMED);
		vrele(vp);
		return;
	}

	mutex_enter(vp->v_interlock);
	vcache_reclaim(vp);
	vrelel(vp, 0);
}

static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
	uint32_t hash = HASH32_BUF_INIT;

	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
	return hash;
}

static void
vcache_init(void)
{

	vcache.pool = pool_cache_init(sizeof(struct vcache_node), 0, 0, 0,
	    "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vcache.pool != NULL);
	mutex_init(&vcache.lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vcache.cv, "vcache");
	vcache.hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
	    &vcache.hashmask);
}

static void
vcache_reinit(void)
{
	int i;
	uint32_t hash;
	u_long oldmask, newmask;
	struct hashhead *oldtab, *newtab;
	struct vcache_node *node;

	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
	mutex_enter(&vcache.lock);
	oldtab = vcache.hashtab;
	oldmask = vcache.hashmask;
	vcache.hashtab = newtab;
	vcache.hashmask = newmask;
	for (i = 0; i <= oldmask; i++) {
		while ((node = SLIST_FIRST(&oldtab[i])) != NULL) {
			SLIST_REMOVE(&oldtab[i], node, vcache_node, vn_hash);
			hash = vcache_hash(&node->vn_key);
			SLIST_INSERT_HEAD(&newtab[hash & vcache.hashmask],
			    node, vn_hash);
		}
	}
	mutex_exit(&vcache.lock);
	hashdone(oldtab, HASH_SLIST, oldmask);
}

static inline struct vcache_node *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
	struct hashhead *hashp;
	struct vcache_node *node;

	KASSERT(mutex_owned(&vcache.lock));

	hashp = &vcache.hashtab[hash & vcache.hashmask];
	SLIST_FOREACH(node, hashp, vn_hash) {
		if (key->vk_mount != node->vn_key.vk_mount)
			continue;
		if (key->vk_key_len != node->vn_key.vk_key_len)
			continue;
		if (memcmp(key->vk_key, node->vn_key.vk_key, key->vk_key_len))
			continue;
		return node;
	}
	return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static struct vcache_node *
vcache_alloc(void)
{
	struct vcache_node *node;
	vnode_t *vp;

	node = pool_cache_get(vcache.pool, PR_WAITOK);
	memset(node, 0, sizeof(*node));

	/* SLIST_INIT(&node->vn_hash); */

	vp = VN_TO_VP(node);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	cv_init(&vp->v_cv, "vnode");
	/* LIST_INIT(&vp->v_nclist); */
	/* LIST_INIT(&vp->v_dnclist); */

	mutex_enter(&vnode_free_list_lock);
	numvnodes++;
	if (numvnodes > desiredvnodes + desiredvnodes / 10)
		cv_signal(&vdrain_cv);
	mutex_exit(&vnode_free_list_lock);

	rw_init(&vp->v_lock);
	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	node->vn_state = VN_LOADING;

	return node;
}

/*
 * Free an unused, unreferenced vcache node.
 */
static void
vcache_free(struct vcache_node *node)
{
	vnode_t *vp;

	vp = VN_TO_VP(node);

	KASSERT(vp->v_usecount == 0);

	rw_destroy(&vp->v_lock);
	mutex_enter(&vnode_free_list_lock);
	numvnodes--;
	mutex_exit(&vnode_free_list_lock);

	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	pool_cache_put(vcache.pool, node);
}

/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
	int error;
	uint32_t hash;
	const void *new_key;
	struct vnode *vp;
	struct vcache_key vcache_key;
	struct vcache_node *node, *new_node;

	new_key = NULL;
	*vpp = NULL;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

again:
	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&vcache_key, hash);

	/* If found, take a reference or retry. */
	if (__predict_true(node != NULL)) {
		/*
		 * If the vnode is loading we cannot take the v_interlock
		 * here as it might change during load (see uvm_obj_setlock()).
		 * As changing state from VN_LOADING requires both vcache.lock
		 * and v_interlock it is safe to test with vcache.lock held.
		 *
		 * Wait for vnodes changing state from VN_LOADING and retry.
		 */
		if (__predict_false(node->vn_state == VN_LOADING)) {
			cv_wait(&vcache.cv, &vcache.lock);
			mutex_exit(&vcache.lock);
			goto again;
		}
		vp = VN_TO_VP(node);
		mutex_enter(vp->v_interlock);
		mutex_exit(&vcache.lock);
		error = vget(vp, 0, true /* wait */);
		if (error == ENOENT)
			goto again;
		if (error == 0)
			*vpp = vp;
		KASSERT((error != 0) == (*vpp == NULL));
		return error;
	}
	mutex_exit(&vcache.lock);

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp, NULL);
	if (error)
		return error;
	new_node = vcache_alloc();
	new_node->vn_key = vcache_key;
	vp = VN_TO_VP(new_node);
	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&vcache_key, hash);
	if (node == NULL) {
		SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
		    new_node, vn_hash);
		node = new_node;
	}

	/* If another thread beat us inserting this node, retry. */
	if (node != new_node) {
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VN_LOADING, VN_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		goto again;
	}
	mutex_exit(&vcache.lock);

	/* Load the fs node.  Exclusive as new_node is VN_LOADING. */
	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
	if (error) {
		mutex_enter(&vcache.lock);
		SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
		    new_node, vcache_node, vn_hash);
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VN_LOADING, VN_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_key != NULL);
	KASSERT(memcmp(key, new_key, key_len) == 0);
	KASSERT(vp->v_op != NULL);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_unbusy(mp, true, NULL);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache.lock);
	new_node->vn_key.vk_key = new_key;
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VN_LOADING, VN_ACTIVE);
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache.lock);
	*vpp = vp;
	return 0;
}

/*
 * Create a new vnode / fs node pair and return it referenced through vpp.
 */
int
vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
    kauth_cred_t cred, struct vnode **vpp)
{
	int error;
	uint32_t hash;
	struct vnode *ovp, *vp;
	struct vcache_node *new_node;
	struct vcache_node *old_node __diagused;

	*vpp = NULL;

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp, NULL);
	if (error)
		return error;
	new_node = vcache_alloc();
	new_node->vn_key.vk_mount = mp;
	vp = VN_TO_VP(new_node);

	/* Create and load the fs node. */
	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred,
	    &new_node->vn_key.vk_key_len, &new_node->vn_key.vk_key);
	if (error) {
		mutex_enter(&vcache.lock);
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VN_LOADING, VN_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_node->vn_key.vk_key != NULL);
	KASSERT(vp->v_op != NULL);
	hash = vcache_hash(&new_node->vn_key);

	/* Wait for previous instance to be reclaimed, then insert new node. */
	mutex_enter(&vcache.lock);
	while ((old_node = vcache_hash_lookup(&new_node->vn_key, hash))) {
		ovp = VN_TO_VP(old_node);
		mutex_enter(ovp->v_interlock);
		mutex_exit(&vcache.lock);
		error = vget(ovp, 0, true /* wait */);
		KASSERT(error == ENOENT);
		mutex_enter(&vcache.lock);
	}
	SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
	    new_node, vn_hash);
	mutex_exit(&vcache.lock);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_unbusy(mp, true, NULL);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache.lock);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VN_LOADING, VN_ACTIVE);
	mutex_exit(&vcache.lock);
	mutex_exit(vp->v_interlock);
	*vpp = vp;
	return 0;
}
1376 1.40 hannken
1377 1.40 hannken /*
1378 1.37 hannken * Prepare key change: lock old and new cache node.
1379 1.37 hannken * Return an error if the new node already exists.
1380 1.37 hannken */
1381 1.37 hannken int
1382 1.37 hannken vcache_rekey_enter(struct mount *mp, struct vnode *vp,
1383 1.37 hannken const void *old_key, size_t old_key_len,
1384 1.37 hannken const void *new_key, size_t new_key_len)
1385 1.37 hannken {
1386 1.37 hannken uint32_t old_hash, new_hash;
1387 1.37 hannken struct vcache_key old_vcache_key, new_vcache_key;
1388 1.37 hannken struct vcache_node *node, *new_node;
1389 1.52 hannken struct vnode *tvp;
1390 1.37 hannken
1391 1.37 hannken old_vcache_key.vk_mount = mp;
1392 1.37 hannken old_vcache_key.vk_key = old_key;
1393 1.37 hannken old_vcache_key.vk_key_len = old_key_len;
1394 1.37 hannken old_hash = vcache_hash(&old_vcache_key);
1395 1.37 hannken
1396 1.37 hannken new_vcache_key.vk_mount = mp;
1397 1.37 hannken new_vcache_key.vk_key = new_key;
1398 1.37 hannken new_vcache_key.vk_key_len = new_key_len;
1399 1.37 hannken new_hash = vcache_hash(&new_vcache_key);
1400 1.37 hannken
1401 1.50 hannken new_node = vcache_alloc();
1402 1.37 hannken new_node->vn_key = new_vcache_key;
1403 1.52 hannken tvp = VN_TO_VP(new_node);
1404 1.37 hannken
1405 1.52 hannken /* Insert the locked new node, which serves as a placeholder. */
1406 1.37 hannken mutex_enter(&vcache.lock);
1407 1.37 hannken node = vcache_hash_lookup(&new_vcache_key, new_hash);
1408 1.37 hannken if (node != NULL) {
1409 1.52 hannken mutex_enter(tvp->v_interlock);
1410 1.52 hannken VSTATE_CHANGE(tvp, VN_LOADING, VN_RECLAIMED);
1411 1.37 hannken mutex_exit(&vcache.lock);
1412 1.52 hannken vrelel(tvp, 0);
1413 1.37 hannken return EEXIST;
1414 1.37 hannken }
1415 1.37 hannken SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
1416 1.37 hannken new_node, vn_hash);
1417 1.49 hannken
1418 1.49 hannken /* Lock old node. */
1419 1.37 hannken node = vcache_hash_lookup(&old_vcache_key, old_hash);
1420 1.37 hannken KASSERT(node != NULL);
1421 1.52 hannken KASSERT(VN_TO_VP(node) == vp);
1422 1.52 hannken mutex_enter(vp->v_interlock);
1423 1.52 hannken VSTATE_CHANGE(vp, VN_ACTIVE, VN_BLOCKED);
1424 1.37 hannken node->vn_key = old_vcache_key;
1425 1.52 hannken mutex_exit(vp->v_interlock);
1426 1.37 hannken mutex_exit(&vcache.lock);
1427 1.37 hannken return 0;
1428 1.37 hannken }
1429 1.37 hannken
1430 1.37 hannken /*
1431 1.37 hannken * Key change complete: remove old node and unlock new node.
1432 1.37 hannken */
1433 1.37 hannken void
1434 1.37 hannken vcache_rekey_exit(struct mount *mp, struct vnode *vp,
1435 1.37 hannken const void *old_key, size_t old_key_len,
1436 1.37 hannken const void *new_key, size_t new_key_len)
1437 1.37 hannken {
1438 1.37 hannken uint32_t old_hash, new_hash;
1439 1.37 hannken struct vcache_key old_vcache_key, new_vcache_key;
1440 1.49 hannken struct vcache_node *old_node, *new_node;
1441 1.52 hannken struct vnode *tvp;
1442 1.37 hannken
1443 1.37 hannken old_vcache_key.vk_mount = mp;
1444 1.37 hannken old_vcache_key.vk_key = old_key;
1445 1.37 hannken old_vcache_key.vk_key_len = old_key_len;
1446 1.37 hannken old_hash = vcache_hash(&old_vcache_key);
1447 1.37 hannken
1448 1.37 hannken new_vcache_key.vk_mount = mp;
1449 1.37 hannken new_vcache_key.vk_key = new_key;
1450 1.37 hannken new_vcache_key.vk_key_len = new_key_len;
1451 1.37 hannken new_hash = vcache_hash(&new_vcache_key);
1452 1.37 hannken
1453 1.37 hannken mutex_enter(&vcache.lock);
1454 1.49 hannken
1455 1.49 hannken /* Lookup old and new node. */
1456 1.49 hannken old_node = vcache_hash_lookup(&old_vcache_key, old_hash);
1457 1.49 hannken KASSERT(old_node != NULL);
1458 1.52 hannken KASSERT(VN_TO_VP(old_node) == vp);
1459 1.52 hannken mutex_enter(vp->v_interlock);
1460 1.52 hannken VSTATE_ASSERT(vp, VN_BLOCKED);
1461 1.52 hannken
1462 1.49 hannken new_node = vcache_hash_lookup(&new_vcache_key, new_hash);
1463 1.52 hannken KASSERT(new_node != NULL);
1464 1.49 hannken KASSERT(new_node->vn_key.vk_key_len == new_key_len);
1465 1.52 hannken tvp = VN_TO_VP(new_node);
1466 1.52 hannken mutex_enter(tvp->v_interlock);
1467 1.52 hannken VSTATE_ASSERT(tvp, VN_LOADING);
1468 1.49 hannken
1469 1.49 hannken /* Rekey old node and put it onto its new hashlist. */
1470 1.49 hannken old_node->vn_key = new_vcache_key;
1471 1.49 hannken if (old_hash != new_hash) {
1472 1.49 hannken SLIST_REMOVE(&vcache.hashtab[old_hash & vcache.hashmask],
1473 1.49 hannken old_node, vcache_node, vn_hash);
1474 1.49 hannken SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
1475 1.49 hannken old_node, vn_hash);
1476 1.49 hannken }
1477 1.52 hannken VSTATE_CHANGE(vp, VN_BLOCKED, VN_ACTIVE);
1478 1.52 hannken mutex_exit(vp->v_interlock);
1479 1.49 hannken
1480 1.49 hannken /* Remove the new node that was used as a placeholder. */
1481 1.49 hannken SLIST_REMOVE(&vcache.hashtab[new_hash & vcache.hashmask],
1482 1.49 hannken new_node, vcache_node, vn_hash);
1483 1.52 hannken VSTATE_CHANGE(tvp, VN_LOADING, VN_RECLAIMED);
1484 1.37 hannken mutex_exit(&vcache.lock);
1485 1.52 hannken vrelel(tvp, 0);
1486 1.37 hannken }
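
/*
 * Illustrative sketch of how the rekey pair above is meant to be used: a
 * file system whose cache key can change, say one that keys its nodes by
 * directory slot, brackets the on-disk move with enter/exit.  The
 * old_key/new_key variables and the surrounding rename routine are
 * hypothetical.
 *
 *	error = vcache_rekey_enter(mp, vp, &old_key, sizeof(old_key),
 *	    &new_key, sizeof(new_key));
 *	if (error != 0)
 *		return error;
 *
 *	(move the node on disk so it is found under new_key)
 *
 *	vcache_rekey_exit(mp, vp, &old_key, sizeof(old_key),
 *	    &new_key, sizeof(new_key));
 *
 * An EEXIST return from vcache_rekey_enter() means a node with the new
 * key is already cached and the key must not be changed.
 */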
1487 1.37 hannken
1488 1.37 hannken /*
1489 1.54 hannken * Disassociate the underlying file system from a vnode.
1490 1.54 hannken *
1491 1.54 hannken * Must be called with vnode locked and will return unlocked.
1492 1.54 hannken * Must be called with the interlock held, and will return with it held.
1493 1.54 hannken */
1494 1.54 hannken static void
1495 1.54 hannken vcache_reclaim(vnode_t *vp)
1496 1.54 hannken {
1497 1.54 hannken lwp_t *l = curlwp;
1498 1.55 hannken struct vcache_node *node = VP_TO_VN(vp);
1499 1.55 hannken uint32_t hash;
1500 1.55 hannken uint8_t temp_buf[64], *temp_key;
1501 1.55 hannken size_t temp_key_len;
1502 1.54 hannken bool recycle, active;
1503 1.54 hannken int error;
1504 1.54 hannken
1505 1.54 hannken KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
1506 1.54 hannken VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1507 1.54 hannken KASSERT(mutex_owned(vp->v_interlock));
1508 1.54 hannken KASSERT(vp->v_usecount != 0);
1509 1.54 hannken
1510 1.54 hannken active = (vp->v_usecount > 1);
1511 1.55 hannken temp_key_len = node->vn_key.vk_key_len;
1512 1.54 hannken /*
1513 1.54 hannken * Prevent the vnode from being recycled or brought into use
1514 1.54 hannken * while we clean it out.
1515 1.54 hannken */
1516 1.54 hannken VSTATE_CHANGE(vp, VN_ACTIVE, VN_RECLAIMING);
1517 1.54 hannken if (vp->v_iflag & VI_EXECMAP) {
1518 1.54 hannken atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
1519 1.54 hannken atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
1520 1.54 hannken }
1521 1.54 hannken vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
1522 1.54 hannken mutex_exit(vp->v_interlock);
1523 1.54 hannken
1524 1.55 hannken /* Replace the vnode key with a temporary copy. */
1525 1.55 hannken if (node->vn_key.vk_key_len > sizeof(temp_buf)) {
1526 1.55 hannken temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
1527 1.55 hannken } else {
1528 1.55 hannken temp_key = temp_buf;
1529 1.55 hannken }
1530 1.55 hannken mutex_enter(&vcache.lock);
1531 1.55 hannken memcpy(temp_key, node->vn_key.vk_key, temp_key_len);
1532 1.55 hannken node->vn_key.vk_key = temp_key;
1533 1.55 hannken mutex_exit(&vcache.lock);
1534 1.55 hannken
1535 1.54 hannken /*
1536 1.54 hannken * Clean out any cached data associated with the vnode.
1537 1.54 hannken * If purging an active vnode, it must be closed and
1538 1.54 hannken * deactivated before being reclaimed. Note that the
1539 1.54 hannken * VOP_INACTIVE call will unlock the vnode.
1540 1.54 hannken */
1541 1.54 hannken error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
1542 1.54 hannken if (error != 0) {
1543 1.54 hannken if (wapbl_vphaswapbl(vp))
1544 1.54 hannken WAPBL_DISCARD(wapbl_vptomp(vp));
1545 1.54 hannken error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
1546 1.54 hannken }
1547 1.54 hannken KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
1548 1.54 hannken KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1549 1.54 hannken if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
1550 1.54 hannken spec_node_revoke(vp);
1551 1.54 hannken }
1552 1.54 hannken if (active) {
1553 1.54 hannken VOP_INACTIVE(vp, &recycle);
1554 1.54 hannken } else {
1555 1.54 hannken /*
1556 1.54 hannken * Any other processes trying to obtain this lock must first
1557 1.54 hannken * wait for VN_RECLAIMED, then call the new lock operation.
1558 1.54 hannken */
1559 1.54 hannken VOP_UNLOCK(vp);
1560 1.54 hannken }
1561 1.54 hannken
1562 1.54 hannken /* Disassociate the underlying file system from the vnode. */
1563 1.54 hannken if (VOP_RECLAIM(vp)) {
1564 1.54 hannken vnpanic(vp, "%s: cannot reclaim", __func__);
1565 1.54 hannken }
1566 1.54 hannken
1567 1.54 hannken KASSERT(vp->v_data == NULL);
1568 1.54 hannken KASSERT(vp->v_uobj.uo_npages == 0);
1569 1.54 hannken
1570 1.54 hannken if (vp->v_type == VREG && vp->v_ractx != NULL) {
1571 1.54 hannken uvm_ra_freectx(vp->v_ractx);
1572 1.54 hannken vp->v_ractx = NULL;
1573 1.54 hannken }
1574 1.54 hannken
1575 1.54 hannken /* Purge name cache. */
1576 1.54 hannken cache_purge(vp);
1577 1.54 hannken
1578 1.54 hannken /* Move to dead mount. */
1579 1.54 hannken vp->v_vflag &= ~VV_ROOT;
1580 1.54 hannken atomic_inc_uint(&dead_rootmount->mnt_refcnt);
1581 1.54 hannken vfs_insmntque(vp, dead_rootmount);
1582 1.54 hannken
1583 1.55 hannken /* Remove from vnode cache. */
1584 1.55 hannken hash = vcache_hash(&node->vn_key);
1585 1.55 hannken mutex_enter(&vcache.lock);
1586 1.55 hannken KASSERT(node == vcache_hash_lookup(&node->vn_key, hash));
1587 1.55 hannken SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
1588 1.55 hannken node, vcache_node, vn_hash);
1589 1.55 hannken mutex_exit(&vcache.lock);
1590 1.55 hannken if (temp_key != temp_buf)
1591 1.55 hannken kmem_free(temp_key, temp_key_len);
1592 1.55 hannken
1593 1.54 hannken /* Done with purge, notify sleepers of the grim news. */
1594 1.54 hannken mutex_enter(vp->v_interlock);
1595 1.54 hannken vp->v_op = dead_vnodeop_p;
1596 1.54 hannken vp->v_vflag |= VV_LOCKSWORK;
1597 1.54 hannken VSTATE_CHANGE(vp, VN_RECLAIMING, VN_RECLAIMED);
1598 1.54 hannken vp->v_tag = VT_NON;
1599 1.54 hannken KNOTE(&vp->v_klist, NOTE_REVOKE);
1600 1.54 hannken
1601 1.54 hannken KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1602 1.54 hannken }
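
/*
 * For orientation, the vnode state transitions driven by the routines
 * above, as enforced by VSTATE_CHANGE(), are:
 *
 *	VN_LOADING    -> VN_ACTIVE	vcache_new() succeeded
 *	VN_LOADING    -> VN_RECLAIMED	vcache_new() failed, or the rekey
 *					placeholder node is discarded
 *	VN_ACTIVE     -> VN_BLOCKED	vcache_rekey_enter()
 *	VN_BLOCKED    -> VN_ACTIVE	vcache_rekey_exit()
 *	VN_ACTIVE     -> VN_RECLAIMING	vcache_reclaim() starts
 *	VN_RECLAIMING -> VN_RECLAIMED	vcache_reclaim() is done
 */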
1603 1.54 hannken
1604 1.54 hannken /*
1605 1.50 hannken * Print a vcache node.
1606 1.50 hannken */
1607 1.50 hannken void
1608 1.50 hannken vcache_print(vnode_t *vp, const char *prefix, void (*pr)(const char *, ...))
1609 1.50 hannken {
1610 1.50 hannken int n;
1611 1.50 hannken const uint8_t *cp;
1612 1.50 hannken struct vcache_node *node;
1613 1.50 hannken
1614 1.50 hannken node = VP_TO_VN(vp);
1615 1.50 hannken n = node->vn_key.vk_key_len;
1616 1.50 hannken cp = node->vn_key.vk_key;
1617 1.50 hannken
1618 1.51 hannken (*pr)("%sstate %s, key(%d)", prefix, vstate_name(node->vn_state), n);
1619 1.50 hannken
1620 1.50 hannken while (n-- > 0)
1621 1.50 hannken (*pr)(" %02x", *cp++);
1622 1.50 hannken (*pr)("\n");
1623 1.36 hannken }
1624 1.36 hannken
1625 1.1 rmind /*
1626 1.1 rmind * Update the outstanding I/O count and wake up any waiters once it drops to zero.
1627 1.1 rmind */
1628 1.1 rmind void
1629 1.1 rmind vwakeup(struct buf *bp)
1630 1.1 rmind {
1631 1.1 rmind vnode_t *vp;
1632 1.1 rmind
1633 1.1 rmind if ((vp = bp->b_vp) == NULL)
1634 1.1 rmind return;
1635 1.1 rmind
1636 1.9 rmind KASSERT(bp->b_objlock == vp->v_interlock);
1637 1.1 rmind KASSERT(mutex_owned(bp->b_objlock));
1638 1.1 rmind
1639 1.1 rmind if (--vp->v_numoutput < 0)
1640 1.11 christos vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
1641 1.1 rmind if (vp->v_numoutput == 0)
1642 1.1 rmind cv_broadcast(&vp->v_cv);
1643 1.1 rmind }
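
/*
 * Illustrative sketch of the other half of the protocol: writers bump
 * v_numoutput under the vnode interlock before issuing asynchronous I/O,
 * completion ends up calling vwakeup(), and a flush path can then drain
 * the count as below.  wait_for_output() is a hypothetical helper.
 *
 *	static void
 *	wait_for_output(vnode_t *vp)
 *	{
 *
 *		mutex_enter(vp->v_interlock);
 *		while (vp->v_numoutput > 0)
 *			cv_wait(&vp->v_cv, vp->v_interlock);
 *		mutex_exit(vp->v_interlock);
 *	}
 */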
1644 1.1 rmind
1645 1.1 rmind /*
1646 1.35 hannken * Test a vnode for being or becoming dead. Returns one of:
1647 1.35 hannken * EBUSY: vnode is becoming dead; returned only when "flags == VDEAD_NOWAIT".
1648 1.35 hannken * ENOENT: vnode is dead.
1649 1.35 hannken * 0: otherwise.
1650 1.35 hannken *
1651 1.35 hannken * Whenever this function returns a non-zero value all future
1652 1.35 hannken * calls will also return a non-zero value.
1653 1.35 hannken */
1654 1.35 hannken int
1655 1.35 hannken vdead_check(struct vnode *vp, int flags)
1656 1.35 hannken {
1657 1.35 hannken
1658 1.35 hannken KASSERT(mutex_owned(vp->v_interlock));
1659 1.35 hannken
1660 1.52 hannken if (! ISSET(flags, VDEAD_NOWAIT))
1661 1.52 hannken VSTATE_WAIT_STABLE(vp);
1662 1.1 rmind
1663 1.52 hannken if (VSTATE_GET(vp) == VN_RECLAIMING) {
1664 1.52 hannken KASSERT(ISSET(flags, VDEAD_NOWAIT));
1665 1.52 hannken return EBUSY;
1666 1.52 hannken } else if (VSTATE_GET(vp) == VN_RECLAIMED) {
1667 1.52 hannken return ENOENT;
1668 1.52 hannken }
1669 1.1 rmind
1670 1.52 hannken return 0;
1671 1.1 rmind }
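
/*
 * Illustrative sketch of a caller that must not sleep: it probes with
 * VDEAD_NOWAIT and treats both error values as "do not use this vnode".
 * A caller that may sleep passes 0 and only has to handle ENOENT.
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		return error;
 */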
1672 1.1 rmind
1673 1.1 rmind int
1674 1.3 rmind vfs_drainvnodes(long target)
1675 1.1 rmind {
1676 1.12 hannken int error;
1677 1.12 hannken
1678 1.12 hannken mutex_enter(&vnode_free_list_lock);
1679 1.1 rmind
1680 1.1 rmind while (numvnodes > target) {
1681 1.12 hannken error = cleanvnode();
1682 1.12 hannken if (error != 0)
1683 1.12 hannken return error;
1684 1.1 rmind mutex_enter(&vnode_free_list_lock);
1685 1.1 rmind }
1686 1.12 hannken
1687 1.12 hannken mutex_exit(&vnode_free_list_lock);
1688 1.12 hannken
1689 1.36 hannken vcache_reinit();
1690 1.36 hannken
1691 1.1 rmind return 0;
1692 1.1 rmind }
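
/*
 * Illustrative sketch: the expected caller is the path that lowers the
 * global vnode limit, which asks for the cache to be trimmed down to the
 * new target (new_max is hypothetical):
 *
 *	error = vfs_drainvnodes(new_max);
 *	if (error != 0)
 *		return error;
 */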
1693 1.1 rmind
1694 1.1 rmind void
1695 1.11 christos vnpanic(vnode_t *vp, const char *fmt, ...)
1696 1.1 rmind {
1697 1.11 christos va_list ap;
1698 1.11 christos
1699 1.1 rmind #ifdef DIAGNOSTIC
1700 1.1 rmind vprint(NULL, vp);
1701 1.1 rmind #endif
1702 1.11 christos va_start(ap, fmt);
1703 1.11 christos vpanic(fmt, ap);
1704 1.11 christos va_end(ap);
1705 1.1 rmind }
1706