1 1.62 hannken /* $NetBSD: vfs_vnode.c,v 1.62 2016/12/14 15:48:55 hannken Exp $ */
2 1.1 rmind
3 1.1 rmind /*-
4 1.2 rmind * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
5 1.1 rmind * All rights reserved.
6 1.1 rmind *
7 1.1 rmind * This code is derived from software contributed to The NetBSD Foundation
8 1.1 rmind * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 1.1 rmind * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
10 1.1 rmind *
11 1.1 rmind * Redistribution and use in source and binary forms, with or without
12 1.1 rmind * modification, are permitted provided that the following conditions
13 1.1 rmind * are met:
14 1.1 rmind * 1. Redistributions of source code must retain the above copyright
15 1.1 rmind * notice, this list of conditions and the following disclaimer.
16 1.1 rmind * 2. Redistributions in binary form must reproduce the above copyright
17 1.1 rmind * notice, this list of conditions and the following disclaimer in the
18 1.1 rmind * documentation and/or other materials provided with the distribution.
19 1.1 rmind *
20 1.1 rmind * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 1.1 rmind * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 1.1 rmind * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 1.1 rmind * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 1.1 rmind * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 1.1 rmind * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 1.1 rmind * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 1.1 rmind * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 1.1 rmind * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 1.1 rmind * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 1.1 rmind * POSSIBILITY OF SUCH DAMAGE.
31 1.1 rmind */
32 1.1 rmind
33 1.1 rmind /*
34 1.1 rmind * Copyright (c) 1989, 1993
35 1.1 rmind * The Regents of the University of California. All rights reserved.
36 1.1 rmind * (c) UNIX System Laboratories, Inc.
37 1.1 rmind * All or some portions of this file are derived from material licensed
38 1.1 rmind * to the University of California by American Telephone and Telegraph
39 1.1 rmind * Co. or Unix System Laboratories, Inc. and are reproduced herein with
40 1.1 rmind * the permission of UNIX System Laboratories, Inc.
41 1.1 rmind *
42 1.1 rmind * Redistribution and use in source and binary forms, with or without
43 1.1 rmind * modification, are permitted provided that the following conditions
44 1.1 rmind * are met:
45 1.1 rmind * 1. Redistributions of source code must retain the above copyright
46 1.1 rmind * notice, this list of conditions and the following disclaimer.
47 1.1 rmind * 2. Redistributions in binary form must reproduce the above copyright
48 1.1 rmind * notice, this list of conditions and the following disclaimer in the
49 1.1 rmind * documentation and/or other materials provided with the distribution.
50 1.1 rmind * 3. Neither the name of the University nor the names of its contributors
51 1.1 rmind * may be used to endorse or promote products derived from this software
52 1.1 rmind * without specific prior written permission.
53 1.1 rmind *
54 1.1 rmind * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 1.1 rmind * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 1.1 rmind * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 1.1 rmind * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 1.1 rmind * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 1.1 rmind * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 1.1 rmind * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 1.1 rmind * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 1.1 rmind * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 1.1 rmind * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 1.1 rmind * SUCH DAMAGE.
65 1.1 rmind *
66 1.1 rmind * @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
67 1.1 rmind */
68 1.1 rmind
69 1.1 rmind /*
70 1.8 rmind * The vnode cache subsystem.
71 1.1 rmind *
72 1.8 rmind * Life-cycle
73 1.1 rmind *
74 1.8 rmind * Normally, there are two points where new vnodes are created:
75 1.8 rmind * VOP_CREATE(9) and VOP_LOOKUP(9). The life-cycle of a vnode
76 1.8 rmind * starts in one of the following ways:
77 1.8 rmind *
78 1.45 hannken * - Allocation, via vcache_get(9) or vcache_new(9).
79 1.8 rmind * - Reclamation of inactive vnode, via vget(9).
80 1.8 rmind *
81 1.16 rmind * Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
82 1.16 rmind * was another, traditional way. Currently, only the draining thread
83 1.16 rmind * recycles vnodes. This behaviour might be revisited.
84 1.16 rmind *
85 1.8 rmind * The life-cycle ends when the last reference is dropped, usually
86 1.8 rmind * in VOP_REMOVE(9). In such a case, VOP_INACTIVE(9) is called to inform
87 1.8 rmind * the file system that the vnode is inactive. Via this call, the file
88 1.16 rmind * system indicates whether the vnode can be recycled (usually, it checks
89 1.16 rmind * its own references, e.g. the link count, or whether the file was removed).
90 1.8 rmind *
91 1.8 rmind * Depending on the indication, the vnode can be put onto a free list
92 1.54 hannken * (cache), or cleaned via vcache_reclaim(), which calls VOP_RECLAIM(9)
93 1.54 hannken * to disassociate the underlying file system from the vnode, and
94 1.54 hannken * finally destroyed.
95 1.8 rmind *
96 1.52 hannken * Vnode state
97 1.52 hannken *
98 1.52 hannken * Vnode is always in one of six states:
99 1.52 hannken * - MARKER This is a marker vnode to help list traversal. It
100 1.52 hannken * will never change its state.
101 1.52 hannken * - LOADING Vnode is associating with the underlying file system
102 1.52 hannken * and is not yet ready to use.
103 1.52 hannken * - ACTIVE Vnode has an associated underlying file system and is
104 1.52 hannken * ready to use.
105 1.52 hannken * - BLOCKED Vnode is active but cannot get new references.
106 1.52 hannken * - RECLAIMING Vnode is disassociating from the underlying file
107 1.52 hannken * system.
108 1.52 hannken * - RECLAIMED Vnode has disassociated from the underlying file
109 1.52 hannken * system and is dead.
110 1.52 hannken *
111 1.52 hannken * Valid state changes are:
112 1.52 hannken * LOADING -> ACTIVE
113 1.52 hannken * Vnode has been initialised in vcache_get() or
114 1.52 hannken * vcache_new() and is ready to use.
115 1.52 hannken * ACTIVE -> RECLAIMING
116 1.52 hannken * Vnode starts disassociation from underlying file
117 1.54 hannken * system in vcache_reclaim().
118 1.52 hannken * RECLAIMING -> RECLAIMED
119 1.52 hannken * Vnode finished disassociation from underlying file
120 1.54 hannken * system in vcache_reclaim().
121 1.52 hannken * ACTIVE -> BLOCKED
122 1.52 hannken * Either vcache_rekey*() is changing the vnode key or
123 1.52 hannken * vrelel() is about to call VOP_INACTIVE().
124 1.52 hannken * BLOCKED -> ACTIVE
125 1.52 hannken * The block condition is over.
126 1.52 hannken * LOADING -> RECLAIMED
127 1.52 hannken * Either vcache_get() or vcache_new() failed to
128 1.52 hannken * associate the underlying file system or vcache_rekey*()
129 1.52 hannken * drops a vnode used as placeholder.
130 1.52 hannken *
131 1.52 hannken * Of these states LOADING, BLOCKED and RECLAIMING are intermediate
132 1.52 hannken * and it is possible to wait for state change.
133 1.52 hannken *
134 1.52 hannken * State is protected with v_interlock with one exception:
135 1.52 hannken * to change from LOADING both v_interlock and vcache.lock must be held
136 1.52 hannken * so it is possible to check "state == LOADING" without holding
137 1.52 hannken * v_interlock. See vcache_get() for details.
138 1.52 hannken *
139 1.8 rmind * Reference counting
140 1.8 rmind *
141 1.8 rmind * A vnode is considered active if its reference count
142 1.8 rmind * (vnode_t::v_usecount) is non-zero. It is maintained using the
143 1.8 rmind * vref(9), vrele(9) and vput(9) routines. Common points holding
144 1.8 rmind * references are e.g. file openings, current working directory, mount points.
145 1.8 rmind *
146 1.8 rmind * Note on v_usecount and its locking
147 1.8 rmind *
148 1.8 rmind * At nearly all points where it is known that v_usecount could be zero,
149 1.8 rmind * vnode_t::v_interlock will be held. To change v_usecount away
150 1.8 rmind * from zero, the interlock must be held. To change it from a non-zero
151 1.8 rmind * value to zero, the interlock must again be held.
152 1.8 rmind *
153 1.24 hannken * Changing the usecount from a non-zero value to a non-zero value can
154 1.24 hannken * safely be done using atomic operations, without the interlock held.
155 1.8 rmind *
156 1.1 rmind */
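/*
 * Illustrative sketch (not part of the original file): how a file system
 * might obtain and release a vnode through the cache described above.
 * The function name example_lookup and the inode-number key are
 * hypothetical; vcache_get(), vn_lock(), vrele() and vput() are the
 * interfaces documented in this file and in vnode(9).
 *
 *	int
 *	example_lookup(struct mount *mp, ino_t ino, struct vnode **vpp)
 *	{
 *		struct vnode *vp;
 *		int error;
 *
 *		// Get a referenced vnode / fs node pair by key; on a cache
 *		// miss the fs node is loaded via VFS_LOADVNODE().
 *		error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *		if (error)
 *			return error;
 *		// The reference alone does not lock the vnode.
 *		error = vn_lock(vp, LK_EXCLUSIVE);
 *		if (error) {
 *			vrele(vp);
 *			return error;
 *		}
 *		*vpp = vp;	// caller releases with vput(): unlock + vrele
 *		return 0;
 *	}
 */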
157 1.1 rmind
158 1.1 rmind #include <sys/cdefs.h>
159 1.62 hannken __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.62 2016/12/14 15:48:55 hannken Exp $");
160 1.1 rmind
161 1.1 rmind #include <sys/param.h>
162 1.1 rmind #include <sys/kernel.h>
163 1.1 rmind
164 1.1 rmind #include <sys/atomic.h>
165 1.1 rmind #include <sys/buf.h>
166 1.1 rmind #include <sys/conf.h>
167 1.1 rmind #include <sys/device.h>
168 1.36 hannken #include <sys/hash.h>
169 1.1 rmind #include <sys/kauth.h>
170 1.1 rmind #include <sys/kmem.h>
171 1.1 rmind #include <sys/kthread.h>
172 1.1 rmind #include <sys/module.h>
173 1.1 rmind #include <sys/mount.h>
174 1.1 rmind #include <sys/namei.h>
175 1.1 rmind #include <sys/syscallargs.h>
176 1.1 rmind #include <sys/sysctl.h>
177 1.1 rmind #include <sys/systm.h>
178 1.58 hannken #include <sys/vnode_impl.h>
179 1.1 rmind #include <sys/wapbl.h>
180 1.24 hannken #include <sys/fstrans.h>
181 1.1 rmind
182 1.1 rmind #include <uvm/uvm.h>
183 1.1 rmind #include <uvm/uvm_readahead.h>
184 1.1 rmind
185 1.23 hannken /* Flags to vrelel. */
186 1.23 hannken #define VRELEL_ASYNC_RELE 0x0001 /* Always defer to vrele thread. */
187 1.23 hannken
188 1.6 rmind u_int numvnodes __cacheline_aligned;
189 1.1 rmind
190 1.16 rmind /*
191 1.16 rmind * There are two free lists: one is for vnodes which have no buffer/page
192 1.16 rmind * references and one for those which do (i.e. v_holdcnt is non-zero).
193 1.16 rmind * The vnode recycling mechanism first attempts to reclaim from the former list.
194 1.16 rmind */
195 1.6 rmind static kmutex_t vnode_free_list_lock __cacheline_aligned;
196 1.6 rmind static vnodelst_t vnode_free_list __cacheline_aligned;
197 1.6 rmind static vnodelst_t vnode_hold_list __cacheline_aligned;
198 1.16 rmind static kcondvar_t vdrain_cv __cacheline_aligned;
199 1.16 rmind
200 1.6 rmind static vnodelst_t vrele_list __cacheline_aligned;
201 1.6 rmind static kmutex_t vrele_lock __cacheline_aligned;
202 1.6 rmind static kcondvar_t vrele_cv __cacheline_aligned;
203 1.6 rmind static lwp_t * vrele_lwp __cacheline_aligned;
204 1.6 rmind static int vrele_pending __cacheline_aligned;
205 1.6 rmind static int vrele_gen __cacheline_aligned;
206 1.1 rmind
207 1.57 hannken SLIST_HEAD(hashhead, vnode_impl);
208 1.36 hannken static struct {
209 1.36 hannken kmutex_t lock;
210 1.51 hannken kcondvar_t cv;
211 1.61 hannken u_int hashsize;
212 1.36 hannken u_long hashmask;
213 1.38 matt struct hashhead *hashtab;
214 1.36 hannken pool_cache_t pool;
215 1.36 hannken } vcache __cacheline_aligned;
216 1.36 hannken
217 1.12 hannken static int cleanvnode(void);
218 1.57 hannken static vnode_impl_t *vcache_alloc(void);
219 1.57 hannken static void vcache_free(vnode_impl_t *);
220 1.36 hannken static void vcache_init(void);
221 1.36 hannken static void vcache_reinit(void);
222 1.54 hannken static void vcache_reclaim(vnode_t *);
223 1.23 hannken static void vrelel(vnode_t *, int);
224 1.12 hannken static void vdrain_thread(void *);
225 1.1 rmind static void vrele_thread(void *);
226 1.11 christos static void vnpanic(vnode_t *, const char *, ...)
227 1.18 christos __printflike(2, 3);
228 1.1 rmind
229 1.1 rmind /* Routines having to do with the management of the vnode table. */
230 1.44 hannken extern struct mount *dead_rootmount;
231 1.1 rmind extern int (**dead_vnodeop_p)(void *);
232 1.31 hannken extern struct vfsops dead_vfsops;
233 1.1 rmind
234 1.51 hannken /* Vnode state operations and diagnostics. */
235 1.51 hannken
236 1.51 hannken #if defined(DIAGNOSTIC)
237 1.51 hannken
238 1.51 hannken #define VSTATE_GET(vp) \
239 1.51 hannken vstate_assert_get((vp), __func__, __LINE__)
240 1.51 hannken #define VSTATE_CHANGE(vp, from, to) \
241 1.51 hannken vstate_assert_change((vp), (from), (to), __func__, __LINE__)
242 1.51 hannken #define VSTATE_WAIT_STABLE(vp) \
243 1.51 hannken vstate_assert_wait_stable((vp), __func__, __LINE__)
244 1.51 hannken #define VSTATE_ASSERT(vp, state) \
245 1.51 hannken vstate_assert((vp), (state), __func__, __LINE__)
246 1.51 hannken
247 1.52 hannken static void
248 1.57 hannken vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line)
249 1.51 hannken {
250 1.57 hannken vnode_impl_t *node = VNODE_TO_VIMPL(vp);
251 1.51 hannken
252 1.51 hannken KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
253 1.51 hannken
254 1.57 hannken if (__predict_true(node->vi_state == state))
255 1.51 hannken return;
256 1.51 hannken vnpanic(vp, "state is %s, expected %s at %s:%d",
257 1.57 hannken vstate_name(node->vi_state), vstate_name(state), func, line);
258 1.51 hannken }
259 1.51 hannken
260 1.57 hannken static enum vnode_state
261 1.51 hannken vstate_assert_get(vnode_t *vp, const char *func, int line)
262 1.51 hannken {
263 1.57 hannken vnode_impl_t *node = VNODE_TO_VIMPL(vp);
264 1.51 hannken
265 1.51 hannken KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
266 1.57 hannken if (node->vi_state == VS_MARKER)
267 1.51 hannken vnpanic(vp, "state is %s at %s:%d",
268 1.57 hannken vstate_name(node->vi_state), func, line);
269 1.51 hannken
270 1.57 hannken return node->vi_state;
271 1.51 hannken }
272 1.51 hannken
273 1.52 hannken static void
274 1.51 hannken vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
275 1.51 hannken {
276 1.57 hannken vnode_impl_t *node = VNODE_TO_VIMPL(vp);
277 1.51 hannken
278 1.51 hannken KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
279 1.57 hannken if (node->vi_state == VS_MARKER)
280 1.51 hannken vnpanic(vp, "state is %s at %s:%d",
281 1.57 hannken vstate_name(node->vi_state), func, line);
282 1.51 hannken
283 1.57 hannken while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
284 1.51 hannken cv_wait(&vp->v_cv, vp->v_interlock);
285 1.51 hannken
286 1.57 hannken if (node->vi_state == VS_MARKER)
287 1.51 hannken vnpanic(vp, "state is %s at %s:%d",
288 1.57 hannken vstate_name(node->vi_state), func, line);
289 1.51 hannken }
290 1.51 hannken
291 1.52 hannken static void
292 1.57 hannken vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
293 1.51 hannken const char *func, int line)
294 1.51 hannken {
295 1.57 hannken vnode_impl_t *node = VNODE_TO_VIMPL(vp);
296 1.51 hannken
297 1.51 hannken KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
298 1.57 hannken if (from == VS_LOADING)
299 1.51 hannken KASSERTMSG(mutex_owned(&vcache.lock), "at %s:%d", func, line);
300 1.51 hannken
301 1.57 hannken if (from == VS_MARKER)
302 1.51 hannken vnpanic(vp, "from is %s at %s:%d",
303 1.51 hannken vstate_name(from), func, line);
304 1.57 hannken if (to == VS_MARKER)
305 1.51 hannken vnpanic(vp, "to is %s at %s:%d",
306 1.51 hannken vstate_name(to), func, line);
307 1.57 hannken if (node->vi_state != from)
308 1.51 hannken vnpanic(vp, "from is %s, expected %s at %s:%d\n",
309 1.57 hannken vstate_name(node->vi_state), vstate_name(from), func, line);
310 1.51 hannken
311 1.57 hannken node->vi_state = to;
312 1.57 hannken if (from == VS_LOADING)
313 1.51 hannken cv_broadcast(&vcache.cv);
314 1.57 hannken if (to == VS_ACTIVE || to == VS_RECLAIMED)
315 1.51 hannken cv_broadcast(&vp->v_cv);
316 1.51 hannken }
317 1.51 hannken
318 1.51 hannken #else /* defined(DIAGNOSTIC) */
319 1.51 hannken
320 1.51 hannken #define VSTATE_GET(vp) \
321 1.57 hannken (VNODE_TO_VIMPL((vp))->vi_state)
322 1.51 hannken #define VSTATE_CHANGE(vp, from, to) \
323 1.51 hannken vstate_change((vp), (from), (to))
324 1.51 hannken #define VSTATE_WAIT_STABLE(vp) \
325 1.51 hannken vstate_wait_stable((vp))
326 1.51 hannken #define VSTATE_ASSERT(vp, state)
327 1.51 hannken
328 1.52 hannken static void
329 1.51 hannken vstate_wait_stable(vnode_t *vp)
330 1.51 hannken {
331 1.57 hannken vnode_impl_t *node = VNODE_TO_VIMPL(vp);
332 1.51 hannken
333 1.57 hannken while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
334 1.51 hannken cv_wait(&vp->v_cv, vp->v_interlock);
335 1.51 hannken }
336 1.51 hannken
337 1.52 hannken static void
338 1.57 hannken vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
339 1.51 hannken {
340 1.57 hannken vnode_impl_t *node = VNODE_TO_VIMPL(vp);
341 1.51 hannken
342 1.57 hannken node->vi_state = to;
343 1.57 hannken if (from == VS_LOADING)
344 1.51 hannken cv_broadcast(&vcache.cv);
345 1.57 hannken if (to == VS_ACTIVE || to == VS_RECLAIMED)
346 1.51 hannken cv_broadcast(&vp->v_cv);
347 1.51 hannken }
348 1.51 hannken
349 1.51 hannken #endif /* defined(DIAGNOSTIC) */
350 1.51 hannken
351 1.1 rmind void
352 1.1 rmind vfs_vnode_sysinit(void)
353 1.1 rmind {
354 1.22 martin int error __diagused;
355 1.1 rmind
356 1.44 hannken dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
357 1.44 hannken KASSERT(dead_rootmount != NULL);
358 1.44 hannken dead_rootmount->mnt_iflag = IMNT_MPSAFE;
359 1.31 hannken
360 1.1 rmind mutex_init(&vnode_free_list_lock, MUTEX_DEFAULT, IPL_NONE);
361 1.1 rmind TAILQ_INIT(&vnode_free_list);
362 1.1 rmind TAILQ_INIT(&vnode_hold_list);
363 1.1 rmind TAILQ_INIT(&vrele_list);
364 1.1 rmind
365 1.36 hannken vcache_init();
366 1.36 hannken
367 1.1 rmind mutex_init(&vrele_lock, MUTEX_DEFAULT, IPL_NONE);
368 1.12 hannken cv_init(&vdrain_cv, "vdrain");
369 1.1 rmind cv_init(&vrele_cv, "vrele");
370 1.12 hannken error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
371 1.12 hannken NULL, NULL, "vdrain");
372 1.47 riastrad KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
373 1.1 rmind error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vrele_thread,
374 1.1 rmind NULL, &vrele_lwp, "vrele");
375 1.47 riastrad KASSERTMSG((error == 0), "kthread_create(vrele) failed: %d", error);
376 1.1 rmind }
377 1.1 rmind
378 1.1 rmind /*
379 1.48 hannken * Allocate a new marker vnode.
380 1.48 hannken */
381 1.48 hannken vnode_t *
382 1.48 hannken vnalloc_marker(struct mount *mp)
383 1.48 hannken {
384 1.57 hannken vnode_impl_t *node;
385 1.50 hannken vnode_t *vp;
386 1.50 hannken
387 1.50 hannken node = pool_cache_get(vcache.pool, PR_WAITOK);
388 1.50 hannken memset(node, 0, sizeof(*node));
389 1.57 hannken vp = VIMPL_TO_VNODE(node);
390 1.50 hannken uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
391 1.50 hannken vp->v_mount = mp;
392 1.50 hannken vp->v_type = VBAD;
393 1.57 hannken node->vi_state = VS_MARKER;
394 1.48 hannken
395 1.50 hannken return vp;
396 1.48 hannken }
397 1.48 hannken
398 1.48 hannken /*
399 1.48 hannken * Free a marker vnode.
400 1.48 hannken */
401 1.48 hannken void
402 1.48 hannken vnfree_marker(vnode_t *vp)
403 1.48 hannken {
404 1.57 hannken vnode_impl_t *node;
405 1.48 hannken
406 1.57 hannken node = VNODE_TO_VIMPL(vp);
407 1.57 hannken KASSERT(node->vi_state == VS_MARKER);
408 1.50 hannken uvm_obj_destroy(&vp->v_uobj, true);
409 1.50 hannken pool_cache_put(vcache.pool, node);
410 1.48 hannken }
411 1.48 hannken
412 1.48 hannken /*
413 1.48 hannken * Test a vnode for being a marker vnode.
414 1.48 hannken */
415 1.48 hannken bool
416 1.48 hannken vnis_marker(vnode_t *vp)
417 1.48 hannken {
418 1.48 hannken
419 1.57 hannken return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
420 1.48 hannken }
421 1.48 hannken
422 1.48 hannken /*
423 1.12 hannken * cleanvnode: grab a vnode from freelist, clean and free it.
424 1.5 rmind *
425 1.5 rmind * => Releases vnode_free_list_lock.
426 1.1 rmind */
427 1.12 hannken static int
428 1.12 hannken cleanvnode(void)
429 1.1 rmind {
430 1.1 rmind vnode_t *vp;
431 1.62 hannken vnode_impl_t *vi;
432 1.1 rmind vnodelst_t *listhd;
433 1.24 hannken struct mount *mp;
434 1.1 rmind
435 1.1 rmind KASSERT(mutex_owned(&vnode_free_list_lock));
436 1.24 hannken
437 1.1 rmind listhd = &vnode_free_list;
438 1.1 rmind try_nextlist:
439 1.62 hannken TAILQ_FOREACH(vi, listhd, vi_lrulist) {
440 1.62 hannken vp = VIMPL_TO_VNODE(vi);
441 1.1 rmind /*
442 1.1 rmind * It's safe to test v_usecount and v_iflag
443 1.1 rmind * without holding the interlock here, since
444 1.1 rmind * referenced vnodes should never appear on
445 1.1 rmind * these lists.
446 1.1 rmind */
447 1.5 rmind KASSERT(vp->v_usecount == 0);
448 1.62 hannken KASSERT(vi->vi_lrulisthd == listhd);
449 1.5 rmind
450 1.60 hannken if (!mutex_tryenter(vp->v_interlock))
451 1.1 rmind continue;
452 1.24 hannken mp = vp->v_mount;
453 1.24 hannken if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
454 1.24 hannken mutex_exit(vp->v_interlock);
455 1.24 hannken continue;
456 1.24 hannken }
457 1.24 hannken break;
458 1.1 rmind }
459 1.1 rmind
460 1.62 hannken if (vi == NULL) {
461 1.1 rmind if (listhd == &vnode_free_list) {
462 1.1 rmind listhd = &vnode_hold_list;
463 1.1 rmind goto try_nextlist;
464 1.1 rmind }
465 1.1 rmind mutex_exit(&vnode_free_list_lock);
466 1.12 hannken return EBUSY;
467 1.1 rmind }
468 1.1 rmind
469 1.1 rmind mutex_exit(&vnode_free_list_lock);
470 1.1 rmind
471 1.60 hannken if (vget(vp, 0, true /* wait */) == 0) {
472 1.60 hannken if (!vrecycle(vp))
473 1.60 hannken vrele(vp);
474 1.60 hannken }
475 1.24 hannken fstrans_done(mp);
476 1.12 hannken
477 1.12 hannken return 0;
478 1.1 rmind }
479 1.1 rmind
480 1.1 rmind /*
481 1.12 hannken * Helper thread to keep the number of vnodes below desiredvnodes.
482 1.12 hannken */
483 1.12 hannken static void
484 1.12 hannken vdrain_thread(void *cookie)
485 1.12 hannken {
486 1.12 hannken int error;
487 1.12 hannken
488 1.12 hannken mutex_enter(&vnode_free_list_lock);
489 1.12 hannken
490 1.12 hannken for (;;) {
491 1.12 hannken cv_timedwait(&vdrain_cv, &vnode_free_list_lock, hz);
492 1.12 hannken while (numvnodes > desiredvnodes) {
493 1.12 hannken error = cleanvnode();
494 1.12 hannken if (error)
495 1.12 hannken kpause("vndsbusy", false, hz, NULL);
496 1.12 hannken mutex_enter(&vnode_free_list_lock);
497 1.12 hannken if (error)
498 1.12 hannken break;
499 1.12 hannken }
500 1.12 hannken }
501 1.12 hannken }
502 1.12 hannken
503 1.12 hannken /*
504 1.1 rmind * Remove a vnode from its freelist.
505 1.1 rmind */
506 1.1 rmind void
507 1.1 rmind vremfree(vnode_t *vp)
508 1.1 rmind {
509 1.62 hannken vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
510 1.1 rmind
511 1.9 rmind KASSERT(mutex_owned(vp->v_interlock));
512 1.1 rmind KASSERT(vp->v_usecount == 0);
513 1.1 rmind
514 1.1 rmind /*
515 1.1 rmind * Note that the reference count must not change until
516 1.1 rmind * the vnode is removed.
517 1.1 rmind */
518 1.1 rmind mutex_enter(&vnode_free_list_lock);
519 1.1 rmind if (vp->v_holdcnt > 0) {
520 1.62 hannken KASSERT(vi->vi_lrulisthd == &vnode_hold_list);
521 1.1 rmind } else {
522 1.62 hannken KASSERT(vi->vi_lrulisthd == &vnode_free_list);
523 1.1 rmind }
524 1.62 hannken TAILQ_REMOVE(vi->vi_lrulisthd, vi, vi_lrulist);
525 1.62 hannken vi->vi_lrulisthd = NULL;
526 1.1 rmind mutex_exit(&vnode_free_list_lock);
527 1.1 rmind }
528 1.1 rmind
529 1.1 rmind /*
530 1.4 rmind * vget: get a particular vnode from the free list, increment its reference
531 1.52 hannken * count and return it.
532 1.4 rmind *
533 1.52 hannken * => Must be called with v_interlock held.
534 1.4 rmind *
535 1.57 hannken * If state is VS_RECLAIMING, the vnode may be eliminated in vcache_reclaim().
536 1.4 rmind * In that case, we cannot grab the vnode, so the process is awakened when
537 1.4 rmind * the transition is completed, and an error returned to indicate that the
538 1.29 christos * vnode is no longer usable.
539 1.52 hannken *
540 1.57 hannken * If state is VS_LOADING or VS_BLOCKED, wait until the vnode enters a
541 1.57 hannken * stable state (VS_ACTIVE or VS_RECLAIMED).
542 1.1 rmind */
543 1.1 rmind int
544 1.41 riastrad vget(vnode_t *vp, int flags, bool waitok)
545 1.1 rmind {
546 1.1 rmind
547 1.9 rmind KASSERT(mutex_owned(vp->v_interlock));
548 1.41 riastrad KASSERT((flags & ~LK_NOWAIT) == 0);
549 1.41 riastrad KASSERT(waitok == ((flags & LK_NOWAIT) == 0));
550 1.1 rmind
551 1.1 rmind /*
552 1.1 rmind * Before adding a reference, we must remove the vnode
553 1.1 rmind * from its freelist.
554 1.1 rmind */
555 1.1 rmind if (vp->v_usecount == 0) {
556 1.1 rmind vremfree(vp);
557 1.1 rmind vp->v_usecount = 1;
558 1.1 rmind } else {
559 1.1 rmind atomic_inc_uint(&vp->v_usecount);
560 1.1 rmind }
561 1.1 rmind
562 1.1 rmind /*
563 1.29 christos * If the vnode is in the process of changing state we wait
564 1.29 christos * for the change to complete and take care not to return
565 1.29 christos * a clean vnode.
566 1.1 rmind */
567 1.52 hannken if (! ISSET(flags, LK_NOWAIT))
568 1.52 hannken VSTATE_WAIT_STABLE(vp);
569 1.57 hannken if (VSTATE_GET(vp) == VS_RECLAIMED) {
570 1.52 hannken vrelel(vp, 0);
571 1.52 hannken return ENOENT;
572 1.57 hannken } else if (VSTATE_GET(vp) != VS_ACTIVE) {
573 1.52 hannken KASSERT(ISSET(flags, LK_NOWAIT));
574 1.52 hannken vrelel(vp, 0);
575 1.52 hannken return EBUSY;
576 1.17 hannken }
577 1.17 hannken
578 1.1 rmind /*
579 1.41 riastrad * Ok, we got it in good shape.
580 1.1 rmind */
581 1.57 hannken VSTATE_ASSERT(vp, VS_ACTIVE);
582 1.9 rmind mutex_exit(vp->v_interlock);
583 1.52 hannken
584 1.52 hannken return 0;
585 1.1 rmind }
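/*
 * Hypothetical usage sketch for vget() (not part of this file): taking a
 * new reference on a vnode found on some fs-private list, as described in
 * the comment above vget(). Only vget() itself is a real interface here.
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vget(vp, 0, true);
 *	// vget() has released v_interlock on every return path.
 *	if (error == ENOENT) {
 *		// The vnode was reclaimed while we waited; it is no longer
 *		// usable and the temporary reference has been dropped.
 *	}
 */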
586 1.1 rmind
587 1.1 rmind /*
588 1.4 rmind * vput: unlock and release the reference.
589 1.1 rmind */
590 1.1 rmind void
591 1.1 rmind vput(vnode_t *vp)
592 1.1 rmind {
593 1.1 rmind
594 1.1 rmind VOP_UNLOCK(vp);
595 1.1 rmind vrele(vp);
596 1.1 rmind }
597 1.1 rmind
598 1.1 rmind /*
599 1.1 rmind * Try to drop reference on a vnode. Abort if we are releasing the
600 1.1 rmind * last reference. Note: this _must_ succeed if not the last reference.
601 1.1 rmind */
602 1.1 rmind static inline bool
603 1.1 rmind vtryrele(vnode_t *vp)
604 1.1 rmind {
605 1.1 rmind u_int use, next;
606 1.1 rmind
607 1.1 rmind for (use = vp->v_usecount;; use = next) {
608 1.1 rmind if (use == 1) {
609 1.1 rmind return false;
610 1.1 rmind }
611 1.24 hannken KASSERT(use > 1);
612 1.1 rmind next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
613 1.1 rmind if (__predict_true(next == use)) {
614 1.1 rmind return true;
615 1.1 rmind }
616 1.1 rmind }
617 1.1 rmind }
618 1.1 rmind
619 1.1 rmind /*
620 1.1 rmind * Vnode release. If reference count drops to zero, call inactive
621 1.1 rmind * routine and either return to freelist or free to the pool.
622 1.1 rmind */
623 1.23 hannken static void
624 1.1 rmind vrelel(vnode_t *vp, int flags)
625 1.1 rmind {
626 1.62 hannken vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
627 1.1 rmind bool recycle, defer;
628 1.1 rmind int error;
629 1.1 rmind
630 1.9 rmind KASSERT(mutex_owned(vp->v_interlock));
631 1.62 hannken KASSERT(vi->vi_lrulisthd == NULL);
632 1.1 rmind
633 1.1 rmind if (__predict_false(vp->v_op == dead_vnodeop_p &&
634 1.57 hannken VSTATE_GET(vp) != VS_RECLAIMED)) {
635 1.11 christos vnpanic(vp, "dead but not clean");
636 1.1 rmind }
637 1.1 rmind
638 1.1 rmind /*
639 1.1 rmind * If not the last reference, just drop the reference count
640 1.1 rmind * and unlock.
641 1.1 rmind */
642 1.1 rmind if (vtryrele(vp)) {
643 1.9 rmind mutex_exit(vp->v_interlock);
644 1.1 rmind return;
645 1.1 rmind }
646 1.1 rmind if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
647 1.11 christos vnpanic(vp, "%s: bad ref count", __func__);
648 1.1 rmind }
649 1.1 rmind
650 1.15 hannken #ifdef DIAGNOSTIC
651 1.15 hannken if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
652 1.15 hannken vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
653 1.15 hannken vprint("vrelel: missing VOP_CLOSE()", vp);
654 1.15 hannken }
655 1.15 hannken #endif
656 1.15 hannken
657 1.1 rmind /*
658 1.1 rmind * If not clean, deactivate the vnode, but preserve
659 1.1 rmind * our reference across the call to VOP_INACTIVE().
660 1.1 rmind */
661 1.57 hannken if (VSTATE_GET(vp) != VS_RECLAIMED) {
662 1.1 rmind recycle = false;
663 1.1 rmind
664 1.1 rmind /*
665 1.1 rmind * XXX This ugly block can be largely eliminated if
666 1.1 rmind * locking is pushed down into the file systems.
667 1.1 rmind *
668 1.1 rmind * Defer vnode release to vrele_thread if caller
669 1.30 hannken * requests it explicitly or is the pagedaemon.
670 1.1 rmind */
671 1.1 rmind if ((curlwp == uvm.pagedaemon_lwp) ||
672 1.1 rmind (flags & VRELEL_ASYNC_RELE) != 0) {
673 1.1 rmind defer = true;
674 1.1 rmind } else if (curlwp == vrele_lwp) {
675 1.17 hannken /*
676 1.29 christos * We have to try harder.
677 1.17 hannken */
678 1.9 rmind mutex_exit(vp->v_interlock);
679 1.32 hannken error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
680 1.47 riastrad KASSERTMSG((error == 0), "vn_lock failed: %d", error);
681 1.17 hannken mutex_enter(vp->v_interlock);
682 1.1 rmind defer = false;
683 1.4 rmind } else {
684 1.1 rmind /* If we can't acquire the lock, then defer. */
685 1.32 hannken mutex_exit(vp->v_interlock);
686 1.32 hannken error = vn_lock(vp,
687 1.32 hannken LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
688 1.30 hannken defer = (error != 0);
689 1.32 hannken mutex_enter(vp->v_interlock);
690 1.1 rmind }
691 1.1 rmind
692 1.30 hannken KASSERT(mutex_owned(vp->v_interlock));
693 1.30 hannken KASSERT(! (curlwp == vrele_lwp && defer));
694 1.30 hannken
695 1.1 rmind if (defer) {
696 1.1 rmind /*
697 1.1 rmind * Defer reclaim to the kthread; it's not safe to
698 1.1 rmind * clean it here. We donate it our last reference.
699 1.1 rmind */
700 1.1 rmind mutex_enter(&vrele_lock);
701 1.62 hannken TAILQ_INSERT_TAIL(&vrele_list, vi, vi_lrulist);
702 1.1 rmind if (++vrele_pending > (desiredvnodes >> 8))
703 1.53 msaitoh cv_signal(&vrele_cv);
704 1.1 rmind mutex_exit(&vrele_lock);
705 1.9 rmind mutex_exit(vp->v_interlock);
706 1.1 rmind return;
707 1.1 rmind }
708 1.1 rmind
709 1.32 hannken /*
710 1.32 hannken * If the node got another reference while we
711 1.32 hannken * released the interlock, don't try to inactivate it yet.
712 1.32 hannken */
713 1.32 hannken if (__predict_false(vtryrele(vp))) {
714 1.32 hannken VOP_UNLOCK(vp);
715 1.32 hannken mutex_exit(vp->v_interlock);
716 1.32 hannken return;
717 1.32 hannken }
718 1.57 hannken VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
719 1.29 christos mutex_exit(vp->v_interlock);
720 1.29 christos
721 1.1 rmind /*
722 1.52 hannken * The vnode must not gain another reference while being
723 1.1 rmind * deactivated. If VOP_INACTIVE() indicates that
724 1.1 rmind * the described file has been deleted, then recycle
725 1.52 hannken * the vnode.
726 1.1 rmind *
727 1.1 rmind * Note that VOP_INACTIVE() will drop the vnode lock.
728 1.1 rmind */
729 1.1 rmind VOP_INACTIVE(vp, &recycle);
730 1.46 hannken if (recycle) {
731 1.54 hannken /* vcache_reclaim() below will drop the lock. */
732 1.46 hannken if (vn_lock(vp, LK_EXCLUSIVE) != 0)
733 1.46 hannken recycle = false;
734 1.46 hannken }
735 1.9 rmind mutex_enter(vp->v_interlock);
736 1.57 hannken VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
737 1.1 rmind if (!recycle) {
738 1.1 rmind if (vtryrele(vp)) {
739 1.9 rmind mutex_exit(vp->v_interlock);
740 1.1 rmind return;
741 1.1 rmind }
742 1.1 rmind }
743 1.1 rmind
744 1.1 rmind /* Take care of space accounting. */
745 1.1 rmind if (vp->v_iflag & VI_EXECMAP) {
746 1.1 rmind atomic_add_int(&uvmexp.execpages,
747 1.1 rmind -vp->v_uobj.uo_npages);
748 1.1 rmind atomic_add_int(&uvmexp.filepages,
749 1.1 rmind vp->v_uobj.uo_npages);
750 1.1 rmind }
751 1.1 rmind vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
752 1.1 rmind vp->v_vflag &= ~VV_MAPPED;
753 1.1 rmind
754 1.1 rmind /*
755 1.1 rmind * Recycle the vnode if the file is now unused (unlinked),
756 1.1 rmind * otherwise just free it.
757 1.1 rmind */
758 1.1 rmind if (recycle) {
759 1.57 hannken VSTATE_ASSERT(vp, VS_ACTIVE);
760 1.54 hannken vcache_reclaim(vp);
761 1.1 rmind }
762 1.1 rmind KASSERT(vp->v_usecount > 0);
763 1.1 rmind }
764 1.1 rmind
765 1.1 rmind if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
766 1.1 rmind /* Gained another reference while being reclaimed. */
767 1.9 rmind mutex_exit(vp->v_interlock);
768 1.1 rmind return;
769 1.1 rmind }
770 1.1 rmind
771 1.57 hannken if (VSTATE_GET(vp) == VS_RECLAIMED) {
772 1.1 rmind /*
773 1.1 rmind * It's clean so destroy it. It isn't referenced
774 1.1 rmind * anywhere since it has been reclaimed.
775 1.1 rmind */
776 1.1 rmind KASSERT(vp->v_holdcnt == 0);
777 1.1 rmind KASSERT(vp->v_writecount == 0);
778 1.9 rmind mutex_exit(vp->v_interlock);
779 1.1 rmind vfs_insmntque(vp, NULL);
780 1.1 rmind if (vp->v_type == VBLK || vp->v_type == VCHR) {
781 1.1 rmind spec_node_destroy(vp);
782 1.1 rmind }
783 1.57 hannken vcache_free(VNODE_TO_VIMPL(vp));
784 1.1 rmind } else {
785 1.1 rmind /*
786 1.1 rmind * Otherwise, put it back onto the freelist. It
787 1.1 rmind * can't be destroyed while still associated with
788 1.1 rmind * a file system.
789 1.1 rmind */
790 1.1 rmind mutex_enter(&vnode_free_list_lock);
791 1.1 rmind if (vp->v_holdcnt > 0) {
792 1.62 hannken vi->vi_lrulisthd = &vnode_hold_list;
793 1.1 rmind } else {
794 1.62 hannken vi->vi_lrulisthd = &vnode_free_list;
795 1.1 rmind }
796 1.62 hannken TAILQ_INSERT_TAIL(vi->vi_lrulisthd, vi, vi_lrulist);
797 1.1 rmind mutex_exit(&vnode_free_list_lock);
798 1.9 rmind mutex_exit(vp->v_interlock);
799 1.1 rmind }
800 1.1 rmind }
801 1.1 rmind
802 1.1 rmind void
803 1.1 rmind vrele(vnode_t *vp)
804 1.1 rmind {
805 1.1 rmind
806 1.29 christos if (vtryrele(vp)) {
807 1.1 rmind return;
808 1.1 rmind }
809 1.9 rmind mutex_enter(vp->v_interlock);
810 1.1 rmind vrelel(vp, 0);
811 1.1 rmind }
812 1.1 rmind
813 1.1 rmind /*
814 1.1 rmind * Asynchronous vnode release, vnode is released in different context.
815 1.1 rmind */
816 1.1 rmind void
817 1.1 rmind vrele_async(vnode_t *vp)
818 1.1 rmind {
819 1.1 rmind
820 1.29 christos if (vtryrele(vp)) {
821 1.1 rmind return;
822 1.1 rmind }
823 1.9 rmind mutex_enter(vp->v_interlock);
824 1.1 rmind vrelel(vp, VRELEL_ASYNC_RELE);
825 1.1 rmind }
826 1.1 rmind
827 1.1 rmind static void
828 1.1 rmind vrele_thread(void *cookie)
829 1.1 rmind {
830 1.34 hannken vnodelst_t skip_list;
831 1.1 rmind vnode_t *vp;
832 1.62 hannken vnode_impl_t *vi;
833 1.34 hannken struct mount *mp;
834 1.34 hannken
835 1.34 hannken TAILQ_INIT(&skip_list);
836 1.1 rmind
837 1.34 hannken mutex_enter(&vrele_lock);
838 1.1 rmind for (;;) {
839 1.1 rmind while (TAILQ_EMPTY(&vrele_list)) {
840 1.1 rmind vrele_gen++;
841 1.1 rmind cv_broadcast(&vrele_cv);
842 1.1 rmind cv_timedwait(&vrele_cv, &vrele_lock, hz);
843 1.62 hannken TAILQ_CONCAT(&vrele_list, &skip_list, vi_lrulist);
844 1.1 rmind }
845 1.62 hannken vi = TAILQ_FIRST(&vrele_list);
846 1.62 hannken vp = VIMPL_TO_VNODE(vi);
847 1.34 hannken mp = vp->v_mount;
848 1.62 hannken TAILQ_REMOVE(&vrele_list, vi, vi_lrulist);
849 1.34 hannken if (fstrans_start_nowait(mp, FSTRANS_LAZY) != 0) {
850 1.62 hannken TAILQ_INSERT_TAIL(&skip_list, vi, vi_lrulist);
851 1.34 hannken continue;
852 1.34 hannken }
853 1.1 rmind vrele_pending--;
854 1.1 rmind mutex_exit(&vrele_lock);
855 1.1 rmind
856 1.1 rmind /*
857 1.1 rmind * If not the last reference, then ignore the vnode
858 1.1 rmind * and look for more work.
859 1.1 rmind */
860 1.9 rmind mutex_enter(vp->v_interlock);
861 1.1 rmind vrelel(vp, 0);
862 1.34 hannken fstrans_done(mp);
863 1.34 hannken mutex_enter(&vrele_lock);
864 1.1 rmind }
865 1.1 rmind }
866 1.1 rmind
867 1.1 rmind /*
868 1.1 rmind * Vnode reference, where a reference is already held by some other
869 1.1 rmind * object (for example, a file structure).
870 1.1 rmind */
871 1.1 rmind void
872 1.1 rmind vref(vnode_t *vp)
873 1.1 rmind {
874 1.1 rmind
875 1.1 rmind KASSERT(vp->v_usecount != 0);
876 1.1 rmind
877 1.1 rmind atomic_inc_uint(&vp->v_usecount);
878 1.1 rmind }
879 1.1 rmind
880 1.1 rmind /*
881 1.1 rmind * Page or buffer structure gets a reference.
882 1.1 rmind * Called with v_interlock held.
883 1.1 rmind */
884 1.1 rmind void
885 1.1 rmind vholdl(vnode_t *vp)
886 1.1 rmind {
887 1.62 hannken vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
888 1.1 rmind
889 1.9 rmind KASSERT(mutex_owned(vp->v_interlock));
890 1.1 rmind
891 1.1 rmind if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0) {
892 1.1 rmind mutex_enter(&vnode_free_list_lock);
893 1.62 hannken KASSERT(vi->vi_lrulisthd == &vnode_free_list);
894 1.62 hannken TAILQ_REMOVE(vi->vi_lrulisthd, vi, vi_lrulist);
895 1.62 hannken vi->vi_lrulisthd = &vnode_hold_list;
896 1.62 hannken TAILQ_INSERT_TAIL(vi->vi_lrulisthd, vi, vi_lrulist);
897 1.1 rmind mutex_exit(&vnode_free_list_lock);
898 1.1 rmind }
899 1.1 rmind }
900 1.1 rmind
901 1.1 rmind /*
902 1.1 rmind * Page or buffer structure frees a reference.
903 1.1 rmind * Called with v_interlock held.
904 1.1 rmind */
905 1.1 rmind void
906 1.1 rmind holdrelel(vnode_t *vp)
907 1.1 rmind {
908 1.62 hannken vnode_impl_t *vi = VNODE_TO_VIMPL(vp);
909 1.1 rmind
910 1.9 rmind KASSERT(mutex_owned(vp->v_interlock));
911 1.1 rmind
912 1.1 rmind if (vp->v_holdcnt <= 0) {
913 1.11 christos vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
914 1.1 rmind }
915 1.1 rmind
916 1.1 rmind vp->v_holdcnt--;
917 1.1 rmind if (vp->v_holdcnt == 0 && vp->v_usecount == 0) {
918 1.1 rmind mutex_enter(&vnode_free_list_lock);
919 1.62 hannken KASSERT(vi->vi_lrulisthd == &vnode_hold_list);
920 1.62 hannken TAILQ_REMOVE(vi->vi_lrulisthd, vi, vi_lrulist);
921 1.62 hannken vi->vi_lrulisthd = &vnode_free_list;
922 1.62 hannken TAILQ_INSERT_TAIL(vi->vi_lrulisthd, vi, vi_lrulist);
923 1.1 rmind mutex_exit(&vnode_free_list_lock);
924 1.1 rmind }
925 1.1 rmind }
926 1.1 rmind
927 1.1 rmind /*
928 1.33 hannken * Recycle an unused vnode if caller holds the last reference.
929 1.1 rmind */
930 1.33 hannken bool
931 1.33 hannken vrecycle(vnode_t *vp)
932 1.1 rmind {
933 1.60 hannken int error __diagused;
934 1.46 hannken
935 1.33 hannken mutex_enter(vp->v_interlock);
936 1.33 hannken
937 1.60 hannken /* Make sure we hold the last reference. */
938 1.60 hannken VSTATE_WAIT_STABLE(vp);
939 1.33 hannken if (vp->v_usecount != 1) {
940 1.33 hannken mutex_exit(vp->v_interlock);
941 1.33 hannken return false;
942 1.1 rmind }
943 1.60 hannken
944 1.60 hannken /* If the vnode is already clean we're done. */
945 1.60 hannken if (VSTATE_GET(vp) != VS_ACTIVE) {
946 1.60 hannken VSTATE_ASSERT(vp, VS_RECLAIMED);
947 1.60 hannken vrelel(vp, 0);
948 1.60 hannken return true;
949 1.60 hannken }
950 1.60 hannken
951 1.60 hannken /* Prevent further references until the vnode is locked. */
952 1.60 hannken VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
953 1.60 hannken mutex_exit(vp->v_interlock);
954 1.60 hannken
955 1.60 hannken error = vn_lock(vp, LK_EXCLUSIVE);
956 1.60 hannken KASSERT(error == 0);
957 1.60 hannken
958 1.60 hannken mutex_enter(vp->v_interlock);
959 1.60 hannken VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
960 1.60 hannken
961 1.54 hannken vcache_reclaim(vp);
962 1.52 hannken vrelel(vp, 0);
963 1.60 hannken
964 1.33 hannken return true;
965 1.1 rmind }
966 1.1 rmind
967 1.1 rmind /*
968 1.1 rmind * Eliminate all activity associated with the requested vnode
969 1.1 rmind * and with all vnodes aliased to the requested vnode.
970 1.1 rmind */
971 1.1 rmind void
972 1.1 rmind vrevoke(vnode_t *vp)
973 1.1 rmind {
974 1.19 hannken vnode_t *vq;
975 1.1 rmind enum vtype type;
976 1.1 rmind dev_t dev;
977 1.1 rmind
978 1.1 rmind KASSERT(vp->v_usecount > 0);
979 1.1 rmind
980 1.9 rmind mutex_enter(vp->v_interlock);
981 1.52 hannken VSTATE_WAIT_STABLE(vp);
982 1.57 hannken if (VSTATE_GET(vp) == VS_RECLAIMED) {
983 1.9 rmind mutex_exit(vp->v_interlock);
984 1.1 rmind return;
985 1.1 rmind } else if (vp->v_type != VBLK && vp->v_type != VCHR) {
986 1.1 rmind atomic_inc_uint(&vp->v_usecount);
987 1.29 christos mutex_exit(vp->v_interlock);
988 1.29 christos vgone(vp);
989 1.1 rmind return;
990 1.1 rmind } else {
991 1.1 rmind dev = vp->v_rdev;
992 1.1 rmind type = vp->v_type;
993 1.9 rmind mutex_exit(vp->v_interlock);
994 1.1 rmind }
995 1.1 rmind
996 1.19 hannken while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
997 1.29 christos vgone(vq);
998 1.1 rmind }
999 1.1 rmind }
1000 1.1 rmind
1001 1.1 rmind /*
1002 1.1 rmind * Eliminate all activity associated with a vnode in preparation for
1003 1.1 rmind * reuse. Drops a reference from the vnode.
1004 1.1 rmind */
1005 1.1 rmind void
1006 1.1 rmind vgone(vnode_t *vp)
1007 1.1 rmind {
1008 1.1 rmind
1009 1.46 hannken if (vn_lock(vp, LK_EXCLUSIVE) != 0) {
1010 1.57 hannken VSTATE_ASSERT(vp, VS_RECLAIMED);
1011 1.46 hannken vrele(vp);
1012 1.46 hannken }
1013 1.46 hannken
1014 1.9 rmind mutex_enter(vp->v_interlock);
1015 1.54 hannken vcache_reclaim(vp);
1016 1.52 hannken vrelel(vp, 0);
1017 1.1 rmind }
1018 1.1 rmind
1019 1.36 hannken static inline uint32_t
1020 1.36 hannken vcache_hash(const struct vcache_key *key)
1021 1.36 hannken {
1022 1.36 hannken uint32_t hash = HASH32_BUF_INIT;
1023 1.36 hannken
1024 1.36 hannken hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
1025 1.36 hannken hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
1026 1.36 hannken return hash;
1027 1.36 hannken }
1028 1.36 hannken
1029 1.36 hannken static void
1030 1.36 hannken vcache_init(void)
1031 1.36 hannken {
1032 1.36 hannken
1033 1.57 hannken vcache.pool = pool_cache_init(sizeof(vnode_impl_t), 0, 0, 0,
1034 1.36 hannken "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
1035 1.36 hannken KASSERT(vcache.pool != NULL);
1036 1.36 hannken mutex_init(&vcache.lock, MUTEX_DEFAULT, IPL_NONE);
1037 1.51 hannken cv_init(&vcache.cv, "vcache");
1038 1.61 hannken vcache.hashsize = desiredvnodes;
1039 1.36 hannken vcache.hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
1040 1.36 hannken &vcache.hashmask);
1041 1.36 hannken }
1042 1.36 hannken
1043 1.36 hannken static void
1044 1.36 hannken vcache_reinit(void)
1045 1.36 hannken {
1046 1.36 hannken int i;
1047 1.36 hannken uint32_t hash;
1048 1.36 hannken u_long oldmask, newmask;
1049 1.36 hannken struct hashhead *oldtab, *newtab;
1050 1.57 hannken vnode_impl_t *node;
1051 1.36 hannken
1052 1.36 hannken newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
1053 1.36 hannken mutex_enter(&vcache.lock);
1054 1.36 hannken oldtab = vcache.hashtab;
1055 1.36 hannken oldmask = vcache.hashmask;
1056 1.61 hannken vcache.hashsize = desiredvnodes;
1057 1.36 hannken vcache.hashtab = newtab;
1058 1.36 hannken vcache.hashmask = newmask;
1059 1.36 hannken for (i = 0; i <= oldmask; i++) {
1060 1.36 hannken while ((node = SLIST_FIRST(&oldtab[i])) != NULL) {
1061 1.57 hannken SLIST_REMOVE(&oldtab[i], node, vnode_impl, vi_hash);
1062 1.57 hannken hash = vcache_hash(&node->vi_key);
1063 1.36 hannken SLIST_INSERT_HEAD(&newtab[hash & vcache.hashmask],
1064 1.57 hannken node, vi_hash);
1065 1.36 hannken }
1066 1.36 hannken }
1067 1.36 hannken mutex_exit(&vcache.lock);
1068 1.36 hannken hashdone(oldtab, HASH_SLIST, oldmask);
1069 1.36 hannken }
1070 1.36 hannken
1071 1.57 hannken static inline vnode_impl_t *
1072 1.36 hannken vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
1073 1.36 hannken {
1074 1.36 hannken struct hashhead *hashp;
1075 1.57 hannken vnode_impl_t *node;
1076 1.36 hannken
1077 1.36 hannken KASSERT(mutex_owned(&vcache.lock));
1078 1.36 hannken
1079 1.36 hannken hashp = &vcache.hashtab[hash & vcache.hashmask];
1080 1.57 hannken SLIST_FOREACH(node, hashp, vi_hash) {
1081 1.57 hannken if (key->vk_mount != node->vi_key.vk_mount)
1082 1.36 hannken continue;
1083 1.57 hannken if (key->vk_key_len != node->vi_key.vk_key_len)
1084 1.36 hannken continue;
1085 1.57 hannken if (memcmp(key->vk_key, node->vi_key.vk_key, key->vk_key_len))
1086 1.36 hannken continue;
1087 1.36 hannken return node;
1088 1.36 hannken }
1089 1.36 hannken return NULL;
1090 1.36 hannken }
1091 1.36 hannken
1092 1.36 hannken /*
1093 1.50 hannken * Allocate a new, uninitialized vcache node.
1094 1.50 hannken */
1095 1.57 hannken static vnode_impl_t *
1096 1.50 hannken vcache_alloc(void)
1097 1.50 hannken {
1098 1.57 hannken vnode_impl_t *node;
1099 1.50 hannken vnode_t *vp;
1100 1.50 hannken
1101 1.50 hannken node = pool_cache_get(vcache.pool, PR_WAITOK);
1102 1.50 hannken memset(node, 0, sizeof(*node));
1103 1.50 hannken
1104 1.57 hannken /* SLIST_INIT(&node->vi_hash); */
1105 1.50 hannken
1106 1.57 hannken vp = VIMPL_TO_VNODE(node);
1107 1.50 hannken uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
1108 1.50 hannken cv_init(&vp->v_cv, "vnode");
1109 1.50 hannken /* LIST_INIT(&vp->v_nclist); */
1110 1.50 hannken /* LIST_INIT(&vp->v_dnclist); */
1111 1.50 hannken
1112 1.50 hannken mutex_enter(&vnode_free_list_lock);
1113 1.50 hannken numvnodes++;
1114 1.50 hannken if (numvnodes > desiredvnodes + desiredvnodes / 10)
1115 1.50 hannken cv_signal(&vdrain_cv);
1116 1.50 hannken mutex_exit(&vnode_free_list_lock);
1117 1.50 hannken
1118 1.50 hannken rw_init(&vp->v_lock);
1119 1.50 hannken vp->v_usecount = 1;
1120 1.50 hannken vp->v_type = VNON;
1121 1.50 hannken vp->v_size = vp->v_writesize = VSIZENOTSET;
1122 1.50 hannken
1123 1.57 hannken node->vi_state = VS_LOADING;
1124 1.51 hannken
1125 1.50 hannken return node;
1126 1.50 hannken }
1127 1.50 hannken
1128 1.50 hannken /*
1129 1.50 hannken * Free an unused, unreferenced vcache node.
1130 1.50 hannken */
1131 1.50 hannken static void
1132 1.57 hannken vcache_free(vnode_impl_t *node)
1133 1.50 hannken {
1134 1.50 hannken vnode_t *vp;
1135 1.50 hannken
1136 1.57 hannken vp = VIMPL_TO_VNODE(node);
1137 1.50 hannken
1138 1.50 hannken KASSERT(vp->v_usecount == 0);
1139 1.50 hannken
1140 1.50 hannken rw_destroy(&vp->v_lock);
1141 1.50 hannken mutex_enter(&vnode_free_list_lock);
1142 1.50 hannken numvnodes--;
1143 1.50 hannken mutex_exit(&vnode_free_list_lock);
1144 1.50 hannken
1145 1.50 hannken uvm_obj_destroy(&vp->v_uobj, true);
1146 1.50 hannken cv_destroy(&vp->v_cv);
1147 1.50 hannken pool_cache_put(vcache.pool, node);
1148 1.50 hannken }
1149 1.50 hannken
1150 1.50 hannken /*
1151 1.36 hannken * Get a vnode / fs node pair by key and return it referenced through vpp.
1152 1.36 hannken */
1153 1.36 hannken int
1154 1.36 hannken vcache_get(struct mount *mp, const void *key, size_t key_len,
1155 1.36 hannken struct vnode **vpp)
1156 1.36 hannken {
1157 1.36 hannken int error;
1158 1.36 hannken uint32_t hash;
1159 1.36 hannken const void *new_key;
1160 1.36 hannken struct vnode *vp;
1161 1.36 hannken struct vcache_key vcache_key;
1162 1.57 hannken vnode_impl_t *node, *new_node;
1163 1.36 hannken
1164 1.36 hannken new_key = NULL;
1165 1.36 hannken *vpp = NULL;
1166 1.36 hannken
1167 1.36 hannken vcache_key.vk_mount = mp;
1168 1.36 hannken vcache_key.vk_key = key;
1169 1.36 hannken vcache_key.vk_key_len = key_len;
1170 1.36 hannken hash = vcache_hash(&vcache_key);
1171 1.36 hannken
1172 1.36 hannken again:
1173 1.36 hannken mutex_enter(&vcache.lock);
1174 1.36 hannken node = vcache_hash_lookup(&vcache_key, hash);
1175 1.36 hannken
1176 1.36 hannken /* If found, take a reference or retry. */
1177 1.52 hannken if (__predict_true(node != NULL)) {
1178 1.52 hannken /*
1179 1.52 hannken * If the vnode is loading we cannot take the v_interlock
1180 1.52 hannken * here as it might change during load (see uvm_obj_setlock()).
1181 1.57 hannken * As changing state from VS_LOADING requires both vcache.lock
1182 1.52 hannken * and v_interlock it is safe to test with vcache.lock held.
1183 1.52 hannken *
1184 1.57 hannken * Wait for vnodes changing state from VS_LOADING and retry.
1185 1.52 hannken */
1186 1.57 hannken if (__predict_false(node->vi_state == VS_LOADING)) {
1187 1.52 hannken cv_wait(&vcache.cv, &vcache.lock);
1188 1.52 hannken mutex_exit(&vcache.lock);
1189 1.52 hannken goto again;
1190 1.52 hannken }
1191 1.57 hannken vp = VIMPL_TO_VNODE(node);
1192 1.36 hannken mutex_enter(vp->v_interlock);
1193 1.36 hannken mutex_exit(&vcache.lock);
1194 1.41 riastrad error = vget(vp, 0, true /* wait */);
1195 1.36 hannken if (error == ENOENT)
1196 1.36 hannken goto again;
1197 1.36 hannken if (error == 0)
1198 1.36 hannken *vpp = vp;
1199 1.36 hannken KASSERT((error != 0) == (*vpp == NULL));
1200 1.36 hannken return error;
1201 1.36 hannken }
1202 1.36 hannken mutex_exit(&vcache.lock);
1203 1.36 hannken
1204 1.36 hannken /* Allocate and initialize a new vcache / vnode pair. */
1205 1.36 hannken error = vfs_busy(mp, NULL);
1206 1.36 hannken if (error)
1207 1.36 hannken return error;
1208 1.50 hannken new_node = vcache_alloc();
1209 1.57 hannken new_node->vi_key = vcache_key;
1210 1.57 hannken vp = VIMPL_TO_VNODE(new_node);
1211 1.36 hannken mutex_enter(&vcache.lock);
1212 1.36 hannken node = vcache_hash_lookup(&vcache_key, hash);
1213 1.36 hannken if (node == NULL) {
1214 1.36 hannken SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
1215 1.57 hannken new_node, vi_hash);
1216 1.36 hannken node = new_node;
1217 1.36 hannken }
1218 1.36 hannken
1219 1.36 hannken /* If another thread beat us inserting this node, retry. */
1220 1.36 hannken if (node != new_node) {
1221 1.52 hannken mutex_enter(vp->v_interlock);
1222 1.57 hannken VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
1223 1.52 hannken mutex_exit(&vcache.lock);
1224 1.52 hannken vrelel(vp, 0);
1225 1.36 hannken vfs_unbusy(mp, false, NULL);
1226 1.36 hannken goto again;
1227 1.36 hannken }
1228 1.52 hannken mutex_exit(&vcache.lock);
1229 1.36 hannken
1230 1.57 hannken /* Load the fs node. Exclusive as new_node is VS_LOADING. */
1231 1.36 hannken error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
1232 1.36 hannken if (error) {
1233 1.36 hannken mutex_enter(&vcache.lock);
1234 1.36 hannken SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
1235 1.57 hannken new_node, vnode_impl, vi_hash);
1236 1.52 hannken mutex_enter(vp->v_interlock);
1237 1.57 hannken VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
1238 1.36 hannken mutex_exit(&vcache.lock);
1239 1.52 hannken vrelel(vp, 0);
1240 1.36 hannken vfs_unbusy(mp, false, NULL);
1241 1.36 hannken KASSERT(*vpp == NULL);
1242 1.36 hannken return error;
1243 1.36 hannken }
1244 1.36 hannken KASSERT(new_key != NULL);
1245 1.36 hannken KASSERT(memcmp(key, new_key, key_len) == 0);
1246 1.36 hannken KASSERT(vp->v_op != NULL);
1247 1.36 hannken vfs_insmntque(vp, mp);
1248 1.36 hannken if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1249 1.36 hannken vp->v_vflag |= VV_MPSAFE;
1250 1.36 hannken vfs_unbusy(mp, true, NULL);
1251 1.36 hannken
1252 1.36 hannken /* Finished loading, finalize node. */
1253 1.36 hannken mutex_enter(&vcache.lock);
1254 1.57 hannken new_node->vi_key.vk_key = new_key;
1255 1.39 hannken mutex_enter(vp->v_interlock);
1256 1.57 hannken VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
1257 1.39 hannken mutex_exit(vp->v_interlock);
1258 1.52 hannken mutex_exit(&vcache.lock);
1259 1.36 hannken *vpp = vp;
1260 1.36 hannken return 0;
1261 1.36 hannken }
1262 1.36 hannken
1263 1.36 hannken /*
1264 1.40 hannken * Create a new vnode / fs node pair and return it referenced through vpp.
1265 1.40 hannken */
1266 1.40 hannken int
1267 1.40 hannken vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
1268 1.40 hannken kauth_cred_t cred, struct vnode **vpp)
1269 1.40 hannken {
1270 1.40 hannken int error;
1271 1.40 hannken uint32_t hash;
1272 1.52 hannken struct vnode *ovp, *vp;
1273 1.57 hannken vnode_impl_t *new_node;
1274 1.57 hannken vnode_impl_t *old_node __diagused;
1275 1.40 hannken
1276 1.40 hannken *vpp = NULL;
1277 1.40 hannken
1278 1.40 hannken /* Allocate and initialize a new vcache / vnode pair. */
1279 1.40 hannken error = vfs_busy(mp, NULL);
1280 1.40 hannken if (error)
1281 1.40 hannken return error;
1282 1.50 hannken new_node = vcache_alloc();
1283 1.57 hannken new_node->vi_key.vk_mount = mp;
1284 1.57 hannken vp = VIMPL_TO_VNODE(new_node);
1285 1.40 hannken
1286 1.40 hannken /* Create and load the fs node. */
1287 1.40 hannken error = VFS_NEWVNODE(mp, dvp, vp, vap, cred,
1288 1.57 hannken &new_node->vi_key.vk_key_len, &new_node->vi_key.vk_key);
1289 1.40 hannken if (error) {
1290 1.52 hannken mutex_enter(&vcache.lock);
1291 1.52 hannken mutex_enter(vp->v_interlock);
1292 1.57 hannken VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
1293 1.52 hannken mutex_exit(&vcache.lock);
1294 1.52 hannken vrelel(vp, 0);
1295 1.40 hannken vfs_unbusy(mp, false, NULL);
1296 1.40 hannken KASSERT(*vpp == NULL);
1297 1.40 hannken return error;
1298 1.40 hannken }
1299 1.57 hannken KASSERT(new_node->vi_key.vk_key != NULL);
1300 1.40 hannken KASSERT(vp->v_op != NULL);
1301 1.57 hannken hash = vcache_hash(&new_node->vi_key);
1302 1.40 hannken
1303 1.40 hannken /* Wait for previous instance to be reclaimed, then insert new node. */
1304 1.40 hannken mutex_enter(&vcache.lock);
1305 1.57 hannken while ((old_node = vcache_hash_lookup(&new_node->vi_key, hash))) {
1306 1.57 hannken ovp = VIMPL_TO_VNODE(old_node);
1307 1.52 hannken mutex_enter(ovp->v_interlock);
1308 1.40 hannken mutex_exit(&vcache.lock);
1309 1.52 hannken error = vget(ovp, 0, true /* wait */);
1310 1.52 hannken KASSERT(error == ENOENT);
1311 1.40 hannken mutex_enter(&vcache.lock);
1312 1.40 hannken }
1313 1.40 hannken SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
1314 1.57 hannken new_node, vi_hash);
1315 1.40 hannken mutex_exit(&vcache.lock);
1316 1.40 hannken vfs_insmntque(vp, mp);
1317 1.40 hannken if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1318 1.40 hannken vp->v_vflag |= VV_MPSAFE;
1319 1.40 hannken vfs_unbusy(mp, true, NULL);
1320 1.40 hannken
1321 1.40 hannken /* Finished loading, finalize node. */
1322 1.40 hannken mutex_enter(&vcache.lock);
1323 1.52 hannken mutex_enter(vp->v_interlock);
1324 1.57 hannken VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
1325 1.40 hannken mutex_exit(&vcache.lock);
1326 1.40 hannken mutex_exit(vp->v_interlock);
1327 1.40 hannken *vpp = vp;
1328 1.40 hannken return 0;
1329 1.40 hannken }
1330 1.40 hannken
1331 1.40 hannken /*
1332 1.37 hannken * Prepare key change: lock old and new cache node.
1333 1.37 hannken * Return an error if the new node already exists.
1334 1.37 hannken */
1335 1.37 hannken int
1336 1.37 hannken vcache_rekey_enter(struct mount *mp, struct vnode *vp,
1337 1.37 hannken const void *old_key, size_t old_key_len,
1338 1.37 hannken const void *new_key, size_t new_key_len)
1339 1.37 hannken {
1340 1.37 hannken uint32_t old_hash, new_hash;
1341 1.37 hannken struct vcache_key old_vcache_key, new_vcache_key;
1342 1.57 hannken vnode_impl_t *node, *new_node;
1343 1.52 hannken struct vnode *tvp;
1344 1.37 hannken
1345 1.37 hannken old_vcache_key.vk_mount = mp;
1346 1.37 hannken old_vcache_key.vk_key = old_key;
1347 1.37 hannken old_vcache_key.vk_key_len = old_key_len;
1348 1.37 hannken old_hash = vcache_hash(&old_vcache_key);
1349 1.37 hannken
1350 1.37 hannken new_vcache_key.vk_mount = mp;
1351 1.37 hannken new_vcache_key.vk_key = new_key;
1352 1.37 hannken new_vcache_key.vk_key_len = new_key_len;
1353 1.37 hannken new_hash = vcache_hash(&new_vcache_key);
1354 1.37 hannken
1355 1.50 hannken new_node = vcache_alloc();
1356 1.57 hannken new_node->vi_key = new_vcache_key;
1357 1.57 hannken tvp = VIMPL_TO_VNODE(new_node);
1358 1.37 hannken
1359 1.52 hannken /* Insert locked new node used as placeholder. */
1360 1.37 hannken mutex_enter(&vcache.lock);
1361 1.37 hannken node = vcache_hash_lookup(&new_vcache_key, new_hash);
1362 1.37 hannken if (node != NULL) {
1363 1.52 hannken mutex_enter(tvp->v_interlock);
1364 1.57 hannken VSTATE_CHANGE(tvp, VS_LOADING, VS_RECLAIMED);
1365 1.37 hannken mutex_exit(&vcache.lock);
1366 1.52 hannken vrelel(tvp, 0);
1367 1.37 hannken return EEXIST;
1368 1.37 hannken }
1369 1.37 hannken SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
1370 1.57 hannken new_node, vi_hash);
1371 1.49 hannken
1372 1.49 hannken /* Lock old node. */
1373 1.37 hannken node = vcache_hash_lookup(&old_vcache_key, old_hash);
1374 1.37 hannken KASSERT(node != NULL);
1375 1.57 hannken KASSERT(VIMPL_TO_VNODE(node) == vp);
1376 1.52 hannken mutex_enter(vp->v_interlock);
1377 1.57 hannken VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
1378 1.57 hannken node->vi_key = old_vcache_key;
1379 1.52 hannken mutex_exit(vp->v_interlock);
1380 1.37 hannken mutex_exit(&vcache.lock);
1381 1.37 hannken return 0;
1382 1.37 hannken }
1383 1.37 hannken
1384 1.37 hannken /*
1385 1.37 hannken * Key change complete: remove old node and unlock new node.
1386 1.37 hannken */
1387 1.37 hannken void
1388 1.37 hannken vcache_rekey_exit(struct mount *mp, struct vnode *vp,
1389 1.37 hannken const void *old_key, size_t old_key_len,
1390 1.37 hannken const void *new_key, size_t new_key_len)
1391 1.37 hannken {
1392 1.37 hannken uint32_t old_hash, new_hash;
1393 1.37 hannken struct vcache_key old_vcache_key, new_vcache_key;
1394 1.57 hannken vnode_impl_t *old_node, *new_node;
1395 1.52 hannken struct vnode *tvp;
1396 1.37 hannken
1397 1.37 hannken old_vcache_key.vk_mount = mp;
1398 1.37 hannken old_vcache_key.vk_key = old_key;
1399 1.37 hannken old_vcache_key.vk_key_len = old_key_len;
1400 1.37 hannken old_hash = vcache_hash(&old_vcache_key);
1401 1.37 hannken
1402 1.37 hannken new_vcache_key.vk_mount = mp;
1403 1.37 hannken new_vcache_key.vk_key = new_key;
1404 1.37 hannken new_vcache_key.vk_key_len = new_key_len;
1405 1.37 hannken new_hash = vcache_hash(&new_vcache_key);
1406 1.37 hannken
1407 1.37 hannken mutex_enter(&vcache.lock);
1408 1.49 hannken
1409 1.49 hannken /* Lookup old and new node. */
1410 1.49 hannken old_node = vcache_hash_lookup(&old_vcache_key, old_hash);
1411 1.49 hannken KASSERT(old_node != NULL);
1412 1.57 hannken KASSERT(VIMPL_TO_VNODE(old_node) == vp);
1413 1.52 hannken mutex_enter(vp->v_interlock);
1414 1.57 hannken VSTATE_ASSERT(vp, VS_BLOCKED);
1415 1.52 hannken
1416 1.49 hannken new_node = vcache_hash_lookup(&new_vcache_key, new_hash);
1417 1.52 hannken KASSERT(new_node != NULL);
1418 1.57 hannken KASSERT(new_node->vi_key.vk_key_len == new_key_len);
1419 1.57 hannken tvp = VIMPL_TO_VNODE(new_node);
1420 1.52 hannken mutex_enter(tvp->v_interlock);
1421 1.57 hannken 	VSTATE_ASSERT(tvp, VS_LOADING);
1422 1.49 hannken
1423 1.49 hannken /* Rekey old node and put it onto its new hashlist. */
1424 1.57 hannken old_node->vi_key = new_vcache_key;
1425 1.49 hannken if (old_hash != new_hash) {
1426 1.49 hannken SLIST_REMOVE(&vcache.hashtab[old_hash & vcache.hashmask],
1427 1.57 hannken old_node, vnode_impl, vi_hash);
1428 1.49 hannken SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
1429 1.57 hannken old_node, vi_hash);
1430 1.49 hannken }
1431 1.57 hannken VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
1432 1.52 hannken mutex_exit(vp->v_interlock);
1433 1.49 hannken
1434 1.49 hannken /* Remove new node used as placeholder. */
1435 1.49 hannken SLIST_REMOVE(&vcache.hashtab[new_hash & vcache.hashmask],
1436 1.57 hannken new_node, vnode_impl, vi_hash);
1437 1.57 hannken VSTATE_CHANGE(tvp, VS_LOADING, VS_RECLAIMED);
1438 1.37 hannken mutex_exit(&vcache.lock);
1439 1.52 hannken vrelel(tvp, 0);
1440 1.37 hannken }
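
/*
 * Illustrative sketch, not part of this file: a file system that changes
 * the cache key of a live vnode (for example when renumbering an inode)
 * brackets the change with the two functions above.  The variable names
 * and the metadata update in the middle are hypothetical.
 *
 *	error = vcache_rekey_enter(mp, vp, &old_ino, sizeof(old_ino),
 *	    &new_ino, sizeof(new_ino));
 *	if (error)
 *		return error;		(EEXIST: new key already cached)
 *	...update the metadata that defines the key...
 *	vcache_rekey_exit(mp, vp, &old_ino, sizeof(old_ino),
 *	    &new_ino, sizeof(new_ino));
 *
 * Between enter and exit the vnode sits in state VS_BLOCKED and a
 * placeholder node reserves the new key, so concurrent lookups wait
 * instead of loading a second instance.
 */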
1441 1.37 hannken
1442 1.37 hannken /*
1443 1.54 hannken * Disassociate the underlying file system from a vnode.
1444 1.54 hannken *
1445 1.54 hannken  * Must be called with the vnode locked and will return it unlocked.
1446 1.54 hannken * Must be called with the interlock held, and will return with it held.
1447 1.54 hannken */
1448 1.54 hannken static void
1449 1.54 hannken vcache_reclaim(vnode_t *vp)
1450 1.54 hannken {
1451 1.54 hannken lwp_t *l = curlwp;
1452 1.57 hannken vnode_impl_t *node = VNODE_TO_VIMPL(vp);
1453 1.55 hannken uint32_t hash;
1454 1.55 hannken uint8_t temp_buf[64], *temp_key;
1455 1.55 hannken size_t temp_key_len;
1456 1.54 hannken bool recycle, active;
1457 1.54 hannken int error;
1458 1.54 hannken
1459 1.54 hannken KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
1460 1.54 hannken VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1461 1.54 hannken KASSERT(mutex_owned(vp->v_interlock));
1462 1.54 hannken KASSERT(vp->v_usecount != 0);
1463 1.54 hannken
1464 1.54 hannken active = (vp->v_usecount > 1);
1465 1.57 hannken temp_key_len = node->vi_key.vk_key_len;
1466 1.54 hannken /*
1467 1.54 hannken * Prevent the vnode from being recycled or brought into use
1468 1.54 hannken * while we clean it out.
1469 1.54 hannken */
1470 1.57 hannken VSTATE_CHANGE(vp, VS_ACTIVE, VS_RECLAIMING);
1471 1.54 hannken if (vp->v_iflag & VI_EXECMAP) {
1472 1.54 hannken atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
1473 1.54 hannken atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
1474 1.54 hannken }
1475 1.54 hannken vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
1476 1.54 hannken mutex_exit(vp->v_interlock);
1477 1.54 hannken
1478 1.55 hannken 	/* Replace the vnode key with a temporary copy; it must stay valid after VOP_RECLAIM frees the file system's node data. */
1479 1.57 hannken if (node->vi_key.vk_key_len > sizeof(temp_buf)) {
1480 1.55 hannken temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
1481 1.55 hannken } else {
1482 1.55 hannken temp_key = temp_buf;
1483 1.55 hannken }
1484 1.55 hannken mutex_enter(&vcache.lock);
1485 1.57 hannken memcpy(temp_key, node->vi_key.vk_key, temp_key_len);
1486 1.57 hannken node->vi_key.vk_key = temp_key;
1487 1.55 hannken mutex_exit(&vcache.lock);
1488 1.55 hannken
1489 1.54 hannken /*
1490 1.54 hannken * Clean out any cached data associated with the vnode.
1491 1.54 hannken * If purging an active vnode, it must be closed and
1492 1.60 hannken * deactivated before being reclaimed.
1493 1.54 hannken */
1494 1.54 hannken error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
1495 1.54 hannken if (error != 0) {
1496 1.54 hannken if (wapbl_vphaswapbl(vp))
1497 1.54 hannken WAPBL_DISCARD(wapbl_vptomp(vp));
1498 1.54 hannken error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
1499 1.54 hannken }
1500 1.54 hannken KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
1501 1.54 hannken KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1502 1.54 hannken if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
1503 1.54 hannken spec_node_revoke(vp);
1504 1.54 hannken }
1505 1.54 hannken
1506 1.60 hannken /*
1507 1.60 hannken * Disassociate the underlying file system from the vnode.
1508 1.60 hannken  * VOP_INACTIVE deactivates and unlocks the vnode; VOP_RECLAIM then detaches the file system's private data.
1509 1.60 hannken */
1510 1.60 hannken VOP_INACTIVE(vp, &recycle);
1511 1.54 hannken if (VOP_RECLAIM(vp)) {
1512 1.54 hannken vnpanic(vp, "%s: cannot reclaim", __func__);
1513 1.54 hannken }
1514 1.54 hannken
1515 1.54 hannken KASSERT(vp->v_data == NULL);
1516 1.54 hannken KASSERT(vp->v_uobj.uo_npages == 0);
1517 1.54 hannken
1518 1.54 hannken if (vp->v_type == VREG && vp->v_ractx != NULL) {
1519 1.54 hannken uvm_ra_freectx(vp->v_ractx);
1520 1.54 hannken vp->v_ractx = NULL;
1521 1.54 hannken }
1522 1.54 hannken
1523 1.54 hannken /* Purge name cache. */
1524 1.54 hannken cache_purge(vp);
1525 1.54 hannken
1526 1.54 hannken /* Move to dead mount. */
1527 1.54 hannken vp->v_vflag &= ~VV_ROOT;
1528 1.54 hannken atomic_inc_uint(&dead_rootmount->mnt_refcnt);
1529 1.54 hannken vfs_insmntque(vp, dead_rootmount);
1530 1.54 hannken
1531 1.55 hannken /* Remove from vnode cache. */
1532 1.57 hannken hash = vcache_hash(&node->vi_key);
1533 1.55 hannken mutex_enter(&vcache.lock);
1534 1.57 hannken KASSERT(node == vcache_hash_lookup(&node->vi_key, hash));
1535 1.55 hannken SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
1536 1.57 hannken node, vnode_impl, vi_hash);
1537 1.55 hannken mutex_exit(&vcache.lock);
1538 1.55 hannken if (temp_key != temp_buf)
1539 1.55 hannken kmem_free(temp_key, temp_key_len);
1540 1.55 hannken
1541 1.54 hannken /* Done with purge, notify sleepers of the grim news. */
1542 1.54 hannken mutex_enter(vp->v_interlock);
1543 1.54 hannken vp->v_op = dead_vnodeop_p;
1544 1.54 hannken vp->v_vflag |= VV_LOCKSWORK;
1545 1.57 hannken VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
1546 1.54 hannken vp->v_tag = VT_NON;
1547 1.54 hannken KNOTE(&vp->v_klist, NOTE_REVOKE);
1548 1.54 hannken
1549 1.54 hannken KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1550 1.54 hannken }
1551 1.54 hannken
1552 1.54 hannken /*
1553 1.1 rmind  * Update the outstanding I/O count and wake up any waiters once it reaches zero.
1554 1.1 rmind */
1555 1.1 rmind void
1556 1.1 rmind vwakeup(struct buf *bp)
1557 1.1 rmind {
1558 1.1 rmind vnode_t *vp;
1559 1.1 rmind
1560 1.1 rmind if ((vp = bp->b_vp) == NULL)
1561 1.1 rmind return;
1562 1.1 rmind
1563 1.9 rmind KASSERT(bp->b_objlock == vp->v_interlock);
1564 1.1 rmind KASSERT(mutex_owned(bp->b_objlock));
1565 1.1 rmind
1566 1.1 rmind if (--vp->v_numoutput < 0)
1567 1.11 christos vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
1568 1.1 rmind if (vp->v_numoutput == 0)
1569 1.1 rmind cv_broadcast(&vp->v_cv);
1570 1.1 rmind }
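
/*
 * Sketch of the waiting side, which lives elsewhere (e.g. in the flush
 * and sync paths): v_numoutput is raised under the object lock when a
 * write is started, and code that must wait for all writes to drain
 * sleeps on v_cv until vwakeup() above drops the count to zero and
 * broadcasts.  Roughly:
 *
 *	mutex_enter(vp->v_interlock);
 *	while (vp->v_numoutput > 0)
 *		cv_wait(&vp->v_cv, vp->v_interlock);
 *	mutex_exit(vp->v_interlock);
 */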
1571 1.1 rmind
1572 1.1 rmind /*
1573 1.35 hannken * Test a vnode for being or becoming dead. Returns one of:
1574 1.35 hannken  * EBUSY: vnode is becoming dead; returned only if VDEAD_NOWAIT is set in flags.
1575 1.35 hannken * ENOENT: vnode is dead.
1576 1.35 hannken * 0: otherwise.
1577 1.35 hannken *
1578 1.35 hannken  * Whenever this function returns a non-zero value, all future
1579 1.35 hannken * calls will also return a non-zero value.
1580 1.35 hannken */
1581 1.35 hannken int
1582 1.35 hannken vdead_check(struct vnode *vp, int flags)
1583 1.35 hannken {
1584 1.35 hannken
1585 1.35 hannken KASSERT(mutex_owned(vp->v_interlock));
1586 1.35 hannken
1587 1.52 hannken if (! ISSET(flags, VDEAD_NOWAIT))
1588 1.52 hannken VSTATE_WAIT_STABLE(vp);
1589 1.1 rmind
1590 1.57 hannken if (VSTATE_GET(vp) == VS_RECLAIMING) {
1591 1.52 hannken KASSERT(ISSET(flags, VDEAD_NOWAIT));
1592 1.52 hannken return EBUSY;
1593 1.57 hannken } else if (VSTATE_GET(vp) == VS_RECLAIMED) {
1594 1.52 hannken return ENOENT;
1595 1.52 hannken }
1596 1.1 rmind
1597 1.52 hannken return 0;
1598 1.1 rmind }
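
/*
 * Illustrative sketch, not part of this file: callers hold the vnode
 * interlock across the check.  A caller that must not sleep would
 * typically do:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		return error;
 *
 * where EBUSY means the vnode is on its way out and ENOENT means it is
 * already dead.
 */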
1599 1.1 rmind
1600 1.1 rmind int
1601 1.61 hannken vfs_drainvnodes(void)
1602 1.1 rmind {
1603 1.61 hannken int error, gen;
1604 1.61 hannken
1605 1.61 hannken mutex_enter(&vrele_lock);
1606 1.61 hannken gen = vrele_gen;
1607 1.61 hannken while (vrele_pending && gen == vrele_gen) {
1608 1.61 hannken cv_broadcast(&vrele_cv);
1609 1.61 hannken cv_wait(&vrele_cv, &vrele_lock);
1610 1.61 hannken }
1611 1.61 hannken mutex_exit(&vrele_lock);
1612 1.12 hannken
1613 1.12 hannken mutex_enter(&vnode_free_list_lock);
1614 1.1 rmind
1615 1.61 hannken while (numvnodes > desiredvnodes) {
1616 1.12 hannken error = cleanvnode();
1617 1.12 hannken if (error != 0)
1618 1.12 hannken return error;
1619 1.1 rmind mutex_enter(&vnode_free_list_lock);
1620 1.1 rmind }
1621 1.12 hannken
1622 1.12 hannken mutex_exit(&vnode_free_list_lock);
1623 1.12 hannken
1624 1.61 hannken if (vcache.hashsize != desiredvnodes)
1625 1.61 hannken vcache_reinit();
1626 1.36 hannken
1627 1.1 rmind return 0;
1628 1.1 rmind }
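
/*
 * Sketch of the expected calling pattern (the actual caller, presumably
 * the kern.maxvnodes sysctl path, lives outside this file): desiredvnodes
 * is updated first, then the drain brings numvnodes and the cache hash
 * size in line with the new limit.
 *
 *	desiredvnodes = newmax;
 *	error = vfs_drainvnodes();
 */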
1629 1.1 rmind
1630 1.1 rmind void
1631 1.11 christos vnpanic(vnode_t *vp, const char *fmt, ...)
1632 1.1 rmind {
1633 1.11 christos va_list ap;
1634 1.11 christos
1635 1.1 rmind #ifdef DIAGNOSTIC
1636 1.1 rmind vprint(NULL, vp);
1637 1.1 rmind #endif
1638 1.11 christos va_start(ap, fmt);
1639 1.11 christos vpanic(fmt, ap);
1640 1.11 christos va_end(ap);
1641 1.1 rmind }
1642