/*	$NetBSD: vfs_vnode.c,v 1.60 2016/12/01 14:49:03 hannken Exp $	*/

/*-
 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was another, traditional way.  Currently, only the draining thread
 *	recycles vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In such a case, VOP_INACTIVE(9) is called to
 *	inform the file system that the vnode is inactive.  Via this call,
 *	the file system indicates whether the vnode can be recycled
 *	(usually, it checks its own references, e.g. the count of links,
 *	or whether the file was removed).
 *
 *	Depending on the indication, the vnode can be put onto a free list
 *	(cache), or cleaned via vcache_reclaim(), which calls VOP_RECLAIM(9)
 *	to disassociate the underlying file system from the vnode, and is
 *	finally destroyed.
 *
 * Vnode state
 *
 *	A vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating with the underlying file system
 *			and is not yet ready to use.
 *	- ACTIVE	Vnode has an associated underlying file system and is
 *			ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from the underlying file
 *			system and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> ACTIVE
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	ACTIVE -> RECLAIMING
 *			Vnode starts disassociation from the underlying file
 *			system in vcache_reclaim().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from the underlying file
 *			system in vcache_reclaim().
 *	ACTIVE -> BLOCKED
 *			Either vcache_rekey*() is changing the vnode key or
 *			vrelel() is about to call VOP_INACTIVE().
 *	BLOCKED -> ACTIVE
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system, or
 *			vcache_rekey*() drops a vnode used as a placeholder.
 *
 *	Of these states LOADING, BLOCKED and RECLAIMING are intermediate
 *	and it is possible to wait for a state change.
 *
 *	State is protected with v_interlock, with one exception:
 *	to change from LOADING both v_interlock and vcache.lock must be
 *	held, so it is possible to check "state == LOADING" without
 *	holding v_interlock.  See vcache_get() for details.
 *
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained using
 *	the vref(9), vrele(9) and vput(9) routines.  Common points holding
 *	references are, e.g., open files, the current working directory
 *	and mount points.
 *
 * Note on v_usecount and its locking
 *
 *	At nearly all points where it is known that v_usecount could be
 *	zero, the vnode_t::v_interlock will be held.  To change v_usecount
 *	away from zero, the interlock must be held.  To change from a
 *	non-zero value to zero, again the interlock must be held.
 *
 *	Changing the usecount from a non-zero value to a non-zero value can
 *	safely be done using atomic operations, without the interlock held.
 */
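
/*
 * Illustrative sketch (not compiled): following the locking rules
 * above, taking a reference on a vnode whose count might be zero is
 * done with v_interlock held, via vget(9), which also removes the
 * vnode from its freelist when needed:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vget(vp, 0, true);	// may wait for a stable state
 *	if (error != 0)
 *		return error;		// e.g. ENOENT if reclaimed
 *	...
 *	vrele(vp);
 *
 * Duplicating a reference that is known to be held, in contrast, only
 * needs vref(9), since the count is known to be non-zero.
 */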

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.60 2016/12/01 14:49:03 hannken Exp $");

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode_impl.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC_RELE	0x0001	/* Always defer to vrele thread. */

u_int			numvnodes		__cacheline_aligned;

/*
 * There are two free lists: one is for vnodes which have no buffer/page
 * references and one for those which do (i.e. v_holdcnt is non-zero).
 * The vnode recycling mechanism first attempts to look into the former list.
 */
static kmutex_t		vnode_free_list_lock	__cacheline_aligned;
static vnodelst_t	vnode_free_list		__cacheline_aligned;
static vnodelst_t	vnode_hold_list		__cacheline_aligned;
static kcondvar_t	vdrain_cv		__cacheline_aligned;

static vnodelst_t	vrele_list		__cacheline_aligned;
static kmutex_t		vrele_lock		__cacheline_aligned;
static kcondvar_t	vrele_cv		__cacheline_aligned;
static lwp_t *		vrele_lwp		__cacheline_aligned;
static int		vrele_pending		__cacheline_aligned;
static int		vrele_gen		__cacheline_aligned;

SLIST_HEAD(hashhead, vnode_impl);
static struct {
	kmutex_t	lock;
	kcondvar_t	cv;
	u_long		hashmask;
	struct hashhead	*hashtab;
	pool_cache_t	pool;
}			vcache			__cacheline_aligned;

static int		cleanvnode(void);
static vnode_impl_t *	vcache_alloc(void);
static void		vcache_free(vnode_impl_t *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vcache_reclaim(vnode_t *);
static void		vrelel(vnode_t *, int);
static void		vdrain_thread(void *);
static void		vrele_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount	*dead_rootmount;
extern int		(**dead_vnodeop_p)(void *);
extern struct vfsops	dead_vfsops;

/* Vnode state operations and diagnostics. */

#if defined(DIAGNOSTIC)

#define VSTATE_GET(vp) \
	vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
	vstate_assert_wait_stable((vp), __func__, __LINE__)
#define VSTATE_ASSERT(vp, state) \
	vstate_assert((vp), (state), __func__, __LINE__)

static void
vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if (__predict_true(node->vi_state == state))
		return;
	vnpanic(vp, "state is %s, expected %s at %s:%d",
	    vstate_name(node->vi_state), vstate_name(state), func, line);
}

static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);

	return node->vi_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);

	while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (node->vi_state == VS_MARKER)
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(node->vi_state), func, line);
}

static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache.lock), "at %s:%d", func, line);

	if (from == VS_MARKER)
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (to == VS_MARKER)
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (node->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(node->vi_state), vstate_name(from), func, line);

	node->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache.cv);
	if (to == VS_ACTIVE || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
	(VNODE_TO_VIMPL((vp))->vi_state)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
	vstate_wait_stable((vp))
#define VSTATE_ASSERT(vp, state)

static void
vstate_wait_stable(vnode_t *vp)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	while (node->vi_state != VS_ACTIVE && node->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	node->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache.cv);
	if (to == VS_ACTIVE || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */

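/*
 * Initialize the vnode management data structures and create the
 * vdrain and vrele helper threads.
 */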
void
vfs_vnode_sysinit(void)
{
	int error __diagused;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag = IMNT_MPSAFE;

	mutex_init(&vnode_free_list_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vrele_list);

	vcache_init();

	mutex_init(&vrele_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vrele_cv, "vrele");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, NULL, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vrele_thread,
	    NULL, &vrele_lwp, "vrele");
	KASSERTMSG((error == 0), "kthread_create(vrele) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	vnode_impl_t *node;
	vnode_t *vp;

	node = pool_cache_get(vcache.pool, PR_WAITOK);
	memset(node, 0, sizeof(*node));
	vp = VIMPL_TO_VNODE(node);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	node->vi_state = VS_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	vnode_impl_t *node;

	node = VNODE_TO_VIMPL(vp);
	KASSERT(node->vi_state == VS_MARKER);
	uvm_obj_destroy(&vp->v_uobj, true);
	pool_cache_put(vcache.pool, node);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
}

/*
 * cleanvnode: grab a vnode from the freelist, clean and free it.
 *
 * => Releases vnode_free_list_lock.
 */
static int
cleanvnode(void)
{
	vnode_t *vp;
	vnodelst_t *listhd;
	struct mount *mp;

	KASSERT(mutex_owned(&vnode_free_list_lock));

	listhd = &vnode_free_list;
try_nextlist:
	TAILQ_FOREACH(vp, listhd, v_freelist) {
		/*
		 * It's safe to test v_usecount and v_iflag
		 * without holding the interlock here, since
		 * these vnodes should never appear on the
		 * lists.
		 */
		KASSERT(vp->v_usecount == 0);
		KASSERT(vp->v_freelisthd == listhd);

		if (!mutex_tryenter(vp->v_interlock))
			continue;
		mp = vp->v_mount;
		if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
			mutex_exit(vp->v_interlock);
			continue;
		}
		break;
	}

	if (vp == NULL) {
		if (listhd == &vnode_free_list) {
			listhd = &vnode_hold_list;
			goto try_nextlist;
		}
		mutex_exit(&vnode_free_list_lock);
		return EBUSY;
	}

	mutex_exit(&vnode_free_list_lock);

	if (vget(vp, 0, true /* wait */) == 0) {
		if (!vrecycle(vp))
			vrele(vp);
	}
	fstrans_done(mp);

	return 0;
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes.
 */
static void
vdrain_thread(void *cookie)
{
	int error;

	mutex_enter(&vnode_free_list_lock);

	for (;;) {
		cv_timedwait(&vdrain_cv, &vnode_free_list_lock, hz);
		while (numvnodes > desiredvnodes) {
			error = cleanvnode();
			if (error)
				kpause("vndsbusy", false, hz, NULL);
			mutex_enter(&vnode_free_list_lock);
			if (error)
				break;
		}
	}
}

/*
 * Remove a vnode from its freelist.
 */
void
vremfree(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount == 0);

	/*
	 * Note that the reference count must not change until
	 * the vnode is removed.
	 */
	mutex_enter(&vnode_free_list_lock);
	if (vp->v_holdcnt > 0) {
		KASSERT(vp->v_freelisthd == &vnode_hold_list);
	} else {
		KASSERT(vp->v_freelisthd == &vnode_free_list);
	}
	TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
	vp->v_freelisthd = NULL;
	mutex_exit(&vnode_free_list_lock);
}

/*
 * vget: get a particular vnode, removing it from its freelist if
 * necessary; increment its reference count and return it.
 *
 * => Must be called with v_interlock held.
 *
 * If the state is VS_RECLAIMING, the vnode may be eliminated in
 * vcache_reclaim().  In that case, we cannot grab the vnode, so the
 * process is awakened when the transition is completed, and an error
 * returned to indicate that the vnode is no longer usable.
 *
 * If the state is VS_LOADING or VS_BLOCKED, wait until the vnode enters
 * a stable state (VS_ACTIVE or VS_RECLAIMED).
 */
int
vget(vnode_t *vp, int flags, bool waitok)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((flags & ~LK_NOWAIT) == 0);
	KASSERT(waitok == ((flags & LK_NOWAIT) == 0));

	/*
	 * Before adding a reference, we must remove the vnode
	 * from its freelist.
	 */
	if (vp->v_usecount == 0) {
		vremfree(vp);
		vp->v_usecount = 1;
	} else {
		atomic_inc_uint(&vp->v_usecount);
	}

	/*
	 * If the vnode is in the process of changing state we wait
	 * for the change to complete and take care not to return
	 * a clean vnode.
	 */
	if (! ISSET(flags, LK_NOWAIT))
		VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		vrelel(vp, 0);
		return ENOENT;
	} else if (VSTATE_GET(vp) != VS_ACTIVE) {
		KASSERT(ISSET(flags, LK_NOWAIT));
		vrelel(vp, 0);
		return EBUSY;
	}

	/*
	 * Ok, we got it in good shape.
	 */
	VSTATE_ASSERT(vp, VS_ACTIVE);
	mutex_exit(vp->v_interlock);

	return 0;
}
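
/*
 * Illustrative sketch (not compiled): a caller that must not sleep can
 * pass LK_NOWAIT, accepting EBUSY when the vnode is in an intermediate
 * (LOADING, BLOCKED or RECLAIMING) state:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vget(vp, LK_NOWAIT, false);
 *	if (error == EBUSY)
 *		...try again later...
 */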

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Try to drop a reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static inline bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = vp->v_usecount;; use = next) {
		if (use == 1) {
			return false;
		}
		KASSERT(use > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * Vnode release.  If the reference count drops to zero, call the
 * inactive routine and either return the vnode to the freelist or
 * free it to the pool.
 */
static void
vrelel(vnode_t *vp, int flags)
{
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_freelisthd == NULL);

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VS_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count
	 * and unlock.
	 */
	if (vtryrele(vp)) {
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
	if (VSTATE_GET(vp) != VS_RECLAIMED) {
		recycle = false;

		/*
		 * XXX This ugly block can be largely eliminated if
		 * locking is pushed down into the file systems.
		 *
		 * Defer vnode release to vrele_thread if the caller
		 * requests it explicitly or is the pagedaemon.
		 */
		if ((curlwp == uvm.pagedaemon_lwp) ||
		    (flags & VRELEL_ASYNC_RELE) != 0) {
			defer = true;
		} else if (curlwp == vrele_lwp) {
			/*
			 * We have to try harder.
			 */
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			KASSERTMSG((error == 0), "vn_lock failed: %d", error);
			mutex_enter(vp->v_interlock);
			defer = false;
		} else {
			/* If we can't acquire the lock, then defer. */
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp,
			    LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
			defer = (error != 0);
			mutex_enter(vp->v_interlock);
		}

		KASSERT(mutex_owned(vp->v_interlock));
		KASSERT(! (curlwp == vrele_lwp && defer));

		if (defer) {
			/*
			 * Defer reclaim to the kthread; it's not safe to
			 * clean it here.  We donate it our last reference.
			 */
			mutex_enter(&vrele_lock);
			TAILQ_INSERT_TAIL(&vrele_list, vp, v_freelist);
			if (++vrele_pending > (desiredvnodes >> 8))
				cv_signal(&vrele_cv);
			mutex_exit(&vrele_lock);
			mutex_exit(vp->v_interlock);
			return;
		}

		/*
		 * If the node got another reference while we
		 * released the interlock, don't try to inactivate it yet.
		 */
		if (__predict_false(vtryrele(vp))) {
			VOP_UNLOCK(vp);
			mutex_exit(vp->v_interlock);
			return;
		}
		VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
		mutex_exit(vp->v_interlock);

		/*
		 * The vnode must not gain another reference while being
		 * deactivated.  If VOP_INACTIVE() indicates that
		 * the described file has been deleted, then recycle
		 * the vnode.
		 *
		 * Note that VOP_INACTIVE() will drop the vnode lock.
		 */
		VOP_INACTIVE(vp, &recycle);
		if (recycle) {
			/* vcache_reclaim() below will drop the lock. */
			if (vn_lock(vp, LK_EXCLUSIVE) != 0)
				recycle = false;
		}
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
		if (!recycle) {
			if (vtryrele(vp)) {
				mutex_exit(vp->v_interlock);
				return;
			}
		}

		/* Take care of space accounting. */
		if (vp->v_iflag & VI_EXECMAP) {
			atomic_add_int(&uvmexp.execpages,
			    -vp->v_uobj.uo_npages);
			atomic_add_int(&uvmexp.filepages,
			    vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			VSTATE_ASSERT(vp, VS_ACTIVE);
			vcache_reclaim(vp);
		}
		KASSERT(vp->v_usecount > 0);
	}

	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(vp->v_interlock);
		return;
	}

	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		KASSERT(vp->v_holdcnt == 0);
		KASSERT(vp->v_writecount == 0);
		mutex_exit(vp->v_interlock);
		vfs_insmntque(vp, NULL);
		if (vp->v_type == VBLK || vp->v_type == VCHR) {
			spec_node_destroy(vp);
		}
		vcache_free(VNODE_TO_VIMPL(vp));
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		mutex_enter(&vnode_free_list_lock);
		if (vp->v_holdcnt > 0) {
			vp->v_freelisthd = &vnode_hold_list;
		} else {
			vp->v_freelisthd = &vnode_free_list;
		}
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
		mutex_exit(vp->v_interlock);
	}
}

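/*
 * vrele: release a reference to a vnode, deactivating it via vrelel()
 * if the last reference is dropped.
 */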
void
vrele(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0);
}

/*
 * Asynchronous vnode release: the vnode is released in a different
 * context.
 */
void
vrele_async(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC_RELE);
}

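/*
 * Helper thread to process asynchronous vnode releases queued on
 * vrele_list.  Vnodes whose file system cannot be entered without
 * blocking are put aside and retried later.
 */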
static void
vrele_thread(void *cookie)
{
	vnodelst_t skip_list;
	vnode_t *vp;
	struct mount *mp;

	TAILQ_INIT(&skip_list);

	mutex_enter(&vrele_lock);
	for (;;) {
		while (TAILQ_EMPTY(&vrele_list)) {
			vrele_gen++;
			cv_broadcast(&vrele_cv);
			cv_timedwait(&vrele_cv, &vrele_lock, hz);
			TAILQ_CONCAT(&vrele_list, &skip_list, v_freelist);
		}
		vp = TAILQ_FIRST(&vrele_list);
		mp = vp->v_mount;
		TAILQ_REMOVE(&vrele_list, vp, v_freelist);
		if (fstrans_start_nowait(mp, FSTRANS_LAZY) != 0) {
			TAILQ_INSERT_TAIL(&skip_list, vp, v_freelist);
			continue;
		}
		vrele_pending--;
		mutex_exit(&vrele_lock);

		/*
		 * If not the last reference, then ignore the vnode
		 * and look for more work.
		 */
		mutex_enter(vp->v_interlock);
		vrelel(vp, 0);
		fstrans_done(mp);
		mutex_enter(&vrele_lock);
	}
}

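/*
 * Wait until the vrele thread has processed the asynchronous releases
 * pending in the current generation.
 */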
void
vrele_flush(void)
{
	int gen;

	mutex_enter(&vrele_lock);
	gen = vrele_gen;
	while (vrele_pending && gen == vrele_gen) {
		cv_broadcast(&vrele_cv);
		cv_wait(&vrele_cv, &vrele_lock);
	}
	mutex_exit(&vrele_lock);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 */
void
vref(vnode_t *vp)
{

	KASSERT(vp->v_usecount != 0);

	atomic_inc_uint(&vp->v_usecount);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0) {
		mutex_enter(&vnode_free_list_lock);
		KASSERT(vp->v_freelisthd == &vnode_free_list);
		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
		vp->v_freelisthd = &vnode_hold_list;
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
	}
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		mutex_enter(&vnode_free_list_lock);
		KASSERT(vp->v_freelisthd == &vnode_hold_list);
		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
		vp->v_freelisthd = &vnode_free_list;
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
	}
}

/*
 * Recycle an unused vnode if the caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{
	int error __diagused;

	mutex_enter(vp->v_interlock);

	/* Make sure we hold the last reference. */
	VSTATE_WAIT_STABLE(vp);
	if (vp->v_usecount != 1) {
		mutex_exit(vp->v_interlock);
		return false;
	}

	/* If the vnode is already clean we're done. */
	if (VSTATE_GET(vp) != VS_ACTIVE) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrelel(vp, 0);
		return true;
	}

	/* Prevent further references until the vnode is locked. */
	VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
	mutex_exit(vp->v_interlock);

	error = vn_lock(vp, LK_EXCLUSIVE);
	KASSERT(error == 0);

	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);

	vcache_reclaim(vp);
	vrelel(vp, 0);

	return true;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vp->v_usecount > 0);

	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		mutex_exit(vp->v_interlock);
		return;
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		mutex_exit(vp->v_interlock);
		vgone(vp);
		return;
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);
	}

	while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
		vgone(vq);
	}
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{

	if (vn_lock(vp, LK_EXCLUSIVE) != 0) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrele(vp);
		return;
	}

	mutex_enter(vp->v_interlock);
	vcache_reclaim(vp);
	vrelel(vp, 0);
}

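/*
 * Compute the hash of a cache key, i.e. the mount point and the
 * file system specific key.
 */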
static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
	uint32_t hash = HASH32_BUF_INIT;

	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
	return hash;
}

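/*
 * Initialize the vnode cache: the node pool, the lock and condition
 * variable, and the hash table.
 */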
static void
vcache_init(void)
{

	vcache.pool = pool_cache_init(sizeof(vnode_impl_t), 0, 0, 0,
	    "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vcache.pool != NULL);
	mutex_init(&vcache.lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vcache.cv, "vcache");
	vcache.hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
	    &vcache.hashmask);
}

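/*
 * Re-size the vnode cache hash table to match the current value of
 * desiredvnodes, moving all nodes to the new table.
 */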
static void
vcache_reinit(void)
{
	int i;
	uint32_t hash;
	u_long oldmask, newmask;
	struct hashhead *oldtab, *newtab;
	vnode_impl_t *node;

	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
	mutex_enter(&vcache.lock);
	oldtab = vcache.hashtab;
	oldmask = vcache.hashmask;
	vcache.hashtab = newtab;
	vcache.hashmask = newmask;
	for (i = 0; i <= oldmask; i++) {
		while ((node = SLIST_FIRST(&oldtab[i])) != NULL) {
			SLIST_REMOVE(&oldtab[i], node, vnode_impl, vi_hash);
			hash = vcache_hash(&node->vi_key);
			SLIST_INSERT_HEAD(&newtab[hash & vcache.hashmask],
			    node, vi_hash);
		}
	}
	mutex_exit(&vcache.lock);
	hashdone(oldtab, HASH_SLIST, oldmask);
}

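/*
 * Look up a node by key on its hash list.  Returns NULL if the node
 * is not present.  Must be called with vcache.lock held.
 */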
static inline vnode_impl_t *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
	struct hashhead *hashp;
	vnode_impl_t *node;

	KASSERT(mutex_owned(&vcache.lock));

	hashp = &vcache.hashtab[hash & vcache.hashmask];
	SLIST_FOREACH(node, hashp, vi_hash) {
		if (key->vk_mount != node->vi_key.vk_mount)
			continue;
		if (key->vk_key_len != node->vi_key.vk_key_len)
			continue;
		if (memcmp(key->vk_key, node->vi_key.vk_key, key->vk_key_len))
			continue;
		return node;
	}
	return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static vnode_impl_t *
vcache_alloc(void)
{
	vnode_impl_t *node;
	vnode_t *vp;

	node = pool_cache_get(vcache.pool, PR_WAITOK);
	memset(node, 0, sizeof(*node));

	/* SLIST_INIT(&node->vi_hash); */

	vp = VIMPL_TO_VNODE(node);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	cv_init(&vp->v_cv, "vnode");
	/* LIST_INIT(&vp->v_nclist); */
	/* LIST_INIT(&vp->v_dnclist); */

	mutex_enter(&vnode_free_list_lock);
	numvnodes++;
	if (numvnodes > desiredvnodes + desiredvnodes / 10)
		cv_signal(&vdrain_cv);
	mutex_exit(&vnode_free_list_lock);

	rw_init(&vp->v_lock);
	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	node->vi_state = VS_LOADING;

	return node;
}

/*
 * Free an unused, unreferenced vcache node.
 */
static void
vcache_free(vnode_impl_t *node)
{
	vnode_t *vp;

	vp = VIMPL_TO_VNODE(node);

	KASSERT(vp->v_usecount == 0);

	rw_destroy(&vp->v_lock);
	mutex_enter(&vnode_free_list_lock);
	numvnodes--;
	mutex_exit(&vnode_free_list_lock);

	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	pool_cache_put(vcache.pool, node);
}

/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
	int error;
	uint32_t hash;
	const void *new_key;
	struct vnode *vp;
	struct vcache_key vcache_key;
	vnode_impl_t *node, *new_node;

	new_key = NULL;
	*vpp = NULL;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

again:
	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&vcache_key, hash);

	/* If found, take a reference or retry. */
	if (__predict_true(node != NULL)) {
		/*
		 * If the vnode is loading we cannot take the v_interlock
		 * here as it might change during load (see uvm_obj_setlock()).
		 * As changing state from VS_LOADING requires both vcache.lock
		 * and v_interlock it is safe to test with vcache.lock held.
		 *
		 * Wait for vnodes changing state from VS_LOADING and retry.
		 */
		if (__predict_false(node->vi_state == VS_LOADING)) {
			cv_wait(&vcache.cv, &vcache.lock);
			mutex_exit(&vcache.lock);
			goto again;
		}
		vp = VIMPL_TO_VNODE(node);
		mutex_enter(vp->v_interlock);
		mutex_exit(&vcache.lock);
		error = vget(vp, 0, true /* wait */);
		if (error == ENOENT)
			goto again;
		if (error == 0)
			*vpp = vp;
		KASSERT((error != 0) == (*vpp == NULL));
		return error;
	}
	mutex_exit(&vcache.lock);

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp, NULL);
	if (error)
		return error;
	new_node = vcache_alloc();
	new_node->vi_key = vcache_key;
	vp = VIMPL_TO_VNODE(new_node);
	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&vcache_key, hash);
	if (node == NULL) {
		SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
		    new_node, vi_hash);
		node = new_node;
	}

	/* If another thread beat us inserting this node, retry. */
	if (node != new_node) {
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		goto again;
	}
	mutex_exit(&vcache.lock);

	/* Load the fs node.  Exclusive as new_node is VS_LOADING. */
	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
	if (error) {
		mutex_enter(&vcache.lock);
		SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
		    new_node, vnode_impl, vi_hash);
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_key != NULL);
	KASSERT(memcmp(key, new_key, key_len) == 0);
	KASSERT(vp->v_op != NULL);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_unbusy(mp, true, NULL);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache.lock);
	new_node->vi_key.vk_key = new_key;
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache.lock);
	*vpp = vp;
	return 0;
}
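
/*
 * Illustrative sketch (not from this file): a file system's VFS_VGET()
 * implementation can use its inode number as the cache key, e.g.
 *
 *	error = vcache_get(mp, &ino, sizeof(ino), vpp);
 *
 * vcache_get() then calls back into VFS_LOADVNODE() to initialise the
 * fs node when the pair is not already cached.
 */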

/*
 * Create a new vnode / fs node pair and return it referenced through vpp.
 */
int
vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
    kauth_cred_t cred, struct vnode **vpp)
{
	int error;
	uint32_t hash;
	struct vnode *ovp, *vp;
	vnode_impl_t *new_node;
	vnode_impl_t *old_node __diagused;

	*vpp = NULL;

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp, NULL);
	if (error)
		return error;
	new_node = vcache_alloc();
	new_node->vi_key.vk_mount = mp;
	vp = VIMPL_TO_VNODE(new_node);

	/* Create and load the fs node. */
	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred,
	    &new_node->vi_key.vk_key_len, &new_node->vi_key.vk_key);
	if (error) {
		mutex_enter(&vcache.lock);
		mutex_enter(vp->v_interlock);
		VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(vp, 0);
		vfs_unbusy(mp, false, NULL);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_node->vi_key.vk_key != NULL);
	KASSERT(vp->v_op != NULL);
	hash = vcache_hash(&new_node->vi_key);

	/* Wait for previous instance to be reclaimed, then insert new node. */
	mutex_enter(&vcache.lock);
	while ((old_node = vcache_hash_lookup(&new_node->vi_key, hash))) {
		ovp = VIMPL_TO_VNODE(old_node);
		mutex_enter(ovp->v_interlock);
		mutex_exit(&vcache.lock);
		error = vget(ovp, 0, true /* wait */);
		KASSERT(error == ENOENT);
		mutex_enter(&vcache.lock);
	}
	SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
	    new_node, vi_hash);
	mutex_exit(&vcache.lock);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_unbusy(mp, true, NULL);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache.lock);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_ACTIVE);
	mutex_exit(&vcache.lock);
	mutex_exit(vp->v_interlock);
	*vpp = vp;
	return 0;
}

/*
 * Prepare key change: lock old and new cache node.
 * Return an error if the new node already exists.
 */
int
vcache_rekey_enter(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *node, *new_node;
	struct vnode *tvp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	new_node = vcache_alloc();
	new_node->vi_key = new_vcache_key;
	tvp = VIMPL_TO_VNODE(new_node);

	/* Insert locked new node used as placeholder. */
	mutex_enter(&vcache.lock);
	node = vcache_hash_lookup(&new_vcache_key, new_hash);
	if (node != NULL) {
		mutex_enter(tvp->v_interlock);
		VSTATE_CHANGE(tvp, VS_LOADING, VS_RECLAIMED);
		mutex_exit(&vcache.lock);
		vrelel(tvp, 0);
		return EEXIST;
	}
	SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
	    new_node, vi_hash);

	/* Lock old node. */
	node = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(node != NULL);
	KASSERT(VIMPL_TO_VNODE(node) == vp);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_ACTIVE, VS_BLOCKED);
	node->vi_key = old_vcache_key;
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache.lock);
	return 0;
}

1387 1.37 hannken /*
1388 1.37 hannken * Key change complete: remove old node and unlock new node.
1389 1.37 hannken */
1390 1.37 hannken void
1391 1.37 hannken vcache_rekey_exit(struct mount *mp, struct vnode *vp,
1392 1.37 hannken const void *old_key, size_t old_key_len,
1393 1.37 hannken const void *new_key, size_t new_key_len)
1394 1.37 hannken {
1395 1.37 hannken uint32_t old_hash, new_hash;
1396 1.37 hannken struct vcache_key old_vcache_key, new_vcache_key;
1397 1.57 hannken vnode_impl_t *old_node, *new_node;
1398 1.52 hannken struct vnode *tvp;
1399 1.37 hannken
1400 1.37 hannken old_vcache_key.vk_mount = mp;
1401 1.37 hannken old_vcache_key.vk_key = old_key;
1402 1.37 hannken old_vcache_key.vk_key_len = old_key_len;
1403 1.37 hannken old_hash = vcache_hash(&old_vcache_key);
1404 1.37 hannken
1405 1.37 hannken new_vcache_key.vk_mount = mp;
1406 1.37 hannken new_vcache_key.vk_key = new_key;
1407 1.37 hannken new_vcache_key.vk_key_len = new_key_len;
1408 1.37 hannken new_hash = vcache_hash(&new_vcache_key);
1409 1.37 hannken
1410 1.37 hannken mutex_enter(&vcache.lock);
1411 1.49 hannken
1412 1.49 hannken 	/* Look up the old and new nodes. */
1413 1.49 hannken old_node = vcache_hash_lookup(&old_vcache_key, old_hash);
1414 1.49 hannken KASSERT(old_node != NULL);
1415 1.57 hannken KASSERT(VIMPL_TO_VNODE(old_node) == vp);
1416 1.52 hannken mutex_enter(vp->v_interlock);
1417 1.57 hannken VSTATE_ASSERT(vp, VS_BLOCKED);
1418 1.52 hannken
1419 1.49 hannken new_node = vcache_hash_lookup(&new_vcache_key, new_hash);
1420 1.52 hannken KASSERT(new_node != NULL);
1421 1.57 hannken KASSERT(new_node->vi_key.vk_key_len == new_key_len);
1422 1.57 hannken tvp = VIMPL_TO_VNODE(new_node);
1423 1.52 hannken mutex_enter(tvp->v_interlock);
1424 1.57 hannken 	VSTATE_ASSERT(tvp, VS_LOADING);
1425 1.49 hannken
1426 1.49 hannken /* Rekey old node and put it onto its new hashlist. */
1427 1.57 hannken old_node->vi_key = new_vcache_key;
1428 1.49 hannken if (old_hash != new_hash) {
1429 1.49 hannken SLIST_REMOVE(&vcache.hashtab[old_hash & vcache.hashmask],
1430 1.57 hannken old_node, vnode_impl, vi_hash);
1431 1.49 hannken SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
1432 1.57 hannken old_node, vi_hash);
1433 1.49 hannken }
1434 1.57 hannken VSTATE_CHANGE(vp, VS_BLOCKED, VS_ACTIVE);
1435 1.52 hannken mutex_exit(vp->v_interlock);
1436 1.49 hannken
1437 1.49 hannken 	/* Remove the new node that served as a placeholder. */
1438 1.49 hannken SLIST_REMOVE(&vcache.hashtab[new_hash & vcache.hashmask],
1439 1.57 hannken new_node, vnode_impl, vi_hash);
1440 1.57 hannken VSTATE_CHANGE(tvp, VS_LOADING, VS_RECLAIMED);
1441 1.37 hannken mutex_exit(&vcache.lock);
1442 1.52 hannken vrelel(tvp, 0);
1443 1.37 hannken }
1444 1.37 hannken
1445 1.37 hannken /*
1446 1.54 hannken * Disassociate the underlying file system from a vnode.
1447 1.54 hannken *
1448 1.54 hannken * Must be called with vnode locked and will return unlocked.
1449 1.54 hannken * Must be called with the interlock held, and will return with it held.
1450 1.54 hannken */
1451 1.54 hannken static void
1452 1.54 hannken vcache_reclaim(vnode_t *vp)
1453 1.54 hannken {
1454 1.54 hannken lwp_t *l = curlwp;
1455 1.57 hannken vnode_impl_t *node = VNODE_TO_VIMPL(vp);
1456 1.55 hannken uint32_t hash;
1457 1.55 hannken uint8_t temp_buf[64], *temp_key;
1458 1.55 hannken size_t temp_key_len;
1459 1.54 hannken bool recycle, active;
1460 1.54 hannken int error;
1461 1.54 hannken
1462 1.54 hannken KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
1463 1.54 hannken VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
1464 1.54 hannken KASSERT(mutex_owned(vp->v_interlock));
1465 1.54 hannken KASSERT(vp->v_usecount != 0);
1466 1.54 hannken
1467 1.54 hannken active = (vp->v_usecount > 1);
1468 1.57 hannken temp_key_len = node->vi_key.vk_key_len;
1469 1.54 hannken /*
1470 1.54 hannken * Prevent the vnode from being recycled or brought into use
1471 1.54 hannken * while we clean it out.
1472 1.54 hannken */
1473 1.57 hannken VSTATE_CHANGE(vp, VS_ACTIVE, VS_RECLAIMING);
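	/*
	 * The vnode is going away: if its pages were counted as
	 * executable, move them back to the file page counter.
	 */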
1474 1.54 hannken if (vp->v_iflag & VI_EXECMAP) {
1475 1.54 hannken atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
1476 1.54 hannken atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
1477 1.54 hannken }
1478 1.54 hannken vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
1479 1.54 hannken mutex_exit(vp->v_interlock);
1480 1.54 hannken
1481 1.55 hannken 	/*
	 * Replace the vnode key with a temporary copy: the file system
	 * may free the storage backing the key during reclaim, but the
	 * node must stay resolvable on its hash chain until removed below.
	 */
1482 1.57 hannken 	if (temp_key_len > sizeof(temp_buf)) {
1483 1.55 hannken temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
1484 1.55 hannken } else {
1485 1.55 hannken temp_key = temp_buf;
1486 1.55 hannken }
1487 1.55 hannken mutex_enter(&vcache.lock);
1488 1.57 hannken memcpy(temp_key, node->vi_key.vk_key, temp_key_len);
1489 1.57 hannken node->vi_key.vk_key = temp_key;
1490 1.55 hannken mutex_exit(&vcache.lock);
1491 1.55 hannken
1492 1.54 hannken /*
1493 1.54 hannken * Clean out any cached data associated with the vnode.
1494 1.54 hannken * If purging an active vnode, it must be closed and
1495 1.60 hannken * deactivated before being reclaimed.
1496 1.54 hannken */
1497 1.54 hannken error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
1498 1.54 hannken if (error != 0) {
1499 1.54 hannken if (wapbl_vphaswapbl(vp))
1500 1.54 hannken WAPBL_DISCARD(wapbl_vptomp(vp));
1501 1.54 hannken error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
1502 1.54 hannken }
1503 1.54 hannken KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
1504 1.54 hannken KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
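	/*
	 * An active block or character device node: revoke the special
	 * node so any remaining device references are cut off.
	 */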
1505 1.54 hannken if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
1506 1.54 hannken spec_node_revoke(vp);
1507 1.54 hannken }
1508 1.54 hannken
1509 1.60 hannken /*
1510 1.60 hannken 	 * Disassociate the underlying file system from the vnode:
1511 1.60 hannken 	 * VOP_INACTIVE will unlock it, then VOP_RECLAIM detaches v_data.
1512 1.60 hannken */
1513 1.60 hannken VOP_INACTIVE(vp, &recycle);
1514 1.54 hannken if (VOP_RECLAIM(vp)) {
1515 1.54 hannken vnpanic(vp, "%s: cannot reclaim", __func__);
1516 1.54 hannken }
1517 1.54 hannken
1518 1.54 hannken KASSERT(vp->v_data == NULL);
1519 1.54 hannken KASSERT(vp->v_uobj.uo_npages == 0);
1520 1.54 hannken
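	/* Release the read-ahead context, if this regular file had one. */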
1521 1.54 hannken if (vp->v_type == VREG && vp->v_ractx != NULL) {
1522 1.54 hannken uvm_ra_freectx(vp->v_ractx);
1523 1.54 hannken vp->v_ractx = NULL;
1524 1.54 hannken }
1525 1.54 hannken
1526 1.54 hannken /* Purge name cache. */
1527 1.54 hannken cache_purge(vp);
1528 1.54 hannken
1529 1.54 hannken /* Move to dead mount. */
1530 1.54 hannken vp->v_vflag &= ~VV_ROOT;
1531 1.54 hannken atomic_inc_uint(&dead_rootmount->mnt_refcnt);
1532 1.54 hannken vfs_insmntque(vp, dead_rootmount);
1533 1.54 hannken
1534 1.55 hannken /* Remove from vnode cache. */
1535 1.57 hannken hash = vcache_hash(&node->vi_key);
1536 1.55 hannken mutex_enter(&vcache.lock);
1537 1.57 hannken KASSERT(node == vcache_hash_lookup(&node->vi_key, hash));
1538 1.55 hannken SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
1539 1.57 hannken node, vnode_impl, vi_hash);
1540 1.55 hannken mutex_exit(&vcache.lock);
1541 1.55 hannken if (temp_key != temp_buf)
1542 1.55 hannken kmem_free(temp_key, temp_key_len);
1543 1.55 hannken
1544 1.54 hannken /* Done with purge, notify sleepers of the grim news. */
1545 1.54 hannken mutex_enter(vp->v_interlock);
1546 1.54 hannken vp->v_op = dead_vnodeop_p;
1547 1.54 hannken vp->v_vflag |= VV_LOCKSWORK;
1548 1.57 hannken VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
1549 1.54 hannken vp->v_tag = VT_NON;
1550 1.54 hannken KNOTE(&vp->v_klist, NOTE_REVOKE);
1551 1.54 hannken
1552 1.54 hannken KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
1553 1.54 hannken }
1554 1.54 hannken
1555 1.54 hannken /*
1556 1.1 rmind  * Update the outstanding I/O count and wake up waiters once it drops to zero.
1557 1.1 rmind */
1558 1.1 rmind void
1559 1.1 rmind vwakeup(struct buf *bp)
1560 1.1 rmind {
1561 1.1 rmind vnode_t *vp;
1562 1.1 rmind
1563 1.1 rmind if ((vp = bp->b_vp) == NULL)
1564 1.1 rmind return;
1565 1.1 rmind
1566 1.9 rmind KASSERT(bp->b_objlock == vp->v_interlock);
1567 1.1 rmind KASSERT(mutex_owned(bp->b_objlock));
1568 1.1 rmind
1569 1.1 rmind if (--vp->v_numoutput < 0)
1570 1.11 christos vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
1571 1.1 rmind if (vp->v_numoutput == 0)
1572 1.1 rmind cv_broadcast(&vp->v_cv);
1573 1.1 rmind }
1574 1.1 rmind
1575 1.1 rmind /*
1576 1.35 hannken * Test a vnode for being or becoming dead. Returns one of:
1577 1.35 hannken * EBUSY: vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
1578 1.35 hannken * ENOENT: vnode is dead.
1579 1.35 hannken * 0: otherwise.
1580 1.35 hannken *
1581 1.35 hannken  * Whenever this function returns a non-zero value, all future
1582 1.35 hannken  * calls will also return a non-zero value.
1583 1.35 hannken */
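/*
 * Sketch of a typical caller (illustrative only, not from this file):
 * with the interlock held, check that the vnode is still alive before
 * relying on it:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		return error;
 */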
1584 1.35 hannken int
1585 1.35 hannken vdead_check(struct vnode *vp, int flags)
1586 1.35 hannken {
1587 1.35 hannken
1588 1.35 hannken KASSERT(mutex_owned(vp->v_interlock));
1589 1.35 hannken
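	/* Unless asked not to wait, let a pending state change settle. */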
1590 1.52 hannken if (! ISSET(flags, VDEAD_NOWAIT))
1591 1.52 hannken VSTATE_WAIT_STABLE(vp);
1592 1.1 rmind
1593 1.57 hannken if (VSTATE_GET(vp) == VS_RECLAIMING) {
1594 1.52 hannken KASSERT(ISSET(flags, VDEAD_NOWAIT));
1595 1.52 hannken return EBUSY;
1596 1.57 hannken } else if (VSTATE_GET(vp) == VS_RECLAIMED) {
1597 1.52 hannken return ENOENT;
1598 1.52 hannken }
1599 1.1 rmind
1600 1.52 hannken return 0;
1601 1.1 rmind }
1602 1.1 rmind
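/*
 * Reduce the number of cached vnodes to "target" by cleaning vnodes
 * from the free list, then reinitialize the vnode cache (vcache_reinit()).
 */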
1603 1.1 rmind int
1604 1.3 rmind vfs_drainvnodes(long target)
1605 1.1 rmind {
1606 1.12 hannken int error;
1607 1.12 hannken
1608 1.12 hannken mutex_enter(&vnode_free_list_lock);
1609 1.1 rmind
1610 1.1 rmind while (numvnodes > target) {
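		/* cleanvnode() releases vnode_free_list_lock. */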
1611 1.12 hannken error = cleanvnode();
1612 1.12 hannken if (error != 0)
1613 1.12 hannken return error;
1614 1.1 rmind mutex_enter(&vnode_free_list_lock);
1615 1.1 rmind }
1616 1.12 hannken
1617 1.12 hannken mutex_exit(&vnode_free_list_lock);
1618 1.12 hannken
1619 1.36 hannken vcache_reinit();
1620 1.36 hannken
1621 1.1 rmind return 0;
1622 1.1 rmind }
1623 1.1 rmind
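/*
 * Print diagnostics for the vnode, then panic.
 */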
1624 1.1 rmind void
1625 1.11 christos vnpanic(vnode_t *vp, const char *fmt, ...)
1626 1.1 rmind {
1627 1.11 christos va_list ap;
1628 1.11 christos
1629 1.1 rmind #ifdef DIAGNOSTIC
1630 1.1 rmind vprint(NULL, vp);
1631 1.1 rmind #endif
1632 1.11 christos va_start(ap, fmt);
1633 1.11 christos vpanic(fmt, ap);
1634 1.11 christos va_end(ap);
1635 1.1 rmind }
1636