vfs_vnode.c revision 1.46

1 1.46 hannken /* $NetBSD: vfs_vnode.c,v 1.46 2015/11/12 11:35:42 hannken Exp $ */
2 1.1 rmind
3 1.1 rmind /*-
4 1.2 rmind * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
5 1.1 rmind * All rights reserved.
6 1.1 rmind *
7 1.1 rmind * This code is derived from software contributed to The NetBSD Foundation
8 1.1 rmind * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
9 1.1 rmind * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
10 1.1 rmind *
11 1.1 rmind * Redistribution and use in source and binary forms, with or without
12 1.1 rmind * modification, are permitted provided that the following conditions
13 1.1 rmind * are met:
14 1.1 rmind * 1. Redistributions of source code must retain the above copyright
15 1.1 rmind * notice, this list of conditions and the following disclaimer.
16 1.1 rmind * 2. Redistributions in binary form must reproduce the above copyright
17 1.1 rmind * notice, this list of conditions and the following disclaimer in the
18 1.1 rmind * documentation and/or other materials provided with the distribution.
19 1.1 rmind *
20 1.1 rmind * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
21 1.1 rmind * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
22 1.1 rmind * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
23 1.1 rmind * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
24 1.1 rmind * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 1.1 rmind * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 1.1 rmind * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 1.1 rmind * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 1.1 rmind * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 1.1 rmind * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
30 1.1 rmind * POSSIBILITY OF SUCH DAMAGE.
31 1.1 rmind */
32 1.1 rmind
33 1.1 rmind /*
34 1.1 rmind * Copyright (c) 1989, 1993
35 1.1 rmind * The Regents of the University of California. All rights reserved.
36 1.1 rmind * (c) UNIX System Laboratories, Inc.
37 1.1 rmind * All or some portions of this file are derived from material licensed
38 1.1 rmind * to the University of California by American Telephone and Telegraph
39 1.1 rmind * Co. or Unix System Laboratories, Inc. and are reproduced herein with
40 1.1 rmind * the permission of UNIX System Laboratories, Inc.
41 1.1 rmind *
42 1.1 rmind * Redistribution and use in source and binary forms, with or without
43 1.1 rmind * modification, are permitted provided that the following conditions
44 1.1 rmind * are met:
45 1.1 rmind * 1. Redistributions of source code must retain the above copyright
46 1.1 rmind * notice, this list of conditions and the following disclaimer.
47 1.1 rmind * 2. Redistributions in binary form must reproduce the above copyright
48 1.1 rmind * notice, this list of conditions and the following disclaimer in the
49 1.1 rmind * documentation and/or other materials provided with the distribution.
50 1.1 rmind * 3. Neither the name of the University nor the names of its contributors
51 1.1 rmind * may be used to endorse or promote products derived from this software
52 1.1 rmind * without specific prior written permission.
53 1.1 rmind *
54 1.1 rmind * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 1.1 rmind * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 1.1 rmind * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 1.1 rmind * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 1.1 rmind * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 1.1 rmind * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 1.1 rmind * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 1.1 rmind * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 1.1 rmind * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 1.1 rmind * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 1.1 rmind * SUCH DAMAGE.
65 1.1 rmind *
66 1.1 rmind * @(#)vfs_subr.c 8.13 (Berkeley) 4/18/94
67 1.1 rmind */
68 1.1 rmind
69 1.1 rmind /*
70 1.8 rmind * The vnode cache subsystem.
71 1.1 rmind *
72 1.8 rmind * Life-cycle
73 1.1 rmind *
74 1.8 rmind * Normally, there are two points where new vnodes are created:
75 1.8 rmind * VOP_CREATE(9) and VOP_LOOKUP(9). The life-cycle of a vnode
76 1.8 rmind * starts in one of the following ways:
77 1.8 rmind *
78 1.45 hannken * - Allocation, via vcache_get(9) or vcache_new(9).
79 1.8 rmind * - Reclamation of an inactive vnode, via vget(9).
80 1.8 rmind *
81 1.16 rmind * Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
82 1.16 rmind * was another, traditional way. Currently, only the draining thread
83 1.16 rmind * recycles the vnodes. This behaviour might be revisited.
84 1.16 rmind *
85 1.8 rmind * The life-cycle ends when the last reference is dropped, usually
86 1.8 rmind * in VOP_REMOVE(9). In that case, VOP_INACTIVE(9) is called to inform
87 1.8 rmind * the file system that the vnode is inactive. Via this call, the file
88 1.16 rmind * system indicates whether the vnode can be recycled (usually, it checks
89 1.16 rmind * its own references, e.g. the link count, or whether the file was removed).
90 1.8 rmind *
91 1.8 rmind * Depending on indication, vnode can be put into a free list (cache),
92 1.8 rmind * or cleaned via vclean(9), which calls VOP_RECLAIM(9) to disassociate
93 1.8 rmind * underlying file system from the vnode, and finally destroyed.
94 1.8 rmind *
95 1.8 rmind * Reference counting
96 1.8 rmind *
97 1.8 rmind * A vnode is considered active if its reference count (vnode_t::v_usecount)
98 1.8 rmind * is non-zero. It is maintained using the vref(9), vrele(9) and
99 1.8 rmind * vput(9) routines. Common holders of references are e.g.
100 1.8 rmind * open files, current working directories, mount points, etc.
101 1.8 rmind *
102 1.8 rmind * Note on v_usecount and its locking
103 1.8 rmind *
104 1.8 rmind * At nearly all points where it is known that v_usecount could be zero,
105 1.8 rmind * vnode_t::v_interlock will be held. To change v_usecount away
106 1.8 rmind * from zero, the interlock must be held. To change from a non-zero
107 1.8 rmind * value to zero, again the interlock must be held.
108 1.8 rmind *
109 1.24 hannken * Changing the usecount from a non-zero value to a non-zero value can
110 1.24 hannken * safely be done using atomic operations, without the interlock held.
111 1.8 rmind *
112 1.8 rmind * Note: if VI_CLEAN is set, vnode_t::v_interlock will be released while
113 1.8 rmind * mntvnode_lock is still held.
114 1.20 dholland *
115 1.20 dholland * See PR 41374.
116 1.1 rmind */
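/*
 * Illustrative sketch (not part of this file, names are hypothetical):
 * a typical consumer obtains a referenced vnode from the cache, takes
 * the vnode lock for the duration of the operation, and drops both with
 * vput(9).  Using an inode number as the cache key is a per-file-system
 * convention, not something this subsystem dictates.
 *
 *	struct vnode *vp;
 *	ino_t ino = ...;
 *	int error;
 *
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *	if (error)
 *		return error;
 *	error = vn_lock(vp, LK_EXCLUSIVE);
 *	if (error) {
 *		vrele(vp);
 *		return error;
 *	}
 *	... operate on the locked, referenced vnode ...
 *	vput(vp);	// unlock and drop the reference
 */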
117 1.1 rmind
118 1.1 rmind #include <sys/cdefs.h>
119 1.46 hannken __KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.46 2015/11/12 11:35:42 hannken Exp $");
120 1.23 hannken
121 1.23 hannken #define _VFS_VNODE_PRIVATE
122 1.1 rmind
123 1.1 rmind #include <sys/param.h>
124 1.1 rmind #include <sys/kernel.h>
125 1.1 rmind
126 1.1 rmind #include <sys/atomic.h>
127 1.1 rmind #include <sys/buf.h>
128 1.1 rmind #include <sys/conf.h>
129 1.1 rmind #include <sys/device.h>
130 1.36 hannken #include <sys/hash.h>
131 1.1 rmind #include <sys/kauth.h>
132 1.1 rmind #include <sys/kmem.h>
133 1.1 rmind #include <sys/kthread.h>
134 1.1 rmind #include <sys/module.h>
135 1.1 rmind #include <sys/mount.h>
136 1.1 rmind #include <sys/namei.h>
137 1.1 rmind #include <sys/syscallargs.h>
138 1.1 rmind #include <sys/sysctl.h>
139 1.1 rmind #include <sys/systm.h>
140 1.1 rmind #include <sys/vnode.h>
141 1.1 rmind #include <sys/wapbl.h>
142 1.24 hannken #include <sys/fstrans.h>
143 1.1 rmind
144 1.1 rmind #include <uvm/uvm.h>
145 1.1 rmind #include <uvm/uvm_readahead.h>
146 1.1 rmind
147 1.23 hannken /* Flags to vrelel. */
148 1.23 hannken #define VRELEL_ASYNC_RELE 0x0001 /* Always defer to vrele thread. */
149 1.29 christos #define VRELEL_CHANGING_SET 0x0002 /* VI_CHANGING set by caller. */
150 1.23 hannken
151 1.36 hannken struct vcache_key {
152 1.36 hannken struct mount *vk_mount;
153 1.36 hannken const void *vk_key;
154 1.36 hannken size_t vk_key_len;
155 1.36 hannken };
156 1.36 hannken struct vcache_node {
157 1.36 hannken SLIST_ENTRY(vcache_node) vn_hash;
158 1.36 hannken struct vnode *vn_vnode;
159 1.36 hannken struct vcache_key vn_key;
160 1.36 hannken };
161 1.36 hannken
162 1.6 rmind u_int numvnodes __cacheline_aligned;
163 1.1 rmind
164 1.6 rmind static pool_cache_t vnode_cache __read_mostly;
165 1.16 rmind
166 1.16 rmind /*
167 1.16 rmind * There are two free lists: one is for vnodes which have no buffer/page
168 1.16 rmind * references and one for those which do (i.e. v_holdcnt is non-zero).
169 1.16 rmind * The vnode recycling mechanism first attempts to recycle from the former list.
170 1.16 rmind */
171 1.6 rmind static kmutex_t vnode_free_list_lock __cacheline_aligned;
172 1.6 rmind static vnodelst_t vnode_free_list __cacheline_aligned;
173 1.6 rmind static vnodelst_t vnode_hold_list __cacheline_aligned;
174 1.16 rmind static kcondvar_t vdrain_cv __cacheline_aligned;
175 1.16 rmind
176 1.6 rmind static vnodelst_t vrele_list __cacheline_aligned;
177 1.6 rmind static kmutex_t vrele_lock __cacheline_aligned;
178 1.6 rmind static kcondvar_t vrele_cv __cacheline_aligned;
179 1.6 rmind static lwp_t * vrele_lwp __cacheline_aligned;
180 1.6 rmind static int vrele_pending __cacheline_aligned;
181 1.6 rmind static int vrele_gen __cacheline_aligned;
182 1.1 rmind
183 1.38 matt SLIST_HEAD(hashhead, vcache_node);
184 1.36 hannken static struct {
185 1.36 hannken kmutex_t lock;
186 1.36 hannken u_long hashmask;
187 1.38 matt struct hashhead *hashtab;
188 1.36 hannken pool_cache_t pool;
189 1.36 hannken } vcache __cacheline_aligned;
190 1.36 hannken
191 1.12 hannken static int cleanvnode(void);
192 1.36 hannken static void vcache_init(void);
193 1.36 hannken static void vcache_reinit(void);
194 1.25 hannken static void vclean(vnode_t *);
195 1.23 hannken static void vrelel(vnode_t *, int);
196 1.12 hannken static void vdrain_thread(void *);
197 1.1 rmind static void vrele_thread(void *);
198 1.11 christos static void vnpanic(vnode_t *, const char *, ...)
199 1.18 christos __printflike(2, 3);
200 1.35 hannken static void vwait(vnode_t *, int);
201 1.1 rmind
202 1.1 rmind /* Routines having to do with the management of the vnode table. */
203 1.44 hannken extern struct mount *dead_rootmount;
204 1.1 rmind extern int (**dead_vnodeop_p)(void *);
205 1.31 hannken extern struct vfsops dead_vfsops;
206 1.1 rmind
207 1.1 rmind void
208 1.1 rmind vfs_vnode_sysinit(void)
209 1.1 rmind {
210 1.22 martin int error __diagused;
211 1.1 rmind
212 1.1 rmind vnode_cache = pool_cache_init(sizeof(vnode_t), 0, 0, 0, "vnodepl",
213 1.1 rmind NULL, IPL_NONE, NULL, NULL, NULL);
214 1.1 rmind KASSERT(vnode_cache != NULL);
215 1.1 rmind
216 1.44 hannken dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
217 1.44 hannken KASSERT(dead_rootmount != NULL);
218 1.44 hannken dead_rootmount->mnt_iflag = IMNT_MPSAFE;
219 1.31 hannken
220 1.1 rmind mutex_init(&vnode_free_list_lock, MUTEX_DEFAULT, IPL_NONE);
221 1.1 rmind TAILQ_INIT(&vnode_free_list);
222 1.1 rmind TAILQ_INIT(&vnode_hold_list);
223 1.1 rmind TAILQ_INIT(&vrele_list);
224 1.1 rmind
225 1.36 hannken vcache_init();
226 1.36 hannken
227 1.1 rmind mutex_init(&vrele_lock, MUTEX_DEFAULT, IPL_NONE);
228 1.12 hannken cv_init(&vdrain_cv, "vdrain");
229 1.1 rmind cv_init(&vrele_cv, "vrele");
230 1.12 hannken error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
231 1.12 hannken NULL, NULL, "vdrain");
232 1.12 hannken KASSERT(error == 0);
233 1.1 rmind error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vrele_thread,
234 1.1 rmind NULL, &vrele_lwp, "vrele");
235 1.1 rmind KASSERT(error == 0);
236 1.1 rmind }
237 1.1 rmind
238 1.1 rmind /*
239 1.1 rmind * Allocate a new, uninitialized vnode. If 'mp' is non-NULL, this is a
240 1.13 hannken * marker vnode.
241 1.1 rmind */
242 1.1 rmind vnode_t *
243 1.1 rmind vnalloc(struct mount *mp)
244 1.1 rmind {
245 1.1 rmind vnode_t *vp;
246 1.1 rmind
247 1.13 hannken vp = pool_cache_get(vnode_cache, PR_WAITOK);
248 1.13 hannken KASSERT(vp != NULL);
249 1.1 rmind
250 1.1 rmind memset(vp, 0, sizeof(*vp));
251 1.9 rmind uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
252 1.1 rmind cv_init(&vp->v_cv, "vnode");
253 1.1 rmind /*
254 1.1 rmind * Done by memset() above.
255 1.1 rmind * LIST_INIT(&vp->v_nclist);
256 1.1 rmind * LIST_INIT(&vp->v_dnclist);
257 1.1 rmind */
258 1.1 rmind
259 1.1 rmind if (mp != NULL) {
260 1.1 rmind vp->v_mount = mp;
261 1.1 rmind vp->v_type = VBAD;
262 1.1 rmind vp->v_iflag = VI_MARKER;
263 1.36 hannken return vp;
264 1.1 rmind }
265 1.1 rmind
266 1.36 hannken mutex_enter(&vnode_free_list_lock);
267 1.36 hannken numvnodes++;
268 1.36 hannken if (numvnodes > desiredvnodes + desiredvnodes / 10)
269 1.36 hannken cv_signal(&vdrain_cv);
270 1.36 hannken mutex_exit(&vnode_free_list_lock);
271 1.36 hannken
272 1.36 hannken rw_init(&vp->v_lock);
273 1.36 hannken vp->v_usecount = 1;
274 1.36 hannken vp->v_type = VNON;
275 1.36 hannken vp->v_size = vp->v_writesize = VSIZENOTSET;
276 1.36 hannken
277 1.1 rmind return vp;
278 1.1 rmind }
279 1.1 rmind
280 1.1 rmind /*
281 1.1 rmind * Free an unused, unreferenced vnode.
282 1.1 rmind */
283 1.1 rmind void
284 1.1 rmind vnfree(vnode_t *vp)
285 1.1 rmind {
286 1.1 rmind
287 1.1 rmind KASSERT(vp->v_usecount == 0);
288 1.1 rmind
289 1.1 rmind if ((vp->v_iflag & VI_MARKER) == 0) {
290 1.1 rmind rw_destroy(&vp->v_lock);
291 1.1 rmind mutex_enter(&vnode_free_list_lock);
292 1.1 rmind numvnodes--;
293 1.1 rmind mutex_exit(&vnode_free_list_lock);
294 1.1 rmind }
295 1.1 rmind
296 1.9 rmind uvm_obj_destroy(&vp->v_uobj, true);
297 1.1 rmind cv_destroy(&vp->v_cv);
298 1.1 rmind pool_cache_put(vnode_cache, vp);
299 1.1 rmind }
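/*
 * Illustrative sketch (not part of this file): a marker vnode allocated
 * with a non-NULL mount is only a placeholder used to keep a stable
 * position while walking a mount's vnode list; it is never used as a
 * real vnode and is freed with vnfree(9).  The iteration itself is
 * schematic here.
 *
 *	vnode_t *mvp;
 *
 *	mvp = vnalloc(mp);	// VI_MARKER is set for us
 *	... insert mvp into mp's vnode list and walk relative to it ...
 *	vnfree(mvp);
 */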
300 1.1 rmind
301 1.1 rmind /*
302 1.12 hannken * cleanvnode: grab a vnode from freelist, clean and free it.
303 1.5 rmind *
304 1.5 rmind * => Releases vnode_free_list_lock.
305 1.1 rmind */
306 1.12 hannken static int
307 1.12 hannken cleanvnode(void)
308 1.1 rmind {
309 1.1 rmind vnode_t *vp;
310 1.1 rmind vnodelst_t *listhd;
311 1.24 hannken struct mount *mp;
312 1.1 rmind
313 1.1 rmind KASSERT(mutex_owned(&vnode_free_list_lock));
314 1.24 hannken
315 1.1 rmind listhd = &vnode_free_list;
316 1.1 rmind try_nextlist:
317 1.1 rmind TAILQ_FOREACH(vp, listhd, v_freelist) {
318 1.1 rmind /*
319 1.1 rmind * It's safe to test v_usecount and v_iflag
320 1.1 rmind * without holding the interlock here, since
321 1.1 rmind * these vnodes should never appear on the
322 1.1 rmind * lists.
323 1.1 rmind */
324 1.5 rmind KASSERT(vp->v_usecount == 0);
325 1.5 rmind KASSERT((vp->v_iflag & VI_CLEAN) == 0);
326 1.5 rmind KASSERT(vp->v_freelisthd == listhd);
327 1.5 rmind
328 1.46 hannken if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) != 0)
329 1.1 rmind continue;
330 1.46 hannken if (!mutex_tryenter(vp->v_interlock)) {
331 1.46 hannken VOP_UNLOCK(vp);
332 1.24 hannken continue;
333 1.24 hannken }
334 1.46 hannken KASSERT((vp->v_iflag & VI_XLOCK) == 0);
335 1.24 hannken mp = vp->v_mount;
336 1.24 hannken if (fstrans_start_nowait(mp, FSTRANS_SHARED) != 0) {
337 1.24 hannken mutex_exit(vp->v_interlock);
338 1.46 hannken VOP_UNLOCK(vp);
339 1.24 hannken continue;
340 1.24 hannken }
341 1.24 hannken break;
342 1.1 rmind }
343 1.1 rmind
344 1.1 rmind if (vp == NULL) {
345 1.1 rmind if (listhd == &vnode_free_list) {
346 1.1 rmind listhd = &vnode_hold_list;
347 1.1 rmind goto try_nextlist;
348 1.1 rmind }
349 1.1 rmind mutex_exit(&vnode_free_list_lock);
350 1.12 hannken return EBUSY;
351 1.1 rmind }
352 1.1 rmind
353 1.1 rmind /* Remove it from the freelist. */
354 1.1 rmind TAILQ_REMOVE(listhd, vp, v_freelist);
355 1.1 rmind vp->v_freelisthd = NULL;
356 1.1 rmind mutex_exit(&vnode_free_list_lock);
357 1.1 rmind
358 1.1 rmind KASSERT(vp->v_usecount == 0);
359 1.1 rmind
360 1.1 rmind /*
361 1.1 rmind * The vnode is still associated with a file system, so we must
362 1.12 hannken * clean it out before freeing it. We need to add a reference
363 1.24 hannken * before doing this.
364 1.1 rmind */
365 1.24 hannken vp->v_usecount = 1;
366 1.29 christos KASSERT((vp->v_iflag & VI_CHANGING) == 0);
367 1.29 christos vp->v_iflag |= VI_CHANGING;
368 1.25 hannken vclean(vp);
369 1.29 christos vrelel(vp, VRELEL_CHANGING_SET);
370 1.24 hannken fstrans_done(mp);
371 1.12 hannken
372 1.12 hannken return 0;
373 1.1 rmind }
374 1.1 rmind
375 1.1 rmind /*
376 1.12 hannken * Helper thread to keep the number of vnodes below desiredvnodes.
377 1.12 hannken */
378 1.12 hannken static void
379 1.12 hannken vdrain_thread(void *cookie)
380 1.12 hannken {
381 1.12 hannken int error;
382 1.12 hannken
383 1.12 hannken mutex_enter(&vnode_free_list_lock);
384 1.12 hannken
385 1.12 hannken for (;;) {
386 1.12 hannken cv_timedwait(&vdrain_cv, &vnode_free_list_lock, hz);
387 1.12 hannken while (numvnodes > desiredvnodes) {
388 1.12 hannken error = cleanvnode();
389 1.12 hannken if (error)
390 1.12 hannken kpause("vndsbusy", false, hz, NULL);
391 1.12 hannken mutex_enter(&vnode_free_list_lock);
392 1.12 hannken if (error)
393 1.12 hannken break;
394 1.12 hannken }
395 1.12 hannken }
396 1.12 hannken }
397 1.12 hannken
398 1.12 hannken /*
399 1.1 rmind * Remove a vnode from its freelist.
400 1.1 rmind */
401 1.1 rmind void
402 1.1 rmind vremfree(vnode_t *vp)
403 1.1 rmind {
404 1.1 rmind
405 1.9 rmind KASSERT(mutex_owned(vp->v_interlock));
406 1.1 rmind KASSERT(vp->v_usecount == 0);
407 1.1 rmind
408 1.1 rmind /*
409 1.1 rmind * Note that the reference count must not change until
410 1.1 rmind * the vnode is removed.
411 1.1 rmind */
412 1.1 rmind mutex_enter(&vnode_free_list_lock);
413 1.1 rmind if (vp->v_holdcnt > 0) {
414 1.1 rmind KASSERT(vp->v_freelisthd == &vnode_hold_list);
415 1.1 rmind } else {
416 1.1 rmind KASSERT(vp->v_freelisthd == &vnode_free_list);
417 1.1 rmind }
418 1.1 rmind TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
419 1.1 rmind vp->v_freelisthd = NULL;
420 1.1 rmind mutex_exit(&vnode_free_list_lock);
421 1.1 rmind }
422 1.1 rmind
423 1.1 rmind /*
424 1.4 rmind * vget: get a particular vnode from the free list and increment its
425 1.4 rmind * reference count. The vnode is not locked by this routine.
426 1.4 rmind *
427 1.4 rmind * => Should be called with v_interlock held.
428 1.4 rmind *
429 1.29 christos * If VI_CHANGING is set, the vnode may be eliminated in vgone()/vclean().
430 1.4 rmind * In that case, we cannot grab the vnode, so the process is awakened when
431 1.4 rmind * the transition is completed, and an error is returned to indicate that the
432 1.29 christos * vnode is no longer usable.
433 1.1 rmind */
434 1.1 rmind int
435 1.41 riastrad vget(vnode_t *vp, int flags, bool waitok)
436 1.1 rmind {
437 1.1 rmind int error = 0;
438 1.1 rmind
439 1.1 rmind KASSERT((vp->v_iflag & VI_MARKER) == 0);
440 1.9 rmind KASSERT(mutex_owned(vp->v_interlock));
441 1.41 riastrad KASSERT((flags & ~LK_NOWAIT) == 0);
442 1.41 riastrad KASSERT(waitok == ((flags & LK_NOWAIT) == 0));
443 1.1 rmind
444 1.1 rmind /*
445 1.1 rmind * Before adding a reference, we must remove the vnode
446 1.1 rmind * from its freelist.
447 1.1 rmind */
448 1.1 rmind if (vp->v_usecount == 0) {
449 1.1 rmind vremfree(vp);
450 1.1 rmind vp->v_usecount = 1;
451 1.1 rmind } else {
452 1.1 rmind atomic_inc_uint(&vp->v_usecount);
453 1.1 rmind }
454 1.1 rmind
455 1.1 rmind /*
456 1.29 christos * If the vnode is in the process of changing state we wait
457 1.29 christos * for the change to complete and take care not to return
458 1.29 christos * a clean vnode.
459 1.1 rmind */
460 1.29 christos if ((vp->v_iflag & VI_CHANGING) != 0) {
461 1.1 rmind if ((flags & LK_NOWAIT) != 0) {
462 1.1 rmind vrelel(vp, 0);
463 1.1 rmind return EBUSY;
464 1.1 rmind }
465 1.29 christos vwait(vp, VI_CHANGING);
466 1.17 hannken if ((vp->v_iflag & VI_CLEAN) != 0) {
467 1.17 hannken vrelel(vp, 0);
468 1.17 hannken return ENOENT;
469 1.17 hannken }
470 1.17 hannken }
471 1.17 hannken
472 1.1 rmind /*
473 1.41 riastrad * Ok, we got it in good shape.
474 1.1 rmind */
475 1.1 rmind KASSERT((vp->v_iflag & VI_CLEAN) == 0);
476 1.9 rmind mutex_exit(vp->v_interlock);
477 1.1 rmind return error;
478 1.1 rmind }
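/*
 * Illustrative sketch (not part of this file): vget(9) expects the caller
 * to hold v_interlock and to pass "waitok" consistent with LK_NOWAIT, as
 * asserted above.  A caller willing to wait looks like:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vget(vp, 0, true);	// may sleep on VI_CHANGING
 *	if (error != 0)
 *		... vnode was reclaimed meanwhile, redo the lookup ...
 *
 * v_interlock is released before vget() returns, whether or not it
 * succeeds.
 */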
479 1.1 rmind
480 1.1 rmind /*
481 1.4 rmind * vput: unlock and release the reference.
482 1.1 rmind */
483 1.1 rmind void
484 1.1 rmind vput(vnode_t *vp)
485 1.1 rmind {
486 1.1 rmind
487 1.1 rmind KASSERT((vp->v_iflag & VI_MARKER) == 0);
488 1.1 rmind
489 1.1 rmind VOP_UNLOCK(vp);
490 1.1 rmind vrele(vp);
491 1.1 rmind }
492 1.1 rmind
493 1.1 rmind /*
494 1.1 rmind * Try to drop reference on a vnode. Abort if we are releasing the
495 1.1 rmind * last reference. Note: this _must_ succeed if not the last reference.
496 1.1 rmind */
497 1.1 rmind static inline bool
498 1.1 rmind vtryrele(vnode_t *vp)
499 1.1 rmind {
500 1.1 rmind u_int use, next;
501 1.1 rmind
502 1.1 rmind for (use = vp->v_usecount;; use = next) {
503 1.1 rmind if (use == 1) {
504 1.1 rmind return false;
505 1.1 rmind }
506 1.24 hannken KASSERT(use > 1);
507 1.1 rmind next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
508 1.1 rmind if (__predict_true(next == use)) {
509 1.1 rmind return true;
510 1.1 rmind }
511 1.1 rmind }
512 1.1 rmind }
513 1.1 rmind
514 1.1 rmind /*
515 1.1 rmind * Vnode release. If reference count drops to zero, call inactive
516 1.1 rmind * routine and either return to freelist or free to the pool.
517 1.1 rmind */
518 1.23 hannken static void
519 1.1 rmind vrelel(vnode_t *vp, int flags)
520 1.1 rmind {
521 1.1 rmind bool recycle, defer;
522 1.1 rmind int error;
523 1.1 rmind
524 1.9 rmind KASSERT(mutex_owned(vp->v_interlock));
525 1.1 rmind KASSERT((vp->v_iflag & VI_MARKER) == 0);
526 1.1 rmind KASSERT(vp->v_freelisthd == NULL);
527 1.1 rmind
528 1.1 rmind if (__predict_false(vp->v_op == dead_vnodeop_p &&
529 1.1 rmind (vp->v_iflag & (VI_CLEAN|VI_XLOCK)) == 0)) {
530 1.11 christos vnpanic(vp, "dead but not clean");
531 1.1 rmind }
532 1.1 rmind
533 1.1 rmind /*
534 1.1 rmind * If not the last reference, just drop the reference count
535 1.1 rmind * and unlock.
536 1.1 rmind */
537 1.1 rmind if (vtryrele(vp)) {
538 1.29 christos if ((flags & VRELEL_CHANGING_SET) != 0) {
539 1.29 christos KASSERT((vp->v_iflag & VI_CHANGING) != 0);
540 1.29 christos vp->v_iflag &= ~VI_CHANGING;
541 1.29 christos cv_broadcast(&vp->v_cv);
542 1.29 christos }
543 1.9 rmind mutex_exit(vp->v_interlock);
544 1.1 rmind return;
545 1.1 rmind }
546 1.1 rmind if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
547 1.11 christos vnpanic(vp, "%s: bad ref count", __func__);
548 1.1 rmind }
549 1.1 rmind
550 1.1 rmind KASSERT((vp->v_iflag & VI_XLOCK) == 0);
551 1.1 rmind
552 1.15 hannken #ifdef DIAGNOSTIC
553 1.15 hannken if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
554 1.15 hannken vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
555 1.15 hannken vprint("vrelel: missing VOP_CLOSE()", vp);
556 1.15 hannken }
557 1.15 hannken #endif
558 1.15 hannken
559 1.1 rmind /*
560 1.1 rmind * If not clean, deactivate the vnode, but preserve
561 1.1 rmind * our reference across the call to VOP_INACTIVE().
562 1.1 rmind */
563 1.1 rmind if ((vp->v_iflag & VI_CLEAN) == 0) {
564 1.1 rmind recycle = false;
565 1.1 rmind
566 1.1 rmind /*
567 1.1 rmind * XXX This ugly block can be largely eliminated if
568 1.1 rmind * locking is pushed down into the file systems.
569 1.1 rmind *
570 1.1 rmind * Defer vnode release to vrele_thread if caller
571 1.30 hannken * requests it explicitly or is the pagedaemon.
572 1.1 rmind */
573 1.1 rmind if ((curlwp == uvm.pagedaemon_lwp) ||
574 1.1 rmind (flags & VRELEL_ASYNC_RELE) != 0) {
575 1.1 rmind defer = true;
576 1.1 rmind } else if (curlwp == vrele_lwp) {
577 1.17 hannken /*
578 1.29 christos * We have to try harder.
579 1.17 hannken */
580 1.9 rmind mutex_exit(vp->v_interlock);
581 1.32 hannken error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
582 1.30 hannken KASSERT(error == 0);
583 1.17 hannken mutex_enter(vp->v_interlock);
584 1.1 rmind defer = false;
585 1.4 rmind } else {
586 1.1 rmind /* If we can't acquire the lock, then defer. */
587 1.32 hannken mutex_exit(vp->v_interlock);
588 1.32 hannken error = vn_lock(vp,
589 1.32 hannken LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);
590 1.30 hannken defer = (error != 0);
591 1.32 hannken mutex_enter(vp->v_interlock);
592 1.1 rmind }
593 1.1 rmind
594 1.30 hannken KASSERT(mutex_owned(vp->v_interlock));
595 1.30 hannken KASSERT(! (curlwp == vrele_lwp && defer));
596 1.30 hannken
597 1.1 rmind if (defer) {
598 1.1 rmind /*
599 1.1 rmind * Defer reclaim to the kthread; it's not safe to
600 1.1 rmind * clean it here. We donate it our last reference.
601 1.1 rmind */
602 1.29 christos if ((flags & VRELEL_CHANGING_SET) != 0) {
603 1.29 christos KASSERT((vp->v_iflag & VI_CHANGING) != 0);
604 1.29 christos vp->v_iflag &= ~VI_CHANGING;
605 1.29 christos cv_broadcast(&vp->v_cv);
606 1.29 christos }
607 1.1 rmind mutex_enter(&vrele_lock);
608 1.1 rmind TAILQ_INSERT_TAIL(&vrele_list, vp, v_freelist);
609 1.1 rmind if (++vrele_pending > (desiredvnodes >> 8))
610 1.1 rmind cv_signal(&vrele_cv);
611 1.1 rmind mutex_exit(&vrele_lock);
612 1.9 rmind mutex_exit(vp->v_interlock);
613 1.1 rmind return;
614 1.1 rmind }
615 1.1 rmind
616 1.32 hannken /*
617 1.32 hannken * If the node got another reference while we
618 1.32 hannken * released the interlock, don't try to inactivate it yet.
619 1.32 hannken */
620 1.32 hannken if (__predict_false(vtryrele(vp))) {
621 1.32 hannken VOP_UNLOCK(vp);
622 1.32 hannken if ((flags & VRELEL_CHANGING_SET) != 0) {
623 1.32 hannken KASSERT((vp->v_iflag & VI_CHANGING) != 0);
624 1.32 hannken vp->v_iflag &= ~VI_CHANGING;
625 1.32 hannken cv_broadcast(&vp->v_cv);
626 1.32 hannken }
627 1.32 hannken mutex_exit(vp->v_interlock);
628 1.32 hannken return;
629 1.32 hannken }
630 1.32 hannken
631 1.29 christos if ((flags & VRELEL_CHANGING_SET) == 0) {
632 1.29 christos KASSERT((vp->v_iflag & VI_CHANGING) == 0);
633 1.29 christos vp->v_iflag |= VI_CHANGING;
634 1.29 christos }
635 1.29 christos mutex_exit(vp->v_interlock);
636 1.29 christos
637 1.1 rmind /*
638 1.1 rmind * The vnode can gain another reference while being
639 1.1 rmind * deactivated. If VOP_INACTIVE() indicates that
640 1.1 rmind * the described file has been deleted, then recycle
641 1.1 rmind * the vnode irrespective of additional references.
642 1.1 rmind * Another thread may be waiting to re-use the on-disk
643 1.1 rmind * inode.
644 1.1 rmind *
645 1.1 rmind * Note that VOP_INACTIVE() will drop the vnode lock.
646 1.1 rmind */
647 1.1 rmind VOP_INACTIVE(vp, &recycle);
648 1.46 hannken if (recycle) {
649 1.46 hannken /* vclean() below will drop the lock. */
650 1.46 hannken if (vn_lock(vp, LK_EXCLUSIVE) != 0)
651 1.46 hannken recycle = false;
652 1.46 hannken }
653 1.9 rmind mutex_enter(vp->v_interlock);
654 1.1 rmind if (!recycle) {
655 1.1 rmind if (vtryrele(vp)) {
656 1.29 christos KASSERT((vp->v_iflag & VI_CHANGING) != 0);
657 1.29 christos vp->v_iflag &= ~VI_CHANGING;
658 1.29 christos cv_broadcast(&vp->v_cv);
659 1.9 rmind mutex_exit(vp->v_interlock);
660 1.1 rmind return;
661 1.1 rmind }
662 1.1 rmind }
663 1.1 rmind
664 1.1 rmind /* Take care of space accounting. */
665 1.1 rmind if (vp->v_iflag & VI_EXECMAP) {
666 1.1 rmind atomic_add_int(&uvmexp.execpages,
667 1.1 rmind -vp->v_uobj.uo_npages);
668 1.1 rmind atomic_add_int(&uvmexp.filepages,
669 1.1 rmind vp->v_uobj.uo_npages);
670 1.1 rmind }
671 1.1 rmind vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
672 1.1 rmind vp->v_vflag &= ~VV_MAPPED;
673 1.1 rmind
674 1.1 rmind /*
675 1.1 rmind * Recycle the vnode if the file is now unused (unlinked),
676 1.1 rmind * otherwise just free it.
677 1.1 rmind */
678 1.1 rmind if (recycle) {
679 1.25 hannken vclean(vp);
680 1.1 rmind }
681 1.1 rmind KASSERT(vp->v_usecount > 0);
682 1.29 christos } else { /* vnode was already clean */
683 1.29 christos if ((flags & VRELEL_CHANGING_SET) == 0) {
684 1.29 christos KASSERT((vp->v_iflag & VI_CHANGING) == 0);
685 1.29 christos vp->v_iflag |= VI_CHANGING;
686 1.29 christos }
687 1.1 rmind }
688 1.1 rmind
689 1.1 rmind if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
690 1.1 rmind /* Gained another reference while being reclaimed. */
691 1.29 christos KASSERT((vp->v_iflag & VI_CHANGING) != 0);
692 1.29 christos vp->v_iflag &= ~VI_CHANGING;
693 1.29 christos cv_broadcast(&vp->v_cv);
694 1.9 rmind mutex_exit(vp->v_interlock);
695 1.1 rmind return;
696 1.1 rmind }
697 1.1 rmind
698 1.1 rmind if ((vp->v_iflag & VI_CLEAN) != 0) {
699 1.1 rmind /*
700 1.1 rmind * It's clean so destroy it. It isn't referenced
701 1.1 rmind * anywhere since it has been reclaimed.
702 1.1 rmind */
703 1.1 rmind KASSERT(vp->v_holdcnt == 0);
704 1.1 rmind KASSERT(vp->v_writecount == 0);
705 1.9 rmind mutex_exit(vp->v_interlock);
706 1.1 rmind vfs_insmntque(vp, NULL);
707 1.1 rmind if (vp->v_type == VBLK || vp->v_type == VCHR) {
708 1.1 rmind spec_node_destroy(vp);
709 1.1 rmind }
710 1.1 rmind vnfree(vp);
711 1.1 rmind } else {
712 1.1 rmind /*
713 1.1 rmind * Otherwise, put it back onto the freelist. It
714 1.1 rmind * can't be destroyed while still associated with
715 1.1 rmind * a file system.
716 1.1 rmind */
717 1.1 rmind mutex_enter(&vnode_free_list_lock);
718 1.1 rmind if (vp->v_holdcnt > 0) {
719 1.1 rmind vp->v_freelisthd = &vnode_hold_list;
720 1.1 rmind } else {
721 1.1 rmind vp->v_freelisthd = &vnode_free_list;
722 1.1 rmind }
723 1.1 rmind TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
724 1.1 rmind mutex_exit(&vnode_free_list_lock);
725 1.29 christos KASSERT((vp->v_iflag & VI_CHANGING) != 0);
726 1.29 christos vp->v_iflag &= ~VI_CHANGING;
727 1.29 christos cv_broadcast(&vp->v_cv);
728 1.9 rmind mutex_exit(vp->v_interlock);
729 1.1 rmind }
730 1.1 rmind }
731 1.1 rmind
732 1.1 rmind void
733 1.1 rmind vrele(vnode_t *vp)
734 1.1 rmind {
735 1.1 rmind
736 1.1 rmind KASSERT((vp->v_iflag & VI_MARKER) == 0);
737 1.1 rmind
738 1.29 christos if (vtryrele(vp)) {
739 1.1 rmind return;
740 1.1 rmind }
741 1.9 rmind mutex_enter(vp->v_interlock);
742 1.1 rmind vrelel(vp, 0);
743 1.1 rmind }
744 1.1 rmind
745 1.1 rmind /*
746 1.1 rmind * Asynchronous vnode release: the vnode is released in a different context.
747 1.1 rmind */
748 1.1 rmind void
749 1.1 rmind vrele_async(vnode_t *vp)
750 1.1 rmind {
751 1.1 rmind
752 1.1 rmind KASSERT((vp->v_iflag & VI_MARKER) == 0);
753 1.1 rmind
754 1.29 christos if (vtryrele(vp)) {
755 1.1 rmind return;
756 1.1 rmind }
757 1.9 rmind mutex_enter(vp->v_interlock);
758 1.1 rmind vrelel(vp, VRELEL_ASYNC_RELE);
759 1.1 rmind }
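/*
 * Illustrative sketch (not part of this file): vrele_async(9) suits
 * contexts that must not risk sleeping for the vnode lock, e.g. the
 * pagedaemon or code already holding other locks; any VOP_INACTIVE()
 * work is done later by the vrele thread.
 *
 *	vrele_async(vp);	// reference dropped, cleanup deferred
 */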
760 1.1 rmind
761 1.1 rmind static void
762 1.1 rmind vrele_thread(void *cookie)
763 1.1 rmind {
764 1.34 hannken vnodelst_t skip_list;
765 1.1 rmind vnode_t *vp;
766 1.34 hannken struct mount *mp;
767 1.34 hannken
768 1.34 hannken TAILQ_INIT(&skip_list);
769 1.1 rmind
770 1.34 hannken mutex_enter(&vrele_lock);
771 1.1 rmind for (;;) {
772 1.1 rmind while (TAILQ_EMPTY(&vrele_list)) {
773 1.1 rmind vrele_gen++;
774 1.1 rmind cv_broadcast(&vrele_cv);
775 1.1 rmind cv_timedwait(&vrele_cv, &vrele_lock, hz);
776 1.34 hannken TAILQ_CONCAT(&vrele_list, &skip_list, v_freelist);
777 1.1 rmind }
778 1.1 rmind vp = TAILQ_FIRST(&vrele_list);
779 1.34 hannken mp = vp->v_mount;
780 1.1 rmind TAILQ_REMOVE(&vrele_list, vp, v_freelist);
781 1.34 hannken if (fstrans_start_nowait(mp, FSTRANS_LAZY) != 0) {
782 1.34 hannken TAILQ_INSERT_TAIL(&skip_list, vp, v_freelist);
783 1.34 hannken continue;
784 1.34 hannken }
785 1.1 rmind vrele_pending--;
786 1.1 rmind mutex_exit(&vrele_lock);
787 1.1 rmind
788 1.1 rmind /*
789 1.1 rmind * If not the last reference, then ignore the vnode
790 1.1 rmind * and look for more work.
791 1.1 rmind */
792 1.9 rmind mutex_enter(vp->v_interlock);
793 1.1 rmind vrelel(vp, 0);
794 1.34 hannken fstrans_done(mp);
795 1.34 hannken mutex_enter(&vrele_lock);
796 1.1 rmind }
797 1.1 rmind }
798 1.1 rmind
799 1.2 rmind void
800 1.2 rmind vrele_flush(void)
801 1.2 rmind {
802 1.2 rmind int gen;
803 1.2 rmind
804 1.2 rmind mutex_enter(&vrele_lock);
805 1.2 rmind gen = vrele_gen;
806 1.2 rmind while (vrele_pending && gen == vrele_gen) {
807 1.2 rmind cv_broadcast(&vrele_cv);
808 1.2 rmind cv_wait(&vrele_cv, &vrele_lock);
809 1.2 rmind }
810 1.2 rmind mutex_exit(&vrele_lock);
811 1.2 rmind }
812 1.2 rmind
813 1.1 rmind /*
814 1.1 rmind * Vnode reference, where a reference is already held by some other
815 1.1 rmind * object (for example, a file structure).
816 1.1 rmind */
817 1.1 rmind void
818 1.1 rmind vref(vnode_t *vp)
819 1.1 rmind {
820 1.1 rmind
821 1.1 rmind KASSERT((vp->v_iflag & VI_MARKER) == 0);
822 1.1 rmind KASSERT(vp->v_usecount != 0);
823 1.1 rmind
824 1.1 rmind atomic_inc_uint(&vp->v_usecount);
825 1.1 rmind }
826 1.1 rmind
827 1.1 rmind /*
828 1.1 rmind * Page or buffer structure gets a reference.
829 1.1 rmind * Called with v_interlock held.
830 1.1 rmind */
831 1.1 rmind void
832 1.1 rmind vholdl(vnode_t *vp)
833 1.1 rmind {
834 1.1 rmind
835 1.9 rmind KASSERT(mutex_owned(vp->v_interlock));
836 1.1 rmind KASSERT((vp->v_iflag & VI_MARKER) == 0);
837 1.1 rmind
838 1.1 rmind if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0) {
839 1.1 rmind mutex_enter(&vnode_free_list_lock);
840 1.1 rmind KASSERT(vp->v_freelisthd == &vnode_free_list);
841 1.1 rmind TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
842 1.1 rmind vp->v_freelisthd = &vnode_hold_list;
843 1.1 rmind TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
844 1.1 rmind mutex_exit(&vnode_free_list_lock);
845 1.1 rmind }
846 1.1 rmind }
847 1.1 rmind
848 1.1 rmind /*
849 1.1 rmind * Page or buffer structure frees a reference.
850 1.1 rmind * Called with v_interlock held.
851 1.1 rmind */
852 1.1 rmind void
853 1.1 rmind holdrelel(vnode_t *vp)
854 1.1 rmind {
855 1.1 rmind
856 1.9 rmind KASSERT(mutex_owned(vp->v_interlock));
857 1.1 rmind KASSERT((vp->v_iflag & VI_MARKER) == 0);
858 1.1 rmind
859 1.1 rmind if (vp->v_holdcnt <= 0) {
860 1.11 christos vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
861 1.1 rmind }
862 1.1 rmind
863 1.1 rmind vp->v_holdcnt--;
864 1.1 rmind if (vp->v_holdcnt == 0 && vp->v_usecount == 0) {
865 1.1 rmind mutex_enter(&vnode_free_list_lock);
866 1.1 rmind KASSERT(vp->v_freelisthd == &vnode_hold_list);
867 1.1 rmind TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
868 1.1 rmind vp->v_freelisthd = &vnode_free_list;
869 1.1 rmind TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
870 1.1 rmind mutex_exit(&vnode_free_list_lock);
871 1.1 rmind }
872 1.1 rmind }
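/*
 * Illustrative sketch (not part of this file): code that associates a
 * buffer or page with a vnode keeps the otherwise unreferenced vnode on
 * the hold list rather than the free list by taking a hold reference
 * under v_interlock:
 *
 *	mutex_enter(vp->v_interlock);
 *	vholdl(vp);		// first buffer attached
 *	mutex_exit(vp->v_interlock);
 *	...
 *	mutex_enter(vp->v_interlock);
 *	holdrelel(vp);		// last buffer detached
 *	mutex_exit(vp->v_interlock);
 */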
873 1.1 rmind
874 1.1 rmind /*
875 1.1 rmind * Disassociate the underlying file system from a vnode.
876 1.1 rmind *
877 1.46 hannken * Must be called with vnode locked and will return unlocked.
878 1.1 rmind * Must be called with the interlock held, and will return with it held.
879 1.1 rmind */
880 1.25 hannken static void
881 1.25 hannken vclean(vnode_t *vp)
882 1.1 rmind {
883 1.1 rmind lwp_t *l = curlwp;
884 1.43 hannken bool recycle, active;
885 1.1 rmind int error;
886 1.1 rmind
887 1.46 hannken KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
888 1.46 hannken VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
889 1.9 rmind KASSERT(mutex_owned(vp->v_interlock));
890 1.1 rmind KASSERT((vp->v_iflag & VI_MARKER) == 0);
891 1.46 hannken KASSERT((vp->v_iflag & (VI_XLOCK | VI_CLEAN)) == 0);
892 1.1 rmind KASSERT(vp->v_usecount != 0);
893 1.1 rmind
894 1.32 hannken active = (vp->v_usecount > 1);
895 1.1 rmind /*
896 1.1 rmind * Prevent the vnode from being recycled or brought into use
897 1.1 rmind * while we clean it out.
898 1.1 rmind */
899 1.1 rmind vp->v_iflag |= VI_XLOCK;
900 1.1 rmind if (vp->v_iflag & VI_EXECMAP) {
901 1.1 rmind atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
902 1.1 rmind atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
903 1.1 rmind }
904 1.1 rmind vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
905 1.9 rmind mutex_exit(vp->v_interlock);
906 1.23 hannken
907 1.1 rmind /*
908 1.1 rmind * Clean out any cached data associated with the vnode.
909 1.1 rmind * If purging an active vnode, it must be closed and
910 1.1 rmind * deactivated before being reclaimed. Note that the
911 1.1 rmind * VOP_INACTIVE will unlock the vnode.
912 1.1 rmind */
913 1.43 hannken error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
914 1.43 hannken if (error != 0) {
915 1.43 hannken if (wapbl_vphaswapbl(vp))
916 1.43 hannken WAPBL_DISCARD(wapbl_vptomp(vp));
917 1.43 hannken error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
918 1.43 hannken }
919 1.43 hannken KASSERT(error == 0);
920 1.43 hannken KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
921 1.43 hannken if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
922 1.43 hannken spec_node_revoke(vp);
923 1.1 rmind }
924 1.1 rmind if (active) {
925 1.1 rmind VOP_INACTIVE(vp, &recycle);
926 1.1 rmind } else {
927 1.1 rmind /*
928 1.1 rmind * Any other processes trying to obtain this lock must first
929 1.1 rmind * wait for VI_XLOCK to clear, then call the new lock operation.
930 1.1 rmind */
931 1.1 rmind VOP_UNLOCK(vp);
932 1.1 rmind }
933 1.1 rmind
934 1.1 rmind /* Disassociate the underlying file system from the vnode. */
935 1.1 rmind if (VOP_RECLAIM(vp)) {
936 1.11 christos vnpanic(vp, "%s: cannot reclaim", __func__);
937 1.1 rmind }
938 1.1 rmind
939 1.7 rmind KASSERT(vp->v_data == NULL);
940 1.1 rmind KASSERT(vp->v_uobj.uo_npages == 0);
941 1.7 rmind
942 1.1 rmind if (vp->v_type == VREG && vp->v_ractx != NULL) {
943 1.1 rmind uvm_ra_freectx(vp->v_ractx);
944 1.1 rmind vp->v_ractx = NULL;
945 1.1 rmind }
946 1.7 rmind
947 1.7 rmind /* Purge name cache. */
948 1.1 rmind cache_purge(vp);
949 1.1 rmind
950 1.31 hannken /* Move to dead mount. */
951 1.31 hannken vp->v_vflag &= ~VV_ROOT;
952 1.44 hannken atomic_inc_uint(&dead_rootmount->mnt_refcnt);
953 1.44 hannken vfs_insmntque(vp, dead_rootmount);
954 1.23 hannken
955 1.1 rmind /* Done with purge, notify sleepers of the grim news. */
956 1.9 rmind mutex_enter(vp->v_interlock);
957 1.43 hannken vp->v_op = dead_vnodeop_p;
958 1.43 hannken vp->v_vflag |= VV_LOCKSWORK;
959 1.43 hannken vp->v_iflag |= VI_CLEAN;
960 1.1 rmind vp->v_tag = VT_NON;
961 1.1 rmind KNOTE(&vp->v_klist, NOTE_REVOKE);
962 1.1 rmind vp->v_iflag &= ~VI_XLOCK;
963 1.1 rmind cv_broadcast(&vp->v_cv);
964 1.1 rmind
965 1.1 rmind KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
966 1.1 rmind }
967 1.1 rmind
968 1.1 rmind /*
969 1.33 hannken * Recycle an unused vnode if the caller holds the last reference.
970 1.1 rmind */
971 1.33 hannken bool
972 1.33 hannken vrecycle(vnode_t *vp)
973 1.1 rmind {
974 1.1 rmind
975 1.46 hannken if (vn_lock(vp, LK_EXCLUSIVE) != 0)
976 1.46 hannken return false;
977 1.46 hannken
978 1.33 hannken mutex_enter(vp->v_interlock);
979 1.33 hannken
980 1.1 rmind KASSERT((vp->v_iflag & VI_MARKER) == 0);
981 1.1 rmind
982 1.33 hannken if (vp->v_usecount != 1) {
983 1.9 rmind mutex_exit(vp->v_interlock);
984 1.46 hannken VOP_UNLOCK(vp);
985 1.33 hannken return false;
986 1.1 rmind }
987 1.33 hannken if ((vp->v_iflag & VI_CHANGING) != 0)
988 1.33 hannken vwait(vp, VI_CHANGING);
989 1.33 hannken if (vp->v_usecount != 1) {
990 1.33 hannken mutex_exit(vp->v_interlock);
991 1.46 hannken VOP_UNLOCK(vp);
992 1.33 hannken return false;
993 1.1 rmind }
994 1.46 hannken KASSERT((vp->v_iflag & VI_CLEAN) == 0);
995 1.29 christos vp->v_iflag |= VI_CHANGING;
996 1.25 hannken vclean(vp);
997 1.29 christos vrelel(vp, VRELEL_CHANGING_SET);
998 1.33 hannken return true;
999 1.1 rmind }
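/*
 * Illustrative sketch (not part of this file): a file system holding what
 * it believes is the last reference can ask for the vnode to be cleaned
 * immediately; on success the reference has been consumed, on failure the
 * caller still owns it.
 *
 *	if (!vrecycle(vp))
 *		vrele(vp);	// still in use elsewhere; just drop ours
 */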
1000 1.1 rmind
1001 1.1 rmind /*
1002 1.1 rmind * Eliminate all activity associated with the requested vnode
1003 1.1 rmind * and with all vnodes aliased to the requested vnode.
1004 1.1 rmind */
1005 1.1 rmind void
1006 1.1 rmind vrevoke(vnode_t *vp)
1007 1.1 rmind {
1008 1.19 hannken vnode_t *vq;
1009 1.1 rmind enum vtype type;
1010 1.1 rmind dev_t dev;
1011 1.1 rmind
1012 1.1 rmind KASSERT(vp->v_usecount > 0);
1013 1.1 rmind
1014 1.9 rmind mutex_enter(vp->v_interlock);
1015 1.1 rmind if ((vp->v_iflag & VI_CLEAN) != 0) {
1016 1.9 rmind mutex_exit(vp->v_interlock);
1017 1.1 rmind return;
1018 1.1 rmind } else if (vp->v_type != VBLK && vp->v_type != VCHR) {
1019 1.1 rmind atomic_inc_uint(&vp->v_usecount);
1020 1.29 christos mutex_exit(vp->v_interlock);
1021 1.29 christos vgone(vp);
1022 1.1 rmind return;
1023 1.1 rmind } else {
1024 1.1 rmind dev = vp->v_rdev;
1025 1.1 rmind type = vp->v_type;
1026 1.9 rmind mutex_exit(vp->v_interlock);
1027 1.1 rmind }
1028 1.1 rmind
1029 1.19 hannken while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
1030 1.29 christos vgone(vq);
1031 1.1 rmind }
1032 1.1 rmind }
1033 1.1 rmind
1034 1.1 rmind /*
1035 1.1 rmind * Eliminate all activity associated with a vnode in preparation for
1036 1.1 rmind * reuse. Drops a reference from the vnode.
1037 1.1 rmind */
1038 1.1 rmind void
1039 1.1 rmind vgone(vnode_t *vp)
1040 1.1 rmind {
1041 1.1 rmind
1042 1.46 hannken if (vn_lock(vp, LK_EXCLUSIVE) != 0) {
1043 1.46 hannken KASSERT((vp->v_iflag & VI_CLEAN) != 0);
1044 1.46 hannken vrele(vp);
1045 1.46 hannken return;
1046 1.46 hannken }
1047 1.9 rmind mutex_enter(vp->v_interlock);
1048 1.29 christos if ((vp->v_iflag & VI_CHANGING) != 0)
1049 1.29 christos vwait(vp, VI_CHANGING);
1050 1.29 christos vp->v_iflag |= VI_CHANGING;
1051 1.25 hannken vclean(vp);
1052 1.29 christos vrelel(vp, VRELEL_CHANGING_SET);
1053 1.1 rmind }
1054 1.1 rmind
1055 1.36 hannken static inline uint32_t
1056 1.36 hannken vcache_hash(const struct vcache_key *key)
1057 1.36 hannken {
1058 1.36 hannken uint32_t hash = HASH32_BUF_INIT;
1059 1.36 hannken
1060 1.36 hannken hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
1061 1.36 hannken hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
1062 1.36 hannken return hash;
1063 1.36 hannken }
1064 1.36 hannken
1065 1.36 hannken static void
1066 1.36 hannken vcache_init(void)
1067 1.36 hannken {
1068 1.36 hannken
1069 1.36 hannken vcache.pool = pool_cache_init(sizeof(struct vcache_node), 0, 0, 0,
1070 1.36 hannken "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
1071 1.36 hannken KASSERT(vcache.pool != NULL);
1072 1.36 hannken mutex_init(&vcache.lock, MUTEX_DEFAULT, IPL_NONE);
1073 1.36 hannken vcache.hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
1074 1.36 hannken &vcache.hashmask);
1075 1.36 hannken }
1076 1.36 hannken
1077 1.36 hannken static void
1078 1.36 hannken vcache_reinit(void)
1079 1.36 hannken {
1080 1.36 hannken int i;
1081 1.36 hannken uint32_t hash;
1082 1.36 hannken u_long oldmask, newmask;
1083 1.36 hannken struct hashhead *oldtab, *newtab;
1084 1.36 hannken struct vcache_node *node;
1085 1.36 hannken
1086 1.36 hannken newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
1087 1.36 hannken mutex_enter(&vcache.lock);
1088 1.36 hannken oldtab = vcache.hashtab;
1089 1.36 hannken oldmask = vcache.hashmask;
1090 1.36 hannken vcache.hashtab = newtab;
1091 1.36 hannken vcache.hashmask = newmask;
1092 1.36 hannken for (i = 0; i <= oldmask; i++) {
1093 1.36 hannken while ((node = SLIST_FIRST(&oldtab[i])) != NULL) {
1094 1.36 hannken SLIST_REMOVE(&oldtab[i], node, vcache_node, vn_hash);
1095 1.36 hannken hash = vcache_hash(&node->vn_key);
1096 1.36 hannken SLIST_INSERT_HEAD(&newtab[hash & vcache.hashmask],
1097 1.36 hannken node, vn_hash);
1098 1.36 hannken }
1099 1.36 hannken }
1100 1.36 hannken mutex_exit(&vcache.lock);
1101 1.36 hannken hashdone(oldtab, HASH_SLIST, oldmask);
1102 1.36 hannken }
1103 1.36 hannken
1104 1.36 hannken static inline struct vcache_node *
1105 1.36 hannken vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
1106 1.36 hannken {
1107 1.36 hannken struct hashhead *hashp;
1108 1.36 hannken struct vcache_node *node;
1109 1.36 hannken
1110 1.36 hannken KASSERT(mutex_owned(&vcache.lock));
1111 1.36 hannken
1112 1.36 hannken hashp = &vcache.hashtab[hash & vcache.hashmask];
1113 1.36 hannken SLIST_FOREACH(node, hashp, vn_hash) {
1114 1.36 hannken if (key->vk_mount != node->vn_key.vk_mount)
1115 1.36 hannken continue;
1116 1.36 hannken if (key->vk_key_len != node->vn_key.vk_key_len)
1117 1.36 hannken continue;
1118 1.36 hannken if (memcmp(key->vk_key, node->vn_key.vk_key, key->vk_key_len))
1119 1.36 hannken continue;
1120 1.36 hannken return node;
1121 1.36 hannken }
1122 1.36 hannken return NULL;
1123 1.36 hannken }
1124 1.36 hannken
1125 1.36 hannken /*
1126 1.36 hannken * Get a vnode / fs node pair by key and return it referenced through vpp.
1127 1.36 hannken */
1128 1.36 hannken int
1129 1.36 hannken vcache_get(struct mount *mp, const void *key, size_t key_len,
1130 1.36 hannken struct vnode **vpp)
1131 1.36 hannken {
1132 1.36 hannken int error;
1133 1.36 hannken uint32_t hash;
1134 1.36 hannken const void *new_key;
1135 1.36 hannken struct vnode *vp;
1136 1.36 hannken struct vcache_key vcache_key;
1137 1.36 hannken struct vcache_node *node, *new_node;
1138 1.36 hannken
1139 1.36 hannken new_key = NULL;
1140 1.36 hannken *vpp = NULL;
1141 1.36 hannken
1142 1.36 hannken vcache_key.vk_mount = mp;
1143 1.36 hannken vcache_key.vk_key = key;
1144 1.36 hannken vcache_key.vk_key_len = key_len;
1145 1.36 hannken hash = vcache_hash(&vcache_key);
1146 1.36 hannken
1147 1.36 hannken again:
1148 1.36 hannken mutex_enter(&vcache.lock);
1149 1.36 hannken node = vcache_hash_lookup(&vcache_key, hash);
1150 1.36 hannken
1151 1.36 hannken /* If found, take a reference or retry. */
1152 1.36 hannken if (__predict_true(node != NULL && node->vn_vnode != NULL)) {
1153 1.36 hannken vp = node->vn_vnode;
1154 1.36 hannken mutex_enter(vp->v_interlock);
1155 1.36 hannken mutex_exit(&vcache.lock);
1156 1.41 riastrad error = vget(vp, 0, true /* wait */);
1157 1.36 hannken if (error == ENOENT)
1158 1.36 hannken goto again;
1159 1.36 hannken if (error == 0)
1160 1.36 hannken *vpp = vp;
1161 1.36 hannken KASSERT((error != 0) == (*vpp == NULL));
1162 1.36 hannken return error;
1163 1.36 hannken }
1164 1.36 hannken
1165 1.36 hannken /* If another thread loads this node, wait and retry. */
1166 1.36 hannken if (node != NULL) {
1167 1.36 hannken KASSERT(node->vn_vnode == NULL);
1168 1.36 hannken mutex_exit(&vcache.lock);
1169 1.36 hannken kpause("vcache", false, mstohz(20), NULL);
1170 1.36 hannken goto again;
1171 1.36 hannken }
1172 1.36 hannken mutex_exit(&vcache.lock);
1173 1.36 hannken
1174 1.36 hannken /* Allocate and initialize a new vcache / vnode pair. */
1175 1.36 hannken error = vfs_busy(mp, NULL);
1176 1.36 hannken if (error)
1177 1.36 hannken return error;
1178 1.36 hannken new_node = pool_cache_get(vcache.pool, PR_WAITOK);
1179 1.36 hannken new_node->vn_vnode = NULL;
1180 1.36 hannken new_node->vn_key = vcache_key;
1181 1.36 hannken vp = vnalloc(NULL);
1182 1.36 hannken mutex_enter(&vcache.lock);
1183 1.36 hannken node = vcache_hash_lookup(&vcache_key, hash);
1184 1.36 hannken if (node == NULL) {
1185 1.36 hannken SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
1186 1.36 hannken new_node, vn_hash);
1187 1.36 hannken node = new_node;
1188 1.36 hannken }
1189 1.36 hannken mutex_exit(&vcache.lock);
1190 1.36 hannken
1191 1.36 hannken /* If another thread beat us inserting this node, retry. */
1192 1.36 hannken if (node != new_node) {
1193 1.36 hannken pool_cache_put(vcache.pool, new_node);
1194 1.36 hannken KASSERT(vp->v_usecount == 1);
1195 1.36 hannken vp->v_usecount = 0;
1196 1.36 hannken vnfree(vp);
1197 1.36 hannken vfs_unbusy(mp, false, NULL);
1198 1.36 hannken goto again;
1199 1.36 hannken }
1200 1.36 hannken
1201 1.36 hannken /* Load the fs node. Exclusive as new_node->vn_vnode is NULL. */
1202 1.39 hannken vp->v_iflag |= VI_CHANGING;
1203 1.36 hannken error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
1204 1.36 hannken if (error) {
1205 1.36 hannken mutex_enter(&vcache.lock);
1206 1.36 hannken SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
1207 1.36 hannken new_node, vcache_node, vn_hash);
1208 1.36 hannken mutex_exit(&vcache.lock);
1209 1.36 hannken pool_cache_put(vcache.pool, new_node);
1210 1.36 hannken KASSERT(vp->v_usecount == 1);
1211 1.36 hannken vp->v_usecount = 0;
1212 1.36 hannken vnfree(vp);
1213 1.36 hannken vfs_unbusy(mp, false, NULL);
1214 1.36 hannken KASSERT(*vpp == NULL);
1215 1.36 hannken return error;
1216 1.36 hannken }
1217 1.36 hannken KASSERT(new_key != NULL);
1218 1.36 hannken KASSERT(memcmp(key, new_key, key_len) == 0);
1219 1.36 hannken KASSERT(vp->v_op != NULL);
1220 1.36 hannken vfs_insmntque(vp, mp);
1221 1.36 hannken if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1222 1.36 hannken vp->v_vflag |= VV_MPSAFE;
1223 1.36 hannken vfs_unbusy(mp, true, NULL);
1224 1.36 hannken
1225 1.36 hannken /* Finished loading, finalize node. */
1226 1.36 hannken mutex_enter(&vcache.lock);
1227 1.36 hannken new_node->vn_key.vk_key = new_key;
1228 1.36 hannken new_node->vn_vnode = vp;
1229 1.36 hannken mutex_exit(&vcache.lock);
1230 1.39 hannken mutex_enter(vp->v_interlock);
1231 1.39 hannken vp->v_iflag &= ~VI_CHANGING;
1232 1.39 hannken cv_broadcast(&vp->v_cv);
1233 1.39 hannken mutex_exit(vp->v_interlock);
1234 1.36 hannken *vpp = vp;
1235 1.36 hannken return 0;
1236 1.36 hannken }
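/*
 * Illustrative sketch (not part of this file, example_vget() is
 * hypothetical): a disk file system will typically use its inode number
 * as the cache key, so a vget-style entry point reduces to:
 *
 *	int
 *	example_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
 *	{
 *
 *		return vcache_get(mp, &ino, sizeof(ino), vpp);
 *	}
 *
 * The returned vnode is referenced but not locked; on a cache miss it was
 * initialized by the mount's VFS_LOADVNODE() callback.
 */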
1237 1.36 hannken
1238 1.36 hannken /*
1239 1.40 hannken * Create a new vnode / fs node pair and return it referenced through vpp.
1240 1.40 hannken */
1241 1.40 hannken int
1242 1.40 hannken vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
1243 1.40 hannken kauth_cred_t cred, struct vnode **vpp)
1244 1.40 hannken {
1245 1.40 hannken int error;
1246 1.40 hannken uint32_t hash;
1247 1.40 hannken struct vnode *vp;
1248 1.40 hannken struct vcache_node *new_node;
1249 1.40 hannken struct vcache_node *old_node __diagused;
1250 1.40 hannken
1251 1.40 hannken *vpp = NULL;
1252 1.40 hannken
1253 1.40 hannken /* Allocate and initialize a new vcache / vnode pair. */
1254 1.40 hannken error = vfs_busy(mp, NULL);
1255 1.40 hannken if (error)
1256 1.40 hannken return error;
1257 1.40 hannken new_node = pool_cache_get(vcache.pool, PR_WAITOK);
1258 1.40 hannken new_node->vn_key.vk_mount = mp;
1259 1.40 hannken new_node->vn_vnode = NULL;
1260 1.40 hannken vp = vnalloc(NULL);
1261 1.40 hannken
1262 1.40 hannken /* Create and load the fs node. */
1263 1.40 hannken vp->v_iflag |= VI_CHANGING;
1264 1.40 hannken error = VFS_NEWVNODE(mp, dvp, vp, vap, cred,
1265 1.40 hannken &new_node->vn_key.vk_key_len, &new_node->vn_key.vk_key);
1266 1.40 hannken if (error) {
1267 1.40 hannken pool_cache_put(vcache.pool, new_node);
1268 1.40 hannken KASSERT(vp->v_usecount == 1);
1269 1.40 hannken vp->v_usecount = 0;
1270 1.40 hannken vnfree(vp);
1271 1.40 hannken vfs_unbusy(mp, false, NULL);
1272 1.40 hannken KASSERT(*vpp == NULL);
1273 1.40 hannken return error;
1274 1.40 hannken }
1275 1.40 hannken KASSERT(new_node->vn_key.vk_key != NULL);
1276 1.40 hannken KASSERT(vp->v_op != NULL);
1277 1.40 hannken hash = vcache_hash(&new_node->vn_key);
1278 1.40 hannken
1279 1.40 hannken /* Wait for previous instance to be reclaimed, then insert new node. */
1280 1.40 hannken mutex_enter(&vcache.lock);
1281 1.40 hannken while ((old_node = vcache_hash_lookup(&new_node->vn_key, hash))) {
1282 1.40 hannken #ifdef DIAGNOSTIC
1283 1.40 hannken if (old_node->vn_vnode != NULL)
1284 1.40 hannken mutex_enter(old_node->vn_vnode->v_interlock);
1285 1.40 hannken KASSERT(old_node->vn_vnode == NULL ||
1286 1.40 hannken (old_node->vn_vnode->v_iflag & (VI_XLOCK | VI_CLEAN)) != 0);
1287 1.40 hannken if (old_node->vn_vnode != NULL)
1288 1.40 hannken mutex_exit(old_node->vn_vnode->v_interlock);
1289 1.40 hannken #endif
1290 1.40 hannken mutex_exit(&vcache.lock);
1291 1.40 hannken kpause("vcache", false, mstohz(20), NULL);
1292 1.40 hannken mutex_enter(&vcache.lock);
1293 1.40 hannken }
1294 1.40 hannken SLIST_INSERT_HEAD(&vcache.hashtab[hash & vcache.hashmask],
1295 1.40 hannken new_node, vn_hash);
1296 1.40 hannken mutex_exit(&vcache.lock);
1297 1.40 hannken vfs_insmntque(vp, mp);
1298 1.40 hannken if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
1299 1.40 hannken vp->v_vflag |= VV_MPSAFE;
1300 1.40 hannken vfs_unbusy(mp, true, NULL);
1301 1.40 hannken
1302 1.40 hannken /* Finished loading, finalize node. */
1303 1.40 hannken mutex_enter(&vcache.lock);
1304 1.40 hannken new_node->vn_vnode = vp;
1305 1.40 hannken mutex_exit(&vcache.lock);
1306 1.40 hannken mutex_enter(vp->v_interlock);
1307 1.40 hannken vp->v_iflag &= ~VI_CHANGING;
1308 1.40 hannken cv_broadcast(&vp->v_cv);
1309 1.40 hannken mutex_exit(vp->v_interlock);
1310 1.40 hannken *vpp = vp;
1311 1.40 hannken return 0;
1312 1.40 hannken }
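/*
 * Illustrative sketch (not part of this file): a file system's create path
 * hands the directory vnode, attributes and credentials to vcache_new(9);
 * VFS_NEWVNODE() allocates the backing object and supplies the new key.
 * The returned vnode is referenced but not locked.
 *
 *	struct vnode *vp;
 *	int error;
 *
 *	error = vcache_new(dvp->v_mount, dvp, vap, cred, &vp);
 *	if (error)
 *		return error;
 *	error = vn_lock(vp, LK_EXCLUSIVE);
 *	...
 */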
1313 1.40 hannken
1314 1.40 hannken /*
1315 1.37 hannken * Prepare key change: lock old and new cache node.
1316 1.37 hannken * Return an error if the new node already exists.
1317 1.37 hannken */
1318 1.37 hannken int
1319 1.37 hannken vcache_rekey_enter(struct mount *mp, struct vnode *vp,
1320 1.37 hannken const void *old_key, size_t old_key_len,
1321 1.37 hannken const void *new_key, size_t new_key_len)
1322 1.37 hannken {
1323 1.37 hannken uint32_t old_hash, new_hash;
1324 1.37 hannken struct vcache_key old_vcache_key, new_vcache_key;
1325 1.37 hannken struct vcache_node *node, *new_node;
1326 1.37 hannken
1327 1.37 hannken old_vcache_key.vk_mount = mp;
1328 1.37 hannken old_vcache_key.vk_key = old_key;
1329 1.37 hannken old_vcache_key.vk_key_len = old_key_len;
1330 1.37 hannken old_hash = vcache_hash(&old_vcache_key);
1331 1.37 hannken
1332 1.37 hannken new_vcache_key.vk_mount = mp;
1333 1.37 hannken new_vcache_key.vk_key = new_key;
1334 1.37 hannken new_vcache_key.vk_key_len = new_key_len;
1335 1.37 hannken new_hash = vcache_hash(&new_vcache_key);
1336 1.37 hannken
1337 1.37 hannken new_node = pool_cache_get(vcache.pool, PR_WAITOK);
1338 1.37 hannken new_node->vn_vnode = NULL;
1339 1.37 hannken new_node->vn_key = new_vcache_key;
1340 1.37 hannken
1341 1.37 hannken mutex_enter(&vcache.lock);
1342 1.37 hannken node = vcache_hash_lookup(&new_vcache_key, new_hash);
1343 1.37 hannken if (node != NULL) {
1344 1.37 hannken mutex_exit(&vcache.lock);
1345 1.37 hannken pool_cache_put(vcache.pool, new_node);
1346 1.37 hannken return EEXIST;
1347 1.37 hannken }
1348 1.37 hannken SLIST_INSERT_HEAD(&vcache.hashtab[new_hash & vcache.hashmask],
1349 1.37 hannken new_node, vn_hash);
1350 1.37 hannken node = vcache_hash_lookup(&old_vcache_key, old_hash);
1351 1.37 hannken KASSERT(node != NULL);
1352 1.37 hannken KASSERT(node->vn_vnode == vp);
1353 1.37 hannken node->vn_vnode = NULL;
1354 1.37 hannken node->vn_key = old_vcache_key;
1355 1.37 hannken mutex_exit(&vcache.lock);
1356 1.37 hannken return 0;
1357 1.37 hannken }
1358 1.37 hannken
1359 1.37 hannken /*
1360 1.37 hannken * Key change complete: remove old node and unlock new node.
1361 1.37 hannken */
1362 1.37 hannken void
1363 1.37 hannken vcache_rekey_exit(struct mount *mp, struct vnode *vp,
1364 1.37 hannken const void *old_key, size_t old_key_len,
1365 1.37 hannken const void *new_key, size_t new_key_len)
1366 1.37 hannken {
1367 1.37 hannken uint32_t old_hash, new_hash;
1368 1.37 hannken struct vcache_key old_vcache_key, new_vcache_key;
1369 1.37 hannken struct vcache_node *node;
1370 1.37 hannken
1371 1.37 hannken old_vcache_key.vk_mount = mp;
1372 1.37 hannken old_vcache_key.vk_key = old_key;
1373 1.37 hannken old_vcache_key.vk_key_len = old_key_len;
1374 1.37 hannken old_hash = vcache_hash(&old_vcache_key);
1375 1.37 hannken
1376 1.37 hannken new_vcache_key.vk_mount = mp;
1377 1.37 hannken new_vcache_key.vk_key = new_key;
1378 1.37 hannken new_vcache_key.vk_key_len = new_key_len;
1379 1.37 hannken new_hash = vcache_hash(&new_vcache_key);
1380 1.37 hannken
1381 1.37 hannken mutex_enter(&vcache.lock);
1382 1.37 hannken node = vcache_hash_lookup(&new_vcache_key, new_hash);
1383 1.37 hannken KASSERT(node != NULL && node->vn_vnode == NULL);
1384 1.37 hannken KASSERT(node->vn_key.vk_key_len == new_key_len);
1385 1.37 hannken node->vn_vnode = vp;
1386 1.37 hannken node->vn_key = new_vcache_key;
1387 1.37 hannken node = vcache_hash_lookup(&old_vcache_key, old_hash);
1388 1.37 hannken KASSERT(node != NULL);
1389 1.37 hannken KASSERT(node->vn_vnode == NULL);
1390 1.37 hannken SLIST_REMOVE(&vcache.hashtab[old_hash & vcache.hashmask],
1391 1.37 hannken node, vcache_node, vn_hash);
1392 1.37 hannken mutex_exit(&vcache.lock);
1393 1.37 hannken pool_cache_put(vcache.pool, node);
1394 1.37 hannken }
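/*
 * Illustrative sketch (not part of this file, oldkey/newkey are
 * hypothetical): a file system that changes a vnode's key brackets the
 * change with the enter/exit pair; between the two calls lookups on
 * either key will block until the exit call publishes the vnode again.
 *
 *	error = vcache_rekey_enter(mp, vp, &oldkey, sizeof(oldkey),
 *	    &newkey, sizeof(newkey));
 *	if (error)
 *		return error;	// EEXIST: target key already present
 *	... update the file system's own copy of the identifier ...
 *	vcache_rekey_exit(mp, vp, &oldkey, sizeof(oldkey),
 *	    &newkey, sizeof(newkey));
 */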
1395 1.37 hannken
1396 1.37 hannken /*
1397 1.36 hannken * Remove a vnode / fs node pair from the cache.
1398 1.36 hannken */
1399 1.36 hannken void
1400 1.36 hannken vcache_remove(struct mount *mp, const void *key, size_t key_len)
1401 1.36 hannken {
1402 1.36 hannken uint32_t hash;
1403 1.36 hannken struct vcache_key vcache_key;
1404 1.36 hannken struct vcache_node *node;
1405 1.36 hannken
1406 1.36 hannken vcache_key.vk_mount = mp;
1407 1.36 hannken vcache_key.vk_key = key;
1408 1.36 hannken vcache_key.vk_key_len = key_len;
1409 1.36 hannken hash = vcache_hash(&vcache_key);
1410 1.36 hannken
1411 1.36 hannken mutex_enter(&vcache.lock);
1412 1.36 hannken node = vcache_hash_lookup(&vcache_key, hash);
1413 1.36 hannken KASSERT(node != NULL);
1414 1.36 hannken SLIST_REMOVE(&vcache.hashtab[hash & vcache.hashmask],
1415 1.36 hannken node, vcache_node, vn_hash);
1416 1.36 hannken mutex_exit(&vcache.lock);
1417 1.36 hannken pool_cache_put(vcache.pool, node);
1418 1.36 hannken }
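/*
 * Illustrative sketch (not part of this file): a VOP_RECLAIM()
 * implementation is expected to drop its cache entry before freeing the
 * file system specific data, e.g. for an inode-number key:
 *
 *	vcache_remove(vp->v_mount, &ino, sizeof(ino));
 *	... free the fs node, then set vp->v_data = NULL ...
 */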
1419 1.36 hannken
1420 1.1 rmind /*
1421 1.1 rmind * Update outstanding I/O count and do wakeup if requested.
1422 1.1 rmind */
1423 1.1 rmind void
1424 1.1 rmind vwakeup(struct buf *bp)
1425 1.1 rmind {
1426 1.1 rmind vnode_t *vp;
1427 1.1 rmind
1428 1.1 rmind if ((vp = bp->b_vp) == NULL)
1429 1.1 rmind return;
1430 1.1 rmind
1431 1.9 rmind KASSERT(bp->b_objlock == vp->v_interlock);
1432 1.1 rmind KASSERT(mutex_owned(bp->b_objlock));
1433 1.1 rmind
1434 1.1 rmind if (--vp->v_numoutput < 0)
1435 1.11 christos vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
1436 1.1 rmind if (vp->v_numoutput == 0)
1437 1.1 rmind cv_broadcast(&vp->v_cv);
1438 1.1 rmind }
1439 1.1 rmind
1440 1.1 rmind /*
1441 1.35 hannken * Test a vnode for being or becoming dead. Returns one of:
1442 1.35 hannken * EBUSY: vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
1443 1.35 hannken * ENOENT: vnode is dead.
1444 1.35 hannken * 0: otherwise.
1445 1.35 hannken *
1446 1.35 hannken * Whenever this function returns a non-zero value all future
1447 1.35 hannken * calls will also return a non-zero value.
1448 1.35 hannken */
1449 1.35 hannken int
1450 1.35 hannken vdead_check(struct vnode *vp, int flags)
1451 1.35 hannken {
1452 1.35 hannken
1453 1.35 hannken KASSERT(mutex_owned(vp->v_interlock));
1454 1.35 hannken if (ISSET(vp->v_iflag, VI_XLOCK)) {
1455 1.35 hannken if (ISSET(flags, VDEAD_NOWAIT))
1456 1.35 hannken return EBUSY;
1457 1.35 hannken vwait(vp, VI_XLOCK);
1458 1.35 hannken KASSERT(ISSET(vp->v_iflag, VI_CLEAN));
1459 1.35 hannken }
1460 1.35 hannken if (ISSET(vp->v_iflag, VI_CLEAN))
1461 1.35 hannken return ENOENT;
1462 1.35 hannken return 0;
1463 1.35 hannken }
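/*
 * Illustrative sketch (not part of this file): a caller holding a
 * reference that wants to bail out early on revoked or reclaimed vnodes
 * can poll without sleeping:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error != 0)
 *		return error;	// EBUSY while dying, ENOENT once dead
 */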
1464 1.35 hannken
1465 1.35 hannken /*
1466 1.1 rmind * Wait for a vnode (typically with VI_XLOCK set) to be cleaned or
1467 1.1 rmind * recycled.
1468 1.1 rmind */
1469 1.35 hannken static void
1470 1.1 rmind vwait(vnode_t *vp, int flags)
1471 1.1 rmind {
1472 1.1 rmind
1473 1.9 rmind KASSERT(mutex_owned(vp->v_interlock));
1474 1.1 rmind KASSERT(vp->v_usecount != 0);
1475 1.1 rmind
1476 1.1 rmind while ((vp->v_iflag & flags) != 0)
1477 1.9 rmind cv_wait(&vp->v_cv, vp->v_interlock);
1478 1.1 rmind }
1479 1.1 rmind
1480 1.1 rmind int
1481 1.3 rmind vfs_drainvnodes(long target)
1482 1.1 rmind {
1483 1.12 hannken int error;
1484 1.12 hannken
1485 1.12 hannken mutex_enter(&vnode_free_list_lock);
1486 1.1 rmind
1487 1.1 rmind while (numvnodes > target) {
1488 1.12 hannken error = cleanvnode();
1489 1.12 hannken if (error != 0)
1490 1.12 hannken return error;
1491 1.1 rmind mutex_enter(&vnode_free_list_lock);
1492 1.1 rmind }
1493 1.12 hannken
1494 1.12 hannken mutex_exit(&vnode_free_list_lock);
1495 1.12 hannken
1496 1.36 hannken vcache_reinit();
1497 1.36 hannken
1498 1.1 rmind return 0;
1499 1.1 rmind }
1500 1.1 rmind
1501 1.1 rmind void
1502 1.11 christos vnpanic(vnode_t *vp, const char *fmt, ...)
1503 1.1 rmind {
1504 1.11 christos va_list ap;
1505 1.11 christos
1506 1.1 rmind #ifdef DIAGNOSTIC
1507 1.1 rmind vprint(NULL, vp);
1508 1.1 rmind #endif
1509 1.11 christos va_start(ap, fmt);
1510 1.11 christos vpanic(fmt, ap);
1511 1.11 christos va_end(ap);
1512 1.1 rmind }
1513