/*	$NetBSD: vfs_vnode.c,v 1.5.2.3 2011/05/19 03:43:03 rmind Exp $	*/

/*-
 * Copyright (c) 1997-2011 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */
/*
 * Note on v_usecount and locking:
 *
 * At nearly all points where it is known that v_usecount could be zero,
 * the vnode interlock will be held.
 *
 * To change v_usecount away from zero, the interlock must be held.  To
 * change from a non-zero value to zero, again the interlock must be
 * held.
 *
 * There's a flag bit, VC_XLOCK, embedded in v_usecount.
 * To raise v_usecount, if the VC_XLOCK bit is set in it, the interlock
 * must be held.
 * To modify the VC_XLOCK bit, the interlock must be held.
 * We always keep the usecount (v_usecount & VC_MASK) non-zero while the
 * VC_XLOCK bit is set.
 *
 * Unless the VC_XLOCK bit is set, changing the usecount from a non-zero
 * value to a non-zero value can safely be done using atomic operations,
 * without the interlock held.
 * Even if the VC_XLOCK bit is set, decreasing the usecount to a non-zero
 * value can be done using atomic operations, without the interlock held.
 */
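
/*
 * Illustrative transitions under the rules above (a sketch of what the
 * code in this file does, not an independent specification):
 *
 *	0 -> 1		vremfree(), then "vp->v_usecount = 1", with the
 *			interlock held (see vget()).
 *	n -> n + 1	atomic_inc_uint(&vp->v_usecount), no interlock,
 *			provided VC_XLOCK is clear (see vtryget(), vref()).
 *	n -> n - 1	atomic compare-and-swap, no interlock, as long as
 *			the count stays non-zero (see vtryrele()).
 *	1 -> 0		with the interlock held (see vrelel()).
 */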

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.5.2.3 2011/05/19 03:43:03 rmind Exp $");

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode.h>
#include <sys/wapbl.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>

u_int			numvnodes;

static pool_cache_t	vnode_cache;
static kmutex_t		vnode_free_list_lock;

static vnodelst_t	vnode_free_list;
static vnodelst_t	vnode_hold_list;
static vnodelst_t	vrele_list;

static kmutex_t		vrele_lock;
static kcondvar_t	vrele_cv;
static lwp_t *		vrele_lwp;
static int		vrele_pending;
static int		vrele_gen;

static vnode_t *	getcleanvnode(void);
static void		vrele_thread(void *);
static void		vpanic(vnode_t *, const char *);

/* Routines having to do with the management of the vnode table. */
extern int		(**dead_vnodeop_p)(void *);

void
vfs_vnode_sysinit(void)
{
	int error;

	vnode_cache = pool_cache_init(sizeof(vnode_t), 0, 0, 0, "vnodepl",
	    NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vnode_cache != NULL);

	mutex_init(&vnode_free_list_lock, MUTEX_DEFAULT, IPL_NONE);
	TAILQ_INIT(&vnode_free_list);
	TAILQ_INIT(&vnode_hold_list);
	TAILQ_INIT(&vrele_list);

	mutex_init(&vrele_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vrele_cv, "vrele");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vrele_thread,
	    NULL, &vrele_lwp, "vrele");
	KASSERT(error == 0);
}

/*
 * Allocate a new, uninitialized vnode.  If 'mp' is non-NULL, this is a
 * marker vnode and we are prepared to wait for the allocation.
 */
vnode_t *
vnalloc(struct mount *mp)
{
	vnode_t *vp;

	vp = pool_cache_get(vnode_cache, (mp != NULL ? PR_WAITOK : PR_NOWAIT));
	if (vp == NULL) {
		return NULL;
	}

	memset(vp, 0, sizeof(*vp));
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	cv_init(&vp->v_cv, "vnode");
	/*
	 * Done by memset() above.
	 *	LIST_INIT(&vp->v_nclist);
	 *	LIST_INIT(&vp->v_dnclist);
	 */

	if (mp != NULL) {
		vp->v_mount = mp;
		vp->v_type = VBAD;
		vp->v_iflag = VI_MARKER;
	} else {
		rw_init(&vp->v_lock);
	}

	return vp;
}

/*
 * Free an unused, unreferenced vnode.
 */
void
vnfree(vnode_t *vp)
{

	KASSERT(vp->v_usecount == 0);

	if ((vp->v_iflag & VI_MARKER) == 0) {
		rw_destroy(&vp->v_lock);
		mutex_enter(&vnode_free_list_lock);
		numvnodes--;
		mutex_exit(&vnode_free_list_lock);
	}

	/*
	 * Note: the vnode interlock will either be freed, or its
	 * reference dropped (if VI_LOCKSHARE was in use).
	 */
	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	pool_cache_put(vnode_cache, vp);
}
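
/*
 * Example (a sketch; the iteration details are hypothetical): a marker
 * vnode can serve as a stable cursor when walking a mount's vnode list,
 * and needs only vnalloc()/vnfree() to manage it:
 *
 *	vnode_t *mvp = vnalloc(mp);	(waits for memory, sets VI_MARKER)
 *	...insert mvp after the current list element, drop locks,
 *	   process the element, then resume the walk from mvp...
 *	vnfree(mvp);			(usecount is still zero)
 */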

/*
 * getcleanvnode: grab a vnode from freelist and clean it.
 *
 * => Releases vnode_free_list_lock.
 * => Returns referenced vnode on success.
 */
static vnode_t *
getcleanvnode(void)
{
	vnode_t *vp;
	vnodelst_t *listhd;

	KASSERT(mutex_owned(&vnode_free_list_lock));
retry:
	listhd = &vnode_free_list;
try_nextlist:
	TAILQ_FOREACH(vp, listhd, v_freelist) {
		/*
		 * It's safe to test v_usecount and v_iflag
		 * without holding the interlock here, since
		 * these vnodes should never appear on the
		 * lists.
		 */
		KASSERT(vp->v_usecount == 0);
		KASSERT((vp->v_iflag & VI_CLEAN) == 0);
		KASSERT(vp->v_freelisthd == listhd);

		if (!mutex_tryenter(vp->v_interlock))
			continue;
		if ((vp->v_iflag & VI_XLOCK) == 0)
			break;
		mutex_exit(vp->v_interlock);
	}

	if (vp == NULL) {
		if (listhd == &vnode_free_list) {
			listhd = &vnode_hold_list;
			goto try_nextlist;
		}
		mutex_exit(&vnode_free_list_lock);
		return NULL;
	}

	/* Remove it from the freelist. */
	TAILQ_REMOVE(listhd, vp, v_freelist);
	vp->v_freelisthd = NULL;
	mutex_exit(&vnode_free_list_lock);

	KASSERT(vp->v_usecount == 0);

	/*
	 * The vnode is still associated with a file system, so we must
	 * clean it out before reusing it.  We need to add a reference
	 * before doing this.  If the vnode gains another reference while
	 * being cleaned out then we lose - retry.
	 */
	atomic_add_int(&vp->v_usecount, 1 + VC_XLOCK);
	vclean(vp, DOCLOSE);
	KASSERT(vp->v_usecount >= 1 + VC_XLOCK);
	atomic_add_int(&vp->v_usecount, -VC_XLOCK);
	if (vp->v_usecount == 1) {
		/* We're about to dirty it. */
		vp->v_iflag &= ~VI_CLEAN;
		mutex_exit(vp->v_interlock);
		if (vp->v_type == VBLK || vp->v_type == VCHR) {
			spec_node_destroy(vp);
		}
		vp->v_type = VNON;
	} else {
		/*
		 * Don't return to freelist - the holder of the last
		 * reference will destroy it.
		 */
		vrelel(vp, 0); /* releases vp->v_interlock */
		mutex_enter(&vnode_free_list_lock);
		goto retry;
	}

	KASSERT(vp->v_data == NULL);
	KASSERT(vp->v_uobj.uo_npages == 0);
	KASSERT(TAILQ_EMPTY(&vp->v_uobj.memq));
	KASSERT(vp->v_numoutput == 0);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);

	return vp;
}

/*
 * getnewvnode: return the next vnode from the free list.
 *
 * => Returns referenced vnode, moved into the mount queue.
 * => Shares the lock with vnode specified by 'svp', if it is not NULL.
 */
int
getnewvnode(enum vtagtype tag, struct mount *mp, int (**vops)(void *),
    const vnode_t *svp, vnode_t **vpp)
{
	struct uvm_object *uobj;
	static int toggle;
	vnode_t *vp;
	int error = 0, tryalloc;

try_again:
	if (mp != NULL) {
		/*
		 * Mark filesystem busy while we are creating a vnode.
		 * If unmount is in progress, this will fail.
		 */
		error = vfs_busy(mp, NULL);
		if (error)
			return error;
	}

	/*
	 * We must choose whether to allocate a new vnode or recycle an
	 * existing one.  The criterion for allocating a new one is that
	 * the total number of vnodes is less than the number desired or
	 * there are no vnodes on either free list.  Generally we only
	 * want to recycle vnodes that have no buffers associated with
	 * them, so we look first on the vnode_free_list.  If it is empty,
	 * we next consider vnodes with referencing buffers on the
	 * vnode_hold_list.  The toggle ensures that half the time we
	 * will use a vnode from the vnode_hold_list, and half the time
	 * we will allocate a new one unless the list has grown to twice
	 * the desired size.  We are reluctant to recycle vnodes from the
	 * vnode_hold_list because we will lose the identity of all their
	 * referencing buffers.
	 */

	vp = NULL;

	mutex_enter(&vnode_free_list_lock);

	toggle ^= 1;
	if (numvnodes > 2 * desiredvnodes)
		toggle = 0;

	tryalloc = numvnodes < desiredvnodes ||
	    (TAILQ_FIRST(&vnode_free_list) == NULL &&
	    (TAILQ_FIRST(&vnode_hold_list) == NULL || toggle));

	if (tryalloc) {
		/* Allocate a new vnode. */
		numvnodes++;
		mutex_exit(&vnode_free_list_lock);
		if ((vp = vnalloc(NULL)) == NULL) {
			mutex_enter(&vnode_free_list_lock);
			numvnodes--;
		} else
			vp->v_usecount = 1;
	}

	if (vp == NULL) {
		/* Recycle and get vnode clean. */
		vp = getcleanvnode();
		if (vp == NULL) {
			if (mp != NULL) {
				vfs_unbusy(mp, false, NULL);
			}
			if (tryalloc) {
				printf("WARNING: unable to allocate new "
				    "vnode, retrying...\n");
				kpause("newvn", false, hz, NULL);
				goto try_again;
			}
			tablefull("vnode", "increase kern.maxvnodes or NVNODE");
			*vpp = NULL;
			return ENFILE;
		}
		if ((vp->v_iflag & VI_LOCKSHARE) != 0 || svp) {
			/* We must remove vnode from the old mount point. */
			if (vp->v_mount) {
				vfs_insmntque(vp, NULL);
			}
			/* Allocate a new interlock, if it was shared. */
			if (vp->v_iflag & VI_LOCKSHARE) {
				uvm_obj_setlock(&vp->v_uobj, NULL);
				vp->v_iflag &= ~VI_LOCKSHARE;
			}
		}
		vp->v_iflag = 0;
		vp->v_vflag = 0;
		vp->v_uflag = 0;
		vp->v_socket = NULL;
	}

	KASSERT(vp->v_usecount == 1);
	KASSERT(vp->v_freelisthd == NULL);
	KASSERT(LIST_EMPTY(&vp->v_nclist));
	KASSERT(LIST_EMPTY(&vp->v_dnclist));

	/* Initialize vnode. */
	vp->v_type = VNON;
	vp->v_tag = tag;
	vp->v_op = vops;
	vp->v_data = NULL;

	uobj = &vp->v_uobj;
	KASSERT(uobj->pgops == &uvm_vnodeops);
	KASSERT(uobj->uo_npages == 0);
	KASSERT(TAILQ_FIRST(&uobj->memq) == NULL);
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	/* Share the vnode_t::v_interlock, if requested. */
	if (svp) {
		/* Set the interlock and mark that it is shared. */
		KASSERT(vp->v_mount == NULL);
		uvm_obj_setlock(&vp->v_uobj, svp->v_interlock);
		KASSERT(vp->v_interlock == svp->v_interlock);
		vp->v_iflag |= VI_LOCKSHARE;
	}

	/* Finally, move vnode into the mount queue. */
	vfs_insmntque(vp, mp);

	if (mp != NULL) {
		if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
			vp->v_vflag |= VV_MPSAFE;
		vfs_unbusy(mp, true, NULL);
	}

	*vpp = vp;
	return 0;
}
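
/*
 * Usage sketch (hedged; the "myfs" names are hypothetical): a file
 * system's lookup/vget path would typically allocate and initialize a
 * vnode along these lines:
 *
 *	error = getnewvnode(VT_NON, mp, myfs_vnodeop_p, NULL, &vp);
 *	if (error)
 *		return error;
 *	vp->v_type = VREG;
 *	vp->v_data = mynode;
 *
 * If another thread wins the race to create the same vnode, the fresh
 * one can be pushed back with ungetnewvnode() below.
 */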

/*
 * This is really just the reverse of getnewvnode().  Needed for
 * VFS_VGET functions that may need to push back a vnode in case
 * of a locking race.
 */
void
ungetnewvnode(vnode_t *vp)
{

	KASSERT(vp->v_usecount == 1);
	KASSERT(vp->v_data == NULL);
	KASSERT(vp->v_freelisthd == NULL);

	mutex_enter(vp->v_interlock);
	vp->v_iflag |= VI_CLEAN;
	vrelel(vp, 0);
}

/*
 * Remove a vnode from its freelist.
 */
void
vremfree(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount == 0);

	/*
	 * Note that the reference count must not change until
	 * the vnode is removed.
	 */
	mutex_enter(&vnode_free_list_lock);
	if (vp->v_holdcnt > 0) {
		KASSERT(vp->v_freelisthd == &vnode_hold_list);
	} else {
		KASSERT(vp->v_freelisthd == &vnode_free_list);
	}
	TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
	vp->v_freelisthd = NULL;
	mutex_exit(&vnode_free_list_lock);
}

/*
 * Try to gain a reference to a vnode, without acquiring its interlock.
 * The caller must hold a lock that will prevent the vnode from being
 * recycled or freed.
 */
bool
vtryget(vnode_t *vp)
{
	u_int use, next;

	/*
	 * If the vnode is being freed, don't make life any harder
	 * for vclean() by adding another reference without waiting.
	 * This is not strictly necessary, but we'll do it anyway.
	 */
	if (__predict_false((vp->v_iflag & VI_XLOCK) != 0)) {
		return false;
	}
	for (use = vp->v_usecount;; use = next) {
		if (use == 0 || __predict_false((use & VC_XLOCK) != 0)) {
			/* Need interlock held if first reference. */
			return false;
		}
		next = atomic_cas_uint(&vp->v_usecount, use, use + 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}
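
/*
 * Usage sketch (hedged): a cache that holds its own lock across the
 * lookup can try the lock-free path first and fall back to vget()
 * with the interlock:
 *
 *	if (!vtryget(vp)) {
 *		mutex_enter(vp->v_interlock);
 *		...drop the cache's lock...
 *		error = vget(vp, 0);
 *	}
 */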

/*
 * vget: get a particular vnode from the free list, increment its reference
 * count and lock it.
 *
 * => Should be called with v_interlock held.
 *
 * If VI_XLOCK is set, the vnode is being eliminated in vgone()/vclean().
 * In that case, we cannot grab the vnode, so the process is awakened when
 * the transition is completed, and an error returned to indicate that the
 * vnode is no longer usable (e.g. changed to a new file system type).
 */
int
vget(vnode_t *vp, int flags)
{
	int error = 0;

	KASSERT((vp->v_iflag & VI_MARKER) == 0);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((flags & ~(LK_SHARED|LK_EXCLUSIVE|LK_NOWAIT)) == 0);

	/*
	 * Before adding a reference, we must remove the vnode
	 * from its freelist.
	 */
	if (vp->v_usecount == 0) {
		vremfree(vp);
		vp->v_usecount = 1;
	} else {
		atomic_inc_uint(&vp->v_usecount);
	}

	/*
	 * If the vnode is in the process of being cleaned out for
	 * another use, we wait for the cleaning to finish and then
	 * return failure.  Cleaning is determined by checking if
	 * the VI_XLOCK flag is set.
	 */
	if ((vp->v_iflag & VI_XLOCK) != 0) {
		if ((flags & LK_NOWAIT) != 0) {
			vrelel(vp, 0);
			return EBUSY;
		}
		vwait(vp, VI_XLOCK);
		vrelel(vp, 0);
		return ENOENT;
	}

	/*
	 * Ok, we got it in good shape.  Just locking left.
	 */
	KASSERT((vp->v_iflag & VI_CLEAN) == 0);
	mutex_exit(vp->v_interlock);
	if (flags & (LK_EXCLUSIVE | LK_SHARED)) {
		error = vn_lock(vp, flags);
		if (error != 0) {
			vrele(vp);
		}
	}
	return error;
}
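
/*
 * Usage sketch (hedged): callers enter vget() with the interlock held,
 * and it is released on return:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vget(vp, LK_EXCLUSIVE);
 *	if (error != 0)
 *		return error;		(no reference is held on error)
 *	...use vp...
 *	vput(vp);			(unlock and release)
 */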

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{

	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	VOP_UNLOCK(vp);
	vrele(vp);
}

/*
 * Try to drop reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static inline bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = vp->v_usecount;; use = next) {
		if (use == 1) {
			return false;
		}
		KASSERT((use & VC_MASK) > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * Vnode release.  If reference count drops to zero, call inactive
 * routine and either return to freelist or free to the pool.
 */
void
vrelel(vnode_t *vp, int flags)
{
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((vp->v_iflag & VI_MARKER) == 0);
	KASSERT(vp->v_freelisthd == NULL);

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    (vp->v_iflag & (VI_CLEAN|VI_XLOCK)) == 0)) {
		vpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count
	 * and unlock.
	 */
	if (vtryrele(vp)) {
		vp->v_iflag |= VI_INACTREDO;
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
		vpanic(vp, "vrelel: bad ref count");
	}

	KASSERT((vp->v_iflag & VI_XLOCK) == 0);

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
retry:
	if ((vp->v_iflag & VI_CLEAN) == 0) {
		recycle = false;
		vp->v_iflag |= VI_INACTNOW;

		/*
		 * XXX This ugly block can be largely eliminated if
		 * locking is pushed down into the file systems.
		 *
		 * Defer vnode release to vrele_thread if caller
		 * requests it explicitly.
		 */
		if ((curlwp == uvm.pagedaemon_lwp) ||
		    (flags & VRELEL_ASYNC_RELE) != 0) {
			/* The pagedaemon can't wait around; defer. */
			defer = true;
		} else if (curlwp == vrele_lwp) {
			/* We have to try harder. */
			vp->v_iflag &= ~VI_INACTREDO;
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp, LK_EXCLUSIVE);
			if (error != 0) {
				/* XXX */
				vpanic(vp, "vrelel: unable to lock vnode");
			}
			defer = false;
		} else if ((vp->v_iflag & VI_LAYER) != 0) {
			/*
			 * Acquiring the stack's lock in vclean() even
			 * for an honest vput/vrele is dangerous because
			 * our caller may hold other vnode locks; defer.
			 */
			defer = true;
		} else {
			/* If we can't acquire the lock, then defer. */
			vp->v_iflag &= ~VI_INACTREDO;
			mutex_exit(vp->v_interlock);
			error = vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT);
			if (error != 0) {
				defer = true;
				mutex_enter(vp->v_interlock);
			} else {
				defer = false;
			}
		}

		if (defer) {
			/*
			 * Defer reclaim to the kthread; it's not safe to
			 * clean it here.  We donate it our last reference.
			 */
			KASSERT(mutex_owned(vp->v_interlock));
			KASSERT((vp->v_iflag & VI_INACTPEND) == 0);
			vp->v_iflag &= ~VI_INACTNOW;
			vp->v_iflag |= VI_INACTPEND;
			mutex_enter(&vrele_lock);
			TAILQ_INSERT_TAIL(&vrele_list, vp, v_freelist);
			if (++vrele_pending > (desiredvnodes >> 8))
				cv_signal(&vrele_cv);
			mutex_exit(&vrele_lock);
			mutex_exit(vp->v_interlock);
			return;
		}

#ifdef DIAGNOSTIC
		if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
		    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
			vprint("vrelel: missing VOP_CLOSE()", vp);
		}
#endif

		/*
		 * The vnode can gain another reference while being
		 * deactivated.  If VOP_INACTIVE() indicates that
		 * the described file has been deleted, then recycle
		 * the vnode irrespective of additional references.
		 * Another thread may be waiting to re-use the on-disk
		 * inode.
		 *
		 * Note that VOP_INACTIVE() will drop the vnode lock.
		 */
		VOP_INACTIVE(vp, &recycle);
		mutex_enter(vp->v_interlock);
		vp->v_iflag &= ~VI_INACTNOW;
		if (!recycle) {
			if (vtryrele(vp)) {
				mutex_exit(vp->v_interlock);
				return;
			}

			/*
			 * If we grew another reference while
			 * VOP_INACTIVE() was underway, retry.
			 */
			if ((vp->v_iflag & VI_INACTREDO) != 0) {
				goto retry;
			}
		}

		/* Take care of space accounting. */
		if (vp->v_iflag & VI_EXECMAP) {
			atomic_add_int(&uvmexp.execpages,
			    -vp->v_uobj.uo_npages);
			atomic_add_int(&uvmexp.filepages,
			    vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			vclean(vp, DOCLOSE);
		}
		KASSERT(vp->v_usecount > 0);
	}

	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(vp->v_interlock);
		return;
	}

	if ((vp->v_iflag & VI_CLEAN) != 0) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		KASSERT(vp->v_holdcnt == 0);
		KASSERT(vp->v_writecount == 0);
		mutex_exit(vp->v_interlock);
		vfs_insmntque(vp, NULL);
		if (vp->v_type == VBLK || vp->v_type == VCHR) {
			spec_node_destroy(vp);
		}
		vnfree(vp);
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		mutex_enter(&vnode_free_list_lock);
		if (vp->v_holdcnt > 0) {
			vp->v_freelisthd = &vnode_hold_list;
		} else {
			vp->v_freelisthd = &vnode_free_list;
		}
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
		mutex_exit(vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	if ((vp->v_iflag & VI_INACTNOW) == 0 && vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0);
}

/*
 * Asynchronous vnode release, vnode is released in different context.
 */
void
vrele_async(vnode_t *vp)
{

	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	if ((vp->v_iflag & VI_INACTNOW) == 0 && vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC_RELE);
}

static void
vrele_thread(void *cookie)
{
	vnode_t *vp;

	for (;;) {
		mutex_enter(&vrele_lock);
		while (TAILQ_EMPTY(&vrele_list)) {
			vrele_gen++;
			cv_broadcast(&vrele_cv);
			cv_timedwait(&vrele_cv, &vrele_lock, hz);
		}
		vp = TAILQ_FIRST(&vrele_list);
		TAILQ_REMOVE(&vrele_list, vp, v_freelist);
		vrele_pending--;
		mutex_exit(&vrele_lock);

		/*
		 * If not the last reference, then ignore the vnode
		 * and look for more work.
		 */
		mutex_enter(vp->v_interlock);
		KASSERT((vp->v_iflag & VI_INACTPEND) != 0);
		vp->v_iflag &= ~VI_INACTPEND;
		vrelel(vp, 0);
	}
}

void
vrele_flush(void)
{
	int gen;

	mutex_enter(&vrele_lock);
	gen = vrele_gen;
	while (vrele_pending && gen == vrele_gen) {
		cv_broadcast(&vrele_cv);
		cv_wait(&vrele_cv, &vrele_lock);
	}
	mutex_exit(&vrele_lock);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 */
void
vref(vnode_t *vp)
{

	KASSERT((vp->v_iflag & VI_MARKER) == 0);
	KASSERT(vp->v_usecount != 0);

	atomic_inc_uint(&vp->v_usecount);
}
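
/*
 * Example (hedged; "fvp" is hypothetical): vref() is only legal while
 * some other reference is already held, e.g. when duplicating the
 * reference held by an open file:
 *
 *	vref(fvp);		(fvp already referenced by the file)
 *	...
 *	vrele(fvp);
 *
 * A first reference cannot be taken this way; use vget() with the
 * interlock held.
 */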

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	if (vp->v_holdcnt++ == 0 && vp->v_usecount == 0) {
		mutex_enter(&vnode_free_list_lock);
		KASSERT(vp->v_freelisthd == &vnode_free_list);
		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
		vp->v_freelisthd = &vnode_hold_list;
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
	}
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	if (vp->v_holdcnt <= 0) {
		vpanic(vp, "holdrelel: bad holdcnt");
	}

	vp->v_holdcnt--;
	if (vp->v_holdcnt == 0 && vp->v_usecount == 0) {
		mutex_enter(&vnode_free_list_lock);
		KASSERT(vp->v_freelisthd == &vnode_hold_list);
		TAILQ_REMOVE(vp->v_freelisthd, vp, v_freelist);
		vp->v_freelisthd = &vnode_free_list;
		TAILQ_INSERT_TAIL(vp->v_freelisthd, vp, v_freelist);
		mutex_exit(&vnode_free_list_lock);
	}
}
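
/*
 * Sketch (hedged): vholdl() and holdrelel() bracket a page or buffer's
 * association with the vnode, keeping an otherwise unreferenced vnode
 * on vnode_hold_list rather than vnode_free_list:
 *
 *	mutex_enter(vp->v_interlock);
 *	vholdl(vp);			(e.g. when a buf is attached)
 *	mutex_exit(vp->v_interlock);
 *	...
 *	mutex_enter(vp->v_interlock);
 *	holdrelel(vp);			(when the buf is detached)
 *	mutex_exit(vp->v_interlock);
 */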

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with the interlock held, and will return with it held.
 */
void
vclean(vnode_t *vp, int flags)
{
	lwp_t *l = curlwp;
	bool recycle, active;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT((vp->v_iflag & VI_MARKER) == 0);
	KASSERT(vp->v_usecount != 0);

	/* If cleaning is already in progress wait until done and return. */
	if (vp->v_iflag & VI_XLOCK) {
		vwait(vp, VI_XLOCK);
		return;
	}

	/* If already clean, nothing to do. */
	if ((vp->v_iflag & VI_CLEAN) != 0) {
		return;
	}

	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	vp->v_iflag |= VI_XLOCK;
	if (vp->v_iflag & VI_EXECMAP) {
		atomic_add_int(&uvmexp.execpages, -vp->v_uobj.uo_npages);
		atomic_add_int(&uvmexp.filepages, vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	active = (vp->v_usecount & VC_MASK) > 1;

	/* XXXAD should not lock vnode under layer */
	mutex_exit(vp->v_interlock);
	VOP_LOCK(vp, LK_EXCLUSIVE);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.  Note that the
	 * VOP_INACTIVE will unlock the vnode.
	 */
	if (flags & DOCLOSE) {
		error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
		if (error != 0) {
			/* XXX, fix vn_start_write's grab of mp and use that. */

			if (wapbl_vphaswapbl(vp))
				WAPBL_DISCARD(wapbl_vptomp(vp));
			error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
		}
		KASSERT(error == 0);
		KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
		if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
			spec_node_revoke(vp);
		}
	}
	if (active) {
		VOP_INACTIVE(vp, &recycle);
	} else {
		/*
		 * Any other processes trying to obtain this lock must first
		 * wait for VI_XLOCK to clear, then call the new lock operation.
		 */
		VOP_UNLOCK(vp);
	}

	/* Disassociate the underlying file system from the vnode. */
	if (VOP_RECLAIM(vp)) {
		vpanic(vp, "vclean: cannot reclaim");
	}

	KASSERT(vp->v_uobj.uo_npages == 0);
	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}
	cache_purge(vp);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_tag = VT_NON;
	KNOTE(&vp->v_klist, NOTE_REVOKE);
	vp->v_iflag &= ~VI_XLOCK;
	vp->v_vflag &= ~VV_LOCKSWORK;
	if ((flags & DOCLOSE) != 0) {
		vp->v_iflag |= VI_CLEAN;
	}
	cv_broadcast(&vp->v_cv);

	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}

/*
 * Recycle an unused vnode to the front of the free list.
 * Release the passed interlock if the vnode will be recycled.
 */
int
vrecycle(vnode_t *vp, kmutex_t *inter_lkp, struct lwp *l)
{

	KASSERT((vp->v_iflag & VI_MARKER) == 0);

	mutex_enter(vp->v_interlock);
	if (vp->v_usecount != 0) {
		mutex_exit(vp->v_interlock);
		return 0;
	}
	if (inter_lkp) {
		mutex_exit(inter_lkp);
	}
	vremfree(vp);
	vp->v_usecount = 1;
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
	return 1;
}
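
/*
 * Usage sketch (hedged): a file system that finds an inode is no longer
 * wanted can attempt to recycle its vnode on the spot:
 *
 *	if (vrecycle(vp, NULL, curlwp))
 *		return 0;	(vnode was unused; it is now clean)
 *
 * A zero return means the vnode was still referenced and was left alone.
 */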

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	vnode_t *vq, **vpp;
	enum vtype type;
	dev_t dev;

	KASSERT(vp->v_usecount > 0);

	mutex_enter(vp->v_interlock);
	if ((vp->v_iflag & VI_CLEAN) != 0) {
		mutex_exit(vp->v_interlock);
		return;
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		vclean(vp, DOCLOSE);
		vrelel(vp, 0);
		return;
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);
	}

	vpp = &specfs_hash[SPECHASH(dev)];
	mutex_enter(&device_lock);
	for (vq = *vpp; vq != NULL;) {
		/* If clean or being cleaned, then ignore it. */
		mutex_enter(vq->v_interlock);
		if ((vq->v_iflag & (VI_CLEAN | VI_XLOCK)) != 0 ||
		    vq->v_rdev != dev || vq->v_type != type) {
			mutex_exit(vq->v_interlock);
			vq = vq->v_specnext;
			continue;
		}
		mutex_exit(&device_lock);
		if (vq->v_usecount == 0) {
			vremfree(vq);
			vq->v_usecount = 1;
		} else {
			atomic_inc_uint(&vq->v_usecount);
		}
		vclean(vq, DOCLOSE);
		vrelel(vq, 0);
		mutex_enter(&device_lock);
		vq = *vpp;
	}
	mutex_exit(&device_lock);
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{

	mutex_enter(vp->v_interlock);
	vclean(vp, DOCLOSE);
	vrelel(vp, 0);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(struct buf *bp)
{
	vnode_t *vp;

	if ((vp = bp->b_vp) == NULL)
		return;

	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));

	if (--vp->v_numoutput < 0)
		panic("vwakeup: neg numoutput, vp %p", vp);
	if (vp->v_numoutput == 0)
		cv_broadcast(&vp->v_cv);
}

/*
 * Wait for a vnode (typically with VI_XLOCK set) to be cleaned or
 * recycled.
 */
void
vwait(vnode_t *vp, int flags)
{

	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount != 0);

	while ((vp->v_iflag & flags) != 0)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

int
vfs_drainvnodes(long target)
{

	while (numvnodes > target) {
		vnode_t *vp;

		mutex_enter(&vnode_free_list_lock);
		vp = getcleanvnode();
		if (vp == NULL) {
			return EBUSY;
		}
		ungetnewvnode(vp);
	}
	return 0;
}
static void
vpanic(vnode_t *vp, const char *msg)
{
#ifdef DIAGNOSTIC

	vprint(NULL, vp);
	panic("%s\n", msg);
#endif
}