/*	$NetBSD: vfs_vnode.c,v 1.105.2.6 2020/01/25 15:54:03 ad Exp $	*/

/*-
 * Copyright (c) 1997-2011, 2019 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jason R. Thorpe of the Numerical Aerospace Simulation Facility,
 * NASA Ames Research Center, by Charles M. Hannum, and by Andrew Doran.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_subr.c	8.13 (Berkeley) 4/18/94
 */

/*
 * The vnode cache subsystem.
 *
 * Life-cycle
 *
 *	Normally, there are two points where new vnodes are created:
 *	VOP_CREATE(9) and VOP_LOOKUP(9).  The life-cycle of a vnode
 *	starts in one of the following ways:
 *
 *	- Allocation, via vcache_get(9) or vcache_new(9).
 *	- Reclamation of an inactive vnode, via vcache_vget(9).
 *
 *	Recycling from a free list, via getnewvnode(9) -> getcleanvnode(9),
 *	was another, traditional way.  Currently, only the draining thread
 *	recycles vnodes.  This behaviour might be revisited.
 *
 *	The life-cycle ends when the last reference is dropped, usually
 *	in VOP_REMOVE(9).  In that case, VOP_INACTIVE(9) is called to inform
 *	the file system that the vnode is inactive.  Via this call, the file
 *	system indicates whether the vnode can be recycled (usually, it
 *	checks its own references, e.g. the link count, or whether the file
 *	was removed).
 *
 *	Depending on that indication, the vnode can be put onto a free list
 *	(cache), or cleaned via vcache_reclaim, which calls VOP_RECLAIM(9)
 *	to disassociate the underlying file system from the vnode, and
 *	finally destroyed.
 *
 * Vnode state
 *
 *	A vnode is always in one of six states:
 *	- MARKER	This is a marker vnode to help list traversal.  It
 *			will never change its state.
 *	- LOADING	Vnode is associating with the underlying file system
 *			and is not yet ready to use.
 *	- LOADED	Vnode has an associated underlying file system and is
 *			ready to use.
 *	- BLOCKED	Vnode is active but cannot get new references.
 *	- RECLAIMING	Vnode is disassociating from the underlying file
 *			system.
 *	- RECLAIMED	Vnode has disassociated from the underlying file
 *			system and is dead.
 *
 *	Valid state changes are:
 *	LOADING -> LOADED
 *			Vnode has been initialised in vcache_get() or
 *			vcache_new() and is ready to use.
 *	LOADED -> RECLAIMING
 *			Vnode starts disassociation from the underlying file
 *			system in vcache_reclaim().
 *	RECLAIMING -> RECLAIMED
 *			Vnode finished disassociation from the underlying file
 *			system in vcache_reclaim().
 *	LOADED -> BLOCKED
 *			vcache_rekey*() is changing the vnode key.
 *	BLOCKED -> LOADED
 *			The block condition is over.
 *	LOADING -> RECLAIMED
 *			Either vcache_get() or vcache_new() failed to
 *			associate the underlying file system, or
 *			vcache_rekey*() drops a vnode used as placeholder.
 *
 *	Of these states, LOADING, BLOCKED and RECLAIMING are intermediate,
 *	and it is possible to wait for a state change.
 *
 *	State is protected with v_interlock with one exception:
 *	to change from LOADING both v_interlock and vcache_lock must be held,
 *	so it is possible to check "state == LOADING" while holding only
 *	vcache_lock.  See vcache_get() for details.
 *
 * Reference counting
 *
 *	A vnode is considered active if its reference count
 *	(vnode_t::v_usecount) is non-zero.  The count is maintained by the
 *	vref(9) and vrele(9) routines, as well as vput(9).  Common points
 *	holding references are e.g. open files, current working directories,
 *	mount points, etc.
 *
 * Note on v_usecount & v_holdcnt and their locking
 *
 *	At nearly all points where it is known that the counts could be
 *	zero, vnode_t::v_interlock will be held.  To change a count away
 *	from zero, the interlock must be held.  To change from a non-zero
 *	value to zero, again the interlock must be held.
 *
 *	Changing the usecount from one non-zero value to another non-zero
 *	value can safely be done using atomic operations, without the
 *	interlock held.
 */
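
/*
 * Illustrative sketch of the common life-cycle described above, as seen
 * from a file system's lookup path.  This is a hedged example only: the
 * inode number key and the surrounding error handling are hypothetical
 * and not part of this file.
 *
 *	struct vnode *vp;
 *	ino_t ino = ...;
 *	int error;
 *
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *	if (error)
 *		return error;			// vp is NULL on error
 *	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	// lock for vnode operations
 *	...use the vnode...
 *	vput(vp);			// unlock and drop the reference
 */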

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_vnode.c,v 1.105.2.6 2020/01/25 15:54:03 ad Exp $");

#include <sys/param.h>
#include <sys/kernel.h>

#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/conf.h>
#include <sys/device.h>
#include <sys/hash.h>
#include <sys/kauth.h>
#include <sys/kmem.h>
#include <sys/kthread.h>
#include <sys/module.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/syscallargs.h>
#include <sys/sysctl.h>
#include <sys/systm.h>
#include <sys/vnode_impl.h>
#include <sys/wapbl.h>
#include <sys/fstrans.h>

#include <uvm/uvm.h>
#include <uvm/uvm_readahead.h>
#include <uvm/uvm_stat.h>

/* Flags to vrelel. */
#define	VRELEL_ASYNC	0x0001	/* Always defer to vrele thread. */

#define	LRU_VRELE	0
#define	LRU_FREE	1
#define	LRU_HOLD	2
#define	LRU_COUNT	3

/*
 * There are three lru lists: one holds vnodes waiting for async release,
 * one is for vnodes which have no buffer/page references and one for those
 * which do (i.e. v_holdcnt is non-zero).  We put the lists into a single,
 * private cache line as vnodes migrate between them while under the same
 * lock (vdrain_lock).
 */
u_int			numvnodes		__cacheline_aligned;
static vnodelst_t	lru_list[LRU_COUNT]	__cacheline_aligned;
static kmutex_t		vdrain_lock		__cacheline_aligned;
static kcondvar_t	vdrain_cv;
static int		vdrain_gen;
static kcondvar_t	vdrain_gen_cv;
static bool		vdrain_retry;
static lwp_t *		vdrain_lwp;
SLIST_HEAD(hashhead, vnode_impl);
static kmutex_t		vcache_lock		__cacheline_aligned;
static kcondvar_t	vcache_cv;
static u_int		vcache_hashsize;
static u_long		vcache_hashmask;
static struct hashhead	*vcache_hashtab;
static pool_cache_t	vcache_pool;
static void		lru_requeue(vnode_t *, vnodelst_t *);
static vnodelst_t *	lru_which(vnode_t *);
static vnode_impl_t *	vcache_alloc(void);
static void		vcache_dealloc(vnode_impl_t *);
static void		vcache_free(vnode_impl_t *);
static void		vcache_init(void);
static void		vcache_reinit(void);
static void		vcache_reclaim(vnode_t *);
static void		vrelel(vnode_t *, int, int);
static void		vdrain_thread(void *);
static void		vnpanic(vnode_t *, const char *, ...)
    __printflike(2, 3);

/* Routines having to do with the management of the vnode table. */
extern struct mount	*dead_rootmount;
extern int		(**dead_vnodeop_p)(void *);
extern int		(**spec_vnodeop_p)(void *);
extern struct vfsops	dead_vfsops;

/* Vnode state operations and diagnostics. */

#if defined(DIAGNOSTIC)

#define VSTATE_VALID(state) \
	((state) != VS_ACTIVE && (state) != VS_MARKER)
#define VSTATE_GET(vp) \
	vstate_assert_get((vp), __func__, __LINE__)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)
#define VSTATE_WAIT_STABLE(vp) \
	vstate_assert_wait_stable((vp), __func__, __LINE__)

void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	if (!has_lock) {
		/*
		 * Prevent predictive loads from the CPU, but check the state
		 * without locking first.
		 */
		membar_enter();
		if (state == VS_ACTIVE && vp->v_usecount > 0 &&
		    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED))
			return;
		if (vip->vi_state == state)
			return;
		mutex_enter((vp)->v_interlock);
	}

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);

	if ((state == VS_ACTIVE && vp->v_usecount > 0 &&
	    (vip->vi_state == VS_LOADED || vip->vi_state == VS_BLOCKED)) ||
	    vip->vi_state == state) {
		if (!has_lock)
			mutex_exit((vp)->v_interlock);
		return;
	}
	vnpanic(vp, "state is %s, usecount %d, expected %s at %s:%d",
	    vstate_name(vip->vi_state), vp->v_usecount,
	    vstate_name(state), func, line);
}

static enum vnode_state
vstate_assert_get(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	return vip->vi_state;
}

static void
vstate_assert_wait_stable(vnode_t *vp, const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);

	if (! VSTATE_VALID(vip->vi_state))
		vnpanic(vp, "state is %s at %s:%d",
		    vstate_name(vip->vi_state), func, line);
}

static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache_lock), "at %s:%d", func, line);

	if (! VSTATE_VALID(from))
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (! VSTATE_VALID(to))
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (vip->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(vip->vi_state), vstate_name(from), func, line);
	if ((from == VS_BLOCKED || to == VS_BLOCKED) && vp->v_usecount != 1)
		vnpanic(vp, "%s to %s with usecount %d at %s:%d",
		    vstate_name(from), vstate_name(to), vp->v_usecount,
		    func, line);

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#else /* defined(DIAGNOSTIC) */

#define VSTATE_GET(vp) \
	(VNODE_TO_VIMPL((vp))->vi_state)
#define VSTATE_CHANGE(vp, from, to) \
	vstate_change((vp), (from), (to))
#define VSTATE_WAIT_STABLE(vp) \
	vstate_wait_stable((vp))

void
_vstate_assert(vnode_t *vp, enum vnode_state state, const char *func, int line,
    bool has_lock)
{

}

static void
vstate_wait_stable(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	while (vip->vi_state != VS_LOADED && vip->vi_state != VS_RECLAIMED)
		cv_wait(&vp->v_cv, vp->v_interlock);
}

static void
vstate_change(vnode_t *vp, enum vnode_state from, enum vnode_state to)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);

	vip->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache_cv);
	if (to == VS_LOADED || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}

#endif /* defined(DIAGNOSTIC) */

void
vfs_vnode_sysinit(void)
{
	int error __diagused, i;

	dead_rootmount = vfs_mountalloc(&dead_vfsops, NULL);
	KASSERT(dead_rootmount != NULL);
	dead_rootmount->mnt_iflag |= IMNT_MPSAFE;

	mutex_init(&vdrain_lock, MUTEX_DEFAULT, IPL_NONE);
	for (i = 0; i < LRU_COUNT; i++) {
		TAILQ_INIT(&lru_list[i]);
	}
	vcache_init();

	cv_init(&vdrain_cv, "vdrain");
	cv_init(&vdrain_gen_cv, "vdrainwt");
	error = kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL, vdrain_thread,
	    NULL, &vdrain_lwp, "vdrain");
	KASSERTMSG((error == 0), "kthread_create(vdrain) failed: %d", error);
}

/*
 * Allocate a new marker vnode.
 */
vnode_t *
vnalloc_marker(struct mount *mp)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	memset(vip, 0, sizeof(*vip));
	vp = VIMPL_TO_VNODE(vip);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	vp->v_mount = mp;
	vp->v_type = VBAD;
	vip->vi_state = VS_MARKER;

	return vp;
}

/*
 * Free a marker vnode.
 */
void
vnfree_marker(vnode_t *vp)
{
	vnode_impl_t *vip;

	vip = VNODE_TO_VIMPL(vp);
	KASSERT(vip->vi_state == VS_MARKER);
	uvm_obj_destroy(&vp->v_uobj, true);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Test a vnode for being a marker vnode.
 */
bool
vnis_marker(vnode_t *vp)
{

	return (VNODE_TO_VIMPL(vp)->vi_state == VS_MARKER);
}
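
/*
 * Marker vnodes let a traversal drop the list lock between steps: the
 * marker keeps the current position while the list may change.  A hedged
 * sketch of the idiom, mirroring vrele_flush() and vdrain_thread() below
 * (the list chosen here is illustrative only):
 *
 *	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));
 *	mutex_enter(&vdrain_lock);
 *	TAILQ_INSERT_HEAD(&lru_list[LRU_FREE], marker, vi_lrulist);
 *	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
 *		// Advance the marker past vip before possibly unlocking.
 *		TAILQ_REMOVE(&lru_list[LRU_FREE], marker, vi_lrulist);
 *		TAILQ_INSERT_AFTER(&lru_list[LRU_FREE], vip, marker,
 *		    vi_lrulist);
 *		if (vnis_marker(VIMPL_TO_VNODE(vip)))
 *			continue;	// skip other threads' markers
 *		...may drop and retake vdrain_lock here...
 *	}
 *	TAILQ_REMOVE(&lru_list[LRU_FREE], marker, vi_lrulist);
 *	mutex_exit(&vdrain_lock);
 *	vnfree_marker(VIMPL_TO_VNODE(marker));
 */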

/*
 * Return the lru list this node should be on.
 */
static vnodelst_t *
lru_which(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt > 0)
		return &lru_list[LRU_HOLD];
	else
		return &lru_list[LRU_FREE];
}

/*
 * Put the vnode to the end of the given list.
 * Both the current and the new list may be NULL, which is used on
 * vnode alloc/free.
 * Adjust numvnodes and signal the vdrain thread if there is work.
 */
static void
lru_requeue(vnode_t *vp, vnodelst_t *listhd)
{
	vnode_impl_t *vip;
	int d;

	/*
	 * If the vnode is on the correct list, and was put there recently,
	 * then leave it be, thus avoiding huge cache and lock contention.
	 */
	vip = VNODE_TO_VIMPL(vp);
	if (listhd == vip->vi_lrulisthd &&
	    (hardclock_ticks - vip->vi_lrulisttm) < hz) {
		return;
	}

	mutex_enter(&vdrain_lock);
	d = 0;
	if (vip->vi_lrulisthd != NULL)
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d++;
	vip->vi_lrulisthd = listhd;
	vip->vi_lrulisttm = hardclock_ticks;
	if (vip->vi_lrulisthd != NULL)
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
	else
		d--;
	if (d != 0) {
		/*
		 * Looks strange?  This is not a bug.  Don't store
		 * numvnodes unless there is a change - avoid false
		 * sharing on MP.
		 */
		numvnodes += d;
	}
	if (numvnodes > desiredvnodes || listhd == &lru_list[LRU_VRELE])
		cv_broadcast(&vdrain_cv);
	mutex_exit(&vdrain_lock);
}

/*
 * Release deferred vrele vnodes for this mount.
 * Called with the file system suspended.
 */
void
vrele_flush(struct mount *mp)
{
	vnode_impl_t *vip, *marker;
	vnode_t *vp;

	KASSERT(fstrans_is_owner(mp));

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);
	TAILQ_INSERT_HEAD(&lru_list[LRU_VRELE], marker, vi_lrulist);

	while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
		TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
		TAILQ_INSERT_AFTER(&lru_list[LRU_VRELE], vip, marker,
		    vi_lrulist);
		vp = VIMPL_TO_VNODE(vip);
		if (vnis_marker(vp))
			continue;

		KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
		TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
		vip->vi_lrulisthd = &lru_list[LRU_HOLD];
		vip->vi_lrulisttm = hardclock_ticks;
		TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);
		mutex_exit(&vdrain_lock);

		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		mutex_enter(vp->v_interlock);
		vrelel(vp, 0, LK_EXCLUSIVE);

		mutex_enter(&vdrain_lock);
	}

	TAILQ_REMOVE(&lru_list[LRU_VRELE], marker, vi_lrulist);
	mutex_exit(&vdrain_lock);

	vnfree_marker(VIMPL_TO_VNODE(marker));
}

/*
 * Reclaim a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_remove(vnode_t *vp)
{
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	/* Probe usecount (unlocked). */
	if (vp->v_usecount > 0)
		return;
	/* Try v_interlock -- we lock the wrong direction! */
	if (!mutex_tryenter(vp->v_interlock))
		return;
	/* Probe usecount and state. */
	if (vp->v_usecount > 0 || VSTATE_GET(vp) != VS_LOADED) {
		mutex_exit(vp->v_interlock);
		return;
	}
	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0) {
		mutex_exit(vp->v_interlock);
		return;
	}
	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	if (vcache_vget(vp) == 0) {
		if (!vrecycle(vp)) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			mutex_enter(vp->v_interlock);
			vrelel(vp, 0, LK_EXCLUSIVE);
		}
	}
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Release a cached vnode.  Used from vdrain_thread only.
 */
static __inline void
vdrain_vrele(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp;

	KASSERT(mutex_owned(&vdrain_lock));

	mp = vp->v_mount;
	if (fstrans_start_nowait(mp) != 0)
		return;

	/*
	 * First remove the vnode from the vrele list.
	 * Put it onto the last (hold) lru list; the last vrele()
	 * will put it back onto the right list before
	 * its v_usecount reaches zero.
	 */
	KASSERT(vip->vi_lrulisthd == &lru_list[LRU_VRELE]);
	TAILQ_REMOVE(vip->vi_lrulisthd, vip, vi_lrulist);
	vip->vi_lrulisthd = &lru_list[LRU_HOLD];
	vip->vi_lrulisttm = hardclock_ticks;
	TAILQ_INSERT_TAIL(vip->vi_lrulisthd, vip, vi_lrulist);

	vdrain_retry = true;
	mutex_exit(&vdrain_lock);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, LK_EXCLUSIVE);
	fstrans_done(mp);

	mutex_enter(&vdrain_lock);
}

/*
 * Helper thread to keep the number of vnodes below desiredvnodes
 * and release vnodes from asynchronous vrele.
 */
static void
vdrain_thread(void *cookie)
{
	int i;
	u_int target;
	vnode_impl_t *vip, *marker;

	marker = VNODE_TO_VIMPL(vnalloc_marker(NULL));

	mutex_enter(&vdrain_lock);

	for (;;) {
		vdrain_retry = false;
		target = desiredvnodes - desiredvnodes/10;

		for (i = 0; i < LRU_COUNT; i++) {
			TAILQ_INSERT_HEAD(&lru_list[i], marker, vi_lrulist);
			while ((vip = TAILQ_NEXT(marker, vi_lrulist))) {
				TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
				TAILQ_INSERT_AFTER(&lru_list[i], vip, marker,
				    vi_lrulist);
				if (vnis_marker(VIMPL_TO_VNODE(vip)))
					continue;
				if (i == LRU_VRELE)
					vdrain_vrele(VIMPL_TO_VNODE(vip));
				else if (numvnodes < target)
					break;
				else
					vdrain_remove(VIMPL_TO_VNODE(vip));
			}
			TAILQ_REMOVE(&lru_list[i], marker, vi_lrulist);
		}

		if (vdrain_retry) {
			mutex_exit(&vdrain_lock);
			yield();
			mutex_enter(&vdrain_lock);
		} else {
			vdrain_gen++;
			cv_broadcast(&vdrain_gen_cv);
			cv_wait(&vdrain_cv, &vdrain_lock);
		}
	}
}

/*
 * Try to drop a reference on a vnode.  Abort if we are releasing the
 * last reference.  Note: this _must_ succeed if not the last reference.
 */
static bool
vtryrele(vnode_t *vp)
{
	u_int use, next;

	for (use = vp->v_usecount;; use = next) {
		if (__predict_false(use == 1)) {
			return false;
		}
		KASSERT(use > 1);
		next = atomic_cas_uint(&vp->v_usecount, use, use - 1);
		if (__predict_true(next == use)) {
			return true;
		}
	}
}

/*
 * vput: unlock and release the reference.
 */
void
vput(vnode_t *vp)
{
	int lktype;

	/*
	 * Do an unlocked check of v_usecount.  If it looks like we're not
	 * about to drop the last reference, then unlock the vnode and try
	 * to drop the reference.  If it ends up being the last reference
	 * after all, we dropped the lock when we shouldn't have.  vrelel()
	 * can fix it all up.  Most of the time this will all go to plan.
	 */
	if (vp->v_usecount > 1) {
		VOP_UNLOCK(vp);
		if (vtryrele(vp)) {
			return;
		}
		lktype = LK_NONE;
	} else if ((vp->v_vflag & VV_LOCKSWORK) == 0) {
		lktype = LK_EXCLUSIVE;
	} else {
		lktype = VOP_ISLOCKED(vp);
		KASSERT(lktype != LK_NONE);
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, lktype);
}

/*
 * Vnode release.  If the reference count drops to zero, call the
 * inactive routine and either return the vnode to the freelist or
 * free it to the pool.
 */
static void
vrelel(vnode_t *vp, int flags, int lktype)
{
	const bool async = ((flags & VRELEL_ASYNC) != 0);
	bool recycle, defer;
	int error;

	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(vp->v_op == dead_vnodeop_p &&
	    VSTATE_GET(vp) != VS_RECLAIMED)) {
		vnpanic(vp, "dead but not clean");
	}

	/*
	 * If not the last reference, just drop the reference count
	 * and unlock.
	 */
	if (vtryrele(vp)) {
		if (lktype != LK_NONE) {
			VOP_UNLOCK(vp);
		}
		mutex_exit(vp->v_interlock);
		return;
	}
	if (vp->v_usecount <= 0 || vp->v_writecount != 0) {
		vnpanic(vp, "%s: bad ref count", __func__);
	}

#ifdef DIAGNOSTIC
	if ((vp->v_type == VBLK || vp->v_type == VCHR) &&
	    vp->v_specnode != NULL && vp->v_specnode->sn_opencnt != 0) {
		vprint("vrelel: missing VOP_CLOSE()", vp);
	}
#endif

	/*
	 * First try to get the vnode locked for VOP_INACTIVE().
	 * Defer the vnode release to vdrain_thread if the caller
	 * requests it explicitly, if the caller is the pagedaemon,
	 * or if the lock attempt failed.
	 */
	defer = false;
	if ((curlwp == uvm.pagedaemon_lwp) || async) {
		defer = true;
	} else if (lktype == LK_SHARED) {
		/* Excellent chance of getting the lock, if the last ref. */
		error = vn_lock(vp, LK_UPGRADE | LK_RETRY |
		    LK_NOWAIT);
		if (error != 0) {
			defer = true;
		} else {
			lktype = LK_EXCLUSIVE;
		}
	} else if (lktype == LK_NONE) {
		/* Excellent chance of getting the lock, if the last ref. */
		error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY |
		    LK_NOWAIT);
		if (error != 0) {
			defer = true;
		} else {
			lktype = LK_EXCLUSIVE;
		}
	}
	KASSERT(mutex_owned(vp->v_interlock));
	if (defer) {
		/*
		 * Defer reclaim to the kthread; it's not safe to
		 * clean it here.  We donate it our last reference.
		 */
		if (lktype != LK_NONE) {
			VOP_UNLOCK(vp);
		}
		lru_requeue(vp, &lru_list[LRU_VRELE]);
		mutex_exit(vp->v_interlock);
		return;
	}
	KASSERT(lktype == LK_EXCLUSIVE);

	/*
	 * If not clean, deactivate the vnode, but preserve
	 * our reference across the call to VOP_INACTIVE().
	 */
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		VOP_UNLOCK(vp);
	} else {
		/*
		 * If VOP_INACTIVE() indicates that the described file has
		 * been deleted, then recycle the vnode.  Note that
		 * VOP_INACTIVE() will not drop the vnode lock.
		 *
		 * If the file has been deleted, this is a lingering
		 * reference and there is no need to worry about new
		 * references looking to do real work with the vnode (as it
		 * will have been purged from directories, caches, etc).
		 */
		recycle = false;
		mutex_exit(vp->v_interlock);
		VOP_INACTIVE(vp, &recycle);
		mutex_enter(vp->v_interlock);
		if (!recycle) {
			VOP_UNLOCK(vp);
			if (vtryrele(vp)) {
				mutex_exit(vp->v_interlock);
				return;
			}
		}

		/* Take care of space accounting. */
		if ((vp->v_iflag & VI_EXECMAP) != 0 &&
		    vp->v_uobj.uo_npages != 0) {
			cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
			cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
		}
		vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP|VI_WRMAP);
		vp->v_vflag &= ~VV_MAPPED;

		/*
		 * Recycle the vnode if the file is now unused (unlinked),
		 * otherwise just free it.
		 */
		if (recycle) {
			VSTATE_ASSERT(vp, VS_LOADED);
			/* vcache_reclaim drops the lock. */
			vcache_reclaim(vp);
		}
		KASSERT(vp->v_usecount > 0);
	}

	if (atomic_dec_uint_nv(&vp->v_usecount) != 0) {
		/* Gained another reference while being reclaimed. */
		mutex_exit(vp->v_interlock);
		return;
	}

	if (VSTATE_GET(vp) == VS_RECLAIMED && vp->v_holdcnt == 0) {
		/*
		 * It's clean so destroy it.  It isn't referenced
		 * anywhere since it has been reclaimed.
		 */
		vcache_free(VNODE_TO_VIMPL(vp));
	} else {
		/*
		 * Otherwise, put it back onto the freelist.  It
		 * can't be destroyed while still associated with
		 * a file system.
		 */
		lru_requeue(vp, lru_which(vp));
		mutex_exit(vp->v_interlock);
	}
}

void
vrele(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, 0, LK_NONE);
}

/*
 * Asynchronous vnode release: the vnode is released in a different
 * context.
 */
void
vrele_async(vnode_t *vp)
{

	if (vtryrele(vp)) {
		return;
	}
	mutex_enter(vp->v_interlock);
	vrelel(vp, VRELEL_ASYNC, LK_NONE);
}

/*
 * Vnode reference, where a reference is already held by some other
 * object (for example, a file structure).
 *
 * NB: we have lockless code sequences that rely on this not blocking.
 */
void
vref(vnode_t *vp)
{

	KASSERT(vp->v_usecount != 0);

	atomic_inc_uint(&vp->v_usecount);
}

/*
 * Page or buffer structure gets a reference.
 * Called with v_interlock held.
 */
void
vholdl(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (atomic_inc_uint_nv(&vp->v_holdcnt) == 1 && vp->v_usecount == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure gets a reference.
 */
void
vhold(vnode_t *vp)
{
	int hold, next;

	for (hold = vp->v_holdcnt;; hold = next) {
		if (__predict_false(hold == 0)) {
			break;
		}
		next = atomic_cas_uint(&vp->v_holdcnt, hold, hold + 1);
		if (__predict_true(next == hold)) {
			return;
		}
	}

	mutex_enter(vp->v_interlock);
	vholdl(vp);
	mutex_exit(vp->v_interlock);
}

/*
 * Page or buffer structure frees a reference.
 * Called with v_interlock held.
 */
void
holdrelel(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (vp->v_holdcnt <= 0) {
		vnpanic(vp, "%s: holdcnt vp %p", __func__, vp);
	}

	if (atomic_dec_uint_nv(&vp->v_holdcnt) == 0 && vp->v_usecount == 0)
		lru_requeue(vp, lru_which(vp));
}

/*
 * Page or buffer structure frees a reference.
 */
void
holdrele(vnode_t *vp)
{
	int hold, next;

	for (hold = vp->v_holdcnt;; hold = next) {
		if (__predict_false(hold == 1)) {
			break;
		}
		KASSERT(hold > 1);
		next = atomic_cas_uint(&vp->v_holdcnt, hold, hold - 1);
		if (__predict_true(next == hold)) {
			return;
		}
	}

	mutex_enter(vp->v_interlock);
	holdrelel(vp);
	mutex_exit(vp->v_interlock);
}

/*
 * Recycle an unused vnode if caller holds the last reference.
 */
bool
vrecycle(vnode_t *vp)
{
	int error __diagused;

	mutex_enter(vp->v_interlock);

	/* Make sure we hold the last reference. */
	VSTATE_WAIT_STABLE(vp);
	if (vp->v_usecount != 1) {
		mutex_exit(vp->v_interlock);
		return false;
	}

	/* If the vnode is already clean we're done. */
	if (VSTATE_GET(vp) != VS_LOADED) {
		VSTATE_ASSERT(vp, VS_RECLAIMED);
		vrelel(vp, 0, LK_NONE);
		return true;
	}

	/* Prevent further references until the vnode is locked. */
	VSTATE_CHANGE(vp, VS_LOADED, VS_BLOCKED);
	mutex_exit(vp->v_interlock);

	/*
	 * On a leaf file system this lock will always succeed as we hold
	 * the last reference and prevent further references.
	 * On layered file systems waiting for the lock would open a can of
	 * deadlocks as the lower vnodes may have other active references.
	 */
	error = vn_lock(vp, LK_EXCLUSIVE | LK_RETRY | LK_NOWAIT);

	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_BLOCKED, VS_LOADED);

	if (error) {
		mutex_exit(vp->v_interlock);
		return false;
	}

	KASSERT(vp->v_usecount == 1);
	vcache_reclaim(vp);
	vrelel(vp, 0, LK_NONE);

	return true;
}
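
/*
 * Hedged sketch, mirroring vdrain_remove() above, of how this file's
 * drain path takes a LOADED vnode it holds the last reference to out
 * of service, falling back to a plain release when the vnode cannot be
 * recycled right now (vrelel() is internal to this file):
 *
 *	if (!vrecycle(vp)) {
 *		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 *		mutex_enter(vp->v_interlock);
 *		vrelel(vp, 0, LK_EXCLUSIVE);
 *	}
 */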

/*
 * Helper for vrevoke() to propagate suspension from lastmp
 * to thismp.  Both args may be NULL.
 * Returns the currently suspended file system or NULL.
 */
static struct mount *
vrevoke_suspend_next(struct mount *lastmp, struct mount *thismp)
{
	int error;

	if (lastmp == thismp)
		return thismp;

	if (lastmp != NULL)
		vfs_resume(lastmp);

	if (thismp == NULL)
		return NULL;

	do {
		error = vfs_suspend(thismp, 0);
	} while (error == EINTR || error == ERESTART);

	if (error == 0)
		return thismp;

	KASSERT(error == EOPNOTSUPP);
	return NULL;
}

/*
 * Eliminate all activity associated with the requested vnode
 * and with all vnodes aliased to the requested vnode.
 */
void
vrevoke(vnode_t *vp)
{
	struct mount *mp;
	vnode_t *vq;
	enum vtype type;
	dev_t dev;

	KASSERT(vp->v_usecount > 0);

	mp = vrevoke_suspend_next(NULL, vp->v_mount);

	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_RECLAIMED) {
		mutex_exit(vp->v_interlock);
	} else if (vp->v_type != VBLK && vp->v_type != VCHR) {
		atomic_inc_uint(&vp->v_usecount);
		mutex_exit(vp->v_interlock);
		vgone(vp);
	} else {
		dev = vp->v_rdev;
		type = vp->v_type;
		mutex_exit(vp->v_interlock);

		while (spec_node_lookup_by_dev(type, dev, &vq) == 0) {
			mp = vrevoke_suspend_next(mp, vq->v_mount);
			vgone(vq);
		}
	}
	vrevoke_suspend_next(mp, NULL);
}

/*
 * Eliminate all activity associated with a vnode in preparation for
 * reuse.  Drops a reference from the vnode.
 */
void
vgone(vnode_t *vp)
{
	int lktype;

	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	lktype = LK_EXCLUSIVE;
	mutex_enter(vp->v_interlock);
	VSTATE_WAIT_STABLE(vp);
	if (VSTATE_GET(vp) == VS_LOADED) {
		vcache_reclaim(vp);
		lktype = LK_NONE;
	}
	VSTATE_ASSERT(vp, VS_RECLAIMED);
	vrelel(vp, 0, lktype);
}

static inline uint32_t
vcache_hash(const struct vcache_key *key)
{
	uint32_t hash = HASH32_BUF_INIT;

	KASSERT(key->vk_key_len > 0);

	hash = hash32_buf(&key->vk_mount, sizeof(struct mount *), hash);
	hash = hash32_buf(key->vk_key, key->vk_key_len, hash);
	return hash;
}

static void
vcache_init(void)
{

	vcache_pool = pool_cache_init(sizeof(vnode_impl_t), coherency_unit,
	    0, 0, "vcachepl", NULL, IPL_NONE, NULL, NULL, NULL);
	KASSERT(vcache_pool != NULL);
	mutex_init(&vcache_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&vcache_cv, "vcache");
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = hashinit(desiredvnodes, HASH_SLIST, true,
	    &vcache_hashmask);
}

static void
vcache_reinit(void)
{
	int i;
	uint32_t hash;
	u_long oldmask, newmask;
	struct hashhead *oldtab, *newtab;
	vnode_impl_t *vip;

	newtab = hashinit(desiredvnodes, HASH_SLIST, true, &newmask);
	mutex_enter(&vcache_lock);
	oldtab = vcache_hashtab;
	oldmask = vcache_hashmask;
	vcache_hashsize = desiredvnodes;
	vcache_hashtab = newtab;
	vcache_hashmask = newmask;
	for (i = 0; i <= oldmask; i++) {
		while ((vip = SLIST_FIRST(&oldtab[i])) != NULL) {
			SLIST_REMOVE(&oldtab[i], vip, vnode_impl, vi_hash);
			hash = vcache_hash(&vip->vi_key);
			SLIST_INSERT_HEAD(&newtab[hash & vcache_hashmask],
			    vip, vi_hash);
		}
	}
	mutex_exit(&vcache_lock);
	hashdone(oldtab, HASH_SLIST, oldmask);
}

static inline vnode_impl_t *
vcache_hash_lookup(const struct vcache_key *key, uint32_t hash)
{
	struct hashhead *hashp;
	vnode_impl_t *vip;

	KASSERT(mutex_owned(&vcache_lock));

	hashp = &vcache_hashtab[hash & vcache_hashmask];
	SLIST_FOREACH(vip, hashp, vi_hash) {
		if (key->vk_mount != vip->vi_key.vk_mount)
			continue;
		if (key->vk_key_len != vip->vi_key.vk_key_len)
			continue;
		if (memcmp(key->vk_key, vip->vi_key.vk_key, key->vk_key_len))
			continue;
		return vip;
	}
	return NULL;
}

/*
 * Allocate a new, uninitialized vcache node.
 */
static vnode_impl_t *
vcache_alloc(void)
{
	vnode_impl_t *vip;
	vnode_t *vp;

	vip = pool_cache_get(vcache_pool, PR_WAITOK);
	memset(vip, 0, sizeof(*vip));

	rw_init(&vip->vi_lock);

	vp = VIMPL_TO_VNODE(vip);
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	cv_init(&vp->v_cv, "vnode");
	cache_vnode_init(vp);

	vp->v_usecount = 1;
	vp->v_type = VNON;
	vp->v_size = vp->v_writesize = VSIZENOTSET;

	vip->vi_state = VS_LOADING;

	lru_requeue(vp, &lru_list[LRU_FREE]);

	return vip;
}

/*
 * Deallocate a vcache node in state VS_LOADING.
 *
 * vcache_lock held on entry and released on return.
 */
static void
vcache_dealloc(vnode_impl_t *vip)
{
	vnode_t *vp;

	KASSERT(mutex_owned(&vcache_lock));

	vp = VIMPL_TO_VNODE(vip);
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	VSTATE_CHANGE(vp, VS_LOADING, VS_RECLAIMED);
	mutex_exit(&vcache_lock);
	vrelel(vp, 0, LK_NONE);
}

/*
 * Free an unused, unreferenced vcache node.
 * v_interlock locked on entry.
 */
static void
vcache_free(vnode_impl_t *vip)
{
	vnode_t *vp;

	vp = VIMPL_TO_VNODE(vip);
	KASSERT(mutex_owned(vp->v_interlock));

	KASSERT(vp->v_usecount == 0);
	KASSERT(vp->v_holdcnt == 0);
	KASSERT(vp->v_writecount == 0);
	lru_requeue(vp, NULL);
	mutex_exit(vp->v_interlock);

	vfs_insmntque(vp, NULL);
	if (vp->v_type == VBLK || vp->v_type == VCHR)
		spec_node_destroy(vp);

	rw_destroy(&vip->vi_lock);
	uvm_obj_destroy(&vp->v_uobj, true);
	cv_destroy(&vp->v_cv);
	cache_vnode_fini(vp);
	pool_cache_put(vcache_pool, vip);
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success, ENOENT if the vnode has been reclaimed and
 * EBUSY if the vnode state is unstable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_tryvget(vnode_t *vp)
{
	int error = 0;

	KASSERT(mutex_owned(vp->v_interlock));

	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED))
		error = ENOENT;
	else if (__predict_false(VSTATE_GET(vp) != VS_LOADED))
		error = EBUSY;
	else if (vp->v_usecount == 0)
		vp->v_usecount = 1;
	else
		atomic_inc_uint(&vp->v_usecount);

	mutex_exit(vp->v_interlock);

	return error;
}

/*
 * Try to get an initial reference on this cached vnode.
 * Returns zero on success and ENOENT if the vnode has been reclaimed.
 * Will wait for the vnode state to be stable.
 *
 * v_interlock locked on entry and unlocked on exit.
 */
int
vcache_vget(vnode_t *vp)
{

	KASSERT(mutex_owned(vp->v_interlock));

	/* Increment hold count to prevent vnode from disappearing. */
	vp->v_holdcnt++;
	VSTATE_WAIT_STABLE(vp);
	vp->v_holdcnt--;

	/* If this was the last reference to a reclaimed vnode free it now. */
	if (__predict_false(VSTATE_GET(vp) == VS_RECLAIMED)) {
		if (vp->v_holdcnt == 0 && vp->v_usecount == 0)
			vcache_free(VNODE_TO_VIMPL(vp));
		else
			mutex_exit(vp->v_interlock);
		return ENOENT;
	}
	VSTATE_ASSERT(vp, VS_LOADED);
	if (vp->v_usecount == 0)
		vp->v_usecount = 1;
	else
		atomic_inc_uint(&vp->v_usecount);
	mutex_exit(vp->v_interlock);

	return 0;
}
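
/*
 * Hedged sketch of how the two variants compose: a caller already
 * holding v_interlock can try for a reference without sleeping and
 * fall back to the blocking variant (both calls unlock v_interlock):
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vcache_tryvget(vp);
 *	if (error == EBUSY) {
 *		mutex_enter(vp->v_interlock);
 *		error = vcache_vget(vp);	// waits for a stable state
 *	}
 */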

/*
 * Get a vnode / fs node pair by key and return it referenced through vpp.
 */
int
vcache_get(struct mount *mp, const void *key, size_t key_len,
    struct vnode **vpp)
{
	int error;
	uint32_t hash;
	const void *new_key;
	struct vnode *vp;
	struct vcache_key vcache_key;
	vnode_impl_t *vip, *new_vip;

	new_key = NULL;
	*vpp = NULL;

	vcache_key.vk_mount = mp;
	vcache_key.vk_key = key;
	vcache_key.vk_key_len = key_len;
	hash = vcache_hash(&vcache_key);

again:
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);

	/* If found, take a reference or retry. */
	if (__predict_true(vip != NULL)) {
		/*
		 * If the vnode is loading we cannot take the v_interlock
		 * here as it might change during load (see uvm_obj_setlock()).
		 * As changing state from VS_LOADING requires both vcache_lock
		 * and v_interlock it is safe to test with vcache_lock held.
		 *
		 * Wait for vnodes changing state from VS_LOADING and retry.
		 */
		if (__predict_false(vip->vi_state == VS_LOADING)) {
			cv_wait(&vcache_cv, &vcache_lock);
			mutex_exit(&vcache_lock);
			goto again;
		}
		vp = VIMPL_TO_VNODE(vip);
		mutex_enter(vp->v_interlock);
		mutex_exit(&vcache_lock);
		error = vcache_vget(vp);
		if (error == ENOENT)
			goto again;
		if (error == 0)
			*vpp = vp;
		KASSERT((error != 0) == (*vpp == NULL));
		return error;
	}
	mutex_exit(&vcache_lock);

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp);
	if (error)
		return error;
	new_vip = vcache_alloc();
	new_vip->vi_key = vcache_key;
	vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&vcache_key, hash);
	if (vip == NULL) {
		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vi_hash);
		vip = new_vip;
	}

	/* If another thread beat us inserting this node, retry. */
	if (vip != new_vip) {
		vcache_dealloc(new_vip);
		vfs_unbusy(mp);
		goto again;
	}
	mutex_exit(&vcache_lock);

	/* Load the fs node.  Exclusive as new_vip is VS_LOADING. */
	error = VFS_LOADVNODE(mp, vp, key, key_len, &new_key);
	if (error) {
		mutex_enter(&vcache_lock);
		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
		    new_vip, vnode_impl, vi_hash);
		vcache_dealloc(new_vip);
		vfs_unbusy(mp);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(new_key != NULL);
	KASSERT(memcmp(key, new_key, key_len) == 0);
	KASSERT(vp->v_op != NULL);
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_ref(mp);
	vfs_unbusy(mp);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	new_vip->vi_key.vk_key = new_key;
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
	mutex_exit(vp->v_interlock);
	mutex_exit(&vcache_lock);
	*vpp = vp;
	return 0;
}

/*
 * Create a new vnode / fs node pair and return it referenced through vpp.
 */
int
vcache_new(struct mount *mp, struct vnode *dvp, struct vattr *vap,
    kauth_cred_t cred, void *extra, struct vnode **vpp)
{
	int error;
	uint32_t hash;
	struct vnode *vp, *ovp;
	vnode_impl_t *vip, *ovip;

	*vpp = NULL;

	/* Allocate and initialize a new vcache / vnode pair. */
	error = vfs_busy(mp);
	if (error)
		return error;
	vip = vcache_alloc();
	vip->vi_key.vk_mount = mp;
	vp = VIMPL_TO_VNODE(vip);

	/* Create and load the fs node. */
	error = VFS_NEWVNODE(mp, dvp, vp, vap, cred, extra,
	    &vip->vi_key.vk_key_len, &vip->vi_key.vk_key);
	if (error) {
		mutex_enter(&vcache_lock);
		vcache_dealloc(vip);
		vfs_unbusy(mp);
		KASSERT(*vpp == NULL);
		return error;
	}
	KASSERT(vp->v_op != NULL);
	KASSERT((vip->vi_key.vk_key_len == 0) == (mp == dead_rootmount));
	if (vip->vi_key.vk_key_len > 0) {
		KASSERT(vip->vi_key.vk_key != NULL);
		hash = vcache_hash(&vip->vi_key);

		/*
		 * Wait for previous instance to be reclaimed,
		 * then insert new node.
		 */
		mutex_enter(&vcache_lock);
		while ((ovip = vcache_hash_lookup(&vip->vi_key, hash))) {
			ovp = VIMPL_TO_VNODE(ovip);
			mutex_enter(ovp->v_interlock);
			mutex_exit(&vcache_lock);
			error = vcache_vget(ovp);
			KASSERT(error == ENOENT);
			mutex_enter(&vcache_lock);
		}
		SLIST_INSERT_HEAD(&vcache_hashtab[hash & vcache_hashmask],
		    vip, vi_hash);
		mutex_exit(&vcache_lock);
	}
	vfs_insmntque(vp, mp);
	if ((mp->mnt_iflag & IMNT_MPSAFE) != 0)
		vp->v_vflag |= VV_MPSAFE;
	vfs_ref(mp);
	vfs_unbusy(mp);

	/* Finished loading, finalize node. */
	mutex_enter(&vcache_lock);
	mutex_enter(vp->v_interlock);
	VSTATE_CHANGE(vp, VS_LOADING, VS_LOADED);
	mutex_exit(&vcache_lock);
	mutex_exit(vp->v_interlock);
	*vpp = vp;
	return 0;
}

/*
 * Prepare key change: update the old cache node's key and lock the new
 * cache node.
 * Return an error if the new node already exists.
 */
int
vcache_rekey_enter(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *vip, *new_vip;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	new_vip = vcache_alloc();
	new_vip->vi_key = new_vcache_key;

	/* Insert locked new node used as placeholder. */
	mutex_enter(&vcache_lock);
	vip = vcache_hash_lookup(&new_vcache_key, new_hash);
	if (vip != NULL) {
		vcache_dealloc(new_vip);
		return EEXIST;
	}
	SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_vip, vi_hash);

	/* Replace the old node's key with the temporary copy. */
	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(vip != NULL);
	KASSERT(VIMPL_TO_VNODE(vip) == vp);
	KASSERT(vip->vi_key.vk_key != old_vcache_key.vk_key);
	vip->vi_key = old_vcache_key;
	mutex_exit(&vcache_lock);
	return 0;
}

/*
 * Key change complete: update the old node and remove the placeholder.
 */
void
vcache_rekey_exit(struct mount *mp, struct vnode *vp,
    const void *old_key, size_t old_key_len,
    const void *new_key, size_t new_key_len)
{
	uint32_t old_hash, new_hash;
	struct vcache_key old_vcache_key, new_vcache_key;
	vnode_impl_t *vip, *new_vip;
	struct vnode *new_vp;

	old_vcache_key.vk_mount = mp;
	old_vcache_key.vk_key = old_key;
	old_vcache_key.vk_key_len = old_key_len;
	old_hash = vcache_hash(&old_vcache_key);

	new_vcache_key.vk_mount = mp;
	new_vcache_key.vk_key = new_key;
	new_vcache_key.vk_key_len = new_key_len;
	new_hash = vcache_hash(&new_vcache_key);

	mutex_enter(&vcache_lock);

	/* Lookup old and new node. */
	vip = vcache_hash_lookup(&old_vcache_key, old_hash);
	KASSERT(vip != NULL);
	KASSERT(VIMPL_TO_VNODE(vip) == vp);

	new_vip = vcache_hash_lookup(&new_vcache_key, new_hash);
	KASSERT(new_vip != NULL);
	KASSERT(new_vip->vi_key.vk_key_len == new_key_len);
	new_vp = VIMPL_TO_VNODE(new_vip);
	mutex_enter(new_vp->v_interlock);
	VSTATE_ASSERT(VIMPL_TO_VNODE(new_vip), VS_LOADING);
	mutex_exit(new_vp->v_interlock);

	/* Rekey old node and put it onto its new hashlist. */
	vip->vi_key = new_vcache_key;
	if (old_hash != new_hash) {
		SLIST_REMOVE(&vcache_hashtab[old_hash & vcache_hashmask],
		    vip, vnode_impl, vi_hash);
		SLIST_INSERT_HEAD(&vcache_hashtab[new_hash & vcache_hashmask],
		    vip, vi_hash);
	}

	/* Remove new node used as placeholder. */
	SLIST_REMOVE(&vcache_hashtab[new_hash & vcache_hashmask],
	    new_vip, vnode_impl, vi_hash);
	vcache_dealloc(new_vip);
}
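
/*
 * Hedged sketch of the rekey protocol from a file system's rename path.
 * The key variables and the error handling are hypothetical; only the
 * vcache_rekey_enter()/vcache_rekey_exit() pairing is prescribed by
 * this file:
 *
 *	error = vcache_rekey_enter(mp, vp, &old_key, sizeof(old_key),
 *	    &new_key, sizeof(new_key));
 *	if (error)			// EEXIST: new key already cached
 *		return error;
 *	...change the node's identity on disk / in core...
 *	vcache_rekey_exit(mp, vp, &old_key, sizeof(old_key),
 *	    &new_key, sizeof(new_key));
 */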

/*
 * Disassociate the underlying file system from a vnode.
 *
 * Must be called with the vnode locked and will return unlocked.
 * Must be called with the interlock held, and will return with it held.
 */
static void
vcache_reclaim(vnode_t *vp)
{
	lwp_t *l = curlwp;
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	struct mount *mp = vp->v_mount;
	uint32_t hash;
	uint8_t temp_buf[64], *temp_key;
	size_t temp_key_len;
	bool recycle, active;
	int error;

	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	KASSERT(mutex_owned(vp->v_interlock));
	KASSERT(vp->v_usecount != 0);

	active = (vp->v_usecount > 1);
	temp_key_len = vip->vi_key.vk_key_len;
	/*
	 * Prevent the vnode from being recycled or brought into use
	 * while we clean it out.
	 */
	VSTATE_CHANGE(vp, VS_LOADED, VS_RECLAIMING);
	if ((vp->v_iflag & VI_EXECMAP) != 0 && vp->v_uobj.uo_npages != 0) {
		cpu_count(CPU_COUNT_EXECPAGES, -vp->v_uobj.uo_npages);
		cpu_count(CPU_COUNT_FILEPAGES, vp->v_uobj.uo_npages);
	}
	vp->v_iflag &= ~(VI_TEXT|VI_EXECMAP);
	mutex_exit(vp->v_interlock);

	/*
	 * With the vnode state set to reclaiming, purge the name cache
	 * immediately to prevent new handles on the vnode, and wait for
	 * existing threads trying to get a handle to notice the
	 * VS_RECLAIMED status and abort.
	 */
	cache_purge(vp);

	/* Replace the vnode key with a temporary copy. */
	if (vip->vi_key.vk_key_len > sizeof(temp_buf)) {
		temp_key = kmem_alloc(temp_key_len, KM_SLEEP);
	} else {
		temp_key = temp_buf;
	}
	if (vip->vi_key.vk_key_len > 0) {
		mutex_enter(&vcache_lock);
		memcpy(temp_key, vip->vi_key.vk_key, temp_key_len);
		vip->vi_key.vk_key = temp_key;
		mutex_exit(&vcache_lock);
	}

	fstrans_start(mp);

	/*
	 * Clean out any cached data associated with the vnode.
	 * If purging an active vnode, it must be closed and
	 * deactivated before being reclaimed.
	 */
	error = vinvalbuf(vp, V_SAVE, NOCRED, l, 0, 0);
	if (error != 0) {
		if (wapbl_vphaswapbl(vp))
			WAPBL_DISCARD(wapbl_vptomp(vp));
		error = vinvalbuf(vp, 0, NOCRED, l, 0, 0);
	}
	KASSERTMSG((error == 0), "vinvalbuf failed: %d", error);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
	if (active && (vp->v_type == VBLK || vp->v_type == VCHR)) {
		spec_node_revoke(vp);
	}

	/*
	 * Disassociate the underlying file system from the vnode.
	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
	 * would no longer function.
	 */
	VOP_INACTIVE(vp, &recycle);
	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	KASSERT(vp->v_data == NULL);
	KASSERT(vp->v_uobj.uo_npages == 0);

	if (vp->v_type == VREG && vp->v_ractx != NULL) {
		uvm_ra_freectx(vp->v_ractx);
		vp->v_ractx = NULL;
	}

	if (vip->vi_key.vk_key_len > 0) {
		/* Remove from vnode cache. */
		hash = vcache_hash(&vip->vi_key);
		mutex_enter(&vcache_lock);
		KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
		SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
		    vip, vnode_impl, vi_hash);
		mutex_exit(&vcache_lock);
	}
	if (temp_key != temp_buf)
		kmem_free(temp_key, temp_key_len);

	/* Done with purge, notify sleepers of the grim news. */
	mutex_enter(vp->v_interlock);
	vp->v_op = dead_vnodeop_p;
	vp->v_vflag |= VV_LOCKSWORK;
	VSTATE_CHANGE(vp, VS_RECLAIMING, VS_RECLAIMED);
	vp->v_tag = VT_NON;
	KNOTE(&vp->v_klist, NOTE_REVOKE);
	mutex_exit(vp->v_interlock);

	/*
	 * Move to dead mount.  Must be after changing the operations
	 * vector as vnode operations enter the mount before using the
	 * operations vector.  See sys/kern/vnode_if.c.
	 */
	vp->v_vflag &= ~VV_ROOT;
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);

	mutex_enter(vp->v_interlock);
	fstrans_done(mp);
	KASSERT((vp->v_iflag & VI_ONWORKLST) == 0);
}

/*
 * Disassociate the underlying file system from an open device vnode
 * and make it anonymous.
 *
 * Vnode unlocked on entry, drops a reference to the vnode.
 */
void
vcache_make_anon(vnode_t *vp)
{
	vnode_impl_t *vip = VNODE_TO_VIMPL(vp);
	uint32_t hash;
	bool recycle;

	KASSERT(vp->v_type == VBLK || vp->v_type == VCHR);
	KASSERT(vp->v_mount == dead_rootmount || fstrans_is_owner(vp->v_mount));
	VSTATE_ASSERT_UNLOCKED(vp, VS_ACTIVE);

	/* Remove from vnode cache. */
	hash = vcache_hash(&vip->vi_key);
	mutex_enter(&vcache_lock);
	KASSERT(vip == vcache_hash_lookup(&vip->vi_key, hash));
	SLIST_REMOVE(&vcache_hashtab[hash & vcache_hashmask],
	    vip, vnode_impl, vi_hash);
	vip->vi_key.vk_mount = dead_rootmount;
	vip->vi_key.vk_key_len = 0;
	vip->vi_key.vk_key = NULL;
	mutex_exit(&vcache_lock);

	/*
	 * Disassociate the underlying file system from the vnode.
	 * VOP_INACTIVE leaves the vnode locked; VOP_RECLAIM unlocks
	 * the vnode, and may destroy the vnode so that VOP_UNLOCK
	 * would no longer function.
	 */
	if (vn_lock(vp, LK_EXCLUSIVE)) {
		vnpanic(vp, "%s: cannot lock", __func__);
	}
	VOP_INACTIVE(vp, &recycle);
	KASSERT((vp->v_vflag & VV_LOCKSWORK) == 0 ||
	    VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	if (VOP_RECLAIM(vp)) {
		vnpanic(vp, "%s: cannot reclaim", __func__);
	}

	/* Purge name cache. */
	cache_purge(vp);

	/* Done with purge, change operations vector. */
	mutex_enter(vp->v_interlock);
	vp->v_op = spec_vnodeop_p;
	vp->v_vflag |= VV_MPSAFE;
	vp->v_vflag &= ~VV_LOCKSWORK;
	mutex_exit(vp->v_interlock);

	/*
	 * Move to dead mount.  Must be after changing the operations
	 * vector as vnode operations enter the mount before using the
	 * operations vector.  See sys/kern/vnode_if.c.
	 */
	vfs_ref(dead_rootmount);
	vfs_insmntque(vp, dead_rootmount);

	vrele(vp);
}

/*
 * Update outstanding I/O count and do wakeup if requested.
 */
void
vwakeup(struct buf *bp)
{
	vnode_t *vp;

	if ((vp = bp->b_vp) == NULL)
		return;

	KASSERT(bp->b_objlock == vp->v_interlock);
	KASSERT(mutex_owned(bp->b_objlock));

	if (--vp->v_numoutput < 0)
		vnpanic(vp, "%s: neg numoutput, vp %p", __func__, vp);
	if (vp->v_numoutput == 0)
		cv_broadcast(&vp->v_cv);
}

/*
 * Test a vnode for being or becoming dead.  Returns one of:
 * EBUSY:  vnode is becoming dead, with "flags == VDEAD_NOWAIT" only.
 * ENOENT: vnode is dead.
 * 0:      otherwise.
 *
 * Whenever this function returns a non-zero value all future
 * calls will also return a non-zero value.
 */
int
vdead_check(struct vnode *vp, int flags)
{

	KASSERT(mutex_owned(vp->v_interlock));

	if (! ISSET(flags, VDEAD_NOWAIT))
		VSTATE_WAIT_STABLE(vp);

	if (VSTATE_GET(vp) == VS_RECLAIMING) {
		KASSERT(ISSET(flags, VDEAD_NOWAIT));
		return EBUSY;
	} else if (VSTATE_GET(vp) == VS_RECLAIMED) {
		return ENOENT;
	}

	return 0;
}
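
/*
 * Hedged usage sketch: a caller already holding v_interlock can probe
 * for revocation without sleeping.  The surrounding code is
 * hypothetical; only vdead_check() and its return values come from
 * this file:
 *
 *	mutex_enter(vp->v_interlock);
 *	error = vdead_check(vp, VDEAD_NOWAIT);
 *	mutex_exit(vp->v_interlock);
 *	if (error == EBUSY)
 *		...reclaim is in progress, back off and retry...
 *	else if (error == ENOENT)
 *		...vnode is dead, give up...
 */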

int
vfs_drainvnodes(void)
{
	int i, gen;

	mutex_enter(&vdrain_lock);
	for (i = 0; i < 2; i++) {
		gen = vdrain_gen;
		while (gen == vdrain_gen) {
			cv_broadcast(&vdrain_cv);
			cv_wait(&vdrain_gen_cv, &vdrain_lock);
		}
	}
	mutex_exit(&vdrain_lock);

	if (numvnodes >= desiredvnodes)
		return EBUSY;

	if (vcache_hashsize != desiredvnodes)
		vcache_reinit();

	return 0;
}

void
vnpanic(vnode_t *vp, const char *fmt, ...)
{
	va_list ap;

#ifdef DIAGNOSTIC
	vprint(NULL, vp);
#endif
	va_start(ap, fmt);
	vpanic(fmt, ap);
	va_end(ap);
}