/*	$NetBSD: vfs_trans.c,v 1.45.2.1 2017/06/04 20:35:01 bouyer Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Juergen Hannken-Illjes.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_trans.c,v 1.45.2.1 2017/06/04 20:35:01 bouyer Exp $");

/*
 * File system transaction operations.
 */
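
/*
 * Overview (informal; see the functions below for the details):
 *
 * fstrans_mount() attaches transaction state to a mount and sets
 * IMNT_HAS_TRANS.  Code that modifies such a file system brackets the
 * work with a transaction so that a suspension can stop new operations
 * and drain the ones already running, roughly:
 *
 *	fstrans_start(mp);
 *	... modify the file system ...
 *	fstrans_done(mp);
 *
 * vfs_suspend()/vfs_resume() drive this through VFS_SUSPENDCTL(), whose
 * file system implementations typically use fstrans_setstate() to switch
 * the per-mount state.
 */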

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/pserialize.h>
#include <sys/vnode.h>
#include <sys/fstrans.h>
#include <sys/proc.h>

#include <miscfs/specfs/specdev.h>

enum fstrans_lock_type {
	FSTRANS_SHARED,			/* Granted while not suspending */
	FSTRANS_EXCL			/* Internal: exclusive lock */
};

struct fscow_handler {
	LIST_ENTRY(fscow_handler) ch_list;	/* Link on mount's handler list */
	int (*ch_func)(void *, struct buf *, bool); /* Copy-on-write handler */
	void *ch_arg;				/* Argument to the handler */
};
struct fstrans_lwp_info {
	struct fstrans_lwp_info *fli_succ;	/* Next info of this lwp */
	struct lwp *fli_self;			/* Owning lwp, NULL if free */
	struct mount *fli_mount;		/* Mount this info refers to */
	int fli_trans_cnt;			/* Recursion count of transactions */
	int fli_cow_cnt;			/* Recursion count of cow handler runs */
	enum fstrans_lock_type fli_lock_type;	/* Type of this transaction */
	LIST_ENTRY(fstrans_lwp_info) fli_list;	/* Link on global list */
};
struct fstrans_mount_info {
	enum fstrans_state fmi_state;		/* Current suspension state */
	unsigned int fmi_ref_cnt;		/* References from mount and lwp infos */
	bool fmi_cow_change;			/* True while cow list changes */
	LIST_HEAD(, fscow_handler) fmi_cow_handler; /* Copy-on-write handlers */
};

static specificdata_key_t lwp_data_key;	/* Our specific data key. */
static kmutex_t vfs_suspend_lock;	/* Serialize suspensions. */
static kmutex_t fstrans_lock;		/* Fstrans big lock. */
static kmutex_t fstrans_mount_lock;	/* Fstrans mount big lock. */
static kcondvar_t fstrans_state_cv;	/* Fstrans or cow state changed. */
static kcondvar_t fstrans_count_cv;	/* Fstrans or cow count changed. */
static pserialize_t fstrans_psz;	/* Pserialize state. */
static LIST_HEAD(fstrans_lwp_head, fstrans_lwp_info) fstrans_fli_head;
					/* List of all fstrans_lwp_info. */

static inline struct mount *fstrans_normalize_mount(struct mount *);
static void fstrans_lwp_dtor(void *);
static void fstrans_mount_dtor(struct mount *);
static struct fstrans_lwp_info *fstrans_get_lwp_info(struct mount *, bool);
static inline int _fstrans_start(struct mount *, enum fstrans_lock_type, int);
static bool grant_lock(const enum fstrans_state, const enum fstrans_lock_type);
static bool state_change_done(const struct mount *);
static bool cow_state_change_done(const struct mount *);
static void cow_change_enter(const struct mount *);
static void cow_change_done(const struct mount *);

/*
 * Initialize.
 */
void
fstrans_init(void)
{
	int error __diagused;

	error = lwp_specific_key_create(&lwp_data_key, fstrans_lwp_dtor);
	KASSERT(error == 0);

	mutex_init(&vfs_suspend_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&fstrans_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&fstrans_mount_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&fstrans_state_cv, "fstchg");
	cv_init(&fstrans_count_cv, "fstcnt");
	fstrans_psz = pserialize_create();
	LIST_INIT(&fstrans_fli_head);
}

/*
 * Normalize mount.
 * Return mount if file system supports fstrans, NULL otherwise.
 */
static inline struct mount *
fstrans_normalize_mount(struct mount *mp)
{

	while (mp && mp->mnt_lower)
		mp = mp->mnt_lower;
	if (mp == NULL)
		return NULL;
	if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
		return NULL;
	return mp;
}

/*
 * Deallocate lwp state.
 */
static void
fstrans_lwp_dtor(void *arg)
{
	struct fstrans_lwp_info *fli, *fli_next;

	for (fli = arg; fli; fli = fli_next) {
		KASSERT(fli->fli_trans_cnt == 0);
		KASSERT(fli->fli_cow_cnt == 0);
		if (fli->fli_mount != NULL)
			fstrans_mount_dtor(fli->fli_mount);
		fli_next = fli->fli_succ;
		fli->fli_mount = NULL;
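		/*
		 * Make the cleared mount pointer visible before this
		 * entry is marked free: once fli_self is NULL the
		 * entry may be reused by another lwp (see
		 * fstrans_get_lwp_info()).
		 */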
		membar_sync();
		fli->fli_self = NULL;
	}
}

/*
 * Dereference mount state.
 */
static void
fstrans_mount_dtor(struct mount *mp)
{
	struct fstrans_mount_info *fmi;

	mutex_enter(&fstrans_mount_lock);

	fmi = mp->mnt_transinfo;
	KASSERT(fmi != NULL);
	fmi->fmi_ref_cnt -= 1;
	if (fmi->fmi_ref_cnt > 0) {
		mutex_exit(&fstrans_mount_lock);
		return;
	}

	KASSERT(fmi->fmi_state == FSTRANS_NORMAL);
	KASSERT(LIST_FIRST(&fmi->fmi_cow_handler) == NULL);

	mp->mnt_iflag &= ~IMNT_HAS_TRANS;
	mp->mnt_transinfo = NULL;

	mutex_exit(&fstrans_mount_lock);

	kmem_free(fmi, sizeof(*fmi));
	vfs_rele(mp);
}

/*
 * Allocate mount state.
 */
int
fstrans_mount(struct mount *mp)
{
	struct fstrans_mount_info *newfmi;

	newfmi = kmem_alloc(sizeof(*newfmi), KM_SLEEP);
	newfmi->fmi_state = FSTRANS_NORMAL;
	newfmi->fmi_ref_cnt = 1;
	LIST_INIT(&newfmi->fmi_cow_handler);
	newfmi->fmi_cow_change = false;

	mutex_enter(&fstrans_mount_lock);
	mp->mnt_transinfo = newfmi;
	mp->mnt_iflag |= IMNT_HAS_TRANS;
	mutex_exit(&fstrans_mount_lock);

	vfs_ref(mp);

	return 0;
}

/*
 * Deallocate mount state.
 */
void
fstrans_unmount(struct mount *mp)
{

	if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
		return;

	KASSERT(mp->mnt_transinfo != NULL);

	fstrans_mount_dtor(mp);
}

/*
 * Retrieve the per lwp info for this mount, allocating if necessary.
 */
static struct fstrans_lwp_info *
fstrans_get_lwp_info(struct mount *mp, bool do_alloc)
{
	struct fstrans_lwp_info *fli, *res;
	struct fstrans_mount_info *fmi;

	/*
	 * Scan our list for a match clearing entries whose mount is gone.
	 */
	res = NULL;
	for (fli = lwp_getspecific(lwp_data_key); fli; fli = fli->fli_succ) {
		if (fli->fli_mount == mp) {
			KASSERT(res == NULL);
			res = fli;
		} else if (fli->fli_mount != NULL &&
		    (fli->fli_mount->mnt_iflag & IMNT_GONE) != 0 &&
		    fli->fli_trans_cnt == 0 && fli->fli_cow_cnt == 0) {
			fstrans_mount_dtor(fli->fli_mount);
			fli->fli_mount = NULL;
		}
	}
	if (__predict_true(res != NULL))
		return res;

	if (! do_alloc)
		return NULL;

	/*
	 * Try to reuse a cleared entry or allocate a new one.
	 */
	for (fli = lwp_getspecific(lwp_data_key); fli; fli = fli->fli_succ) {
		if (fli->fli_mount == NULL) {
			KASSERT(fli->fli_trans_cnt == 0);
			KASSERT(fli->fli_cow_cnt == 0);
			break;
		}
	}
	if (fli == NULL) {
		mutex_enter(&fstrans_lock);
		LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
			if (fli->fli_self == NULL) {
				KASSERT(fli->fli_mount == NULL);
				KASSERT(fli->fli_trans_cnt == 0);
				KASSERT(fli->fli_cow_cnt == 0);
				fli->fli_self = curlwp;
				fli->fli_succ = lwp_getspecific(lwp_data_key);
				lwp_setspecific(lwp_data_key, fli);
				break;
			}
		}
		mutex_exit(&fstrans_lock);
	}
	if (fli == NULL) {
		fli = kmem_alloc(sizeof(*fli), KM_SLEEP);
		mutex_enter(&fstrans_lock);
		memset(fli, 0, sizeof(*fli));
		fli->fli_self = curlwp;
		LIST_INSERT_HEAD(&fstrans_fli_head, fli, fli_list);
		mutex_exit(&fstrans_lock);
		fli->fli_succ = lwp_getspecific(lwp_data_key);
		lwp_setspecific(lwp_data_key, fli);
	}

	/*
	 * Attach the entry to the mount if its mnt_transinfo is valid.
	 */
	mutex_enter(&fstrans_mount_lock);
	fmi = mp->mnt_transinfo;
	if (__predict_true(fmi != NULL)) {
		fli->fli_mount = mp;
		fmi->fmi_ref_cnt += 1;
	} else {
		fli = NULL;
	}
	mutex_exit(&fstrans_mount_lock);

	return fli;
}

/*
 * Check if this lock type is granted at this state.
 */
static bool
grant_lock(const enum fstrans_state state, const enum fstrans_lock_type type)
{

	if (__predict_true(state == FSTRANS_NORMAL))
		return true;
	if (type == FSTRANS_EXCL)
		return true;

	return false;
}

/*
 * Start a transaction.  If this thread already has a transaction on this
 * file system, increment the reference counter.
 */
static inline int
_fstrans_start(struct mount *mp, enum fstrans_lock_type lock_type, int wait)
{
	int s;
	struct mount *lmp;
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi;

	if ((lmp = fstrans_normalize_mount(mp)) == NULL)
		return 0;

	ASSERT_SLEEPABLE();

	/*
	 * Allocate per lwp info for layered file systems to
	 * get a reference to the mount.  No need to increment
	 * the reference counter here.
	 */
	for (lmp = mp; lmp->mnt_lower; lmp = lmp->mnt_lower) {
		fli = fstrans_get_lwp_info(lmp, true);
	}

	if ((fli = fstrans_get_lwp_info(lmp, true)) == NULL)
		return 0;

	if (fli->fli_trans_cnt > 0) {
		KASSERT(lock_type != FSTRANS_EXCL);
		fli->fli_trans_cnt += 1;

		return 0;
	}

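	/*
	 * Fast path: if the lock is granted at the current state, take
	 * it without the big lock.  The pserialize read section pairs
	 * with pserialize_perform() in fstrans_setstate().
	 */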
	s = pserialize_read_enter();
	fmi = lmp->mnt_transinfo;
	if (__predict_true(grant_lock(fmi->fmi_state, lock_type))) {
		fli->fli_trans_cnt = 1;
		fli->fli_lock_type = lock_type;
		pserialize_read_exit(s);

		return 0;
	}
	pserialize_read_exit(s);

	if (! wait)
		return EBUSY;

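	/*
	 * Slow path: sleep until the requested lock type is granted
	 * again; fstrans_setstate() broadcasts fstrans_state_cv on
	 * every state change.
	 */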
	mutex_enter(&fstrans_lock);
	while (! grant_lock(fmi->fmi_state, lock_type))
		cv_wait(&fstrans_state_cv, &fstrans_lock);
	fli->fli_trans_cnt = 1;
	fli->fli_lock_type = lock_type;
	mutex_exit(&fstrans_lock);

	return 0;
}

void
fstrans_start(struct mount *mp)
{
	int error __diagused;

	error = _fstrans_start(mp, FSTRANS_SHARED, 1);
	KASSERT(error == 0);
}

int
fstrans_start_nowait(struct mount *mp)
{

	return _fstrans_start(mp, FSTRANS_SHARED, 0);
}

/*
 * Finish a transaction.
 */
void
fstrans_done(struct mount *mp)
{
	int s;
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi;

	if ((mp = fstrans_normalize_mount(mp)) == NULL)
		return;
	if ((fli = fstrans_get_lwp_info(mp, false)) == NULL)
		return;
	KASSERT(fli->fli_trans_cnt > 0);

	if (fli->fli_trans_cnt > 1) {
		fli->fli_trans_cnt -= 1;

		return;
	}

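	/*
	 * Dropping the last reference.  Fast path: the file system is
	 * not suspending, so no one waits on the transaction count.
	 */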
	s = pserialize_read_enter();
	fmi = mp->mnt_transinfo;
	if (__predict_true(fmi->fmi_state == FSTRANS_NORMAL)) {
		fli->fli_trans_cnt = 0;
		pserialize_read_exit(s);

		return;
	}
	pserialize_read_exit(s);

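	/*
	 * The file system is suspending or suspended: drop the
	 * transaction under fstrans_lock and wake a state changer
	 * waiting in fstrans_setstate().
	 */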
	mutex_enter(&fstrans_lock);
	fli->fli_trans_cnt = 0;
	cv_signal(&fstrans_count_cv);
	mutex_exit(&fstrans_lock);
}

/*
 * Check if this thread has an exclusive lock.
 */
int
fstrans_is_owner(struct mount *mp)
{
	struct fstrans_lwp_info *fli;

	if ((mp = fstrans_normalize_mount(mp)) == NULL)
		return 0;
	if ((fli = fstrans_get_lwp_info(mp, false)) == NULL)
		return 0;

	if (fli->fli_trans_cnt == 0)
		return 0;

	KASSERT(fli->fli_mount == mp);
	KASSERT(fli->fli_trans_cnt > 0);

	return (fli->fli_lock_type == FSTRANS_EXCL);
}

/*
 * True if no thread is in a transaction that is not granted at the
 * current state.
 */
static bool
state_change_done(const struct mount *mp)
{
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi;

	KASSERT(mutex_owned(&fstrans_lock));

	fmi = mp->mnt_transinfo;
	LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
		if (fli->fli_mount != mp)
			continue;
		if (fli->fli_trans_cnt == 0)
			continue;
		if (grant_lock(fmi->fmi_state, fli->fli_lock_type))
			continue;

		return false;
	}

	return true;
}

/*
 * Set new file system state.
 */
int
fstrans_setstate(struct mount *mp, enum fstrans_state new_state)
{
	int error;
	enum fstrans_state old_state;
	struct fstrans_mount_info *fmi;

	fmi = mp->mnt_transinfo;
	old_state = fmi->fmi_state;
	if (old_state == new_state)
		return 0;

	mutex_enter(&fstrans_lock);
	fmi->fmi_state = new_state;
	pserialize_perform(fstrans_psz);

	/*
	 * All threads see the new state now.
	 * Wait for transactions invalid at this state to leave.
	 */
	error = 0;
	while (! state_change_done(mp)) {
		error = cv_wait_sig(&fstrans_count_cv, &fstrans_lock);
		if (error) {
			new_state = fmi->fmi_state = FSTRANS_NORMAL;
			break;
		}
	}
	cv_broadcast(&fstrans_state_cv);
	mutex_exit(&fstrans_lock);

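	/*
	 * When suspending, take an exclusive transaction so this thread
	 * passes fstrans_is_owner(); it is dropped again when the file
	 * system returns to FSTRANS_NORMAL.
	 */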
	if (old_state != new_state) {
		if (old_state == FSTRANS_NORMAL)
			_fstrans_start(mp, FSTRANS_EXCL, 1);
		if (new_state == FSTRANS_NORMAL)
			fstrans_done(mp);
	}

	return error;
}

/*
 * Get current file system state.
 */
enum fstrans_state
fstrans_getstate(struct mount *mp)
{
	struct fstrans_mount_info *fmi;

	fmi = mp->mnt_transinfo;
	KASSERT(fmi != NULL);

	return fmi->fmi_state;
}

/*
 * Request a filesystem to suspend all operations.
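 *
 * On success vfs_suspend_lock is held and will be released by
 * vfs_resume().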
 */
int
vfs_suspend(struct mount *mp, int nowait)
{
	int error;

	if ((mp = fstrans_normalize_mount(mp)) == NULL)
		return EOPNOTSUPP;
	if (nowait) {
		if (!mutex_tryenter(&vfs_suspend_lock))
			return EWOULDBLOCK;
	} else
		mutex_enter(&vfs_suspend_lock);

	if ((error = VFS_SUSPENDCTL(mp, SUSPEND_SUSPEND)) != 0)
		mutex_exit(&vfs_suspend_lock);

	return error;
}

/*
 * Request a filesystem to resume all operations.
 */
void
vfs_resume(struct mount *mp)
{

	mp = fstrans_normalize_mount(mp);
	KASSERT(mp != NULL);

	VFS_SUSPENDCTL(mp, SUSPEND_RESUME);
	mutex_exit(&vfs_suspend_lock);
}


/*
 * True if no thread is running a cow handler.
 */
static bool
cow_state_change_done(const struct mount *mp)
{
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi __diagused;

	fmi = mp->mnt_transinfo;

	KASSERT(mutex_owned(&fstrans_lock));
	KASSERT(fmi->fmi_cow_change);

	LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
		if (fli->fli_mount != mp)
			continue;
		if (fli->fli_cow_cnt == 0)
			continue;

		return false;
	}

	return true;
}

/*
 * Prepare for changing this mount's cow list.
 * Returns with fstrans_lock held.
 */
static void
cow_change_enter(const struct mount *mp)
{
	struct fstrans_mount_info *fmi;

	fmi = mp->mnt_transinfo;

	mutex_enter(&fstrans_lock);

	/*
	 * Wait for other threads changing the list.
	 */
	while (fmi->fmi_cow_change)
		cv_wait(&fstrans_state_cv, &fstrans_lock);

	/*
	 * Wait until all threads are aware of a state change.
	 */
	fmi->fmi_cow_change = true;
	pserialize_perform(fstrans_psz);

	while (! cow_state_change_done(mp))
		cv_wait(&fstrans_count_cv, &fstrans_lock);
}

/*
 * Done changing this mount's cow list.
 */
static void
cow_change_done(const struct mount *mp)
{
	struct fstrans_mount_info *fmi;

	KASSERT(mutex_owned(&fstrans_lock));

	fmi = mp->mnt_transinfo;

	fmi->fmi_cow_change = false;
	pserialize_perform(fstrans_psz);

	cv_broadcast(&fstrans_state_cv);

	mutex_exit(&fstrans_lock);
}

/*
 * Add a handler to this mount.
 */
int
fscow_establish(struct mount *mp, int (*func)(void *, struct buf *, bool),
    void *arg)
{
	struct fstrans_mount_info *fmi;
	struct fscow_handler *newch;

	if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
		return EINVAL;

	fmi = mp->mnt_transinfo;
	KASSERT(fmi != NULL);

	newch = kmem_alloc(sizeof(*newch), KM_SLEEP);
	newch->ch_func = func;
	newch->ch_arg = arg;

	cow_change_enter(mp);
	LIST_INSERT_HEAD(&fmi->fmi_cow_handler, newch, ch_list);
	cow_change_done(mp);

	return 0;
}

/*
 * Remove a handler from this mount.
 */
int
fscow_disestablish(struct mount *mp, int (*func)(void *, struct buf *, bool),
    void *arg)
{
	struct fstrans_mount_info *fmi;
	struct fscow_handler *hp = NULL;

	if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
		return EINVAL;

	fmi = mp->mnt_transinfo;
	KASSERT(fmi != NULL);

	cow_change_enter(mp);
	LIST_FOREACH(hp, &fmi->fmi_cow_handler, ch_list)
		if (hp->ch_func == func && hp->ch_arg == arg)
			break;
	if (hp != NULL) {
		LIST_REMOVE(hp, ch_list);
		kmem_free(hp, sizeof(*hp));
	}
	cow_change_done(mp);

	return hp ? 0 : EINVAL;
}

/*
 * Check for need to copy block that is about to be written.
 */
int
fscow_run(struct buf *bp, bool data_valid)
{
	int error, s;
	struct mount *mp;
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi;
	struct fscow_handler *hp;

	/*
	 * First check if we need to run the copy-on-write handler.
	 */
	if ((bp->b_flags & B_COWDONE))
		return 0;
	if (bp->b_vp == NULL) {
		bp->b_flags |= B_COWDONE;
		return 0;
	}
	if (bp->b_vp->v_type == VBLK)
		mp = spec_node_getmountedfs(bp->b_vp);
	else
		mp = bp->b_vp->v_mount;
	if (mp == NULL || (mp->mnt_iflag & IMNT_HAS_TRANS) == 0) {
		bp->b_flags |= B_COWDONE;
		return 0;
	}

	fli = fstrans_get_lwp_info(mp, true);
	fmi = mp->mnt_transinfo;

	/*
	 * On non-recursed run check if other threads
	 * want to change the list.
	 */
	if (fli->fli_cow_cnt == 0) {
		s = pserialize_read_enter();
		if (__predict_false(fmi->fmi_cow_change)) {
			pserialize_read_exit(s);
			mutex_enter(&fstrans_lock);
			while (fmi->fmi_cow_change)
				cv_wait(&fstrans_state_cv, &fstrans_lock);
			fli->fli_cow_cnt = 1;
			mutex_exit(&fstrans_lock);
		} else {
			fli->fli_cow_cnt = 1;
			pserialize_read_exit(s);
		}
	} else
		fli->fli_cow_cnt += 1;

	/*
	 * Run all copy-on-write handlers, stop on error.
	 */
	error = 0;
	LIST_FOREACH(hp, &fmi->fmi_cow_handler, ch_list)
		if ((error = (*hp->ch_func)(hp->ch_arg, bp, data_valid)) != 0)
			break;
	if (error == 0)
		bp->b_flags |= B_COWDONE;

	/*
	 * Check if other threads want to change the list.
	 */
	if (fli->fli_cow_cnt > 1) {
		fli->fli_cow_cnt -= 1;
	} else {
		s = pserialize_read_enter();
		if (__predict_false(fmi->fmi_cow_change)) {
			pserialize_read_exit(s);
			mutex_enter(&fstrans_lock);
			fli->fli_cow_cnt = 0;
			cv_signal(&fstrans_count_cv);
			mutex_exit(&fstrans_lock);
		} else {
			fli->fli_cow_cnt = 0;
			pserialize_read_exit(s);
		}
	}

	return error;
}

#if defined(DDB)
void fstrans_dump(int);

static void
fstrans_print_lwp(struct proc *p, struct lwp *l, int verbose)
{
	char prefix[9];
	struct fstrans_lwp_info *fli;

	snprintf(prefix, sizeof(prefix), "%d.%d", p->p_pid, l->l_lid);
	LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
		if (fli->fli_self != l)
			continue;
		if (fli->fli_trans_cnt == 0 && fli->fli_cow_cnt == 0) {
			if (! verbose)
				continue;
		}
		printf("%-8s", prefix);
		if (verbose)
			printf(" @%p", fli);
		if (fli->fli_mount != NULL)
			printf(" (%s)", fli->fli_mount->mnt_stat.f_mntonname);
		else
			printf(" NULL");
		if (fli->fli_trans_cnt == 0) {
			printf(" -");
		} else {
			switch (fli->fli_lock_type) {
			case FSTRANS_SHARED:
				printf(" shared");
				break;
			case FSTRANS_EXCL:
				printf(" excl");
				break;
			default:
				printf(" %#x", fli->fli_lock_type);
				break;
			}
		}
		printf(" %d cow %d\n", fli->fli_trans_cnt, fli->fli_cow_cnt);
		prefix[0] = '\0';
	}
}

static void
fstrans_print_mount(struct mount *mp, int verbose)
{
	struct fstrans_mount_info *fmi;

	fmi = mp->mnt_transinfo;
	if (!verbose && (fmi == NULL || fmi->fmi_state == FSTRANS_NORMAL))
		return;

	printf("%-16s ", mp->mnt_stat.f_mntonname);
	if (fmi == NULL) {
		printf("(null)\n");
		return;
	}
	switch (fmi->fmi_state) {
	case FSTRANS_NORMAL:
		printf("state normal\n");
		break;
	case FSTRANS_SUSPENDED:
		printf("state suspended\n");
		break;
	default:
		printf("state %#x\n", fmi->fmi_state);
		break;
	}
}

void
fstrans_dump(int full)
{
	const struct proclist_desc *pd;
	struct proc *p;
	struct lwp *l;
	struct mount *mp;

	printf("Fstrans locks by lwp:\n");
	for (pd = proclists; pd->pd_list != NULL; pd++)
		PROCLIST_FOREACH(p, pd->pd_list)
			LIST_FOREACH(l, &p->p_lwps, l_sibling)
				fstrans_print_lwp(p, l, full == 1);

	printf("Fstrans state by mount:\n");
	for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp))
		fstrans_print_mount(mp, full == 1);
}
#endif /* defined(DDB) */