/* $NetBSD: vfs_trans.c,v 1.34.2.2 2017/04/26 02:53:27 pgoyette Exp $ */

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Juergen Hannken-Illjes.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_trans.c,v 1.34.2.2 2017/04/26 02:53:27 pgoyette Exp $");

/*
 * File system transaction operations.
 */

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/pserialize.h>
#include <sys/vnode.h>
#define _FSTRANS_API_PRIVATE
#include <sys/fstrans.h>
#include <sys/proc.h>

#include <miscfs/specfs/specdev.h>

struct fscow_handler {
        LIST_ENTRY(fscow_handler) ch_list;
        int (*ch_func)(void *, struct buf *, bool);
        void *ch_arg;
};
struct fstrans_lwp_info {
        struct fstrans_lwp_info *fli_succ;
        struct lwp *fli_self;
        struct mount *fli_mount;
        int fli_trans_cnt;
        int fli_cow_cnt;
        enum fstrans_lock_type fli_lock_type;
        LIST_ENTRY(fstrans_lwp_info) fli_list;
};
struct fstrans_mount_info {
        enum fstrans_state fmi_state;
        unsigned int fmi_ref_cnt;
        bool fmi_cow_change;
        LIST_HEAD(, fscow_handler) fmi_cow_handler;
};
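
/*
 * Overview of these data structures (derived from the definitions above):
 *
 * - Each mount that supports fstrans (IMNT_HAS_TRANS) points to one
 *   reference counted fstrans_mount_info through mp->mnt_transinfo.
 * - Each lwp keeps a private list of fstrans_lwp_info, one per mount it
 *   has used, chained through fli_succ and attached to the lwp with
 *   lwp_data_key.  Every fstrans_lwp_info is also on the global
 *   fstrans_fli_head list so that idle entries can be reused and DDB
 *   can inspect them.
 */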

static specificdata_key_t lwp_data_key;         /* Our specific data key. */
static kmutex_t vfs_suspend_lock;               /* Serialize suspensions. */
static kmutex_t fstrans_lock;                   /* Fstrans big lock. */
static kmutex_t fstrans_mount_lock;             /* Fstrans mount big lock. */
static kcondvar_t fstrans_state_cv;             /* Fstrans or cow state changed. */
static kcondvar_t fstrans_count_cv;             /* Fstrans or cow count changed. */
static pserialize_t fstrans_psz;                /* Pserialize state. */
static LIST_HEAD(fstrans_lwp_head, fstrans_lwp_info) fstrans_fli_head;
                                                /* List of all fstrans_lwp_info. */

static inline struct mount *fstrans_normalize_mount(struct mount *);
static void fstrans_lwp_dtor(void *);
static void fstrans_mount_dtor(struct mount *);
static struct fstrans_lwp_info *fstrans_get_lwp_info(struct mount *, bool);
static bool grant_lock(const enum fstrans_state, const enum fstrans_lock_type);
static bool state_change_done(const struct mount *);
static bool cow_state_change_done(const struct mount *);
static void cow_change_enter(const struct mount *);
static void cow_change_done(const struct mount *);

/*
 * Initialize.
 */
void
fstrans_init(void)
{
        int error __diagused;

        error = lwp_specific_key_create(&lwp_data_key, fstrans_lwp_dtor);
        KASSERT(error == 0);

        mutex_init(&vfs_suspend_lock, MUTEX_DEFAULT, IPL_NONE);
        mutex_init(&fstrans_lock, MUTEX_DEFAULT, IPL_NONE);
        mutex_init(&fstrans_mount_lock, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&fstrans_state_cv, "fstchg");
        cv_init(&fstrans_count_cv, "fstcnt");
        fstrans_psz = pserialize_create();
        LIST_INIT(&fstrans_fli_head);
}

/*
 * Normalize mount.
 * Return mount if file system supports fstrans, NULL otherwise.
 */
static inline struct mount *
fstrans_normalize_mount(struct mount *mp)
{

        while (mp && mp->mnt_lower)
                mp = mp->mnt_lower;
        if (mp == NULL)
                return NULL;
        if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
                return NULL;
        return mp;
}

/*
 * Deallocate lwp state.
 */
static void
fstrans_lwp_dtor(void *arg)
{
        struct fstrans_lwp_info *fli, *fli_next;

        for (fli = arg; fli; fli = fli_next) {
                KASSERT(fli->fli_trans_cnt == 0);
                KASSERT(fli->fli_cow_cnt == 0);
                if (fli->fli_mount != NULL)
                        fstrans_mount_dtor(fli->fli_mount);
                fli_next = fli->fli_succ;
                fli->fli_mount = NULL;
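                /*
                 * The membar makes sure the cleared fli_mount is visible
                 * before fli_self is cleared: fstrans_get_lwp_info()
                 * treats entries with fli_self == NULL as free and
                 * expects their fli_mount to be NULL already.
                 */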
                membar_sync();
                fli->fli_self = NULL;
        }
}

/*
 * Dereference mount state.
 */
static void
fstrans_mount_dtor(struct mount *mp)
{
        struct fstrans_mount_info *fmi;

        mutex_enter(&fstrans_mount_lock);

        fmi = mp->mnt_transinfo;
        KASSERT(fmi != NULL);
        fmi->fmi_ref_cnt -= 1;
        if (fmi->fmi_ref_cnt > 0) {
                mutex_exit(&fstrans_mount_lock);
                return;
        }

        KASSERT(fmi->fmi_state == FSTRANS_NORMAL);
        KASSERT(LIST_FIRST(&fmi->fmi_cow_handler) == NULL);

        mp->mnt_iflag &= ~IMNT_HAS_TRANS;
        mp->mnt_transinfo = NULL;

        mutex_exit(&fstrans_mount_lock);

        kmem_free(fmi, sizeof(*fmi));
        vfs_rele(mp);
}

/*
 * Allocate mount state.
 */
int
fstrans_mount(struct mount *mp)
{
        int error;
        struct fstrans_mount_info *newfmi;

        error = vfs_busy(mp);
        if (error)
                return error;
        newfmi = kmem_alloc(sizeof(*newfmi), KM_SLEEP);
        newfmi->fmi_state = FSTRANS_NORMAL;
        newfmi->fmi_ref_cnt = 1;
        LIST_INIT(&newfmi->fmi_cow_handler);
        newfmi->fmi_cow_change = false;

        mutex_enter(&fstrans_mount_lock);
        mp->mnt_transinfo = newfmi;
        mp->mnt_iflag |= IMNT_HAS_TRANS;
        mutex_exit(&fstrans_mount_lock);

        vfs_ref(mp);
        vfs_unbusy(mp);

        return 0;
}

/*
 * Deallocate mount state.
 */
void
fstrans_unmount(struct mount *mp)
{

        if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
                return;

        KASSERT(mp->mnt_transinfo != NULL);

        fstrans_mount_dtor(mp);
}

/*
 * Retrieve the per-lwp info for this mount, allocating it if necessary.
 */
static struct fstrans_lwp_info *
fstrans_get_lwp_info(struct mount *mp, bool do_alloc)
{
        struct fstrans_lwp_info *fli, *res;
        struct fstrans_mount_info *fmi;

        /*
         * Scan our list for a match clearing entries whose mount is gone.
         */
        res = NULL;
        for (fli = lwp_getspecific(lwp_data_key); fli; fli = fli->fli_succ) {
                if (fli->fli_mount == mp) {
                        KASSERT(res == NULL);
                        res = fli;
                } else if (fli->fli_mount != NULL &&
                    (fli->fli_mount->mnt_iflag & IMNT_GONE) != 0 &&
                    fli->fli_trans_cnt == 0 && fli->fli_cow_cnt == 0) {
                        fstrans_mount_dtor(fli->fli_mount);
                        fli->fli_mount = NULL;
                }
        }
        if (__predict_true(res != NULL))
                return res;

        if (! do_alloc)
                return NULL;

        /*
         * Try to reuse a cleared entry or allocate a new one.
         */
        for (fli = lwp_getspecific(lwp_data_key); fli; fli = fli->fli_succ) {
                if (fli->fli_mount == NULL) {
                        KASSERT(fli->fli_trans_cnt == 0);
                        KASSERT(fli->fli_cow_cnt == 0);
                        break;
                }
        }
        if (fli == NULL) {
                mutex_enter(&fstrans_lock);
                LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
                        if (fli->fli_self == NULL) {
                                KASSERT(fli->fli_mount == NULL);
                                KASSERT(fli->fli_trans_cnt == 0);
                                KASSERT(fli->fli_cow_cnt == 0);
                                fli->fli_self = curlwp;
                                fli->fli_succ = lwp_getspecific(lwp_data_key);
                                lwp_setspecific(lwp_data_key, fli);
                                break;
                        }
                }
                mutex_exit(&fstrans_lock);
        }
        if (fli == NULL) {
                fli = kmem_alloc(sizeof(*fli), KM_SLEEP);
                mutex_enter(&fstrans_lock);
                memset(fli, 0, sizeof(*fli));
                fli->fli_self = curlwp;
                LIST_INSERT_HEAD(&fstrans_fli_head, fli, fli_list);
                mutex_exit(&fstrans_lock);
                fli->fli_succ = lwp_getspecific(lwp_data_key);
                lwp_setspecific(lwp_data_key, fli);
        }

        /*
         * Attach the entry to the mount.
         */
        mutex_enter(&fstrans_mount_lock);
        fmi = mp->mnt_transinfo;
        KASSERT(fmi != NULL);
        fli->fli_mount = mp;
        fmi->fmi_ref_cnt += 1;
        mutex_exit(&fstrans_mount_lock);

        return fli;
}

/*
 * Check if this lock type is granted at this state.
 */
static bool
grant_lock(const enum fstrans_state state, const enum fstrans_lock_type type)
{

        if (__predict_true(state == FSTRANS_NORMAL))
                return true;
        if (type == FSTRANS_EXCL)
                return true;
        if (state == FSTRANS_SUSPENDING && type == FSTRANS_LAZY)
                return true;

        return false;
}
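
/*
 * Summary of grant_lock(): which lock types are granted in which states.
 *
 *                       FSTRANS_LAZY   FSTRANS_SHARED  FSTRANS_EXCL
 *   FSTRANS_NORMAL          yes             yes            yes
 *   FSTRANS_SUSPENDING      yes             no             yes
 *   FSTRANS_SUSPENDED       no              no             yes
 */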

/*
 * Start a transaction.  If this thread already has a transaction on this
 * file system, increment the reference counter.
 */
int
_fstrans_start(struct mount *mp, enum fstrans_lock_type lock_type, int wait)
{
        int s;
        struct mount *lmp;
        struct fstrans_lwp_info *fli;
        struct fstrans_mount_info *fmi;

        if ((lmp = fstrans_normalize_mount(mp)) == NULL)
                return 0;

        ASSERT_SLEEPABLE();

        /*
         * Allocate per lwp info for layered file systems to
         * get a reference to the mount.  No need to increment
         * the reference counter here.
         */
        for (lmp = mp; lmp->mnt_lower; lmp = lmp->mnt_lower) {
                fli = fstrans_get_lwp_info(lmp, true);
                KASSERT(fli != NULL);
        }

        if ((fli = fstrans_get_lwp_info(lmp, true)) == NULL)
                return 0;

        if (fli->fli_trans_cnt > 0) {
                KASSERT(lock_type != FSTRANS_EXCL);
                fli->fli_trans_cnt += 1;

                return 0;
        }

        s = pserialize_read_enter();
        fmi = lmp->mnt_transinfo;
        if (__predict_true(grant_lock(fmi->fmi_state, lock_type))) {
                fli->fli_trans_cnt = 1;
                fli->fli_lock_type = lock_type;
                pserialize_read_exit(s);

                return 0;
        }
        pserialize_read_exit(s);

        if (! wait)
                return EBUSY;

        mutex_enter(&fstrans_lock);
        while (! grant_lock(fmi->fmi_state, lock_type))
                cv_wait(&fstrans_state_cv, &fstrans_lock);
        fli->fli_trans_cnt = 1;
        fli->fli_lock_type = lock_type;
        mutex_exit(&fstrans_lock);

        return 0;
}

/*
 * Finish a transaction.
 */
void
fstrans_done(struct mount *mp)
{
        int s;
        struct fstrans_lwp_info *fli;
        struct fstrans_mount_info *fmi;

        if ((mp = fstrans_normalize_mount(mp)) == NULL)
                return;
        fli = fstrans_get_lwp_info(mp, false);
        KASSERT(fli != NULL);
        KASSERT(fli->fli_trans_cnt > 0);

        if (fli->fli_trans_cnt > 1) {
                fli->fli_trans_cnt -= 1;

                return;
        }

        s = pserialize_read_enter();
        fmi = mp->mnt_transinfo;
        if (__predict_true(fmi->fmi_state == FSTRANS_NORMAL)) {
                fli->fli_trans_cnt = 0;
                pserialize_read_exit(s);

                return;
        }
        pserialize_read_exit(s);

        mutex_enter(&fstrans_lock);
        fli->fli_trans_cnt = 0;
        cv_signal(&fstrans_count_cv);
        mutex_exit(&fstrans_lock);
}
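
/*
 * Illustrative sketch only (the function and the elided operation are made
 * up): a file system entry point typically brackets the whole operation
 * with a shared transaction.  Nested calls on the same mount from the same
 * lwp just bump fli_trans_cnt, so helper routines may start their own
 * transaction as long as they do not ask for FSTRANS_EXCL.
 *
 *	int
 *	example_fs_write(struct vnode *vp)
 *	{
 *		struct mount *mp = vp->v_mount;
 *		int error;
 *
 *		fstrans_start(mp, FSTRANS_SHARED);
 *		error = ...;	(the actual modification goes here)
 *		fstrans_done(mp);
 *
 *		return error;
 *	}
 */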

/*
 * Check if this thread has an exclusive lock.
 */
int
fstrans_is_owner(struct mount *mp)
{
        struct fstrans_lwp_info *fli;

        if ((mp = fstrans_normalize_mount(mp)) == NULL)
                return 0;
        if ((fli = fstrans_get_lwp_info(mp, false)) == NULL)
                return 0;

        if (fli->fli_trans_cnt == 0)
                return 0;

        KASSERT(fli->fli_mount == mp);
        KASSERT(fli->fli_trans_cnt > 0);

        return (fli->fli_lock_type == FSTRANS_EXCL);
}

/*
 * Return true if no thread is in a transaction that is not granted at the
 * current state.
 */
static bool
state_change_done(const struct mount *mp)
{
        struct fstrans_lwp_info *fli;
        struct fstrans_mount_info *fmi;

        KASSERT(mutex_owned(&fstrans_lock));

        fmi = mp->mnt_transinfo;
        LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
                if (fli->fli_mount != mp)
                        continue;
                if (fli->fli_trans_cnt == 0)
                        continue;
                if (grant_lock(fmi->fmi_state, fli->fli_lock_type))
                        continue;

                return false;
        }

        return true;
}

/*
 * Set new file system state.
 */
int
fstrans_setstate(struct mount *mp, enum fstrans_state new_state)
{
        int error;
        enum fstrans_state old_state;
        struct fstrans_mount_info *fmi;

        fmi = mp->mnt_transinfo;
        old_state = fmi->fmi_state;
        if (old_state == new_state)
                return 0;

        mutex_enter(&fstrans_lock);
        fmi->fmi_state = new_state;
        pserialize_perform(fstrans_psz);

        /*
         * All threads see the new state now.
         * Wait for transactions invalid at this state to leave.
         */
        error = 0;
        while (! state_change_done(mp)) {
                error = cv_wait_sig(&fstrans_count_cv, &fstrans_lock);
                if (error) {
                        new_state = fmi->fmi_state = FSTRANS_NORMAL;
                        break;
                }
        }
        cv_broadcast(&fstrans_state_cv);
        mutex_exit(&fstrans_lock);

        if (old_state != new_state) {
                if (old_state == FSTRANS_NORMAL)
                        fstrans_start(mp, FSTRANS_EXCL);
                if (new_state == FSTRANS_NORMAL)
                        fstrans_done(mp);
        }

        return error;
}

/*
 * Get current file system state.
 */
enum fstrans_state
fstrans_getstate(struct mount *mp)
{
        struct fstrans_mount_info *fmi;

        fmi = mp->mnt_transinfo;
        KASSERT(fmi != NULL);

        return fmi->fmi_state;
}

/*
 * Request a filesystem to suspend all operations.
 */
int
vfs_suspend(struct mount *mp, int nowait)
{
        int error;

        if ((mp = fstrans_normalize_mount(mp)) == NULL)
                return EOPNOTSUPP;
        if (nowait) {
                if (!mutex_tryenter(&vfs_suspend_lock))
                        return EWOULDBLOCK;
        } else
                mutex_enter(&vfs_suspend_lock);

        if ((error = VFS_SUSPENDCTL(mp, SUSPEND_SUSPEND)) != 0)
                mutex_exit(&vfs_suspend_lock);

        return error;
}

/*
 * Request a filesystem to resume all operations.
 */
void
vfs_resume(struct mount *mp)
{

        mp = fstrans_normalize_mount(mp);
        KASSERT(mp != NULL);

        VFS_SUSPENDCTL(mp, SUSPEND_RESUME);
        mutex_exit(&vfs_suspend_lock);
}
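
/*
 * Illustrative sketch only (no particular caller is implied): code that
 * needs the file system quiescent, for example to take a snapshot, brackets
 * the work with vfs_suspend()/vfs_resume().  On success vfs_suspend()
 * returns with vfs_suspend_lock held; vfs_resume() releases it again.
 *
 *	if ((error = vfs_suspend(mp, 0)) == 0) {
 *		... work on the suspended file system ...
 *		vfs_resume(mp);
 *	}
 */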

/*
 * Return true if no thread is running a cow handler.
 */
static bool
cow_state_change_done(const struct mount *mp)
{
        struct fstrans_lwp_info *fli;
        struct fstrans_mount_info *fmi __diagused;

        fmi = mp->mnt_transinfo;

        KASSERT(mutex_owned(&fstrans_lock));
        KASSERT(fmi->fmi_cow_change);

        LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
                if (fli->fli_mount != mp)
                        continue;
                if (fli->fli_cow_cnt == 0)
                        continue;

                return false;
        }

        return true;
}

/*
 * Prepare for changing this mount's cow list.
 * Returns with fstrans_lock held.
 */
static void
cow_change_enter(const struct mount *mp)
{
        struct fstrans_mount_info *fmi;

        fmi = mp->mnt_transinfo;

        mutex_enter(&fstrans_lock);

        /*
         * Wait for other threads changing the list.
         */
        while (fmi->fmi_cow_change)
                cv_wait(&fstrans_state_cv, &fstrans_lock);

        /*
         * Wait until all threads are aware of a state change.
         */
        fmi->fmi_cow_change = true;
        pserialize_perform(fstrans_psz);

        while (! cow_state_change_done(mp))
                cv_wait(&fstrans_count_cv, &fstrans_lock);
}

/*
 * Done changing this mount's cow list.
 */
static void
cow_change_done(const struct mount *mp)
{
        struct fstrans_mount_info *fmi;

        KASSERT(mutex_owned(&fstrans_lock));

        fmi = mp->mnt_transinfo;

        fmi->fmi_cow_change = false;
        pserialize_perform(fstrans_psz);

        cv_broadcast(&fstrans_state_cv);

        mutex_exit(&fstrans_lock);
}

/*
 * Add a handler to this mount.
 */
int
fscow_establish(struct mount *mp, int (*func)(void *, struct buf *, bool),
    void *arg)
{
        struct fstrans_mount_info *fmi;
        struct fscow_handler *newch;

        if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
                return EINVAL;

        fmi = mp->mnt_transinfo;
        KASSERT(fmi != NULL);

        newch = kmem_alloc(sizeof(*newch), KM_SLEEP);
        newch->ch_func = func;
        newch->ch_arg = arg;

        cow_change_enter(mp);
        LIST_INSERT_HEAD(&fmi->fmi_cow_handler, newch, ch_list);
        cow_change_done(mp);

        return 0;
}

/*
 * Remove a handler from this mount.
 */
int
fscow_disestablish(struct mount *mp, int (*func)(void *, struct buf *, bool),
    void *arg)
{
        struct fstrans_mount_info *fmi;
        struct fscow_handler *hp = NULL;

        if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
                return EINVAL;

        fmi = mp->mnt_transinfo;
        KASSERT(fmi != NULL);

        cow_change_enter(mp);
        LIST_FOREACH(hp, &fmi->fmi_cow_handler, ch_list)
                if (hp->ch_func == func && hp->ch_arg == arg)
                        break;
        if (hp != NULL) {
                LIST_REMOVE(hp, ch_list);
                kmem_free(hp, sizeof(*hp));
        }
        cow_change_done(mp);

        return hp ? 0 : EINVAL;
}
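
/*
 * Illustrative sketch only (the handler and its argument are made up): a
 * copy-on-write handler receives the argument passed to fscow_establish(),
 * the buffer about to be written and the data_valid flag, and returns 0 on
 * success or an error, which fscow_run() passes back to its caller.
 *
 *	static int
 *	example_cow_handler(void *arg, struct buf *bp, bool data_valid)
 *	{
 *		... save the old contents of the blocks bp covers ...
 *		return 0;
 *	}
 *
 *	error = fscow_establish(mp, example_cow_handler, sc);
 *	...
 *	fscow_disestablish(mp, example_cow_handler, sc);
 */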

/*
 * Check for need to copy block that is about to be written.
 */
int
fscow_run(struct buf *bp, bool data_valid)
{
        int error, s;
        struct mount *mp;
        struct fstrans_lwp_info *fli;
        struct fstrans_mount_info *fmi;
        struct fscow_handler *hp;

        /*
         * First check if we need to run the copy-on-write handlers.
         */
        if ((bp->b_flags & B_COWDONE))
                return 0;
        if (bp->b_vp == NULL) {
                bp->b_flags |= B_COWDONE;
                return 0;
        }
        if (bp->b_vp->v_type == VBLK)
                mp = spec_node_getmountedfs(bp->b_vp);
        else
                mp = bp->b_vp->v_mount;
        if (mp == NULL || (mp->mnt_iflag & IMNT_HAS_TRANS) == 0) {
                bp->b_flags |= B_COWDONE;
                return 0;
        }

        fli = fstrans_get_lwp_info(mp, true);
        fmi = mp->mnt_transinfo;

        /*
         * On non-recursed run check if other threads
         * want to change the list.
         */
        if (fli->fli_cow_cnt == 0) {
                s = pserialize_read_enter();
                if (__predict_false(fmi->fmi_cow_change)) {
                        pserialize_read_exit(s);
                        mutex_enter(&fstrans_lock);
                        while (fmi->fmi_cow_change)
                                cv_wait(&fstrans_state_cv, &fstrans_lock);
                        fli->fli_cow_cnt = 1;
                        mutex_exit(&fstrans_lock);
                } else {
                        fli->fli_cow_cnt = 1;
                        pserialize_read_exit(s);
                }
        } else
                fli->fli_cow_cnt += 1;

        /*
         * Run all copy-on-write handlers, stop on error.
         */
        error = 0;
        LIST_FOREACH(hp, &fmi->fmi_cow_handler, ch_list)
                if ((error = (*hp->ch_func)(hp->ch_arg, bp, data_valid)) != 0)
                        break;
        if (error == 0)
                bp->b_flags |= B_COWDONE;

        /*
         * Check if other threads want to change the list.
         */
        if (fli->fli_cow_cnt > 1) {
                fli->fli_cow_cnt -= 1;
        } else {
                s = pserialize_read_enter();
                if (__predict_false(fmi->fmi_cow_change)) {
                        pserialize_read_exit(s);
                        mutex_enter(&fstrans_lock);
                        fli->fli_cow_cnt = 0;
                        cv_signal(&fstrans_count_cv);
                        mutex_exit(&fstrans_lock);
                } else {
                        fli->fli_cow_cnt = 0;
                        pserialize_read_exit(s);
                }
        }

        return error;
}

#if defined(DDB)
void fstrans_dump(int);

static void
fstrans_print_lwp(struct proc *p, struct lwp *l, int verbose)
{
        char prefix[9];
        struct fstrans_lwp_info *fli;

        snprintf(prefix, sizeof(prefix), "%d.%d", p->p_pid, l->l_lid);
        LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
                if (fli->fli_self != l)
                        continue;
                if (fli->fli_trans_cnt == 0 && fli->fli_cow_cnt == 0) {
                        if (! verbose)
                                continue;
                }
                printf("%-8s", prefix);
                if (verbose)
                        printf(" @%p", fli);
                if (fli->fli_mount != NULL)
                        printf(" (%s)", fli->fli_mount->mnt_stat.f_mntonname);
                else
                        printf(" NULL");
                if (fli->fli_trans_cnt == 0) {
                        printf(" -");
                } else {
                        switch (fli->fli_lock_type) {
                        case FSTRANS_LAZY:
                                printf(" lazy");
                                break;
                        case FSTRANS_SHARED:
                                printf(" shared");
                                break;
                        case FSTRANS_EXCL:
                                printf(" excl");
                                break;
                        default:
                                printf(" %#x", fli->fli_lock_type);
                                break;
                        }
                }
                printf(" %d cow %d\n", fli->fli_trans_cnt, fli->fli_cow_cnt);
                prefix[0] = '\0';
        }
}

static void
fstrans_print_mount(struct mount *mp, int verbose)
{
        struct fstrans_mount_info *fmi;

        fmi = mp->mnt_transinfo;
        if (!verbose && (fmi == NULL || fmi->fmi_state == FSTRANS_NORMAL))
                return;

        printf("%-16s ", mp->mnt_stat.f_mntonname);
        if (fmi == NULL) {
                printf("(null)\n");
                return;
        }
        switch (fmi->fmi_state) {
        case FSTRANS_NORMAL:
                printf("state normal\n");
                break;
        case FSTRANS_SUSPENDING:
                printf("state suspending\n");
                break;
        case FSTRANS_SUSPENDED:
                printf("state suspended\n");
                break;
        default:
                printf("state %#x\n", fmi->fmi_state);
                break;
        }
}

void
fstrans_dump(int full)
{
        const struct proclist_desc *pd;
        struct proc *p;
        struct lwp *l;
        struct mount *mp;

        printf("Fstrans locks by lwp:\n");
        for (pd = proclists; pd->pd_list != NULL; pd++)
                PROCLIST_FOREACH(p, pd->pd_list)
                        LIST_FOREACH(l, &p->p_lwps, l_sibling)
                                fstrans_print_lwp(p, l, full == 1);

        printf("Fstrans state by mount:\n");
        for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp))
                fstrans_print_mount(mp, full == 1);
}
#endif /* defined(DDB) */