/*	$NetBSD: vfs_trans.c,v 1.40 2017/03/30 09:13:01 hannken Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Juergen Hannken-Illjes.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_trans.c,v 1.40 2017/03/30 09:13:01 hannken Exp $");

/*
 * File system transaction operations.
 */
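
/*
 * A typical transaction looks roughly like
 *
 *	fstrans_start(mp, FSTRANS_SHARED);
 *	... operate on the file system ...
 *	fstrans_done(mp);
 *
 * while a suspender calls vfs_suspend(mp, 0), works on the now
 * quiescent file system (e.g. to take a snapshot) and releases it
 * again with vfs_resume(mp).
 */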

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/pserialize.h>
#include <sys/vnode.h>
#define _FSTRANS_API_PRIVATE
#include <sys/fstrans.h>
#include <sys/proc.h>

#include <miscfs/specfs/specdev.h>

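/*
 * Copy-on-write handler, registered with fscow_establish().
 */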
struct fscow_handler {
	LIST_ENTRY(fscow_handler) ch_list;
	int (*ch_func)(void *, struct buf *, bool);
	void *ch_arg;
};
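/*
 * Per-lwp, per-mount transaction state.  Each lwp chains its entries
 * through fli_succ and keeps the list head as lwp specific data;
 * all entries are also on the global fstrans_fli_head list.
 */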
struct fstrans_lwp_info {
	struct fstrans_lwp_info *fli_succ;
	struct lwp *fli_self;
	struct mount *fli_mount;
	int fli_trans_cnt;
	int fli_cow_cnt;
	enum fstrans_lock_type fli_lock_type;
	LIST_ENTRY(fstrans_lwp_info) fli_list;
};
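/*
 * Per-mount transaction state, attached as mp->mnt_transinfo.
 */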
struct fstrans_mount_info {
	enum fstrans_state fmi_state;
	unsigned int fmi_ref_cnt;
	bool fmi_cow_change;
	LIST_HEAD(, fscow_handler) fmi_cow_handler;
};

static specificdata_key_t lwp_data_key;	/* Our specific data key. */
static kmutex_t vfs_suspend_lock;	/* Serialize suspensions. */
static kmutex_t fstrans_lock;		/* Fstrans big lock. */
static kmutex_t fstrans_mount_lock;	/* Fstrans mount big lock. */
static kcondvar_t fstrans_state_cv;	/* Fstrans or cow state changed. */
static kcondvar_t fstrans_count_cv;	/* Fstrans or cow count changed. */
static pserialize_t fstrans_psz;	/* Pserialize state. */
static LIST_HEAD(fstrans_lwp_head, fstrans_lwp_info) fstrans_fli_head;
					/* List of all fstrans_lwp_info. */
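
/*
 * Fast paths in fstrans_start(), fstrans_done() and fscow_run() only
 * enter a pserialize read section.  A state changer publishes the new
 * state while holding fstrans_lock and then runs
 * pserialize_perform(fstrans_psz), after which every thread either
 * sees the new state or already has its fli_trans_cnt/fli_cow_cnt
 * visible to the changer.
 */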

static inline struct mount *fstrans_normalize_mount(struct mount *);
static void fstrans_lwp_dtor(void *);
static void fstrans_mount_dtor(struct mount *);
static struct fstrans_lwp_info *fstrans_get_lwp_info(struct mount *, bool);
static bool grant_lock(const enum fstrans_state, const enum fstrans_lock_type);
static bool state_change_done(const struct mount *);
static bool cow_state_change_done(const struct mount *);
static void cow_change_enter(const struct mount *);
static void cow_change_done(const struct mount *);

/*
 * Initialize.
 */
void
fstrans_init(void)
{
	int error __diagused;

	error = lwp_specific_key_create(&lwp_data_key, fstrans_lwp_dtor);
	KASSERT(error == 0);

	mutex_init(&vfs_suspend_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&fstrans_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&fstrans_mount_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&fstrans_state_cv, "fstchg");
	cv_init(&fstrans_count_cv, "fstcnt");
	fstrans_psz = pserialize_create();
	LIST_INIT(&fstrans_fli_head);
}

/*
 * Normalize mount.
 * Return mount if file system supports fstrans, NULL otherwise.
 */
static inline struct mount *
fstrans_normalize_mount(struct mount *mp)
{

	while (mp && mp->mnt_lower)
		mp = mp->mnt_lower;
	if (mp == NULL)
		return NULL;
	if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
		return NULL;
	return mp;
}

/*
 * Deallocate lwp state.
 */
static void
fstrans_lwp_dtor(void *arg)
{
	struct fstrans_lwp_info *fli, *fli_next;

	for (fli = arg; fli; fli = fli_next) {
		KASSERT(fli->fli_trans_cnt == 0);
		KASSERT(fli->fli_cow_cnt == 0);
		if (fli->fli_mount != NULL)
			fstrans_mount_dtor(fli->fli_mount);
		fli_next = fli->fli_succ;
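		/*
		 * Clear fli_mount before fli_self: entries with
		 * fli_self == NULL may be claimed by other lwps from
		 * fstrans_fli_head and have to look fully cleaned.
		 */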
		fli->fli_mount = NULL;
		membar_sync();
		fli->fli_self = NULL;
	}
}

/*
 * Dereference mount state.
 */
static void
fstrans_mount_dtor(struct mount *mp)
{
	struct fstrans_mount_info *fmi;

	mutex_enter(&fstrans_mount_lock);

	fmi = mp->mnt_transinfo;
	KASSERT(fmi != NULL);
	fmi->fmi_ref_cnt -= 1;
	if (fmi->fmi_ref_cnt > 0) {
		mutex_exit(&fstrans_mount_lock);
		return;
	}

	KASSERT(fmi->fmi_state == FSTRANS_NORMAL);
	KASSERT(LIST_FIRST(&fmi->fmi_cow_handler) == NULL);

	mp->mnt_iflag &= ~IMNT_HAS_TRANS;
	mp->mnt_transinfo = NULL;

	mutex_exit(&fstrans_mount_lock);

	kmem_free(fmi, sizeof(*fmi));
	vfs_destroy(mp);
}

/*
 * Allocate mount state.
 */
int
fstrans_mount(struct mount *mp)
{
	int error;
	struct fstrans_mount_info *newfmi;

	error = vfs_busy(mp, NULL);
	if (error)
		return error;
	newfmi = kmem_alloc(sizeof(*newfmi), KM_SLEEP);
	newfmi->fmi_state = FSTRANS_NORMAL;
	newfmi->fmi_ref_cnt = 1;
	LIST_INIT(&newfmi->fmi_cow_handler);
	newfmi->fmi_cow_change = false;

	mutex_enter(&fstrans_mount_lock);
	mp->mnt_transinfo = newfmi;
	mp->mnt_iflag |= IMNT_HAS_TRANS;
	mutex_exit(&fstrans_mount_lock);

	vfs_unbusy(mp, true, NULL);

	return 0;
}

/*
 * Deallocate mount state.
 */
void
fstrans_unmount(struct mount *mp)
{

	if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
		return;

	KASSERT(mp->mnt_transinfo != NULL);

	fstrans_mount_dtor(mp);
}

/*
 * Retrieve the per lwp info for this mount, allocating if necessary.
 */
static struct fstrans_lwp_info *
fstrans_get_lwp_info(struct mount *mp, bool do_alloc)
{
	struct fstrans_lwp_info *fli, *res;
	struct fstrans_mount_info *fmi;

	/*
	 * Scan our list for a match, clearing entries whose mount is gone.
	 */
	res = NULL;
	for (fli = lwp_getspecific(lwp_data_key); fli; fli = fli->fli_succ) {
		if (fli->fli_mount == mp) {
			KASSERT(res == NULL);
			res = fli;
		} else if (fli->fli_mount != NULL &&
		    (fli->fli_mount->mnt_iflag & IMNT_GONE) != 0 &&
		    fli->fli_trans_cnt == 0 && fli->fli_cow_cnt == 0) {
			fstrans_mount_dtor(fli->fli_mount);
			fli->fli_mount = NULL;
		}
	}
	if (__predict_true(res != NULL))
		return res;

	if (! do_alloc)
		return NULL;

	/*
	 * Try to reuse a cleared entry or allocate a new one.
	 */
	for (fli = lwp_getspecific(lwp_data_key); fli; fli = fli->fli_succ) {
		if (fli->fli_mount == NULL) {
			KASSERT(fli->fli_trans_cnt == 0);
			KASSERT(fli->fli_cow_cnt == 0);
			break;
		}
	}
	if (fli == NULL) {
		mutex_enter(&fstrans_lock);
		LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
			if (fli->fli_self == NULL) {
				KASSERT(fli->fli_mount == NULL);
				KASSERT(fli->fli_trans_cnt == 0);
				KASSERT(fli->fli_cow_cnt == 0);
				fli->fli_self = curlwp;
				fli->fli_succ = lwp_getspecific(lwp_data_key);
				lwp_setspecific(lwp_data_key, fli);
				break;
			}
		}
		mutex_exit(&fstrans_lock);
	}
	if (fli == NULL) {
		fli = kmem_alloc(sizeof(*fli), KM_SLEEP);
		mutex_enter(&fstrans_lock);
		memset(fli, 0, sizeof(*fli));
		fli->fli_self = curlwp;
		LIST_INSERT_HEAD(&fstrans_fli_head, fli, fli_list);
		mutex_exit(&fstrans_lock);
		fli->fli_succ = lwp_getspecific(lwp_data_key);
		lwp_setspecific(lwp_data_key, fli);
	}

	/*
	 * Attach the entry to the mount.
	 */
	mutex_enter(&fstrans_mount_lock);
	fmi = mp->mnt_transinfo;
	KASSERT(fmi != NULL);
	fli->fli_mount = mp;
	fmi->fmi_ref_cnt += 1;
	mutex_exit(&fstrans_mount_lock);

	return fli;
}

/*
 * Check if this lock type is granted at this state.
 */
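/*
 * Granted combinations:
 *	FSTRANS_NORMAL:		LAZY, SHARED and EXCL
 *	FSTRANS_SUSPENDING:	LAZY and EXCL
 *	FSTRANS_SUSPENDED:	EXCL only
 */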
static bool
grant_lock(const enum fstrans_state state, const enum fstrans_lock_type type)
{

	if (__predict_true(state == FSTRANS_NORMAL))
		return true;
	if (type == FSTRANS_EXCL)
		return true;
	if (state == FSTRANS_SUSPENDING && type == FSTRANS_LAZY)
		return true;

	return false;
}

/*
 * Start a transaction. If this thread already has a transaction on this
 * file system, increment the reference counter.
 */
int
_fstrans_start(struct mount *mp, enum fstrans_lock_type lock_type, int wait)
{
	int s;
	struct mount *lmp;
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi;

	if ((lmp = fstrans_normalize_mount(mp)) == NULL)
		return 0;

	ASSERT_SLEEPABLE();

	/*
	 * Allocate per lwp info for layered file systems to
	 * get a reference to the mount. No need to increment
	 * the reference counter here.
	 */
	for (lmp = mp; lmp->mnt_lower; lmp = lmp->mnt_lower) {
		fli = fstrans_get_lwp_info(lmp, true);
		KASSERT(fli != NULL);
	}

	if ((fli = fstrans_get_lwp_info(lmp, true)) == NULL)
		return 0;

	if (fli->fli_trans_cnt > 0) {
		KASSERT(lock_type != FSTRANS_EXCL);
		fli->fli_trans_cnt += 1;

		return 0;
	}

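	/*
	 * Raise the transaction count inside a pserialize read section
	 * so that fstrans_setstate(), which publishes a new state with
	 * pserialize_perform() before scanning for conflicting
	 * transactions, either sees this transaction or this thread
	 * sees the new state.
	 */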
	s = pserialize_read_enter();
	fmi = lmp->mnt_transinfo;
	if (__predict_true(grant_lock(fmi->fmi_state, lock_type))) {
		fli->fli_trans_cnt = 1;
		fli->fli_lock_type = lock_type;
		pserialize_read_exit(s);

		return 0;
	}
	pserialize_read_exit(s);

	if (! wait)
		return EBUSY;

	mutex_enter(&fstrans_lock);
	while (! grant_lock(fmi->fmi_state, lock_type))
		cv_wait(&fstrans_state_cv, &fstrans_lock);
	fli->fli_trans_cnt = 1;
	fli->fli_lock_type = lock_type;
	mutex_exit(&fstrans_lock);

	return 0;
}

/*
 * Finish a transaction.
 */
void
fstrans_done(struct mount *mp)
{
	int s;
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi;

	if ((mp = fstrans_normalize_mount(mp)) == NULL)
		return;
	fli = fstrans_get_lwp_info(mp, false);
	KASSERT(fli != NULL);
	KASSERT(fli->fli_trans_cnt > 0);

	if (fli->fli_trans_cnt > 1) {
		fli->fli_trans_cnt -= 1;

		return;
	}

	s = pserialize_read_enter();
	fmi = mp->mnt_transinfo;
	if (__predict_true(fmi->fmi_state == FSTRANS_NORMAL)) {
		fli->fli_trans_cnt = 0;
		pserialize_read_exit(s);

		return;
	}
	pserialize_read_exit(s);

	mutex_enter(&fstrans_lock);
	fli->fli_trans_cnt = 0;
	cv_signal(&fstrans_count_cv);
	mutex_exit(&fstrans_lock);
}

/*
 * Check if this thread has an exclusive lock.
 */
int
fstrans_is_owner(struct mount *mp)
{
	struct fstrans_lwp_info *fli;

	if ((mp = fstrans_normalize_mount(mp)) == NULL)
		return 0;
	if ((fli = fstrans_get_lwp_info(mp, false)) == NULL)
		return 0;

	if (fli->fli_trans_cnt == 0)
		return 0;

	KASSERT(fli->fli_mount == mp);
	KASSERT(fli->fli_trans_cnt > 0);

	return (fli->fli_lock_type == FSTRANS_EXCL);
}

/*
 * True if no thread is in a transaction that is not granted at the
 * current state.
 */
static bool
state_change_done(const struct mount *mp)
{
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi;

	KASSERT(mutex_owned(&fstrans_lock));

	fmi = mp->mnt_transinfo;
	LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
		if (fli->fli_mount != mp)
			continue;
		if (fli->fli_trans_cnt == 0)
			continue;
		if (grant_lock(fmi->fmi_state, fli->fli_lock_type))
			continue;

		return false;
	}

	return true;
}

/*
 * Set new file system state.
 */
int
fstrans_setstate(struct mount *mp, enum fstrans_state new_state)
{
	int error;
	enum fstrans_state old_state;
	struct fstrans_mount_info *fmi;

	fmi = mp->mnt_transinfo;
	old_state = fmi->fmi_state;
	if (old_state == new_state)
		return 0;

	mutex_enter(&fstrans_lock);
	fmi->fmi_state = new_state;
	pserialize_perform(fstrans_psz);

	/*
	 * All threads see the new state now.
	 * Wait for transactions invalid at this state to leave.
	 */
	error = 0;
	while (! state_change_done(mp)) {
		error = cv_wait_sig(&fstrans_count_cv, &fstrans_lock);
		if (error) {
			new_state = fmi->fmi_state = FSTRANS_NORMAL;
			break;
		}
	}
	cv_broadcast(&fstrans_state_cv);
	mutex_exit(&fstrans_lock);

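	/*
	 * Leaving FSTRANS_NORMAL the caller takes an exclusive
	 * transaction that is held until the file system returns
	 * to FSTRANS_NORMAL.
	 */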
	if (old_state != new_state) {
		if (old_state == FSTRANS_NORMAL)
			fstrans_start(mp, FSTRANS_EXCL);
		if (new_state == FSTRANS_NORMAL)
			fstrans_done(mp);
	}

	return error;
}

/*
 * Get current file system state.
 */
enum fstrans_state
fstrans_getstate(struct mount *mp)
{
	struct fstrans_mount_info *fmi;

	fmi = mp->mnt_transinfo;
	KASSERT(fmi != NULL);

	return fmi->fmi_state;
}

/*
 * Request a filesystem to suspend all operations.
 * On success the caller holds vfs_suspend_lock until vfs_resume().
 */
int
vfs_suspend(struct mount *mp, int nowait)
{
	int error;

	if ((mp = fstrans_normalize_mount(mp)) == NULL)
		return EOPNOTSUPP;
	if (nowait) {
		if (!mutex_tryenter(&vfs_suspend_lock))
			return EWOULDBLOCK;
	} else
		mutex_enter(&vfs_suspend_lock);

	if ((error = VFS_SUSPENDCTL(mp, SUSPEND_SUSPEND)) != 0)
		mutex_exit(&vfs_suspend_lock);

	return error;
}

/*
 * Request a filesystem to resume all operations.
 */
void
vfs_resume(struct mount *mp)
{

	mp = fstrans_normalize_mount(mp);
	KASSERT(mp != NULL);

	VFS_SUSPENDCTL(mp, SUSPEND_RESUME);
	mutex_exit(&vfs_suspend_lock);
}


/*
 * True if no thread is running a cow handler.
 */
static bool
cow_state_change_done(const struct mount *mp)
{
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi __diagused;

	fmi = mp->mnt_transinfo;

	KASSERT(mutex_owned(&fstrans_lock));
	KASSERT(fmi->fmi_cow_change);

	LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
		if (fli->fli_mount != mp)
			continue;
		if (fli->fli_cow_cnt == 0)
			continue;

		return false;
	}

	return true;
}

/*
 * Prepare for changing this mount's cow list.
 * Returns with fstrans_lock locked.
 */
static void
cow_change_enter(const struct mount *mp)
{
	struct fstrans_mount_info *fmi;

	fmi = mp->mnt_transinfo;

	mutex_enter(&fstrans_lock);

	/*
	 * Wait for other threads changing the list.
	 */
	while (fmi->fmi_cow_change)
		cv_wait(&fstrans_state_cv, &fstrans_lock);

	/*
	 * Wait until all threads are aware of a state change.
	 */
	fmi->fmi_cow_change = true;
	pserialize_perform(fstrans_psz);

	while (! cow_state_change_done(mp))
		cv_wait(&fstrans_count_cv, &fstrans_lock);
}

/*
 * Done changing this mount's cow list.
 */
static void
cow_change_done(const struct mount *mp)
{
	struct fstrans_mount_info *fmi;

	KASSERT(mutex_owned(&fstrans_lock));

	fmi = mp->mnt_transinfo;

	fmi->fmi_cow_change = false;
	pserialize_perform(fstrans_psz);

	cv_broadcast(&fstrans_state_cv);

	mutex_exit(&fstrans_lock);
}

/*
 * Add a handler to this mount.
 */
int
fscow_establish(struct mount *mp, int (*func)(void *, struct buf *, bool),
    void *arg)
{
	struct fstrans_mount_info *fmi;
	struct fscow_handler *newch;

	if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
		return EINVAL;

	fmi = mp->mnt_transinfo;
	KASSERT(fmi != NULL);

	newch = kmem_alloc(sizeof(*newch), KM_SLEEP);
	newch->ch_func = func;
	newch->ch_arg = arg;

	cow_change_enter(mp);
	LIST_INSERT_HEAD(&fmi->fmi_cow_handler, newch, ch_list);
	cow_change_done(mp);

	return 0;
}

/*
 * Remove a handler from this mount.
 */
int
fscow_disestablish(struct mount *mp, int (*func)(void *, struct buf *, bool),
    void *arg)
{
	struct fstrans_mount_info *fmi;
	struct fscow_handler *hp = NULL;

	if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
		return EINVAL;

	fmi = mp->mnt_transinfo;
	KASSERT(fmi != NULL);

	cow_change_enter(mp);
	LIST_FOREACH(hp, &fmi->fmi_cow_handler, ch_list)
		if (hp->ch_func == func && hp->ch_arg == arg)
			break;
	if (hp != NULL) {
		LIST_REMOVE(hp, ch_list);
		kmem_free(hp, sizeof(*hp));
	}
	cow_change_done(mp);

	return hp ? 0 : EINVAL;
}

/*
 * Check if the block about to be written needs to be copied.
 */
int
fscow_run(struct buf *bp, bool data_valid)
{
	int error, s;
	struct mount *mp;
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi;
	struct fscow_handler *hp;

	/*
	 * First check if we need to run the copy-on-write handlers.
	 */
	if ((bp->b_flags & B_COWDONE))
		return 0;
	if (bp->b_vp == NULL) {
		bp->b_flags |= B_COWDONE;
		return 0;
	}
	if (bp->b_vp->v_type == VBLK)
		mp = spec_node_getmountedfs(bp->b_vp);
	else
		mp = bp->b_vp->v_mount;
	if (mp == NULL || (mp->mnt_iflag & IMNT_HAS_TRANS) == 0) {
		bp->b_flags |= B_COWDONE;
		return 0;
	}

	fli = fstrans_get_lwp_info(mp, true);
	fmi = mp->mnt_transinfo;

	/*
	 * On a non-recursed run check if other threads
	 * want to change the list.
	 */
	if (fli->fli_cow_cnt == 0) {
		s = pserialize_read_enter();
		if (__predict_false(fmi->fmi_cow_change)) {
			pserialize_read_exit(s);
			mutex_enter(&fstrans_lock);
			while (fmi->fmi_cow_change)
				cv_wait(&fstrans_state_cv, &fstrans_lock);
			fli->fli_cow_cnt = 1;
			mutex_exit(&fstrans_lock);
		} else {
			fli->fli_cow_cnt = 1;
			pserialize_read_exit(s);
		}
	} else
		fli->fli_cow_cnt += 1;

	/*
	 * Run all copy-on-write handlers, stop on error.
	 */
	error = 0;
	LIST_FOREACH(hp, &fmi->fmi_cow_handler, ch_list)
		if ((error = (*hp->ch_func)(hp->ch_arg, bp, data_valid)) != 0)
			break;
	if (error == 0)
		bp->b_flags |= B_COWDONE;

	/*
	 * Check if other threads want to change the list.
	 */
	if (fli->fli_cow_cnt > 1) {
		fli->fli_cow_cnt -= 1;
	} else {
		s = pserialize_read_enter();
		if (__predict_false(fmi->fmi_cow_change)) {
			pserialize_read_exit(s);
			mutex_enter(&fstrans_lock);
			fli->fli_cow_cnt = 0;
			cv_signal(&fstrans_count_cv);
			mutex_exit(&fstrans_lock);
		} else {
			fli->fli_cow_cnt = 0;
			pserialize_read_exit(s);
		}
	}

	return error;
}

#if defined(DDB)
void fstrans_dump(int);

static void
fstrans_print_lwp(struct proc *p, struct lwp *l, int verbose)
{
	char prefix[9];
	struct fstrans_lwp_info *fli;

	snprintf(prefix, sizeof(prefix), "%d.%d", p->p_pid, l->l_lid);
	LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
		if (fli->fli_self != l)
			continue;
		if (fli->fli_trans_cnt == 0 && fli->fli_cow_cnt == 0) {
			if (! verbose)
				continue;
		}
		printf("%-8s", prefix);
		if (verbose)
			printf(" @%p", fli);
		if (fli->fli_mount != NULL)
			printf(" (%s)", fli->fli_mount->mnt_stat.f_mntonname);
		else
			printf(" NULL");
		if (fli->fli_trans_cnt == 0) {
			printf(" -");
		} else {
			switch (fli->fli_lock_type) {
			case FSTRANS_LAZY:
				printf(" lazy");
				break;
			case FSTRANS_SHARED:
				printf(" shared");
				break;
			case FSTRANS_EXCL:
				printf(" excl");
				break;
			default:
				printf(" %#x", fli->fli_lock_type);
				break;
			}
		}
		printf(" %d cow %d\n", fli->fli_trans_cnt, fli->fli_cow_cnt);
		prefix[0] = '\0';
	}
}

static void
fstrans_print_mount(struct mount *mp, int verbose)
{
	struct fstrans_mount_info *fmi;

	fmi = mp->mnt_transinfo;
	if (!verbose && (fmi == NULL || fmi->fmi_state == FSTRANS_NORMAL))
		return;

	printf("%-16s ", mp->mnt_stat.f_mntonname);
	if (fmi == NULL) {
		printf("(null)\n");
		return;
	}
	switch (fmi->fmi_state) {
	case FSTRANS_NORMAL:
		printf("state normal\n");
		break;
	case FSTRANS_SUSPENDING:
		printf("state suspending\n");
		break;
	case FSTRANS_SUSPENDED:
		printf("state suspended\n");
		break;
	default:
		printf("state %#x\n", fmi->fmi_state);
		break;
	}
}

void
fstrans_dump(int full)
{
	const struct proclist_desc *pd;
	struct proc *p;
	struct lwp *l;
	struct mount *mp;

	printf("Fstrans locks by lwp:\n");
	for (pd = proclists; pd->pd_list != NULL; pd++)
		PROCLIST_FOREACH(p, pd->pd_list)
			LIST_FOREACH(l, &p->p_lwps, l_sibling)
				fstrans_print_lwp(p, l, full == 1);

	printf("Fstrans state by mount:\n");
	TAILQ_FOREACH(mp, &mountlist, mnt_list)
		fstrans_print_mount(mp, full == 1);
}
#endif /* defined(DDB) */