/*	$NetBSD: vfs_trans.c,v 1.43.2.1 2017/05/11 02:58:40 pgoyette Exp $	*/

/*-
 * Copyright (c) 2007 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Juergen Hannken-Illjes.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_trans.c,v 1.43.2.1 2017/05/11 02:58:40 pgoyette Exp $");

/*
 * File system transaction operations.
 */
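
/*
 * Typical use (a sketch; fstrans_start() is assumed to be the waiting
 * wrapper from <sys/fstrans.h> around _fstrans_start() below):
 *
 *	fstrans_start(mp, FSTRANS_SHARED);
 *	... modify the file system ...
 *	fstrans_done(mp);
 *
 * Exclusive transactions (FSTRANS_EXCL) are taken while changing the
 * suspension state; lazy ones (FSTRANS_LAZY) remain granted while a
 * suspension is still in progress (see grant_lock() below).
 */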

#ifdef _KERNEL_OPT
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/atomic.h>
#include <sys/buf.h>
#include <sys/kmem.h>
#include <sys/mount.h>
#include <sys/pserialize.h>
#include <sys/vnode.h>
#define _FSTRANS_API_PRIVATE
#include <sys/fstrans.h>
#include <sys/proc.h>

#include <miscfs/specfs/specdev.h>

struct fscow_handler {
	LIST_ENTRY(fscow_handler) ch_list;
	int (*ch_func)(void *, struct buf *, bool);
	void *ch_arg;
};
struct fstrans_lwp_info {
	struct fstrans_lwp_info *fli_succ;
	struct lwp *fli_self;
	struct mount *fli_mount;
	int fli_trans_cnt;
	int fli_cow_cnt;
	enum fstrans_lock_type fli_lock_type;
	LIST_ENTRY(fstrans_lwp_info) fli_list;
};
struct fstrans_mount_info {
	enum fstrans_state fmi_state;
	unsigned int fmi_ref_cnt;
	bool fmi_cow_change;
	LIST_HEAD(, fscow_handler) fmi_cow_handler;
};
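
/*
 * An fstrans_lwp_info tracks one LWP's transaction and copy-on-write
 * state for one mount; an fstrans_mount_info carries the per-mount
 * suspension state and the list of registered copy-on-write handlers.
 */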

static specificdata_key_t lwp_data_key;	/* Our specific data key. */
static kmutex_t vfs_suspend_lock;	/* Serialize suspensions. */
static kmutex_t fstrans_lock;		/* Fstrans big lock. */
static kmutex_t fstrans_mount_lock;	/* Fstrans mount big lock. */
static kcondvar_t fstrans_state_cv;	/* Fstrans or cow state changed. */
static kcondvar_t fstrans_count_cv;	/* Fstrans or cow count changed. */
static pserialize_t fstrans_psz;	/* Pserialize state. */
static LIST_HEAD(fstrans_lwp_head, fstrans_lwp_info) fstrans_fli_head;
					/* List of all fstrans_lwp_info. */

static inline struct mount *fstrans_normalize_mount(struct mount *);
static void fstrans_lwp_dtor(void *);
static void fstrans_mount_dtor(struct mount *);
static struct fstrans_lwp_info *fstrans_get_lwp_info(struct mount *, bool);
static bool grant_lock(const enum fstrans_state, const enum fstrans_lock_type);
static bool state_change_done(const struct mount *);
static bool cow_state_change_done(const struct mount *);
static void cow_change_enter(const struct mount *);
static void cow_change_done(const struct mount *);

/*
 * Initialize.
 */
void
fstrans_init(void)
{
	int error __diagused;

	error = lwp_specific_key_create(&lwp_data_key, fstrans_lwp_dtor);
	KASSERT(error == 0);

	mutex_init(&vfs_suspend_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&fstrans_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&fstrans_mount_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&fstrans_state_cv, "fstchg");
	cv_init(&fstrans_count_cv, "fstcnt");
	fstrans_psz = pserialize_create();
	LIST_INIT(&fstrans_fli_head);
}

/*
 * Normalize mount.
 * Return mount if file system supports fstrans, NULL otherwise.
 */
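/*
 * For a layered file system (a mount with mnt_lower set, e.g. a null
 * mount stacked on another file system) this walks down to the lowest
 * mount, so transaction state is always kept on the underlying mount.
 */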
static inline struct mount *
fstrans_normalize_mount(struct mount *mp)
{

	while (mp && mp->mnt_lower)
		mp = mp->mnt_lower;
	if (mp == NULL)
		return NULL;
	if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
		return NULL;
	return mp;
}

/*
 * Deallocate lwp state.
 */
static void
fstrans_lwp_dtor(void *arg)
{
	struct fstrans_lwp_info *fli, *fli_next;

	for (fli = arg; fli; fli = fli_next) {
		KASSERT(fli->fli_trans_cnt == 0);
		KASSERT(fli->fli_cow_cnt == 0);
		if (fli->fli_mount != NULL)
			fstrans_mount_dtor(fli->fli_mount);
		fli_next = fli->fli_succ;
		fli->fli_mount = NULL;
		membar_sync();
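		/*
		 * Make the cleared fli_mount globally visible before
		 * fli_self: fstrans_get_lwp_info() treats an entry with
		 * fli_self == NULL as free for reuse, so it must not
		 * still observe a stale mount pointer (this pairing is
		 * the apparent purpose of the membar_sync() above).
		 */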
		fli->fli_self = NULL;
	}
}

/*
 * Dereference mount state.
 */
static void
fstrans_mount_dtor(struct mount *mp)
{
	struct fstrans_mount_info *fmi;

	mutex_enter(&fstrans_mount_lock);

	fmi = mp->mnt_transinfo;
	KASSERT(fmi != NULL);
	fmi->fmi_ref_cnt -= 1;
	if (fmi->fmi_ref_cnt > 0) {
		mutex_exit(&fstrans_mount_lock);
		return;
	}

	KASSERT(fmi->fmi_state == FSTRANS_NORMAL);
	KASSERT(LIST_FIRST(&fmi->fmi_cow_handler) == NULL);

	mp->mnt_iflag &= ~IMNT_HAS_TRANS;
	mp->mnt_transinfo = NULL;

	mutex_exit(&fstrans_mount_lock);

	kmem_free(fmi, sizeof(*fmi));
	vfs_rele(mp);
}

/*
 * Allocate mount state.
 */
int
fstrans_mount(struct mount *mp)
{
	struct fstrans_mount_info *newfmi;

	newfmi = kmem_alloc(sizeof(*newfmi), KM_SLEEP);
	newfmi->fmi_state = FSTRANS_NORMAL;
	newfmi->fmi_ref_cnt = 1;
	LIST_INIT(&newfmi->fmi_cow_handler);
	newfmi->fmi_cow_change = false;

	mutex_enter(&fstrans_mount_lock);
	mp->mnt_transinfo = newfmi;
	mp->mnt_iflag |= IMNT_HAS_TRANS;
	mutex_exit(&fstrans_mount_lock);

	vfs_ref(mp);

	return 0;
}

/*
 * Deallocate mount state.
 */
void
fstrans_unmount(struct mount *mp)
{

	if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
		return;

	KASSERT(mp->mnt_transinfo != NULL);

	fstrans_mount_dtor(mp);
}

/*
 * Retrieve the per-lwp info for this mount, allocating if necessary.
 */
static struct fstrans_lwp_info *
fstrans_get_lwp_info(struct mount *mp, bool do_alloc)
{
	struct fstrans_lwp_info *fli, *res;
	struct fstrans_mount_info *fmi;

	/*
	 * Scan our list for a match, clearing entries whose mount is gone.
	 */
	res = NULL;
	for (fli = lwp_getspecific(lwp_data_key); fli; fli = fli->fli_succ) {
		if (fli->fli_mount == mp) {
			KASSERT(res == NULL);
			res = fli;
		} else if (fli->fli_mount != NULL &&
		    (fli->fli_mount->mnt_iflag & IMNT_GONE) != 0 &&
		    fli->fli_trans_cnt == 0 && fli->fli_cow_cnt == 0) {
			fstrans_mount_dtor(fli->fli_mount);
			fli->fli_mount = NULL;
		}
	}
	if (__predict_true(res != NULL))
		return res;

	if (! do_alloc)
		return NULL;

	/*
	 * Try to reuse a cleared entry or allocate a new one.
	 */
	for (fli = lwp_getspecific(lwp_data_key); fli; fli = fli->fli_succ) {
		if (fli->fli_mount == NULL) {
			KASSERT(fli->fli_trans_cnt == 0);
			KASSERT(fli->fli_cow_cnt == 0);
			break;
		}
	}
	if (fli == NULL) {
		mutex_enter(&fstrans_lock);
		LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
			if (fli->fli_self == NULL) {
				KASSERT(fli->fli_mount == NULL);
				KASSERT(fli->fli_trans_cnt == 0);
				KASSERT(fli->fli_cow_cnt == 0);
				fli->fli_self = curlwp;
				fli->fli_succ = lwp_getspecific(lwp_data_key);
				lwp_setspecific(lwp_data_key, fli);
				break;
			}
		}
		mutex_exit(&fstrans_lock);
	}
	if (fli == NULL) {
		fli = kmem_alloc(sizeof(*fli), KM_SLEEP);
		mutex_enter(&fstrans_lock);
		memset(fli, 0, sizeof(*fli));
		fli->fli_self = curlwp;
		LIST_INSERT_HEAD(&fstrans_fli_head, fli, fli_list);
		mutex_exit(&fstrans_lock);
		fli->fli_succ = lwp_getspecific(lwp_data_key);
		lwp_setspecific(lwp_data_key, fli);
	}

	/*
	 * Attach the entry to the mount if its mnt_transinfo is valid.
	 */
	mutex_enter(&fstrans_mount_lock);
	fmi = mp->mnt_transinfo;
	if (__predict_true(fmi != NULL)) {
		fli->fli_mount = mp;
		fmi->fmi_ref_cnt += 1;
	} else {
		fli = NULL;
	}
	mutex_exit(&fstrans_mount_lock);

	return fli;
}

/*
 * Check if this lock type is granted at this state.
 */
static bool
grant_lock(const enum fstrans_state state, const enum fstrans_lock_type type)
{

	if (__predict_true(state == FSTRANS_NORMAL))
		return true;
	if (type == FSTRANS_EXCL)
		return true;
	if (state == FSTRANS_SUSPENDING && type == FSTRANS_LAZY)
		return true;

	return false;
}
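
/*
 * The matrix implemented by grant_lock() above, for reference:
 *
 *	state		FSTRANS_LAZY	FSTRANS_SHARED	FSTRANS_EXCL
 *	NORMAL		granted		granted		granted
 *	SUSPENDING	granted		blocked		granted
 *	SUSPENDED	blocked		blocked		granted
 */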

/*
 * Start a transaction.  If this thread already has a transaction on this
 * file system increment the reference counter.
 */
int
_fstrans_start(struct mount *mp, enum fstrans_lock_type lock_type, int wait)
{
	int s;
	struct mount *lmp;
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi;

	if ((lmp = fstrans_normalize_mount(mp)) == NULL)
		return 0;

	ASSERT_SLEEPABLE();

	/*
	 * Allocate per lwp info for layered file systems to
	 * get a reference to the mount.  No need to increment
	 * the reference counter here.
	 */
	for (lmp = mp; lmp->mnt_lower; lmp = lmp->mnt_lower) {
		fli = fstrans_get_lwp_info(lmp, true);
	}

	if ((fli = fstrans_get_lwp_info(lmp, true)) == NULL)
		return 0;

	if (fli->fli_trans_cnt > 0) {
		KASSERT(lock_type != FSTRANS_EXCL);
		fli->fli_trans_cnt += 1;

		return 0;
	}

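	/*
	 * Fast path: read the mount state inside a pserialize read
	 * section only.  If the lock is granted we record the transaction
	 * and return; fstrans_setstate() publishes a new state with
	 * pserialize_perform() before scanning fli_trans_cnt, so it will
	 * either be seen here or wait for this transaction to finish.
	 */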
	s = pserialize_read_enter();
	fmi = lmp->mnt_transinfo;
	if (__predict_true(grant_lock(fmi->fmi_state, lock_type))) {
		fli->fli_trans_cnt = 1;
		fli->fli_lock_type = lock_type;
		pserialize_read_exit(s);

		return 0;
	}
	pserialize_read_exit(s);

	if (! wait)
		return EBUSY;

	mutex_enter(&fstrans_lock);
	while (! grant_lock(fmi->fmi_state, lock_type))
		cv_wait(&fstrans_state_cv, &fstrans_lock);
	fli->fli_trans_cnt = 1;
	fli->fli_lock_type = lock_type;
	mutex_exit(&fstrans_lock);

	return 0;
}

/*
 * Finish a transaction.
 */
void
fstrans_done(struct mount *mp)
{
	int s;
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi;

	if ((mp = fstrans_normalize_mount(mp)) == NULL)
		return;
	if ((fli = fstrans_get_lwp_info(mp, false)) == NULL)
		return;
	KASSERT(fli->fli_trans_cnt > 0);

	if (fli->fli_trans_cnt > 1) {
		fli->fli_trans_cnt -= 1;

		return;
	}

	s = pserialize_read_enter();
	fmi = mp->mnt_transinfo;
	if (__predict_true(fmi->fmi_state == FSTRANS_NORMAL)) {
		fli->fli_trans_cnt = 0;
		pserialize_read_exit(s);

		return;
	}
	pserialize_read_exit(s);

	mutex_enter(&fstrans_lock);
	fli->fli_trans_cnt = 0;
	cv_signal(&fstrans_count_cv);
	mutex_exit(&fstrans_lock);
}

/*
 * Check if this thread has an exclusive lock.
 */
int
fstrans_is_owner(struct mount *mp)
{
	struct fstrans_lwp_info *fli;

	if ((mp = fstrans_normalize_mount(mp)) == NULL)
		return 0;
	if ((fli = fstrans_get_lwp_info(mp, false)) == NULL)
		return 0;

	if (fli->fli_trans_cnt == 0)
		return 0;

	KASSERT(fli->fli_mount == mp);
	KASSERT(fli->fli_trans_cnt > 0);

	return (fli->fli_lock_type == FSTRANS_EXCL);
}

/*
 * True, if no thread is in a transaction not granted at the current state.
 */
static bool
state_change_done(const struct mount *mp)
{
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi;

	KASSERT(mutex_owned(&fstrans_lock));

	fmi = mp->mnt_transinfo;
	LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
		if (fli->fli_mount != mp)
			continue;
		if (fli->fli_trans_cnt == 0)
			continue;
		if (grant_lock(fmi->fmi_state, fli->fli_lock_type))
			continue;

		return false;
	}

	return true;
}

/*
 * Set new file system state.
 */
int
fstrans_setstate(struct mount *mp, enum fstrans_state new_state)
{
	int error;
	enum fstrans_state old_state;
	struct fstrans_mount_info *fmi;

	fmi = mp->mnt_transinfo;
	old_state = fmi->fmi_state;
	if (old_state == new_state)
		return 0;

	mutex_enter(&fstrans_lock);
	fmi->fmi_state = new_state;
	pserialize_perform(fstrans_psz);

	/*
	 * All threads see the new state now.
	 * Wait for transactions invalid at this state to leave.
	 */
	error = 0;
	while (! state_change_done(mp)) {
		error = cv_wait_sig(&fstrans_count_cv, &fstrans_lock);
		if (error) {
			new_state = fmi->fmi_state = FSTRANS_NORMAL;
			break;
		}
	}
	cv_broadcast(&fstrans_state_cv);
	mutex_exit(&fstrans_lock);

	if (old_state != new_state) {
		if (old_state == FSTRANS_NORMAL)
			fstrans_start(mp, FSTRANS_EXCL);
		if (new_state == FSTRANS_NORMAL)
			fstrans_done(mp);
	}

	return error;
}
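
/*
 * A suspending caller (typically the file system's VFS_SUSPENDCTL
 * implementation) is presumably expected to move through the states in
 * two steps, NORMAL -> SUSPENDING -> SUSPENDED: while SUSPENDING, shared
 * transactions are already blocked but lazy ones are still granted;
 * SUSPENDED then blocks everything except the suspender's own exclusive
 * transaction (see grant_lock() above).
 */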

/*
 * Get current file system state.
 */
enum fstrans_state
fstrans_getstate(struct mount *mp)
{
	struct fstrans_mount_info *fmi;

	fmi = mp->mnt_transinfo;
	KASSERT(fmi != NULL);

	return fmi->fmi_state;
}

/*
 * Request a filesystem to suspend all operations.
 */
int
vfs_suspend(struct mount *mp, int nowait)
{
	int error;

	if ((mp = fstrans_normalize_mount(mp)) == NULL)
		return EOPNOTSUPP;
	if (nowait) {
		if (!mutex_tryenter(&vfs_suspend_lock))
			return EWOULDBLOCK;
	} else
		mutex_enter(&vfs_suspend_lock);

	if ((error = VFS_SUSPENDCTL(mp, SUSPEND_SUSPEND)) != 0)
		mutex_exit(&vfs_suspend_lock);

	return error;
}

/*
 * Request a filesystem to resume all operations.
 */
void
vfs_resume(struct mount *mp)
{

	mp = fstrans_normalize_mount(mp);
	KASSERT(mp != NULL);

	VFS_SUSPENDCTL(mp, SUSPEND_RESUME);
	mutex_exit(&vfs_suspend_lock);
}
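
/*
 * Sketch of how the pair above is meant to be used by a caller that
 * needs a quiescent file system (the surrounding code is hypothetical):
 *
 *	if ((error = vfs_suspend(mp, 0)) == 0) {
 *		... operate on the now suspended file system ...
 *		vfs_resume(mp);
 *	}
 *
 * vfs_suspend() returns with vfs_suspend_lock held on success and
 * vfs_resume() releases it, so every successful vfs_suspend() must be
 * paired with a vfs_resume().
 */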


/*
 * True, if no thread is running a cow handler.
 */
static bool
cow_state_change_done(const struct mount *mp)
{
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi __diagused;

	fmi = mp->mnt_transinfo;

	KASSERT(mutex_owned(&fstrans_lock));
	KASSERT(fmi->fmi_cow_change);

	LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
		if (fli->fli_mount != mp)
			continue;
		if (fli->fli_cow_cnt == 0)
			continue;

		return false;
	}

	return true;
}

/*
 * Prepare for changing this mount's cow list.
 * Returns with fstrans_lock locked.
 */
static void
cow_change_enter(const struct mount *mp)
{
	struct fstrans_mount_info *fmi;

	fmi = mp->mnt_transinfo;

	mutex_enter(&fstrans_lock);

	/*
	 * Wait for other threads changing the list.
	 */
	while (fmi->fmi_cow_change)
		cv_wait(&fstrans_state_cv, &fstrans_lock);

	/*
	 * Wait until all threads are aware of a state change.
	 */
	fmi->fmi_cow_change = true;
	pserialize_perform(fstrans_psz);

	while (! cow_state_change_done(mp))
		cv_wait(&fstrans_count_cv, &fstrans_lock);
}
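
/*
 * Note: cow_change_enter() above and cow_change_done() below bracket
 * updates to the handler list; both publish the new value of
 * fmi_cow_change with pserialize_perform() so that the lock-free
 * readers in fscow_run() reliably see whether a change is in progress.
 */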

/*
 * Done changing this mount's cow list.
 */
static void
cow_change_done(const struct mount *mp)
{
	struct fstrans_mount_info *fmi;

	KASSERT(mutex_owned(&fstrans_lock));

	fmi = mp->mnt_transinfo;

	fmi->fmi_cow_change = false;
	pserialize_perform(fstrans_psz);

	cv_broadcast(&fstrans_state_cv);

	mutex_exit(&fstrans_lock);
}

/*
 * Add a handler to this mount.
 */
int
fscow_establish(struct mount *mp, int (*func)(void *, struct buf *, bool),
    void *arg)
{
	struct fstrans_mount_info *fmi;
	struct fscow_handler *newch;

	if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
		return EINVAL;

	fmi = mp->mnt_transinfo;
	KASSERT(fmi != NULL);

	newch = kmem_alloc(sizeof(*newch), KM_SLEEP);
	newch->ch_func = func;
	newch->ch_arg = arg;

	cow_change_enter(mp);
	LIST_INSERT_HEAD(&fmi->fmi_cow_handler, newch, ch_list);
	cow_change_done(mp);

	return 0;
}

/*
 * Remove a handler from this mount.
 */
int
fscow_disestablish(struct mount *mp, int (*func)(void *, struct buf *, bool),
    void *arg)
{
	struct fstrans_mount_info *fmi;
	struct fscow_handler *hp = NULL;

	if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
		return EINVAL;

	fmi = mp->mnt_transinfo;
	KASSERT(fmi != NULL);

	cow_change_enter(mp);
	LIST_FOREACH(hp, &fmi->fmi_cow_handler, ch_list)
		if (hp->ch_func == func && hp->ch_arg == arg)
			break;
	if (hp != NULL) {
		LIST_REMOVE(hp, ch_list);
		kmem_free(hp, sizeof(*hp));
	}
	cow_change_done(mp);

	return hp ? 0 : EINVAL;
}
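
/*
 * Sketch of a copy-on-write handler as registered above.  The names
 * are hypothetical; real handlers are provided e.g. by file system
 * snapshot code.
 *
 *	static int
 *	example_cow(void *arg, struct buf *bp, bool data_valid)
 *	{
 *
 *		... copy away the block bp is about to overwrite ...
 *		return 0;
 *	}
 *
 *	error = fscow_establish(mp, example_cow, sc);
 *	...
 *	error = fscow_disestablish(mp, example_cow, sc);
 */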

/*
 * Check for need to copy block that is about to be written.
 */
int
fscow_run(struct buf *bp, bool data_valid)
{
	int error, s;
	struct mount *mp;
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi;
	struct fscow_handler *hp;

	/*
	 * First check if we need to run the copy-on-write handler.
	 */
	if ((bp->b_flags & B_COWDONE))
		return 0;
	if (bp->b_vp == NULL) {
		bp->b_flags |= B_COWDONE;
		return 0;
	}
	if (bp->b_vp->v_type == VBLK)
		mp = spec_node_getmountedfs(bp->b_vp);
	else
		mp = bp->b_vp->v_mount;
	if (mp == NULL || (mp->mnt_iflag & IMNT_HAS_TRANS) == 0) {
		bp->b_flags |= B_COWDONE;
		return 0;
	}

	fli = fstrans_get_lwp_info(mp, true);
	fmi = mp->mnt_transinfo;

	/*
	 * On non-recursed run check if other threads
	 * want to change the list.
	 */
	if (fli->fli_cow_cnt == 0) {
		s = pserialize_read_enter();
		if (__predict_false(fmi->fmi_cow_change)) {
			pserialize_read_exit(s);
			mutex_enter(&fstrans_lock);
			while (fmi->fmi_cow_change)
				cv_wait(&fstrans_state_cv, &fstrans_lock);
			fli->fli_cow_cnt = 1;
			mutex_exit(&fstrans_lock);
		} else {
			fli->fli_cow_cnt = 1;
			pserialize_read_exit(s);
		}
	} else
		fli->fli_cow_cnt += 1;

	/*
	 * Run all copy-on-write handlers, stop on error.
	 */
	error = 0;
	LIST_FOREACH(hp, &fmi->fmi_cow_handler, ch_list)
		if ((error = (*hp->ch_func)(hp->ch_arg, bp, data_valid)) != 0)
			break;
	if (error == 0)
		bp->b_flags |= B_COWDONE;

	/*
	 * Check if other threads want to change the list.
	 */
	if (fli->fli_cow_cnt > 1) {
		fli->fli_cow_cnt -= 1;
	} else {
		s = pserialize_read_enter();
		if (__predict_false(fmi->fmi_cow_change)) {
			pserialize_read_exit(s);
			mutex_enter(&fstrans_lock);
			fli->fli_cow_cnt = 0;
			cv_signal(&fstrans_count_cv);
			mutex_exit(&fstrans_lock);
		} else {
			fli->fli_cow_cnt = 0;
			pserialize_read_exit(s);
		}
	}

	return error;
}

#if defined(DDB)
void fstrans_dump(int);

static void
fstrans_print_lwp(struct proc *p, struct lwp *l, int verbose)
{
	char prefix[9];
	struct fstrans_lwp_info *fli;

	snprintf(prefix, sizeof(prefix), "%d.%d", p->p_pid, l->l_lid);
	LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
		if (fli->fli_self != l)
			continue;
		if (fli->fli_trans_cnt == 0 && fli->fli_cow_cnt == 0) {
			if (! verbose)
				continue;
		}
		printf("%-8s", prefix);
		if (verbose)
			printf(" @%p", fli);
		if (fli->fli_mount != NULL)
			printf(" (%s)", fli->fli_mount->mnt_stat.f_mntonname);
		else
			printf(" NULL");
		if (fli->fli_trans_cnt == 0) {
			printf(" -");
		} else {
			switch (fli->fli_lock_type) {
			case FSTRANS_LAZY:
				printf(" lazy");
				break;
			case FSTRANS_SHARED:
				printf(" shared");
				break;
			case FSTRANS_EXCL:
				printf(" excl");
				break;
			default:
				printf(" %#x", fli->fli_lock_type);
				break;
			}
		}
		printf(" %d cow %d\n", fli->fli_trans_cnt, fli->fli_cow_cnt);
		prefix[0] = '\0';
	}
}

static void
fstrans_print_mount(struct mount *mp, int verbose)
{
	struct fstrans_mount_info *fmi;

	fmi = mp->mnt_transinfo;
	if (!verbose && (fmi == NULL || fmi->fmi_state == FSTRANS_NORMAL))
		return;

	printf("%-16s ", mp->mnt_stat.f_mntonname);
	if (fmi == NULL) {
		printf("(null)\n");
		return;
	}
	switch (fmi->fmi_state) {
	case FSTRANS_NORMAL:
		printf("state normal\n");
		break;
	case FSTRANS_SUSPENDING:
		printf("state suspending\n");
		break;
	case FSTRANS_SUSPENDED:
		printf("state suspended\n");
		break;
	default:
		printf("state %#x\n", fmi->fmi_state);
		break;
	}
}

void
fstrans_dump(int full)
{
	const struct proclist_desc *pd;
	struct proc *p;
	struct lwp *l;
	struct mount *mp;

	printf("Fstrans locks by lwp:\n");
	for (pd = proclists; pd->pd_list != NULL; pd++)
		PROCLIST_FOREACH(p, pd->pd_list)
			LIST_FOREACH(l, &p->p_lwps, l_sibling)
				fstrans_print_lwp(p, l, full == 1);

	printf("Fstrans state by mount:\n");
	for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp))
		fstrans_print_mount(mp, full == 1);
}
#endif /* defined(DDB) */