vfs_trans.c revision 1.45.2.3 1 /* $NetBSD: vfs_trans.c,v 1.45.2.3 2018/10/09 09:58:08 martin Exp $ */
2
3 /*-
4 * Copyright (c) 2007 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Juergen Hannken-Illjes.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: vfs_trans.c,v 1.45.2.3 2018/10/09 09:58:08 martin Exp $");
34
35 /*
36 * File system transaction operations.
37 */
38
39 #ifdef _KERNEL_OPT
40 #include "opt_ddb.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/atomic.h>
46 #include <sys/buf.h>
47 #include <sys/kmem.h>
48 #include <sys/mount.h>
49 #include <sys/pserialize.h>
50 #include <sys/vnode.h>
51 #include <sys/fstrans.h>
52 #include <sys/proc.h>
53
54 #include <miscfs/specfs/specdev.h>
55
enum fstrans_lock_type {
	FSTRANS_LAZY,			/* Granted while not suspended */
	FSTRANS_SHARED,			/* Granted while not suspending */
	FSTRANS_EXCL			/* Internal: exclusive lock */
};

/* One copy-on-write handler registered on a mount. */
struct fscow_handler {
	LIST_ENTRY(fscow_handler) ch_list;
	int (*ch_func)(void *, struct buf *, bool);
	void *ch_arg;
};

/* Per-lwp, per-mount transaction state, chained off lwp specific data. */
struct fstrans_lwp_info {
	struct fstrans_lwp_info *fli_succ;	/* Next entry for this lwp. */
	struct lwp *fli_self;			/* Owner; NULL if recyclable. */
	struct mount *fli_mount;		/* Referenced mount, or NULL. */
	int fli_trans_cnt;			/* Transaction recursion count. */
	int fli_cow_cnt;			/* Cow handler recursion count. */
	enum fstrans_lock_type fli_lock_type;	/* Type of held transaction. */
	LIST_ENTRY(fstrans_lwp_info) fli_list;	/* Global list linkage. */
};

/* Per-mount transaction state, hung off mp->mnt_transinfo. */
struct fstrans_mount_info {
	enum fstrans_state fmi_state;		/* Current suspension state. */
	unsigned int fmi_ref_cnt;		/* References from lwp infos. */
	bool fmi_cow_change;			/* Cow handler list changing. */
	LIST_HEAD(, fscow_handler) fmi_cow_handler;	/* Cow handlers. */
};

static specificdata_key_t lwp_data_key;	/* Our specific data key. */
static kmutex_t vfs_suspend_lock;	/* Serialize suspensions. */
static kmutex_t fstrans_lock;		/* Fstrans big lock. */
static kmutex_t fstrans_mount_lock;	/* Fstrans mount big lock. */
static kcondvar_t fstrans_state_cv;	/* Fstrans or cow state changed. */
static kcondvar_t fstrans_count_cv;	/* Fstrans or cow count changed. */
static pserialize_t fstrans_psz;	/* Pserialize state. */
static LIST_HEAD(fstrans_lwp_head, fstrans_lwp_info) fstrans_fli_head;
					/* List of all fstrans_lwp_info. */

static inline struct mount *fstrans_normalize_mount(struct mount *);
static void fstrans_lwp_dtor(void *);
static void fstrans_mount_dtor(struct mount *);
static void fstrans_clear_lwp_info(void);
static inline struct fstrans_lwp_info *
    fstrans_get_lwp_info(struct mount *, bool);
static struct fstrans_lwp_info *fstrans_alloc_lwp_info(struct mount *);
static inline int _fstrans_start(struct mount *, enum fstrans_lock_type, int);
static bool grant_lock(const enum fstrans_state, const enum fstrans_lock_type);
static bool state_change_done(const struct mount *);
static bool cow_state_change_done(const struct mount *);
static void cow_change_enter(const struct mount *);
static void cow_change_done(const struct mount *);
106
/*
 * Initialize.  Called once before any mounts use fstrans: create the
 * lwp specific data key, the module locks, condition variables,
 * pserialize state and the global lwp info list.
 */
void
fstrans_init(void)
{
	int error __diagused;

	error = lwp_specific_key_create(&lwp_data_key, fstrans_lwp_dtor);
	KASSERT(error == 0);

	mutex_init(&vfs_suspend_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&fstrans_lock, MUTEX_DEFAULT, IPL_NONE);
	mutex_init(&fstrans_mount_lock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&fstrans_state_cv, "fstchg");
	cv_init(&fstrans_count_cv, "fstcnt");
	fstrans_psz = pserialize_create();
	LIST_INIT(&fstrans_fli_head);
}
126
127 /*
128 * Normalize mount.
129 * Return mount if file system supports fstrans, NULL otherwise.
130 */
131 static inline struct mount *
132 fstrans_normalize_mount(struct mount *mp)
133 {
134
135 while (mp && mp->mnt_lower)
136 mp = mp->mnt_lower;
137 if (mp == NULL)
138 return NULL;
139 if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
140 return NULL;
141 return mp;
142 }
143
/*
 * Deallocate lwp state.
 * Lwp specific data destructor: drop the mount references held by this
 * lwp's info chain and mark each entry free for reuse.  Entries stay
 * on fstrans_fli_head so fstrans_alloc_lwp_info() may recycle them.
 */
static void
fstrans_lwp_dtor(void *arg)
{
	struct fstrans_lwp_info *fli, *fli_next;

	for (fli = arg; fli; fli = fli_next) {
		KASSERT(fli->fli_trans_cnt == 0);
		KASSERT(fli->fli_cow_cnt == 0);
		if (fli->fli_mount != NULL)
			fstrans_mount_dtor(fli->fli_mount);
		fli_next = fli->fli_succ;
		fli->fli_mount = NULL;
		/*
		 * Ensure the cleared mount is globally visible before the
		 * entry is marked free (fli_self == NULL) and may be
		 * claimed by another lwp.
		 */
		membar_sync();
		fli->fli_self = NULL;
	}
}
163
/*
 * Dereference mount state.
 * Drop one reference to the fstrans mount info; on the last reference
 * detach it from the mount, free it and release the mount.
 */
static void
fstrans_mount_dtor(struct mount *mp)
{
	struct fstrans_mount_info *fmi;

	mutex_enter(&fstrans_mount_lock);

	fmi = mp->mnt_transinfo;
	KASSERT(fmi != NULL);
	fmi->fmi_ref_cnt -= 1;
	if (fmi->fmi_ref_cnt > 0) {
		mutex_exit(&fstrans_mount_lock);
		return;
	}

	/* Last reference: the mount must be idle with no cow handlers. */
	KASSERT(fmi->fmi_state == FSTRANS_NORMAL);
	KASSERT(LIST_FIRST(&fmi->fmi_cow_handler) == NULL);

	mp->mnt_iflag &= ~IMNT_HAS_TRANS;
	mp->mnt_transinfo = NULL;

	mutex_exit(&fstrans_mount_lock);

	kmem_free(fmi, sizeof(*fmi));
	vfs_rele(mp);
}
193
/*
 * Allocate mount state.
 * Attach fstrans transaction state to a mount and take a reference
 * to the mount.  Always succeeds (KM_SLEEP allocation).
 */
int
fstrans_mount(struct mount *mp)
{
	struct fstrans_mount_info *newfmi;

	newfmi = kmem_alloc(sizeof(*newfmi), KM_SLEEP);
	newfmi->fmi_state = FSTRANS_NORMAL;
	newfmi->fmi_ref_cnt = 1;	/* Released by fstrans_unmount(). */
	LIST_INIT(&newfmi->fmi_cow_handler);
	newfmi->fmi_cow_change = false;

	/* Publish the info under the mount lock. */
	mutex_enter(&fstrans_mount_lock);
	mp->mnt_transinfo = newfmi;
	mp->mnt_iflag |= IMNT_HAS_TRANS;
	mutex_exit(&fstrans_mount_lock);

	vfs_ref(mp);

	return 0;
}
217
218 /*
219 * Deallocate mount state.
220 */
221 void
222 fstrans_unmount(struct mount *mp)
223 {
224
225 if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
226 return;
227
228 KASSERT(mp->mnt_transinfo != NULL);
229
230 fstrans_mount_dtor(mp);
231 }
232
233 /*
234 * Clear mount entries whose mount is gone.
235 */
236 static void
237 fstrans_clear_lwp_info(void)
238 {
239 struct fstrans_lwp_info *fli;
240
241 /*
242 * Scan our list clearing entries whose mount is gone.
243 */
244 for (fli = lwp_getspecific(lwp_data_key); fli; fli = fli->fli_succ) {
245 if (fli->fli_mount != NULL &&
246 (fli->fli_mount->mnt_iflag & IMNT_GONE) != 0 &&
247 fli->fli_trans_cnt == 0 && fli->fli_cow_cnt == 0) {
248 fstrans_mount_dtor(fli->fli_mount);
249 fli->fli_mount = NULL;
250 }
251 }
252 }
253
/*
 * Allocate and return per lwp info for this mount.
 * Returns NULL if the mount no longer has valid fstrans state.
 */
static struct fstrans_lwp_info *
fstrans_alloc_lwp_info(struct mount *mp)
{
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi;

	/*
	 * Try to reuse a cleared entry or allocate a new one.
	 */
	for (fli = lwp_getspecific(lwp_data_key); fli; fli = fli->fli_succ) {
		KASSERT(fli->fli_mount != mp);
		if (fli->fli_mount == NULL) {
			KASSERT(fli->fli_trans_cnt == 0);
			KASSERT(fli->fli_cow_cnt == 0);
			break;
		}
	}
	if (fli == NULL) {
		/* Try to recycle an entry freed by some exited lwp. */
		mutex_enter(&fstrans_lock);
		LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
			if (fli->fli_self == NULL) {
				KASSERT(fli->fli_mount == NULL);
				KASSERT(fli->fli_trans_cnt == 0);
				KASSERT(fli->fli_cow_cnt == 0);
				fli->fli_self = curlwp;
				fli->fli_succ = lwp_getspecific(lwp_data_key);
				lwp_setspecific(lwp_data_key, fli);
				break;
			}
		}
		mutex_exit(&fstrans_lock);
	}
	if (fli == NULL) {
		/* Nothing to recycle: allocate a fresh entry. */
		fli = kmem_alloc(sizeof(*fli), KM_SLEEP);
		mutex_enter(&fstrans_lock);
		memset(fli, 0, sizeof(*fli));
		fli->fli_self = curlwp;
		LIST_INSERT_HEAD(&fstrans_fli_head, fli, fli_list);
		mutex_exit(&fstrans_lock);
		fli->fli_succ = lwp_getspecific(lwp_data_key);
		lwp_setspecific(lwp_data_key, fli);
	}

	/*
	 * Attach the entry to the mount if its mnt_transinfo is valid.
	 */
	mutex_enter(&fstrans_mount_lock);
	fmi = mp->mnt_transinfo;
	if (__predict_true(fmi != NULL)) {
		fli->fli_mount = mp;
		fmi->fmi_ref_cnt += 1;
	} else {
		fli = NULL;
	}
	mutex_exit(&fstrans_mount_lock);

	return fli;
}
315
316 /*
317 * Retrieve the per lwp info for this mount allocating if necessary.
318 */
319 static inline struct fstrans_lwp_info *
320 fstrans_get_lwp_info(struct mount *mp, bool do_alloc)
321 {
322 struct fstrans_lwp_info *fli;
323
324 /*
325 * Scan our list for a match.
326 */
327 for (fli = lwp_getspecific(lwp_data_key); fli; fli = fli->fli_succ) {
328 if (fli->fli_mount == mp)
329 return fli;
330 }
331
332 return (do_alloc ? fstrans_alloc_lwp_info(mp) : NULL);
333 }
334
335 /*
336 * Check if this lock type is granted at this state.
337 */
338 static bool
339 grant_lock(const enum fstrans_state state, const enum fstrans_lock_type type)
340 {
341
342 if (__predict_true(state == FSTRANS_NORMAL))
343 return true;
344 if (type == FSTRANS_EXCL)
345 return true;
346 if (state == FSTRANS_SUSPENDING && type == FSTRANS_LAZY)
347 return true;
348
349 return false;
350 }
351
/*
 * Start a transaction.  If this thread already has a transaction on this
 * file system increment the reference counter.
 *
 * Returns 0 on success (including file systems without fstrans support)
 * or EBUSY if "wait" is false and the lock cannot be granted now.
 */
static inline int
_fstrans_start(struct mount *mp, enum fstrans_lock_type lock_type, int wait)
{
	int s;
	struct mount *lmp;
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi;

	if ((lmp = fstrans_normalize_mount(mp)) == NULL)
		return 0;

	ASSERT_SLEEPABLE();

	/*
	 * Allocate per lwp info for layered file systems to
	 * get a reference to the mount.  No need to increment
	 * the reference counter here.
	 */
	for (lmp = mp; lmp->mnt_lower; lmp = lmp->mnt_lower) {
		fli = fstrans_get_lwp_info(lmp, true);
	}

	/* The normalized (lowest) mount carries the transaction state. */
	if ((fli = fstrans_get_lwp_info(lmp, true)) == NULL)
		return 0;

	/* Recursive transaction: just bump the count. */
	if (fli->fli_trans_cnt > 0) {
		KASSERT(lock_type != FSTRANS_EXCL);
		fli->fli_trans_cnt += 1;

		return 0;
	}

	/*
	 * Fast path: take the lock inside a pserialize read section.
	 * fstrans_setstate() runs pserialize_perform() after setting a
	 * new state, so it observes fli_trans_cnt set here.
	 */
	s = pserialize_read_enter();
	fmi = lmp->mnt_transinfo;
	if (__predict_true(grant_lock(fmi->fmi_state, lock_type))) {
		fli->fli_trans_cnt = 1;
		fli->fli_lock_type = lock_type;
		pserialize_read_exit(s);

		return 0;
	}
	pserialize_read_exit(s);

	if (! wait)
		return EBUSY;

	/* Slow path: sleep under fstrans_lock until the lock is granted. */
	mutex_enter(&fstrans_lock);
	while (! grant_lock(fmi->fmi_state, lock_type))
		cv_wait(&fstrans_state_cv, &fstrans_lock);
	fli->fli_trans_cnt = 1;
	fli->fli_lock_type = lock_type;
	mutex_exit(&fstrans_lock);

	return 0;
}
411
/*
 * Start a shared transaction, waiting as needed.  Cannot fail.
 */
void
fstrans_start(struct mount *mp)
{
	int error __diagused;

	error = _fstrans_start(mp, FSTRANS_SHARED, 1);
	KASSERT(error == 0);
}
420
/*
 * Try to start a shared transaction without waiting.
 * Returns 0 on success or EBUSY if not currently granted.
 */
int
fstrans_start_nowait(struct mount *mp)
{

	return _fstrans_start(mp, FSTRANS_SHARED, 0);
}
427
/*
 * Start a lazy transaction, waiting as needed.  Cannot fail.
 * Lazy transactions are also granted while the file system is
 * suspending (but not once it is suspended), see grant_lock().
 */
void
fstrans_start_lazy(struct mount *mp)
{
	int error __diagused;

	error = _fstrans_start(mp, FSTRANS_LAZY, 1);
	KASSERT(error == 0);
}
436
/*
 * Finish a transaction.
 */
void
fstrans_done(struct mount *mp)
{
	int s;
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi;

	if ((mp = fstrans_normalize_mount(mp)) == NULL)
		return;
	if ((fli = fstrans_get_lwp_info(mp, false)) == NULL)
		return;
	KASSERT(fli->fli_trans_cnt > 0);

	/* Recursive transaction: just drop the count. */
	if (fli->fli_trans_cnt > 1) {
		fli->fli_trans_cnt -= 1;

		return;
	}

	/* Leaving the last transaction: release unused mount references. */
	fstrans_clear_lwp_info();

	/*
	 * Fast path: in state FSTRANS_NORMAL no one waits on the count.
	 * See _fstrans_start() for the pserialize protocol.
	 */
	s = pserialize_read_enter();
	fmi = mp->mnt_transinfo;
	if (__predict_true(fmi->fmi_state == FSTRANS_NORMAL)) {
		fli->fli_trans_cnt = 0;
		pserialize_read_exit(s);

		return;
	}
	pserialize_read_exit(s);

	/* Slow path: a state change may be waiting for the count to drain. */
	mutex_enter(&fstrans_lock);
	fli->fli_trans_cnt = 0;
	cv_signal(&fstrans_count_cv);
	mutex_exit(&fstrans_lock);
}
476
477 /*
478 * Check if this thread has an exclusive lock.
479 */
480 int
481 fstrans_is_owner(struct mount *mp)
482 {
483 struct fstrans_lwp_info *fli;
484
485 if ((mp = fstrans_normalize_mount(mp)) == NULL)
486 return 0;
487 if ((fli = fstrans_get_lwp_info(mp, false)) == NULL)
488 return 0;
489
490 if (fli->fli_trans_cnt == 0)
491 return 0;
492
493 KASSERT(fli->fli_mount == mp);
494 KASSERT(fli->fli_trans_cnt > 0);
495
496 return (fli->fli_lock_type == FSTRANS_EXCL);
497 }
498
499 /*
500 * True, if no thread is in a transaction not granted at the current state.
501 */
502 static bool
503 state_change_done(const struct mount *mp)
504 {
505 struct fstrans_lwp_info *fli;
506 struct fstrans_mount_info *fmi;
507
508 KASSERT(mutex_owned(&fstrans_lock));
509
510 fmi = mp->mnt_transinfo;
511 LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
512 if (fli->fli_mount != mp)
513 continue;
514 if (fli->fli_trans_cnt == 0)
515 continue;
516 if (grant_lock(fmi->fmi_state, fli->fli_lock_type))
517 continue;
518
519 return false;
520 }
521
522 return true;
523 }
524
/*
 * Set new file system state.
 * Returns 0 on success or the error from cv_wait_sig(); if interrupted
 * the state is rolled back to FSTRANS_NORMAL.
 */
int
fstrans_setstate(struct mount *mp, enum fstrans_state new_state)
{
	int error;
	enum fstrans_state old_state;
	struct fstrans_mount_info *fmi;

	fmi = mp->mnt_transinfo;
	old_state = fmi->fmi_state;
	if (old_state == new_state)
		return 0;

	mutex_enter(&fstrans_lock);
	fmi->fmi_state = new_state;
	/* Wait until every pserialize reader has observed the new state. */
	pserialize_perform(fstrans_psz);

	/*
	 * All threads see the new state now.
	 * Wait for transactions invalid at this state to leave.
	 */
	error = 0;
	while (! state_change_done(mp)) {
		error = cv_wait_sig(&fstrans_count_cv, &fstrans_lock);
		if (error) {
			/* Interrupted: roll back to normal operation. */
			new_state = fmi->fmi_state = FSTRANS_NORMAL;
			break;
		}
	}
	cv_broadcast(&fstrans_state_cv);
	mutex_exit(&fstrans_lock);

	/*
	 * Take the exclusive lock when leaving FSTRANS_NORMAL and
	 * release it when returning to FSTRANS_NORMAL.
	 */
	if (old_state != new_state) {
		if (old_state == FSTRANS_NORMAL)
			_fstrans_start(mp, FSTRANS_EXCL, 1);
		if (new_state == FSTRANS_NORMAL)
			fstrans_done(mp);
	}

	return error;
}
568
/*
 * Get current file system state.
 * Caller must ensure the mount has fstrans state attached.
 */
enum fstrans_state
fstrans_getstate(struct mount *mp)
{
	struct fstrans_mount_info *fmi;

	fmi = mp->mnt_transinfo;
	KASSERT(fmi != NULL);

	return fmi->fmi_state;
}
582
583 /*
584 * Request a filesystem to suspend all operations.
585 */
586 int
587 vfs_suspend(struct mount *mp, int nowait)
588 {
589 int error;
590
591 if ((mp = fstrans_normalize_mount(mp)) == NULL)
592 return EOPNOTSUPP;
593 if (nowait) {
594 if (!mutex_tryenter(&vfs_suspend_lock))
595 return EWOULDBLOCK;
596 } else
597 mutex_enter(&vfs_suspend_lock);
598
599 if ((error = VFS_SUSPENDCTL(mp, SUSPEND_SUSPEND)) != 0)
600 mutex_exit(&vfs_suspend_lock);
601
602 return error;
603 }
604
/*
 * Request a filesystem to resume all operations.
 * Releases vfs_suspend_lock taken by a successful vfs_suspend().
 */
void
vfs_resume(struct mount *mp)
{

	mp = fstrans_normalize_mount(mp);
	KASSERT(mp != NULL);

	VFS_SUSPENDCTL(mp, SUSPEND_RESUME);
	mutex_exit(&vfs_suspend_lock);
}
618
619
620 /*
621 * True, if no thread is running a cow handler.
622 */
623 static bool
624 cow_state_change_done(const struct mount *mp)
625 {
626 struct fstrans_lwp_info *fli;
627 struct fstrans_mount_info *fmi __diagused;
628
629 fmi = mp->mnt_transinfo;
630
631 KASSERT(mutex_owned(&fstrans_lock));
632 KASSERT(fmi->fmi_cow_change);
633
634 LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
635 if (fli->fli_mount != mp)
636 continue;
637 if (fli->fli_cow_cnt == 0)
638 continue;
639
640 return false;
641 }
642
643 return true;
644 }
645
/*
 * Prepare for changing this mounts cow list.
 * Returns with fstrans_lock locked.
 */
static void
cow_change_enter(const struct mount *mp)
{
	struct fstrans_mount_info *fmi;

	fmi = mp->mnt_transinfo;

	mutex_enter(&fstrans_lock);

	/*
	 * Wait for other threads changing the list.
	 */
	while (fmi->fmi_cow_change)
		cv_wait(&fstrans_state_cv, &fstrans_lock);

	/*
	 * Wait until all threads are aware of a state change.
	 */
	fmi->fmi_cow_change = true;
	pserialize_perform(fstrans_psz);

	/* Wait for already running cow handlers, see fscow_run(). */
	while (! cow_state_change_done(mp))
		cv_wait(&fstrans_count_cv, &fstrans_lock);
}
674
/*
 * Done changing this mounts cow list.
 * Called with fstrans_lock held (from cow_change_enter()); releases it.
 */
static void
cow_change_done(const struct mount *mp)
{
	struct fstrans_mount_info *fmi;

	KASSERT(mutex_owned(&fstrans_lock));

	fmi = mp->mnt_transinfo;

	fmi->fmi_cow_change = false;
	/* Make the cleared flag visible to all pserialize readers. */
	pserialize_perform(fstrans_psz);

	cv_broadcast(&fstrans_state_cv);

	mutex_exit(&fstrans_lock);
}
694
695 /*
696 * Add a handler to this mount.
697 */
698 int
699 fscow_establish(struct mount *mp, int (*func)(void *, struct buf *, bool),
700 void *arg)
701 {
702 struct fstrans_mount_info *fmi;
703 struct fscow_handler *newch;
704
705 if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
706 return EINVAL;
707
708 fmi = mp->mnt_transinfo;
709 KASSERT(fmi != NULL);
710
711 newch = kmem_alloc(sizeof(*newch), KM_SLEEP);
712 newch->ch_func = func;
713 newch->ch_arg = arg;
714
715 cow_change_enter(mp);
716 LIST_INSERT_HEAD(&fmi->fmi_cow_handler, newch, ch_list);
717 cow_change_done(mp);
718
719 return 0;
720 }
721
722 /*
723 * Remove a handler from this mount.
724 */
725 int
726 fscow_disestablish(struct mount *mp, int (*func)(void *, struct buf *, bool),
727 void *arg)
728 {
729 struct fstrans_mount_info *fmi;
730 struct fscow_handler *hp = NULL;
731
732 if ((mp->mnt_iflag & IMNT_HAS_TRANS) == 0)
733 return EINVAL;
734
735 fmi = mp->mnt_transinfo;
736 KASSERT(fmi != NULL);
737
738 cow_change_enter(mp);
739 LIST_FOREACH(hp, &fmi->fmi_cow_handler, ch_list)
740 if (hp->ch_func == func && hp->ch_arg == arg)
741 break;
742 if (hp != NULL) {
743 LIST_REMOVE(hp, ch_list);
744 kmem_free(hp, sizeof(*hp));
745 }
746 cow_change_done(mp);
747
748 return hp ? 0 : EINVAL;
749 }
750
/*
 * Check for need to copy block that is about to be written.
 * Runs all cow handlers registered on the buffer's mount; on success
 * marks the buffer B_COWDONE.  Returns the first handler error.
 */
int
fscow_run(struct buf *bp, bool data_valid)
{
	int error, s;
	struct mount *mp;
	struct fstrans_lwp_info *fli;
	struct fstrans_mount_info *fmi;
	struct fscow_handler *hp;

	/*
	 * First check if we need run the copy-on-write handler.
	 */
	if ((bp->b_flags & B_COWDONE))
		return 0;
	if (bp->b_vp == NULL) {
		bp->b_flags |= B_COWDONE;
		return 0;
	}
	if (bp->b_vp->v_type == VBLK)
		mp = spec_node_getmountedfs(bp->b_vp);
	else
		mp = bp->b_vp->v_mount;
	if (mp == NULL || (mp->mnt_iflag & IMNT_HAS_TRANS) == 0) {
		bp->b_flags |= B_COWDONE;
		return 0;
	}

	fli = fstrans_get_lwp_info(mp, true);
	fmi = mp->mnt_transinfo;

	/*
	 * On non-recursed run check if other threads
	 * want to change the list.  fli_cow_cnt > 0 blocks
	 * cow_change_enter() until we are done.
	 */
	if (fli->fli_cow_cnt == 0) {
		s = pserialize_read_enter();
		if (__predict_false(fmi->fmi_cow_change)) {
			/* A change is in progress: wait for it under the lock. */
			pserialize_read_exit(s);
			mutex_enter(&fstrans_lock);
			while (fmi->fmi_cow_change)
				cv_wait(&fstrans_state_cv, &fstrans_lock);
			fli->fli_cow_cnt = 1;
			mutex_exit(&fstrans_lock);
		} else {
			fli->fli_cow_cnt = 1;
			pserialize_read_exit(s);
		}
	} else
		fli->fli_cow_cnt += 1;

	/*
	 * Run all copy-on-write handlers, stop on error.
	 */
	error = 0;
	LIST_FOREACH(hp, &fmi->fmi_cow_handler, ch_list)
		if ((error = (*hp->ch_func)(hp->ch_arg, bp, data_valid)) != 0)
			break;
	if (error == 0)
		bp->b_flags |= B_COWDONE;

	/*
	 * Check if other threads want to change the list.
	 */
	if (fli->fli_cow_cnt > 1) {
		fli->fli_cow_cnt -= 1;
	} else {
		s = pserialize_read_enter();
		if (__predict_false(fmi->fmi_cow_change)) {
			/* Wake a waiting cow_change_enter(). */
			pserialize_read_exit(s);
			mutex_enter(&fstrans_lock);
			fli->fli_cow_cnt = 0;
			cv_signal(&fstrans_count_cv);
			mutex_exit(&fstrans_lock);
		} else {
			fli->fli_cow_cnt = 0;
			pserialize_read_exit(s);
		}
	}

	return error;
}
835
836 #if defined(DDB)
837 void fstrans_dump(int);
838
839 static void
840 fstrans_print_lwp(struct proc *p, struct lwp *l, int verbose)
841 {
842 char prefix[9];
843 struct fstrans_lwp_info *fli;
844
845 snprintf(prefix, sizeof(prefix), "%d.%d", p->p_pid, l->l_lid);
846 LIST_FOREACH(fli, &fstrans_fli_head, fli_list) {
847 if (fli->fli_self != l)
848 continue;
849 if (fli->fli_trans_cnt == 0 && fli->fli_cow_cnt == 0) {
850 if (! verbose)
851 continue;
852 }
853 printf("%-8s", prefix);
854 if (verbose)
855 printf(" @%p", fli);
856 if (fli->fli_mount != NULL)
857 printf(" (%s)", fli->fli_mount->mnt_stat.f_mntonname);
858 else
859 printf(" NULL");
860 if (fli->fli_trans_cnt == 0) {
861 printf(" -");
862 } else {
863 switch (fli->fli_lock_type) {
864 case FSTRANS_LAZY:
865 printf(" lazy");
866 break;
867 case FSTRANS_SHARED:
868 printf(" shared");
869 break;
870 case FSTRANS_EXCL:
871 printf(" excl");
872 break;
873 default:
874 printf(" %#x", fli->fli_lock_type);
875 break;
876 }
877 }
878 printf(" %d cow %d\n", fli->fli_trans_cnt, fli->fli_cow_cnt);
879 prefix[0] = '\0';
880 }
881 }
882
883 static void
884 fstrans_print_mount(struct mount *mp, int verbose)
885 {
886 struct fstrans_mount_info *fmi;
887
888 fmi = mp->mnt_transinfo;
889 if (!verbose && (fmi == NULL || fmi->fmi_state == FSTRANS_NORMAL))
890 return;
891
892 printf("%-16s ", mp->mnt_stat.f_mntonname);
893 if (fmi == NULL) {
894 printf("(null)\n");
895 return;
896 }
897 switch (fmi->fmi_state) {
898 case FSTRANS_NORMAL:
899 printf("state normal\n");
900 break;
901 case FSTRANS_SUSPENDING:
902 printf("state suspending\n");
903 break;
904 case FSTRANS_SUSPENDED:
905 printf("state suspended\n");
906 break;
907 default:
908 printf("state %#x\n", fmi->fmi_state);
909 break;
910 }
911 }
912
/*
 * DDB entry point: dump fstrans state by lwp and by mount.
 * With "full" == 1 also print idle entries and normal-state mounts.
 */
void
fstrans_dump(int full)
{
	const struct proclist_desc *pd;
	struct proc *p;
	struct lwp *l;
	struct mount *mp;

	printf("Fstrans locks by lwp:\n");
	for (pd = proclists; pd->pd_list != NULL; pd++)
		PROCLIST_FOREACH(p, pd->pd_list)
			LIST_FOREACH(l, &p->p_lwps, l_sibling)
				fstrans_print_lwp(p, l, full == 1);

	printf("Fstrans state by mount:\n");
	for (mp = _mountlist_next(NULL); mp; mp = _mountlist_next(mp))
		fstrans_print_mount(mp, full == 1);
}
931 #endif /* defined(DDB) */
932