/*	$NetBSD: lfs_bio.c,v 1.121 2012/01/02 22:10:44 perseant Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.121 2012/01/02 22:10:44 perseant Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 *
 * protected by lfs_lock.
 */
int	locked_queue_count   = 0;	/* Count of locked-down buffers. */
long	locked_queue_bytes   = 0L;	/* Total size of locked buffers. */
int	lfs_subsys_pages     = 0;	/* Total number of LFS-written pages */
int	lfs_fs_pagetrip	     = 0;	/* # of pages to trip per-fs write */
int	lfs_writing	     = 0;	/* Set if already kicked off a writer
					   because of buffer space */
int	locked_queue_waiters = 0;	/* Number of processes waiting on lq */

/* Lock and condition variables for above. */
kcondvar_t	locked_queue_cv;
kcondvar_t	lfs_writing_cv;
kmutex_t	lfs_lock;

extern int lfs_dostats;

/*
 * reserved number/bytes of locked buffers
 */
int locked_queue_rcount = 0;
long locked_queue_rbytes = 0L;

static int lfs_fits_buf(struct lfs *, int, int);
static int lfs_reservebuf(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int, int);
static int lfs_reserveavail(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int);

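/*
 * Return true if adding n more buffers and bytes more bytes would keep
 * the locked queue, including outstanding reservations, within the
 * LFS_WAIT_BUFS and LFS_WAIT_BYTES limits.  Called with lfs_lock held.
 */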
static int
lfs_fits_buf(struct lfs *fs, int n, int bytes)
{
	int count_fit, bytes_fit;

	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));

	count_fit =
	    (locked_queue_count + locked_queue_rcount + n <= LFS_WAIT_BUFS);
	bytes_fit =
	    (locked_queue_bytes + locked_queue_rbytes + bytes <= LFS_WAIT_BYTES);

#ifdef DEBUG
	if (!count_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit count: %d + %d + %d > %d\n",
		      locked_queue_count, locked_queue_rcount,
		      n, LFS_WAIT_BUFS));
	}
	if (!bytes_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit bytes: %ld + %ld + %d > %ld\n",
		      locked_queue_bytes, locked_queue_rbytes,
		      bytes, LFS_WAIT_BYTES));
	}
#endif /* DEBUG */

	return (count_fit && bytes_fit);
}

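/*
 * Reserve space on the locked queue for n buffers / bytes bytes of
 * buffer data, flushing and sleeping until the reservation fits.
 * A negative n releases a previous reservation and wakes any
 * processes waiting for locked-queue space.
 */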
/* ARGSUSED */
static int
lfs_reservebuf(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int n, int bytes)
{
	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	mutex_enter(&lfs_lock);
	while (n > 0 && !lfs_fits_buf(fs, n, bytes)) {
		int error;

		lfs_flush(fs, 0, 0);

		DLOG((DLOG_AVAIL, "lfs_reservebuf: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		++locked_queue_waiters;
		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		--locked_queue_waiters;
		if (error && error != EWOULDBLOCK) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}

	locked_queue_rcount += n;
	locked_queue_rbytes += bytes;

	if (n < 0 && locked_queue_waiters > 0) {
		DLOG((DLOG_AVAIL, "lfs_reservebuf: broadcast: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		cv_broadcast(&locked_queue_cv);
	}

	mutex_exit(&lfs_lock);

	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	return 0;
}

/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored.  If there is not enough space, give
 * up the vnode lock temporarily and wait for the space to become available.
 *
 * Called with vp locked.  (Note however that if fsb < 0, vp is ignored.)
 *
 * XXX YAMT - it isn't safe to unlock vp here
 * because the node might be modified while we sleep.
 * (eg. cached states like i_offset might be stale,
 *  the vnode might be truncated, etc..)
 * maybe we should have a way to restart the vnodeop (EVOPRESTART?)
 * or rearrange the vnodeop interface to leave vnode locking to file system
 * specific code so that each file system can have its own vnode locking and
 * vnode re-using strategies.
 */
static int
lfs_reserveavail(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int fsb)
{
	CLEANERINFO *cip;
	struct buf *bp;
	int error, slept;

	ASSERT_MAYBE_SEGLOCK(fs);
	slept = 0;
	mutex_enter(&lfs_lock);
	while (fsb > 0 && !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
		mutex_exit(&lfs_lock);
#if 0
		/*
		 * XXX ideally, we should unlock vnodes here
		 * because we might sleep a very long time.
		 */
		VOP_UNLOCK(vp);
		if (vp2 != NULL) {
			VOP_UNLOCK(vp2);
		}
#else
		/*
		 * XXX since we'll sleep waiting for the cleaner while
		 * holding the vnode lock, deadlock will occur if the
		 * cleaner tries to lock the vnode.
		 * (eg. lfs_markv -> lfs_fastvget -> getnewvnode -> vclean)
		 */
#endif

		if (!slept) {
			DLOG((DLOG_AVAIL, "lfs_reserve: waiting for %ld (bfree = %d,"
			      " est_bfree = %d)\n",
			      fsb + fs->lfs_ravail + fs->lfs_favail,
			      fs->lfs_bfree, LFS_EST_BFREE(fs)));
		}
		++slept;

		/* Wake up the cleaner */
		LFS_CLEANERINFO(cip, fs, bp);
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
		lfs_wakeup_cleaner(fs);

		mutex_enter(&lfs_lock);
		/* Cleaner might have run while we were reading, check again */
		if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
			break;

		error = mtsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
		    0, &lfs_lock);
#if 0
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
		vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
#endif
		if (error) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}
#ifdef DEBUG
	if (slept) {
		DLOG((DLOG_AVAIL, "lfs_reserve: woke up\n"));
	}
#endif
	fs->lfs_ravail += fsb;
	mutex_exit(&lfs_lock);

	return 0;
}

#ifdef DIAGNOSTIC
int lfs_rescount;
int lfs_rescountdirop;
#endif

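/*
 * Reserve fsb file system blocks on behalf of vp and vp2, or release
 * them if fsb < 0: first the disk space, via lfs_reserveavail(), then
 * a matching estimate of buffer space, via lfs_reservebuf().  Vnodes
 * that are part of an active dirop cannot safely sleep here, so for
 * them only the diagnostic counters are updated and no reservation
 * is made.
 */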
int
lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	int error;
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);
	if (vp2) {
		/* Make sure we're not in the process of reclaiming vp2 */
		mutex_enter(&lfs_lock);
		while (fs->lfs_flags & LFS_UNDIROP) {
			mtsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
			    &lfs_lock);
		}
		mutex_exit(&lfs_lock);
	}

	KASSERT(fsb < 0 || VOP_ISLOCKED(vp));
	KASSERT(vp2 == NULL || fsb < 0 || VOP_ISLOCKED(vp2));
	KASSERT(vp2 == NULL || !(VTOI(vp2)->i_flag & IN_ADIROP));
	KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);

	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
#ifdef DIAGNOSTIC
	if (cantwait) {
		if (fsb > 0)
			lfs_rescountdirop++;
		else if (fsb < 0)
			lfs_rescountdirop--;
		if (lfs_rescountdirop < 0)
			panic("lfs_rescountdirop");
	} else {
		if (fsb > 0)
			lfs_rescount++;
		else if (fsb < 0)
			lfs_rescount--;
		if (lfs_rescount < 0)
			panic("lfs_rescount");
	}
#endif
	if (cantwait)
		return 0;

	/*
	 * XXX
	 * vhold vnodes here so that the cleaner doesn't try to reuse them.
	 * (see XXX comment in lfs_reserveavail)
	 */
	vhold(vp);
	if (vp2 != NULL) {
		vhold(vp2);
	}

	error = lfs_reserveavail(fs, vp, vp2, fsb);
	if (error)
		goto done;

	/*
	 * XXX just a guess.  should be more precise.
	 */
	error = lfs_reservebuf(fs, vp, vp2, fsb, fsbtob(fs, fsb));
	if (error)
		lfs_reserveavail(fs, vp, vp2, -fsb);

done:
	holdrele(vp);
	if (vp2 != NULL) {
		holdrele(vp2);
	}

	return error;
}

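/*
 * VOP_BWRITE() for LFS.  All buffer writes funnel through
 * lfs_bwrite_ext(); an asynchronous write of an LFS buffer would
 * bypass the segment writer, hence the DIAGNOSTIC panic below.
 */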
int
lfs_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;

#ifdef DIAGNOSTIC
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly == 0 && (bp->b_flags & B_ASYNC)) {
		panic("bawrite LFS buffer");
	}
#endif /* DIAGNOSTIC */
	return lfs_bwrite_ext(bp, 0);
}

/*
 * Determine if there is enough room currently available to write fsb
 * blocks.  We need enough blocks for the new blocks, the current
 * inode blocks (including potentially the ifile inode), a summary block,
 * and the segment usage table, plus an ifile block.
 */
int
lfs_fits(struct lfs *fs, int fsb)
{
	int needed;

	ASSERT_NO_SEGLOCK(fs);
	needed = fsb + btofsb(fs, fs->lfs_sumsize) +
		 ((howmany(fs->lfs_uinodes + 1, INOPB(fs)) + fs->lfs_segtabsz +
		   1) << (fs->lfs_bshift - fs->lfs_ffshift));

	if (needed >= fs->lfs_avail) {
#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_fits: no fit: fsb = %ld, uinodes = %ld, "
		      "needed = %ld, avail = %ld\n",
		      (long)fsb, (long)fs->lfs_uinodes, (long)needed,
		      (long)fs->lfs_avail));
#endif
		return 0;
	}
	return 1;
}

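/*
 * Sleep until enough space is available for fsb blocks, waking the
 * cleaner as needed.  Writes from the cleaner itself (SEGM_CLEAN) and
 * forced checkpoints (SEGM_FORCE_CKP) are pushed through regardless,
 * since making them wait on the cleaner could deadlock it.
 */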
int
lfs_availwait(struct lfs *fs, int fsb)
{
	int error;
	CLEANERINFO *cip;
	struct buf *cbp;

	ASSERT_NO_SEGLOCK(fs);
	/* Push cleaner blocks through regardless */
	mutex_enter(&lfs_lock);
	if (LFS_SEGLOCK_HELD(fs) &&
	    fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
		mutex_exit(&lfs_lock);
		return 0;
	}
	mutex_exit(&lfs_lock);

	while (!lfs_fits(fs, fsb)) {
		/*
		 * Out of space, need cleaner to run.
		 * Update the cleaner info, then wake it up.
		 * Note the cleanerinfo block is on the ifile
		 * so it CANT_WAIT.
		 */
		LFS_CLEANERINFO(cip, fs, cbp);
		LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);

#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_availwait: out of available space, "
		      "waiting on cleaner\n"));
#endif

		lfs_wakeup_cleaner(fs);
#ifdef DIAGNOSTIC
		if (LFS_SEGLOCK_HELD(fs))
			panic("lfs_availwait: deadlock");
#endif
		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "cleaner", 0);
		if (error)
			return (error);
	}
	return 0;
}

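/*
 * Delayed-write a buffer the LFS way: mark the inode dirty
 * (IN_MODIFIED, or IN_CLEANING for BW_CLEAN cleaner buffers), charge
 * the block against lfs_avail, and pin the buffer on the LOCKED queue
 * via LFS_LOCK_BUF() until the segment writer picks it up.  On a
 * read-only or "already unmounted" file system the buffer is simply
 * discarded instead.
 */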
int
lfs_bwrite_ext(struct buf *bp, int flags)
{
	struct lfs *fs;
	struct inode *ip;
	struct vnode *vp;
	int fsb;

	vp = bp->b_vp;
	fs = VFSTOUFS(vp->v_mount)->um_lfs;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(flags & BW_CLEAN || !LFS_IS_MALLOC_BUF(bp));
	KASSERT(((bp->b_oflags | bp->b_flags) & (BO_DELWRI|B_LOCKED))
	    != BO_DELWRI);

	/*
	 * Don't write *any* blocks if we're mounted read-only, or
	 * if we are "already unmounted".
	 *
	 * In particular the cleaner can't write blocks either.
	 */
	if (fs->lfs_ronly || (fs->lfs_pflags & LFS_PF_CLEAN)) {
		bp->b_oflags &= ~BO_DELWRI;
		bp->b_flags |= B_READ; /* XXX is this right? --ks */
		bp->b_error = 0;
		mutex_enter(&bufcache_lock);
		LFS_UNLOCK_BUF(bp);
		if (LFS_IS_MALLOC_BUF(bp))
			bp->b_cflags &= ~BC_BUSY;
		else
			brelsel(bp, 0);
		mutex_exit(&bufcache_lock);
		return (fs->lfs_ronly ? EROFS : 0);
	}

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if ((bp->b_flags & B_LOCKED) == 0) {
		fsb = numfrags(fs, bp->b_bcount);

		ip = VTOI(vp);
		mutex_enter(&lfs_lock);
		if (flags & BW_CLEAN) {
			LFS_SET_UINO(ip, IN_CLEANING);
		} else {
			LFS_SET_UINO(ip, IN_MODIFIED);
		}
		mutex_exit(&lfs_lock);
		fs->lfs_avail -= fsb;

		mutex_enter(&bufcache_lock);
		mutex_enter(vp->v_interlock);
		bp->b_oflags = (bp->b_oflags | BO_DELWRI) & ~BO_DONE;
		LFS_LOCK_BUF(bp);
		bp->b_flags &= ~B_READ;
		bp->b_error = 0;
		reassignbuf(bp, bp->b_vp);
		mutex_exit(vp->v_interlock);
	} else {
		mutex_enter(&bufcache_lock);
	}

	if (bp->b_iodone != NULL)
		bp->b_cflags &= ~BC_BUSY;
	else
		brelsel(bp, 0);
	mutex_exit(&bufcache_lock);

	return (0);
}

/*
 * Called and returns with the lfs_lock held.
 */
void
lfs_flush_fs(struct lfs *fs, int flags)
{
	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));
	if (fs->lfs_ronly)
		return;

	if (lfs_dostats)
		++lfs_stats.flush_invoked;

	fs->lfs_pdflush = 0;
	mutex_exit(&lfs_lock);
	lfs_writer_enter(fs, "fldirop");
	lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
	lfs_writer_leave(fs);
	mutex_enter(&lfs_lock);
	fs->lfs_favail = 0; /* XXX */
}

/*
 * This routine initiates segment writes when LFS is consuming too many
 * resources.  Ideally the pageout daemon would be able to direct LFS
 * more subtly.
 * XXX We have one static count of locked buffers;
 * XXX need to think more about the multiple filesystem case.
 *
 * Called and returns with lfs_lock held.
 * If fs != NULL, the caller must not hold the segment lock for fs
 * (asserted below).
 */
void
lfs_flush(struct lfs *fs, int flags, int only_onefs)
{
	extern u_int64_t locked_fakequeue_count;
	struct mount *mp, *nmp;
	struct lfs *tfs;

	KASSERT(mutex_owned(&lfs_lock));
	KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));

	if (lfs_dostats)
		++lfs_stats.write_exceeded;
	/* XXX should we include SEGM_CKP here? */
	if (lfs_writing && !(flags & SEGM_SYNC)) {
		DLOG((DLOG_FLUSH, "lfs_flush: not flushing because another flush is active\n"));
		return;
	}
	while (lfs_writing)
		cv_wait(&lfs_writing_cv, &lfs_lock);
	lfs_writing = 1;

	mutex_exit(&lfs_lock);

	if (only_onefs) {
		KASSERT(fs != NULL);
		if (vfs_busy(fs->lfs_ivnode->v_mount, NULL))
			goto errout;
		mutex_enter(&lfs_lock);
		lfs_flush_fs(fs, flags);
		mutex_exit(&lfs_lock);
		vfs_unbusy(fs->lfs_ivnode->v_mount, false, NULL);
	} else {
		locked_fakequeue_count = 0;
		mutex_enter(&mountlist_lock);
		for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
		     mp = nmp) {
			if (vfs_busy(mp, &nmp)) {
				DLOG((DLOG_FLUSH, "lfs_flush: fs vfs_busy\n"));
				continue;
			}
			if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS,
			    sizeof(mp->mnt_stat.f_fstypename)) == 0) {
				tfs = VFSTOUFS(mp)->um_lfs;
				mutex_enter(&lfs_lock);
				lfs_flush_fs(tfs, flags);
				mutex_exit(&lfs_lock);
			}
			vfs_unbusy(mp, false, &nmp);
		}
		mutex_exit(&mountlist_lock);
	}
	LFS_DEBUG_COUNTLOCKED("flush");
	wakeup(&lfs_subsys_pages);

errout:
	mutex_enter(&lfs_lock);
	KASSERT(lfs_writing);
	lfs_writing = 0;
	wakeup(&lfs_writing);
}

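/*
 * Estimates of the number of dirty-inode blocks (INOCOUNT) and bytes
 * (INOBYTES) that writing this file system's dirty inodes would add
 * to the locked queue.
 */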
#define INOCOUNT(fs) howmany((fs)->lfs_uinodes, INOPB(fs))
#define INOBYTES(fs) ((fs)->lfs_uinodes * sizeof (struct ufs1_dinode))

/*
 * Make sure that we don't have too many locked buffers.
 * Flush buffers if needed.
 */
int
lfs_check(struct vnode *vp, daddr_t blkno, int flags)
{
	int error;
	struct lfs *fs;
	struct inode *ip;
	extern pid_t lfs_writer_daemon;

	error = 0;
	ip = VTOI(vp);

	/* If out of buffers, wait on writer */
	/* XXX KS - if it's the Ifile, we're probably the cleaner! */
	if (ip->i_number == LFS_IFILE_INUM)
		return 0;
	/* If we're being called from inside a dirop, don't sleep */
	if (ip->i_flag & IN_ADIROP)
		return 0;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);

	/*
	 * If we would flush below, but dirops are active, sleep.
	 * Note that a dirop cannot ever reach this code!
	 */
	mutex_enter(&lfs_lock);
	while (fs->lfs_dirops > 0 &&
	       (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		lfs_subsys_pages > LFS_MAX_PAGES ||
		fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
	{
		++fs->lfs_diropwait;
		mtsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
		    &lfs_lock);
		--fs->lfs_diropwait;
	}

#ifdef DEBUG
	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
		DLOG((DLOG_FLUSH, "lfs_check: lqc = %d, max %d\n",
		      locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS));
	if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
		DLOG((DLOG_FLUSH, "lfs_check: lqb = %ld, max %ld\n",
		      locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
	if (lfs_subsys_pages > LFS_MAX_PAGES)
		DLOG((DLOG_FLUSH, "lfs_check: lssp = %d, max %d\n",
		      lfs_subsys_pages, LFS_MAX_PAGES));
	if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
		DLOG((DLOG_FLUSH, "lfs_check: fssp = %d, trip at %d\n",
		      fs->lfs_pages, lfs_fs_pagetrip));
	if (lfs_dirvcount > LFS_MAX_DIROP)
		DLOG((DLOG_FLUSH, "lfs_check: ldvc = %d, max %d\n",
		      lfs_dirvcount, LFS_MAX_DIROP));
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs))
		DLOG((DLOG_FLUSH, "lfs_check: lfdvc = %d, max %d\n",
		      fs->lfs_dirvcount, LFS_MAX_FSDIROP(fs)));
	if (fs->lfs_diropwait > 0)
		DLOG((DLOG_FLUSH, "lfs_check: ldvw = %d\n",
		      fs->lfs_diropwait));
#endif

	/* If there are too many pending dirops, we have to flush them. */
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		mutex_exit(&lfs_lock);
		lfs_flush_dirops(fs);
		mutex_enter(&lfs_lock);
	} else if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		   locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		   lfs_subsys_pages > LFS_MAX_PAGES ||
		   fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		   lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		lfs_flush(fs, flags, 0);
	} else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
		/*
		 * If we didn't flush the whole thing, some filesystems
		 * still might want to be flushed.
		 */
		++fs->lfs_pdflush;
		wakeup(&lfs_writer_daemon);
	}

	while (locked_queue_count + INOCOUNT(fs) >= LFS_WAIT_BUFS ||
	       locked_queue_bytes + INOBYTES(fs) >= LFS_WAIT_BYTES ||
	       lfs_subsys_pages > LFS_WAIT_PAGES ||
	       fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	       lfs_dirvcount > LFS_MAX_DIROP) {

		if (lfs_dostats)
			++lfs_stats.wait_exceeded;
		DLOG((DLOG_AVAIL, "lfs_check: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		++locked_queue_waiters;
		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		--locked_queue_waiters;
		if (error != EWOULDBLOCK)
			break;

		/*
		 * lfs_flush might not flush all the buffers, if some of the
		 * inodes were locked or if most of them were Ifile blocks
		 * and we weren't asked to checkpoint.  Try flushing again
		 * to keep us from blocking indefinitely.
		 */
		if (locked_queue_count + INOCOUNT(fs) >= LFS_MAX_BUFS ||
		    locked_queue_bytes + INOBYTES(fs) >= LFS_MAX_BYTES) {
			lfs_flush(fs, flags | SEGM_CKP, 0);
		}
	}
	mutex_exit(&lfs_lock);
	return (error);
}

/*
 * Allocate a new buffer header.
 */
struct buf *
lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
{
	struct buf *bp;
	size_t nbytes;

	ASSERT_MAYBE_SEGLOCK(fs);
	nbytes = roundup(size, fsbtob(fs, 1));

	bp = getiobuf(NULL, true);
	if (nbytes) {
		bp->b_data = lfs_malloc(fs, nbytes, type);
		/* memset(bp->b_data, 0, nbytes); */
	}
#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vp is NULL in lfs_newbuf");
	if (bp == NULL)
		panic("bp is NULL after malloc in lfs_newbuf");
#endif

	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_iodone = lfs_callback;
	bp->b_cflags = BC_BUSY | BC_NOCACHE;
	bp->b_private = fs;

	mutex_enter(&bufcache_lock);
	mutex_enter(vp->v_interlock);
	bgetvp(vp, bp);
	mutex_exit(vp->v_interlock);
	mutex_exit(&bufcache_lock);

	return (bp);
}

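/*
 * Release a buffer allocated by lfs_newbuf(): detach it from its
 * vnode, free the data area unless BC_INVAL marks it as a "fake"
 * buffer whose storage belongs to someone else, and free the header.
 */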
void
lfs_freebuf(struct lfs *fs, struct buf *bp)
{
	struct vnode *vp;

	if ((vp = bp->b_vp) != NULL) {
		mutex_enter(&bufcache_lock);
		mutex_enter(vp->v_interlock);
		brelvp(bp);
		mutex_exit(vp->v_interlock);
		mutex_exit(&bufcache_lock);
	}
	if (!(bp->b_cflags & BC_INVAL)) { /* BC_INVAL indicates a "fake" buffer */
		lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
		bp->b_data = NULL;
	}
	putiobuf(bp);
}

/*
 * Count buffers on the "locked" queue, and compare it to a pro-forma count.
 * Don't count malloced buffers, since they don't detract from the total.
 */
void
lfs_countlocked(int *count, long *bytes, const char *msg)
{
	struct buf *bp;
	int n = 0;
	long int size = 0L;

	mutex_enter(&bufcache_lock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist) {
		KASSERT(bp->b_iodone == NULL);
		n++;
		size += bp->b_bufsize;
#ifdef DIAGNOSTIC
		if (n > nbuf)
			panic("lfs_countlocked: this can't happen: more"
			      " buffers locked than exist");
#endif
	}
	/*
	 * Theoretically this function never really does anything.
	 * Give a warning if we have to fix the accounting.
	 */
	if (n != *count) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted buf count"
		      " from %d to %d\n", msg, *count, n));
	}
	if (size != *bytes) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted byte count"
		      " from %ld to %ld\n", msg, *bytes, size));
	}
	*count = n;
	*bytes = size;
	mutex_exit(&bufcache_lock);
	return;
}

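/*
 * Compute the page thresholds at which LFS should wait
 * (lfs_wait_pages) or begin flushing (lfs_max_pages), as a fraction
 * of systemwide pageable memory (active + inactive + free pages),
 * via the LFS_WAIT_RESOURCE and LFS_MAX_RESOURCE macros.
 */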
int
lfs_wait_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_WAIT_RESOURCE(active + inactive + uvmexp.free, 1);
}

int
lfs_max_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_MAX_RESOURCE(active + inactive + uvmexp.free, 1);
}