/*	$NetBSD: lfs_bio.c,v 1.81.2.4 2006/05/20 22:42:50 riz Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.81.2.4 2006/05/20 22:42:50 riz Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>

/* Macros to clear/set/test flags. */
# define	SET(t, f)	(t) |= (f)
# define	CLR(t, f)	(t) &= ~(f)
# define	ISSET(t, f)	((t) & (f))
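
/*
 * Illustrative sketch (not from the original source): the macros above
 * work on any integral flags word, for example a buffer's b_flags.
 */
#if 0
	SET(bp->b_flags, B_BUSY);		/* "bp" is hypothetical */
	if (ISSET(bp->b_flags, B_BUSY))
		CLR(bp->b_flags, B_BUSY);
#endif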

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 *
 * protected by lfs_subsys_lock.
 */
int	locked_queue_count   = 0;	/* Count of locked-down buffers. */
long	locked_queue_bytes   = 0L;	/* Total size of locked buffers. */
int	lfs_subsys_pages     = 0;	/* Total number of LFS-written pages */
int	lfs_fs_pagetrip	     = 0;	/* # of pages to trip per-fs write */
int	lfs_writing	     = 0;	/* Set if already kicked off a writer
					   because of buffer space */
/* Lock for the above */
struct simplelock lfs_subsys_lock = SIMPLELOCK_INITIALIZER;

extern int lfs_dostats;

/*
 * reserved number/bytes of locked buffers
 */
int locked_queue_rcount = 0;
long locked_queue_rbytes = 0L;

int lfs_fits_buf(struct lfs *, int, int);
int lfs_reservebuf(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int, int);
int lfs_reserveavail(struct lfs *, struct vnode *vp, struct vnode *vp2, int);

int
lfs_fits_buf(struct lfs *fs, int n, int bytes)
{
	int count_fit, bytes_fit;

	ASSERT_NO_SEGLOCK(fs);
	LOCK_ASSERT(simple_lock_held(&lfs_subsys_lock));

	count_fit =
	    (locked_queue_count + locked_queue_rcount + n < LFS_WAIT_BUFS);
	bytes_fit =
	    (locked_queue_bytes + locked_queue_rbytes + bytes < LFS_WAIT_BYTES);

#ifdef DEBUG
	if (!count_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit count: %d + %d + %d >= %d\n",
		      locked_queue_count, locked_queue_rcount,
		      n, LFS_WAIT_BUFS));
	}
	if (!bytes_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit bytes: %ld + %ld + %d >= %ld\n",
		      locked_queue_bytes, locked_queue_rbytes,
		      bytes, LFS_WAIT_BYTES));
	}
#endif /* DEBUG */

	return (count_fit && bytes_fit);
}

/* ARGSUSED */
int
lfs_reservebuf(struct lfs *fs, struct vnode *vp, struct vnode *vp2,
    int n, int bytes)
{
	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	simple_lock(&lfs_subsys_lock);
	while (n > 0 && !lfs_fits_buf(fs, n, bytes)) {
		int error;

		lfs_flush(fs, 0, 0);

		error = ltsleep(&locked_queue_count, PCATCH | PUSER,
		    "lfsresbuf", hz * LFS_BUFWAIT, &lfs_subsys_lock);
		if (error && error != EWOULDBLOCK) {
			simple_unlock(&lfs_subsys_lock);
			return error;
		}
	}

	locked_queue_rcount += n;
	locked_queue_rbytes += bytes;

	simple_unlock(&lfs_subsys_lock);

	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	return 0;
}
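
/*
 * Illustrative sketch (inferred from the function above, not from the
 * original source): because the wait loop only runs when n > 0, passing
 * negated values releases a reservation without sleeping.
 */
#if 0
	error = lfs_reservebuf(fs, vp, NULL, n, nbytes);	/* may sleep */
	if (error == 0) {
		/* ... dirty up to n buffers / nbytes bytes ... */
		lfs_reservebuf(fs, vp, NULL, -n, -nbytes);	/* never sleeps */
	}
#endif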

/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored.  If there is not enough space, give
 * up the vnode lock temporarily and wait for the space to become available.
 *
 * Called with vp locked.  (Note however that if fsb < 0, vp is ignored.)
 *
 * XXX YAMT - it isn't safe to unlock vp here
 * because the node might be modified while we sleep.
 * (eg. cached states like i_offset might be stale,
 *  the vnode might be truncated, etc.)
 * maybe we should have a way to restart the vnodeop (EVOPRESTART?)
 * or rearrange the vnodeop interface to leave vnode locking to file system
 * specific code so that each file system can have its own vnode locking and
 * vnode re-using strategies.
 */
int
lfs_reserveavail(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	CLEANERINFO *cip;
	struct buf *bp;
	int error, slept;

	ASSERT_MAYBE_SEGLOCK(fs);
	slept = 0;
	simple_lock(&fs->lfs_interlock);
	while (fsb > 0 && !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
		simple_unlock(&fs->lfs_interlock);
#if 0
		/*
		 * XXX ideally, we should unlock vnodes here
		 * because we might sleep for a very long time.
		 */
		VOP_UNLOCK(vp, 0);
		if (vp2 != NULL) {
			VOP_UNLOCK(vp2, 0);
		}
#else
		/*
		 * XXX since we'll sleep waiting for the cleaner while holding
		 * the vnode lock, a deadlock will occur if the cleaner tries
		 * to lock the vnode.
		 * (eg. lfs_markv -> lfs_fastvget -> getnewvnode -> vclean)
		 */
#endif

		if (!slept) {
			DLOG((DLOG_AVAIL, "lfs_reserve: waiting for %ld (bfree = %d,"
			      " est_bfree = %d)\n",
			      fsb + fs->lfs_ravail + fs->lfs_favail,
			      fs->lfs_bfree, LFS_EST_BFREE(fs)));
		}
		++slept;

		/* Wake up the cleaner */
		LFS_CLEANERINFO(cip, fs, bp);
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
		wakeup(&lfs_allclean_wakeup);
		wakeup(&fs->lfs_nextseg);

		simple_lock(&fs->lfs_interlock);
		/* Cleaner might have run while we were reading, check again */
		if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
			break;

		error = ltsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
		    0, &fs->lfs_interlock);
#if 0
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
		vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
#endif
		if (error) {
			/* Drop the interlock before returning the error. */
			simple_unlock(&fs->lfs_interlock);
			return error;
		}
	}
#ifdef DEBUG
	if (slept) {
		DLOG((DLOG_AVAIL, "lfs_reserve: woke up\n"));
	}
#endif
	fs->lfs_ravail += fsb;
	simple_unlock(&fs->lfs_interlock);

	return 0;
}
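
/*
 * Illustrative sketch (inferred from lfs_reserve below, not from the
 * original source): reservations are symmetric, so the error path in
 * lfs_reserve undoes one by negating fsb.
 */
#if 0
	error = lfs_reserveavail(fs, vp, NULL, fsb);	/* lfs_ravail += fsb */
	/* ... */
	lfs_reserveavail(fs, vp, NULL, -fsb);		/* lfs_ravail -= fsb */
#endif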

#ifdef DIAGNOSTIC
int lfs_rescount;
int lfs_rescountdirop;
#endif

int
lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	int error;
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);
	if (vp2) {
		/* Make sure we're not in the process of reclaiming vp2 */
		simple_lock(&fs->lfs_interlock);
		while (fs->lfs_flags & LFS_UNDIROP) {
			ltsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
			    &fs->lfs_interlock);
		}
		simple_unlock(&fs->lfs_interlock);
	}

	KASSERT(fsb < 0 || VOP_ISLOCKED(vp));
	KASSERT(vp2 == NULL || fsb < 0 || VOP_ISLOCKED(vp2));
	KASSERT(vp2 == NULL || !(VTOI(vp2)->i_flag & IN_ADIROP));
	KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);

	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
#ifdef DIAGNOSTIC
	if (cantwait) {
		if (fsb > 0)
			lfs_rescountdirop++;
		else if (fsb < 0)
			lfs_rescountdirop--;
		if (lfs_rescountdirop < 0)
			panic("lfs_rescountdirop");
	} else {
		if (fsb > 0)
			lfs_rescount++;
		else if (fsb < 0)
			lfs_rescount--;
		if (lfs_rescount < 0)
			panic("lfs_rescount");
	}
#endif
	if (cantwait)
		return 0;

	/*
	 * XXX
	 * vref vnodes here so that cleaner doesn't try to reuse them.
	 * (see XXX comment in lfs_reserveavail)
	 */
	lfs_vref(vp);
	if (vp2 != NULL) {
		lfs_vref(vp2);
	}

	error = lfs_reserveavail(fs, vp, vp2, fsb);
	if (error)
		goto done;

	/*
	 * XXX just a guess. should be more precise.
	 */
	error = lfs_reservebuf(fs, vp, vp2,
	    fragstoblks(fs, fsb), fsbtob(fs, fsb));
	if (error)
		lfs_reserveavail(fs, vp, vp2, -fsb);

done:
	lfs_vunref(vp);
	if (vp2 != NULL) {
		lfs_vunref(vp2);
	}

	return error;
}
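
/*
 * Illustrative sketch (hypothetical caller, not from this file): a vnode
 * operation typically brackets a space-sensitive update with a positive
 * reservation and a matching negative release.
 */
#if 0
	error = lfs_reserve(fs, vp, NULL, btofsb(fs, NIADDR * fs->lfs_bsize));
	if (error)
		return error;
	/* ... allocate and dirty blocks under the vnode lock ... */
	lfs_reserve(fs, vp, NULL, -btofsb(fs, NIADDR * fs->lfs_bsize));
#endif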

int
lfs_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;

#ifdef DIAGNOSTIC
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly == 0 && (bp->b_flags & B_ASYNC)) {
		panic("bawrite LFS buffer");
	}
#endif /* DIAGNOSTIC */
	return lfs_bwrite_ext(bp, 0);
}

/*
 * Determine if there is enough room currently available to write fsb
 * blocks.  We need enough blocks for the new blocks, the current
 * inode blocks (including potentially the ifile inode), a summary block,
 * and the segment usage table, plus an ifile block.
 */
int
lfs_fits(struct lfs *fs, int fsb)
{
	int needed;

	ASSERT_NO_SEGLOCK(fs);
	needed = fsb + btofsb(fs, fs->lfs_sumsize) +
	    ((howmany(fs->lfs_uinodes + 1, INOPB(fs)) + fs->lfs_segtabsz +
	      1) << (fs->lfs_blktodb - fs->lfs_fsbtodb));

	if (needed >= fs->lfs_avail) {
#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_fits: no fit: fsb = %ld, uinodes = %ld, "
		      "needed = %ld, avail = %ld\n",
		      (long)fsb, (long)fs->lfs_uinodes, (long)needed,
		      (long)fs->lfs_avail));
#endif
		return 0;
	}
	return 1;
}
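
/*
 * Worked example of the "needed" computation above, with hypothetical
 * numbers: lfs_sumsize = 8192 on a filesystem with 1k fragments (so
 * btofsb gives 8), lfs_uinodes = 10, INOPB = 64, lfs_segtabsz = 2, and
 * lfs_blktodb - lfs_fsbtodb = 3 (8 frags per block):
 *
 *	needed = fsb + 8 + ((howmany(11, 64) + 2 + 1) << 3)
 *	       = fsb + 8 + (4 << 3) = fsb + 40
 *
 * so the request fits only if fsb + 40 < lfs_avail.
 */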

int
lfs_availwait(struct lfs *fs, int fsb)
{
	int error;
	CLEANERINFO *cip;
	struct buf *cbp;

	ASSERT_NO_SEGLOCK(fs);
	/* Push cleaner blocks through regardless */
	simple_lock(&fs->lfs_interlock);
	if (LFS_SEGLOCK_HELD(fs) &&
	    fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
		simple_unlock(&fs->lfs_interlock);
		return 0;
	}
	simple_unlock(&fs->lfs_interlock);

	while (!lfs_fits(fs, fsb)) {
		/*
		 * Out of space, need cleaner to run.
		 * Update the cleaner info, then wake it up.
		 * Note the cleanerinfo block is on the ifile
		 * so it CANT_WAIT.
		 */
		LFS_CLEANERINFO(cip, fs, cbp);
		LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);

#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_availwait: out of available space, "
		      "waiting on cleaner\n"));
#endif

		wakeup(&lfs_allclean_wakeup);
		wakeup(&fs->lfs_nextseg);
#ifdef DIAGNOSTIC
		if (LFS_SEGLOCK_HELD(fs))
			panic("lfs_availwait: deadlock");
#endif
		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "cleaner", 0);
		if (error)
			return (error);
	}
	return 0;
}
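
/*
 * Illustrative sketch (hypothetical caller): allocation paths would check
 * for space before committing a write, sleeping here until the cleaner
 * makes segments available.
 */
#if 0
	fsb = fragstofsb(fs, numfrags(fs, size));
	if ((error = lfs_availwait(fs, fsb)) != 0)
		return error;	/* interrupted while waiting on cleaner */
#endif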

int
lfs_bwrite_ext(struct buf *bp, int flags)
{
	struct lfs *fs;
	struct inode *ip;
	int fsb, s;

	fs = VFSTOUFS(bp->b_vp->v_mount)->um_lfs;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(bp->b_flags & B_BUSY);
	KASSERT(flags & BW_CLEAN || !LFS_IS_MALLOC_BUF(bp));
	KASSERT((bp->b_flags & (B_DELWRI|B_LOCKED)) != B_DELWRI);
	KASSERT((bp->b_flags & (B_DELWRI|B_LOCKED)) != B_LOCKED);

	/*
	 * Don't write *any* blocks if we're mounted read-only, or
	 * if we are "already unmounted".
	 *
	 * In particular the cleaner can't write blocks either.
	 */
	if (fs->lfs_ronly || (fs->lfs_pflags & LFS_PF_CLEAN)) {
		bp->b_flags &= ~(B_DELWRI | B_READ | B_ERROR);
		LFS_UNLOCK_BUF(bp);
		if (LFS_IS_MALLOC_BUF(bp))
			bp->b_flags &= ~B_BUSY;
		else
			brelse(bp);
		return (fs->lfs_ronly ? EROFS : 0);
	}

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if (!(bp->b_flags & B_LOCKED)) {
		fsb = fragstofsb(fs, numfrags(fs, bp->b_bcount));

		ip = VTOI(bp->b_vp);
		if (flags & BW_CLEAN) {
			LFS_SET_UINO(ip, IN_CLEANING);
		} else {
			LFS_SET_UINO(ip, IN_MODIFIED);
		}
		fs->lfs_avail -= fsb;
		bp->b_flags |= B_DELWRI;

		LFS_LOCK_BUF(bp);
		bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}

	if (bp->b_flags & B_CALL)
		bp->b_flags &= ~B_BUSY;
	else
		brelse(bp);

	return (0);
}
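
/*
 * Accounting sketch for the path above, with hypothetical numbers: a 4k
 * buffer on a filesystem with 1k fragments costs numfrags() = 4 frags,
 * converted by fragstofsb() to fsb units and charged against lfs_avail
 * the first time the buffer is locked down; rewriting the same buffer
 * while it is still B_LOCKED charges nothing further.
 */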

/*
 * Called and returns with the lfs_interlock held, but with the
 * lfs_subsys_lock not held.
 */
void
lfs_flush_fs(struct lfs *fs, int flags)
{
	ASSERT_NO_SEGLOCK(fs);
	LOCK_ASSERT(simple_lock_held(&fs->lfs_interlock));
	LOCK_ASSERT(!simple_lock_held(&lfs_subsys_lock));
	if (fs->lfs_ronly)
		return;

	simple_lock(&lfs_subsys_lock);
	if (lfs_dostats)
		++lfs_stats.flush_invoked;
	simple_unlock(&lfs_subsys_lock);

	simple_unlock(&fs->lfs_interlock);
	lfs_writer_enter(fs, "fldirop");
	lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
	lfs_writer_leave(fs);
	simple_lock(&fs->lfs_interlock);
	fs->lfs_favail = 0; /* XXX */
}

/*
 * This routine initiates segment writes when LFS is consuming too many
 * resources.  Ideally the pageout daemon would be able to direct LFS
 * more subtly.
 * XXX We have one static count of locked buffers;
 * XXX need to think more about the multiple filesystem case.
 *
 * Called and returns with lfs_subsys_lock held.
 * If fs != NULL, we hold the segment lock for fs.
 */
void
lfs_flush(struct lfs *fs, int flags, int only_onefs)
{
	extern u_int64_t locked_fakequeue_count;
	struct mount *mp, *nmp;
	struct lfs *tfs;

	LOCK_ASSERT(simple_lock_held(&lfs_subsys_lock));
	KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));

	if (lfs_dostats)
		++lfs_stats.write_exceeded;
	/* XXX should we include SEGM_CKP here? */
	if (lfs_writing && !(flags & SEGM_SYNC)) {
		DLOG((DLOG_FLUSH, "lfs_flush: not flushing because another flush is active\n"));
		return;
	}
	while (lfs_writing)
		ltsleep(&lfs_writing, PRIBIO + 1, "lfsflush", 0,
		    &lfs_subsys_lock);
	lfs_writing = 1;

	simple_unlock(&lfs_subsys_lock);

	if (only_onefs) {
		KASSERT(fs != NULL);
		if (vfs_busy(fs->lfs_ivnode->v_mount, LK_NOWAIT,
			     &mountlist_slock))
			goto errout;
		simple_lock(&fs->lfs_interlock);
		lfs_flush_fs(fs, flags);
		simple_unlock(&fs->lfs_interlock);
		vfs_unbusy(fs->lfs_ivnode->v_mount);
	} else {
		locked_fakequeue_count = 0;
		simple_lock(&mountlist_slock);
		for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
		     mp = nmp) {
			if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
				DLOG((DLOG_FLUSH, "lfs_flush: fs vfs_busy\n"));
				nmp = CIRCLEQ_NEXT(mp, mnt_list);
				continue;
			}
			if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS,
			    MFSNAMELEN) == 0) {
				tfs = VFSTOUFS(mp)->um_lfs;
				simple_lock(&tfs->lfs_interlock);
				lfs_flush_fs(tfs, flags);
				simple_unlock(&tfs->lfs_interlock);
			}
			simple_lock(&mountlist_slock);
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		simple_unlock(&mountlist_slock);
	}
	LFS_DEBUG_COUNTLOCKED("flush");
	wakeup(&lfs_subsys_pages);

errout:
	simple_lock(&lfs_subsys_lock);
	KASSERT(lfs_writing);
	lfs_writing = 0;
	wakeup(&lfs_writing);
}

#define INOCOUNT(fs) howmany((fs)->lfs_uinodes, INOPB(fs))
#define INOBYTES(fs) ((fs)->lfs_uinodes * sizeof (struct ufs1_dinode))
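
/*
 * Worked example, with hypothetical numbers: for lfs_uinodes = 100 dirty
 * inodes and INOPB = 64 inodes per block, INOCOUNT = howmany(100, 64) = 2
 * pending inode blocks, and INOBYTES = 100 * sizeof(struct ufs1_dinode) =
 * 100 * 128 = 12800 bytes.
 */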

/*
 * Make sure that we don't have too many locked buffers.
 * Flush buffers if needed.
 */
int
lfs_check(struct vnode *vp, daddr_t blkno, int flags)
{
	int error;
	struct lfs *fs;
	struct inode *ip;
	extern pid_t lfs_writer_daemon;

	error = 0;
	ip = VTOI(vp);

	/* If out of buffers, wait on writer */
	/* XXX KS - if it's the Ifile, we're probably the cleaner! */
	if (ip->i_number == LFS_IFILE_INUM)
		return 0;
	/* If we're being called from inside a dirop, don't sleep */
	if (ip->i_flag & IN_ADIROP)
		return 0;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);
	LOCK_ASSERT(!simple_lock_held(&fs->lfs_interlock));

	/*
	 * If we would flush below, but dirops are active, sleep.
	 * Note that a dirop cannot ever reach this code!
	 */
	simple_lock(&fs->lfs_interlock);
	simple_lock(&lfs_subsys_lock);
	while (fs->lfs_dirops > 0 &&
	       (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		lfs_subsys_pages > LFS_MAX_PAGES ||
		fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
	{
		simple_unlock(&lfs_subsys_lock);
		++fs->lfs_diropwait;
		ltsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
		    &fs->lfs_interlock);
		--fs->lfs_diropwait;
		simple_lock(&lfs_subsys_lock);
	}

#ifdef DEBUG
	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
		DLOG((DLOG_FLUSH, "lfs_check: lqc = %d, max %d\n",
		      locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS));
	if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
		DLOG((DLOG_FLUSH, "lfs_check: lqb = %ld, max %ld\n",
		      locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
	if (lfs_subsys_pages > LFS_MAX_PAGES)
		DLOG((DLOG_FLUSH, "lfs_check: lssp = %d, max %d\n",
		      lfs_subsys_pages, LFS_MAX_PAGES));
	if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
		DLOG((DLOG_FLUSH, "lfs_check: fssp = %d, trip at %d\n",
		      fs->lfs_pages, lfs_fs_pagetrip));
	if (lfs_dirvcount > LFS_MAX_DIROP)
		DLOG((DLOG_FLUSH, "lfs_check: ldvc = %d, max %d\n",
		      lfs_dirvcount, LFS_MAX_DIROP));
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs))
		DLOG((DLOG_FLUSH, "lfs_check: lfdvc = %d, max %d\n",
		      fs->lfs_dirvcount, LFS_MAX_FSDIROP(fs)));
	if (fs->lfs_diropwait > 0)
		DLOG((DLOG_FLUSH, "lfs_check: ldvw = %d\n",
		      fs->lfs_diropwait));
#endif

	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
	    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
	    lfs_subsys_pages > LFS_MAX_PAGES ||
	    fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		simple_unlock(&fs->lfs_interlock);
		lfs_flush(fs, flags, 0);
	} else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
		/*
		 * If we didn't flush the whole thing, some filesystems
		 * still might want to be flushed.
		 */
		++fs->lfs_pdflush;
		wakeup(&lfs_writer_daemon);
		simple_unlock(&fs->lfs_interlock);
	} else
		simple_unlock(&fs->lfs_interlock);

	while (locked_queue_count + INOCOUNT(fs) > LFS_WAIT_BUFS ||
	       locked_queue_bytes + INOBYTES(fs) > LFS_WAIT_BYTES ||
	       lfs_subsys_pages > LFS_WAIT_PAGES ||
	       fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	       lfs_dirvcount > LFS_MAX_DIROP) {

		if (lfs_dostats)
			++lfs_stats.wait_exceeded;
		DLOG((DLOG_AVAIL, "lfs_check: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		error = ltsleep(&locked_queue_count, PCATCH | PUSER,
		    "buffers", hz * LFS_BUFWAIT, &lfs_subsys_lock);
		if (error != EWOULDBLOCK)
			break;

		/*
		 * lfs_flush might not flush all the buffers, if some of the
		 * inodes were locked or if most of them were Ifile blocks
		 * and we weren't asked to checkpoint.  Try flushing again
		 * to keep us from blocking indefinitely.
		 */
		if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES) {
			lfs_flush(fs, flags | SEGM_CKP, 0);
		}
	}
	simple_unlock(&lfs_subsys_lock);
	return (error);
}
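
/*
 * Illustrative sketch (hypothetical caller): write paths would call
 * lfs_check() before dirtying each block, throttling producers when the
 * locked-buffer counts cross the LFS_MAX_* / LFS_WAIT_* watermarks.
 */
#if 0
	if ((error = lfs_check(vp, lbn, 0)) != 0)	/* "lbn" is hypothetical */
		return error;
#endif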

/*
 * Allocate a new buffer header.
 */
struct buf *
lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
{
	struct buf *bp;
	size_t nbytes;
	int s;

	ASSERT_MAYBE_SEGLOCK(fs);
	nbytes = roundup(size, fsbtob(fs, 1));

	s = splbio();
	bp = pool_get(&bufpool, PR_WAITOK);
	splx(s);
	memset(bp, 0, sizeof(struct buf));
	BUF_INIT(bp);
	if (nbytes) {
		bp->b_data = lfs_malloc(fs, nbytes, type);
		/* memset(bp->b_data, 0, nbytes); */
	}
#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vp is NULL in lfs_newbuf");
	if (bp == NULL)
		panic("bp is NULL after malloc in lfs_newbuf");
#endif
	s = splbio();
	bgetvp(vp, bp);
	splx(s);

	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_iodone = lfs_callback;
	bp->b_flags |= B_BUSY | B_CALL | B_NOCACHE;
	bp->b_private = fs;

	return (bp);
}

void
lfs_freebuf(struct lfs *fs, struct buf *bp)
{
	int s;

	s = splbio();
	if (bp->b_vp)
		brelvp(bp);
	if (!(bp->b_flags & B_INVAL)) { /* B_INVAL indicates a "fake" buffer */
		lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
		bp->b_data = NULL;
	}
	pool_put(&bufpool, bp);
	splx(s);
}
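
/*
 * Illustrative pairing (hypothetical values): buffers from lfs_newbuf()
 * carry B_CALL and are normally completed through lfs_callback(); on an
 * error path the caller frees them explicitly.
 */
#if 0
	bp = lfs_newbuf(fs, fs->lfs_ivnode, daddr, fs->lfs_sumsize,
	    LFS_NB_SUMMARY);
	/* ... fill bp->b_data; if the write cannot be scheduled: */
	lfs_freebuf(fs, bp);
#endif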

/*
 * Definitions for the buffer free lists.
 */
#define BQUEUES		4		/* number of free buffer queues */

#define BQ_LOCKED	0		/* super-blocks &c */
#define BQ_LRU		1		/* lru, useful buffers */
#define BQ_AGE		2		/* rubbish */
#define BQ_EMPTY	3		/* buffer headers with no memory */

extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
extern struct simplelock bqueue_slock;

/*
 * Count buffers on the "locked" queue, and compare it to a pro-forma count.
 * Don't count malloced buffers, since they don't detract from the total.
 */
void
lfs_countlocked(int *count, long *bytes, const char *msg)
{
	struct buf *bp;
	int n = 0;
	long int size = 0L;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist) {
		KASSERT(!(bp->b_flags & B_CALL));
		n++;
		size += bp->b_bufsize;
#ifdef DIAGNOSTIC
		if (n > nbuf)
			panic("lfs_countlocked: this can't happen: more"
			      " buffers locked than exist");
#endif
	}
	/*
	 * Theoretically this function never really does anything.
	 * Give a warning if we have to fix the accounting.
	 */
	if (n != *count)
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted buf count"
		      " from %d to %d\n", msg, *count, n));
	if (size != *bytes)
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted byte count"
		      " from %ld to %ld\n", msg, *bytes, size));
	*count = n;
	*bytes = size;
	simple_unlock(&bqueue_slock);
	splx(s);
	return;
}