/*	$NetBSD: lfs_bio.c,v 1.89.6.2 2006/03/13 09:07:43 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.89.6.2 2006/03/13 09:07:43 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 *
 * Protected by lfs_subsys_lock.
 */
int	locked_queue_count = 0;	/* Count of locked-down buffers. */
long	locked_queue_bytes = 0L;	/* Total size of locked buffers. */
int	lfs_subsys_pages = 0;	/* Total number of LFS-written pages */
int	lfs_fs_pagetrip = 0;	/* # of pages to trip per-fs write */
int	lfs_writing = 0;	/* Set if already kicked off a writer
				   because of buffer space */
/* Lock for the above */
struct simplelock lfs_subsys_lock = SIMPLELOCK_INITIALIZER;

extern int lfs_dostats;

/*
 * Reserved number/bytes of locked buffers
 */
int locked_queue_rcount = 0;
long locked_queue_rbytes = 0L;

int lfs_fits_buf(struct lfs *, int, int);
int lfs_reservebuf(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int, int);
int lfs_reserveavail(struct lfs *, struct vnode *vp, struct vnode *vp2, int);

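/*
 * Return nonzero if n more locked-down buffers of "bytes" total bytes,
 * counting both the live and the reserved totals, would still fit under
 * the LFS_WAIT_BUFS / LFS_WAIT_BYTES limits.
 * Called with lfs_subsys_lock held.
 */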
int
lfs_fits_buf(struct lfs *fs, int n, int bytes)
{
	int count_fit, bytes_fit;

	ASSERT_NO_SEGLOCK(fs);
	LOCK_ASSERT(simple_lock_held(&lfs_subsys_lock));

	count_fit =
	    (locked_queue_count + locked_queue_rcount + n < LFS_WAIT_BUFS);
	bytes_fit =
	    (locked_queue_bytes + locked_queue_rbytes + bytes < LFS_WAIT_BYTES);

#ifdef DEBUG
	if (!count_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit count: %d + %d + %d >= %d\n",
		      locked_queue_count, locked_queue_rcount,
		      n, LFS_WAIT_BUFS));
	}
	if (!bytes_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit bytes: %ld + %ld + %d >= %ld\n",
		      locked_queue_bytes, locked_queue_rbytes,
		      bytes, LFS_WAIT_BYTES));
	}
#endif /* DEBUG */

	return (count_fit && bytes_fit);
}

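/*
 * Reserve n buffer headers and "bytes" bytes of buffer space, flushing
 * and sleeping until the reservation fits under the wait limits.
 * A non-positive n releases or adjusts an earlier reservation without
 * sleeping.  The vnode arguments are unused (hence ARGSUSED).
 */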
/* ARGSUSED */
int
lfs_reservebuf(struct lfs *fs, struct vnode *vp, struct vnode *vp2,
    int n, int bytes)
{
	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	simple_lock(&lfs_subsys_lock);
	while (n > 0 && !lfs_fits_buf(fs, n, bytes)) {
		int error;

		lfs_flush(fs, 0, 0);

		error = ltsleep(&locked_queue_count, PCATCH | PUSER,
		    "lfsresbuf", hz * LFS_BUFWAIT, &lfs_subsys_lock);
		if (error && error != EWOULDBLOCK) {
			simple_unlock(&lfs_subsys_lock);
			return error;
		}
	}

	locked_queue_rcount += n;
	locked_queue_rbytes += bytes;

	simple_unlock(&lfs_subsys_lock);

	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	return 0;
}

/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored.  If there is not enough space, give
 * up the vnode lock temporarily and wait for the space to become available.
 *
 * Called with vp locked.  (Note however that if fsb < 0, vp is ignored.)
 *
 * XXX YAMT - it isn't safe to unlock vp here
 * because the node might be modified while we sleep.
 * (eg. cached states like i_offset might be stale,
 *  the vnode might be truncated, etc.)
 * maybe we should have a way to restart the vnodeop (EVOPRESTART?)
 * or rearrange the vnodeop interface to leave vnode locking to the
 * file-system-specific code, so that each file system can have its own
 * vnode locking and vnode re-using strategies.
 */
int
lfs_reserveavail(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	CLEANERINFO *cip;
	struct buf *bp;
	int error, slept;

	ASSERT_MAYBE_SEGLOCK(fs);
	slept = 0;
	simple_lock(&fs->lfs_interlock);
	while (fsb > 0 && !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
		simple_unlock(&fs->lfs_interlock);
#if 0
		/*
		 * XXX ideally, we should unlock vnodes here
		 * because we might sleep for a very long time.
		 */
		VOP_UNLOCK(vp, 0);
		if (vp2 != NULL) {
			VOP_UNLOCK(vp2, 0);
		}
#else
		/*
		 * XXX since we'll sleep for the cleaner while holding the
		 * vnode lock, a deadlock will occur if the cleaner tries
		 * to lock the vnode.
		 * (eg. lfs_markv -> lfs_fastvget -> getnewvnode -> vclean)
		 */
#endif

		if (!slept) {
			DLOG((DLOG_AVAIL, "lfs_reserve: waiting for %ld (bfree = %d,"
			      " est_bfree = %d)\n",
			      fsb + fs->lfs_ravail + fs->lfs_favail,
			      fs->lfs_bfree, LFS_EST_BFREE(fs)));
		}
		++slept;

		/* Wake up the cleaner */
		LFS_CLEANERINFO(cip, fs, bp);
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
		wakeup(&lfs_allclean_wakeup);
		wakeup(&fs->lfs_nextseg);

		simple_lock(&fs->lfs_interlock);
		/* Cleaner might have run while we were reading; check again */
		if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
			break;

		error = ltsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
		    0, &fs->lfs_interlock);
#if 0
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);	/* XXX use lockstatus */
		vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY);	/* XXX use lockstatus */
#endif
		if (error) {
			/* ltsleep() reacquired the interlock; drop it. */
			simple_unlock(&fs->lfs_interlock);
			return error;
		}
	}
#ifdef DEBUG
	if (slept) {
		DLOG((DLOG_AVAIL, "lfs_reserve: woke up\n"));
	}
#endif
	fs->lfs_ravail += fsb;
	simple_unlock(&fs->lfs_interlock);

	return 0;
}

#ifdef DIAGNOSTIC
int lfs_rescount;
int lfs_rescountdirop;
#endif

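/*
 * Reserve fsb file system blocks, and buffer space to go with them, on
 * behalf of an operation on vp (and optionally vp2); a negative fsb
 * releases an earlier reservation.  Callers in the middle of a dirop
 * (IN_ADIROP), or working on fs->lfs_unlockvp, must not sleep here and
 * have their reservations counted for diagnostics only.
 */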
int
lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	int error;
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);
	if (vp2) {
		/* Make sure we're not in the process of reclaiming vp2 */
		simple_lock(&fs->lfs_interlock);
		while (fs->lfs_flags & LFS_UNDIROP) {
			ltsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
			    &fs->lfs_interlock);
		}
		simple_unlock(&fs->lfs_interlock);
	}

	KASSERT(fsb < 0 || VOP_ISLOCKED(vp));
	KASSERT(vp2 == NULL || fsb < 0 || VOP_ISLOCKED(vp2));
	KASSERT(vp2 == NULL || !(VTOI(vp2)->i_flag & IN_ADIROP));
	KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);

	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
#ifdef DIAGNOSTIC
	if (cantwait) {
		if (fsb > 0)
			lfs_rescountdirop++;
		else if (fsb < 0)
			lfs_rescountdirop--;
		if (lfs_rescountdirop < 0)
			panic("lfs_rescountdirop");
	} else {
		if (fsb > 0)
			lfs_rescount++;
		else if (fsb < 0)
			lfs_rescount--;
		if (lfs_rescount < 0)
			panic("lfs_rescount");
	}
#endif
	if (cantwait)
		return 0;

	/*
	 * XXX
	 * vref vnodes here so that cleaner doesn't try to reuse them.
	 * (see XXX comment in lfs_reserveavail)
	 */
	lfs_vref(vp);
	if (vp2 != NULL) {
		lfs_vref(vp2);
	}

	error = lfs_reserveavail(fs, vp, vp2, fsb);
	if (error)
		goto done;

	/*
	 * XXX just a guess. should be more precise.
	 */
	error = lfs_reservebuf(fs, vp, vp2,
	    fragstoblks(fs, fsb), fsbtob(fs, fsb));
	if (error)
		lfs_reserveavail(fs, vp, vp2, -fsb);

done:
	lfs_vunref(vp);
	if (vp2 != NULL) {
		lfs_vunref(vp2);
	}

	return error;
}

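/*
 * vop_bwrite entry point.  All LFS writes go through the segment
 * writer, so an asynchronous write (bawrite) of an LFS buffer on a
 * read-write file system is a diagnostic panic; everything else is
 * handed to lfs_bwrite_ext.
 */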
int
lfs_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;

#ifdef DIAGNOSTIC
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly == 0 && (bp->b_flags & B_ASYNC)) {
		panic("bawrite LFS buffer");
	}
#endif /* DIAGNOSTIC */
	return lfs_bwrite_ext(bp, 0);
}

/*
 * Determine if there is enough room currently available to write fsb
 * blocks.  We need enough blocks for the new blocks, the current
 * inode blocks (including potentially the ifile inode), a summary block,
 * and the segment usage table, plus an ifile block.
 */
int
lfs_fits(struct lfs *fs, int fsb)
{
	int needed;

	ASSERT_NO_SEGLOCK(fs);
	needed = fsb + btofsb(fs, fs->lfs_sumsize) +
	    ((howmany(fs->lfs_uinodes + 1, INOPB(fs)) + fs->lfs_segtabsz +
	      1) << (fs->lfs_blktodb - fs->lfs_fsbtodb));

	if (needed >= fs->lfs_avail) {
#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_fits: no fit: fsb = %ld, uinodes = %ld, "
		      "needed = %ld, avail = %ld\n",
		      (long)fsb, (long)fs->lfs_uinodes, (long)needed,
		      (long)fs->lfs_avail));
#endif
		return 0;
	}
	return 1;
}

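/*
 * Sleep until fsb blocks fit (per lfs_fits), waking the cleaner as
 * necessary.  Writes from the cleaner or for a forced checkpoint are
 * pushed through regardless.  Must not be called with the segment lock
 * held, since the space could then never be freed while we slept.
 */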
int
lfs_availwait(struct lfs *fs, int fsb)
{
	int error;
	CLEANERINFO *cip;
	struct buf *cbp;

	ASSERT_NO_SEGLOCK(fs);
	/* Push cleaner blocks through regardless */
	simple_lock(&fs->lfs_interlock);
	if (LFS_SEGLOCK_HELD(fs) &&
	    fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
		simple_unlock(&fs->lfs_interlock);
		return 0;
	}
	simple_unlock(&fs->lfs_interlock);

	while (!lfs_fits(fs, fsb)) {
		/*
		 * Out of space, need cleaner to run.
		 * Update the cleaner info, then wake it up.
		 * Note the cleanerinfo block is on the ifile
		 * so it CANT_WAIT.
		 */
		LFS_CLEANERINFO(cip, fs, cbp);
		LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);

#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_availwait: out of available space, "
		      "waiting on cleaner\n"));
#endif

		wakeup(&lfs_allclean_wakeup);
		wakeup(&fs->lfs_nextseg);
#ifdef DIAGNOSTIC
		if (LFS_SEGLOCK_HELD(fs))
			panic("lfs_availwait: deadlock");
#endif
		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "cleaner", 0);
		if (error)
			return (error);
	}
	return 0;
}

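/*
 * The guts of lfs_bwrite: mark the buffer dirty and lock it down for
 * the segment writer rather than writing it now.  BW_CLEAN marks the
 * inode IN_CLEANING instead of IN_MODIFIED, so that blocks written by
 * the cleaner can be distinguished.
 */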
int
lfs_bwrite_ext(struct buf *bp, int flags)
{
	struct lfs *fs;
	struct inode *ip;
	int fsb, s;

	fs = VFSTOUFS(bp->b_vp->v_mount)->um_lfs;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(bp->b_flags & B_BUSY);
	KASSERT(flags & BW_CLEAN || !LFS_IS_MALLOC_BUF(bp));
	KASSERT((bp->b_flags & (B_DELWRI|B_LOCKED)) != B_DELWRI);
	KASSERT((bp->b_flags & (B_DELWRI|B_LOCKED)) != B_LOCKED);

	/*
	 * Don't write *any* blocks if we're mounted read-only, or
	 * if we are "already unmounted".
	 *
	 * In particular the cleaner can't write blocks either.
	 */
	if (fs->lfs_ronly || (fs->lfs_pflags & LFS_PF_CLEAN)) {
		bp->b_flags &= ~(B_DELWRI | B_READ | B_ERROR);
		LFS_UNLOCK_BUF(bp);
		if (LFS_IS_MALLOC_BUF(bp))
			bp->b_flags &= ~B_BUSY;
		else
			brelse(bp);
		return (fs->lfs_ronly ? EROFS : 0);
	}

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, since
	 * otherwise getnewbuf() would try to reclaim the buffers using
	 * bawrite, which isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if (!(bp->b_flags & B_LOCKED)) {
		fsb = fragstofsb(fs, numfrags(fs, bp->b_bcount));

		ip = VTOI(bp->b_vp);
		if (flags & BW_CLEAN) {
			LFS_SET_UINO(ip, IN_CLEANING);
		} else {
			LFS_SET_UINO(ip, IN_MODIFIED);
		}
		fs->lfs_avail -= fsb;
		bp->b_flags |= B_DELWRI;

		LFS_LOCK_BUF(bp);
		bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}

	if (bp->b_flags & B_CALL)
		bp->b_flags &= ~B_BUSY;
	else
		brelse(bp);

	return (0);
}

/*
 * Called and returns with lfs_interlock held, but with lfs_subsys_lock
 * not held.
 */
void
lfs_flush_fs(struct lfs *fs, int flags)
{
	ASSERT_NO_SEGLOCK(fs);
	LOCK_ASSERT(simple_lock_held(&fs->lfs_interlock));
	LOCK_ASSERT(!simple_lock_held(&lfs_subsys_lock));
	if (fs->lfs_ronly)
		return;

	simple_lock(&lfs_subsys_lock);
	if (lfs_dostats)
		++lfs_stats.flush_invoked;
	simple_unlock(&lfs_subsys_lock);

	simple_unlock(&fs->lfs_interlock);
	lfs_writer_enter(fs, "fldirop");
	lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
	lfs_writer_leave(fs);
	simple_lock(&fs->lfs_interlock);
	fs->lfs_favail = 0; /* XXX */
}

/*
 * This routine initiates segment writes when LFS is consuming too many
 * resources.  Ideally the pageout daemon would be able to direct LFS
 * more subtly.
 * XXX We have one static count of locked buffers;
 * XXX need to think more about the multiple filesystem case.
 *
 * Called and returns with lfs_subsys_lock held.
 * If fs != NULL, we must not hold the segment lock for fs.
 */
void
lfs_flush(struct lfs *fs, int flags, int only_onefs)
{
	extern u_int64_t locked_fakequeue_count;
	struct mount *mp, *nmp;
	struct lfs *tfs;

	LOCK_ASSERT(simple_lock_held(&lfs_subsys_lock));
	KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));

	if (lfs_dostats)
		++lfs_stats.write_exceeded;
	/* XXX should we include SEGM_CKP here? */
	if (lfs_writing && !(flags & SEGM_SYNC)) {
		DLOG((DLOG_FLUSH, "lfs_flush: not flushing because another flush is active\n"));
		return;
	}
	while (lfs_writing)
		ltsleep(&lfs_writing, PRIBIO + 1, "lfsflush", 0,
		    &lfs_subsys_lock);
	lfs_writing = 1;

	simple_unlock(&lfs_subsys_lock);

	if (only_onefs) {
		KASSERT(fs != NULL);
		if (vfs_busy(fs->lfs_ivnode->v_mount, LK_NOWAIT,
		    &mountlist_slock))
			goto errout;
		simple_lock(&fs->lfs_interlock);
		lfs_flush_fs(fs, flags);
		simple_unlock(&fs->lfs_interlock);
		vfs_unbusy(fs->lfs_ivnode->v_mount);
	} else {
		locked_fakequeue_count = 0;
		simple_lock(&mountlist_slock);
		for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
		     mp = nmp) {
			if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
				DLOG((DLOG_FLUSH, "lfs_flush: fs vfs_busy\n"));
				nmp = CIRCLEQ_NEXT(mp, mnt_list);
				continue;
			}
			if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS,
			    MFSNAMELEN) == 0) {
				tfs = VFSTOUFS(mp)->um_lfs;
				simple_lock(&tfs->lfs_interlock);
				lfs_flush_fs(tfs, flags);
				simple_unlock(&tfs->lfs_interlock);
			}
			simple_lock(&mountlist_slock);
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		simple_unlock(&mountlist_slock);
	}
	LFS_DEBUG_COUNTLOCKED("flush");
	wakeup(&lfs_subsys_pages);

errout:
	simple_lock(&lfs_subsys_lock);
	KASSERT(lfs_writing);
	lfs_writing = 0;
	wakeup(&lfs_writing);
}

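/*
 * Buffers (INOCOUNT) and bytes (INOBYTES) that writing the dirty
 * inodes of fs would add to the locked queue.
 */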
#define INOCOUNT(fs) howmany((fs)->lfs_uinodes, INOPB(fs))
#define INOBYTES(fs) ((fs)->lfs_uinodes * sizeof (struct ufs1_dinode))

/*
 * Make sure that we don't have too many locked buffers.
 * Flush buffers if needed.
 */
int
lfs_check(struct vnode *vp, daddr_t blkno, int flags)
{
	int error;
	struct lfs *fs;
	struct inode *ip;
	extern pid_t lfs_writer_daemon;

	error = 0;
	ip = VTOI(vp);

	/* If out of buffers, wait on writer */
	/* XXX KS - if it's the Ifile, we're probably the cleaner! */
	if (ip->i_number == LFS_IFILE_INUM)
		return 0;
	/* If we're being called from inside a dirop, don't sleep */
	if (ip->i_flag & IN_ADIROP)
		return 0;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);
	LOCK_ASSERT(!simple_lock_held(&fs->lfs_interlock));

	/*
	 * If we would flush below, but dirops are active, sleep.
	 * Note that a dirop cannot ever reach this code!
	 */
	simple_lock(&fs->lfs_interlock);
	simple_lock(&lfs_subsys_lock);
	while (fs->lfs_dirops > 0 &&
	       (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		lfs_subsys_pages > LFS_MAX_PAGES ||
		lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
	{
		simple_unlock(&lfs_subsys_lock);
		++fs->lfs_diropwait;
		ltsleep(&fs->lfs_writer, PRIBIO + 1, "bufdirop", 0,
		    &fs->lfs_interlock);
		--fs->lfs_diropwait;
		simple_lock(&lfs_subsys_lock);
	}

#ifdef DEBUG
	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
		DLOG((DLOG_FLUSH, "lfs_check: lqc = %d, max %d\n",
		      locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS));
	if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
		DLOG((DLOG_FLUSH, "lfs_check: lqb = %ld, max %ld\n",
		      locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
	if (lfs_subsys_pages > LFS_MAX_PAGES)
		DLOG((DLOG_FLUSH, "lfs_check: lssp = %d, max %d\n",
		      lfs_subsys_pages, LFS_MAX_PAGES));
	if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
		DLOG((DLOG_FLUSH, "lfs_check: fssp = %d, trip at %d\n",
		      fs->lfs_pages, lfs_fs_pagetrip));
	if (lfs_dirvcount > LFS_MAX_DIROP)
		DLOG((DLOG_FLUSH, "lfs_check: ldvc = %d, max %d\n",
		      lfs_dirvcount, LFS_MAX_DIROP));
	if (fs->lfs_diropwait > 0)
		DLOG((DLOG_FLUSH, "lfs_check: ldvw = %d\n",
		      fs->lfs_diropwait));
#endif

	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
	    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
	    lfs_subsys_pages > LFS_MAX_PAGES ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		simple_unlock(&fs->lfs_interlock);
		lfs_flush(fs, flags, 0);
	} else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
		/*
		 * If we didn't flush the whole thing, some filesystems
		 * still might want to be flushed.
		 */
		++fs->lfs_pdflush;
		wakeup(&lfs_writer_daemon);
		simple_unlock(&fs->lfs_interlock);
	} else
		simple_unlock(&fs->lfs_interlock);

	while (locked_queue_count + INOCOUNT(fs) > LFS_WAIT_BUFS ||
	       locked_queue_bytes + INOBYTES(fs) > LFS_WAIT_BYTES ||
	       lfs_subsys_pages > LFS_WAIT_PAGES ||
	       lfs_dirvcount > LFS_MAX_DIROP) {

		if (lfs_dostats)
			++lfs_stats.wait_exceeded;
		DLOG((DLOG_AVAIL, "lfs_check: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		error = ltsleep(&locked_queue_count, PCATCH | PUSER,
		    "buffers", hz * LFS_BUFWAIT, &lfs_subsys_lock);
		if (error != EWOULDBLOCK)
			break;

		/*
		 * lfs_flush might not flush all the buffers, if some of the
		 * inodes were locked or if most of them were Ifile blocks
		 * and we weren't asked to checkpoint.  Try flushing again
		 * to keep us from blocking indefinitely.
		 */
		if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES) {
			lfs_flush(fs, flags | SEGM_CKP, 0);
		}
	}
	simple_unlock(&lfs_subsys_lock);
	return (error);
}

/*
 * Allocate a new buffer header.
 */
struct buf *
lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
{
	struct buf *bp;
	size_t nbytes;
	int s;

	ASSERT_MAYBE_SEGLOCK(fs);
	nbytes = roundup(size, fsbtob(fs, 1));

	bp = getiobuf();
	if (nbytes) {
		bp->b_data = lfs_malloc(fs, nbytes, type);
		/* memset(bp->b_data, 0, nbytes); */
	}
#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vp is NULL in lfs_newbuf");
	if (bp == NULL)
		panic("bp is NULL after malloc in lfs_newbuf");
#endif
	bp->b_vp = NULL;
	s = splbio();
	bgetvp(vp, bp);
	splx(s);

	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_iodone = lfs_callback;
	bp->b_flags = B_BUSY | B_CALL | B_NOCACHE;
	bp->b_private = fs;

	return (bp);
}

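/*
 * Free a buffer allocated with lfs_newbuf: release its vnode, free the
 * data unless B_INVAL marks it a "fake" buffer, and return the header
 * to the I/O buffer pool.
 */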
void
lfs_freebuf(struct lfs *fs, struct buf *bp)
{
	int s;

	s = splbio();
	if (bp->b_vp)
		brelvp(bp);
	if (!(bp->b_flags & B_INVAL)) { /* B_INVAL indicates a "fake" buffer */
		lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
		bp->b_data = NULL;
	}
	splx(s);
	putiobuf(bp);
}

/*
 * Definitions for the buffer free lists.
 */
#define BQUEUES		4	/* number of free buffer queues */

#define BQ_LOCKED	0	/* super-blocks &c */
#define BQ_LRU		1	/* lru, useful buffers */
#define BQ_AGE		2	/* rubbish */
#define BQ_EMPTY	3	/* buffer headers with no memory */

extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
extern struct simplelock bqueue_slock;

/*
 * Count buffers on the "locked" queue, and compare it to a pro-forma count.
 * Don't count malloced buffers, since they don't detract from the total.
 */
void
lfs_countlocked(int *count, long *bytes, const char *msg)
{
	struct buf *bp;
	int n = 0;
	long int size = 0L;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist) {
		KASSERT(!(bp->b_flags & B_CALL));
		n++;
		size += bp->b_bufsize;
#ifdef DIAGNOSTIC
		if (n > nbuf)
			panic("lfs_countlocked: this can't happen: more"
			      " buffers locked than exist");
#endif
	}
	/*
	 * Theoretically this function never really does anything.
	 * Give a warning if we have to fix the accounting.
	 */
	if (n != *count)
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted buf count"
		      " from %d to %d\n", msg, *count, n));
	if (size != *bytes)
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted byte count"
		      " from %ld to %ld\n", msg, *bytes, size));
	*count = n;
	*bytes = size;
	simple_unlock(&bqueue_slock);
	splx(s);
}

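/*
 * Derive a page threshold from the pages UVM currently considers
 * pageable plus the free pages, scaled with LFS_WAIT_RESOURCE
 * (presumably backing the LFS_WAIT_PAGES limit used above).
 */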
int
lfs_wait_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_WAIT_RESOURCE(active + inactive + uvmexp.free, 1);
}

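/*
 * As lfs_wait_pages above, but scaled with LFS_MAX_RESOURCE
 * (presumably backing LFS_MAX_PAGES).
 */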
int
lfs_max_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_MAX_RESOURCE(active + inactive + uvmexp.free, 1);
}