/*	$NetBSD: lfs_bio.c,v 1.101 2007/05/16 19:11:37 perseant Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.101 2007/05/16 19:11:37 perseant Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 *
 * protected by lfs_subsys_lock.
 */
int	locked_queue_count   = 0;	/* Count of locked-down buffers. */
long	locked_queue_bytes   = 0L;	/* Total size of locked buffers. */
int	lfs_subsys_pages     = 0L;	/* Total number of LFS-written pages */
int	lfs_fs_pagetrip	     = 0;	/* # of pages to trip per-fs write */
int	lfs_writing	     = 0;	/* Set if already kicked off a writer
					   because of buffer space */
/* Lock for the above */
struct simplelock lfs_subsys_lock = SIMPLELOCK_INITIALIZER;

extern int lfs_dostats;

/*
 * reserved number/bytes of locked buffers
 */
int locked_queue_rcount = 0;
long locked_queue_rbytes = 0L;

int lfs_fits_buf(struct lfs *, int, int);
int lfs_reservebuf(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int, int);
int lfs_reserveavail(struct lfs *, struct vnode *vp, struct vnode *vp2, int);

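/*
 * Return true iff n more locked buffers totalling "bytes" more bytes would
 * still fit under the LFS_WAIT_BUFS/LFS_WAIT_BYTES watermarks, counting
 * both the buffers actually locked down and those merely reserved.
 * Must be called with lfs_subsys_lock held.
 */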
int
lfs_fits_buf(struct lfs *fs, int n, int bytes)
{
	int count_fit, bytes_fit;

	ASSERT_NO_SEGLOCK(fs);
	LOCK_ASSERT(simple_lock_held(&lfs_subsys_lock));

	count_fit =
	    (locked_queue_count + locked_queue_rcount + n < LFS_WAIT_BUFS);
	bytes_fit =
	    (locked_queue_bytes + locked_queue_rbytes + bytes < LFS_WAIT_BYTES);

#ifdef DEBUG
	if (!count_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit count: %d + %d + %d >= %d\n",
		      locked_queue_count, locked_queue_rcount,
		      n, LFS_WAIT_BUFS));
	}
	if (!bytes_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit bytes: %ld + %ld + %d >= %ld\n",
		      locked_queue_bytes, locked_queue_rbytes,
		      bytes, LFS_WAIT_BYTES));
	}
#endif /* DEBUG */

	return (count_fit && bytes_fit);
}

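/*
 * Add n buffers and "bytes" bytes to the reserved-buffer totals, kicking
 * off a flush and sleeping until the reservation fits under the
 * watermarks.  A call with negative arguments releases a previous
 * reservation.
 */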
/* ARGSUSED */
int
lfs_reservebuf(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int n, int bytes)
{
	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	simple_lock(&lfs_subsys_lock);
	while (n > 0 && !lfs_fits_buf(fs, n, bytes)) {
		int error;

		lfs_flush(fs, 0, 0);

		error = ltsleep(&locked_queue_count, PCATCH | PUSER,
		    "lfsresbuf", hz * LFS_BUFWAIT, &lfs_subsys_lock);
		if (error && error != EWOULDBLOCK) {
			simple_unlock(&lfs_subsys_lock);
			return error;
		}
	}

	locked_queue_rcount += n;
	locked_queue_rbytes += bytes;

	simple_unlock(&lfs_subsys_lock);

	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	return 0;
}

/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored.  If there is not enough space, give
 * up the vnode lock temporarily and wait for the space to become available.
 *
 * Called with vp locked.  (Note however that if fsb < 0, vp is ignored.)
 *
 * XXX YAMT - it isn't safe to unlock vp here
 * because the node might be modified while we sleep.
 * (eg. cached states like i_offset might be stale,
 *  the vnode might be truncated, etc..)
 * maybe we should have a way to restart the vnodeop (EVOPRESTART?)
 * or rearrange the vnodeop interface to leave vnode locking to file system
 * specific code so that each file system can have its own vnode locking and
 * vnode re-using strategies.
 */
int
lfs_reserveavail(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int fsb)
{
	CLEANERINFO *cip;
	struct buf *bp;
	int error, slept;

	ASSERT_MAYBE_SEGLOCK(fs);
	slept = 0;
	simple_lock(&fs->lfs_interlock);
	while (fsb > 0 && !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
		simple_unlock(&fs->lfs_interlock);
#if 0
		/*
		 * XXX ideally, we should unlock vnodes here
		 * because we might sleep for a very long time.
		 */
		VOP_UNLOCK(vp, 0);
		if (vp2 != NULL) {
			VOP_UNLOCK(vp2, 0);
		}
#else
		/*
		 * XXX since we sleep waiting for the cleaner while holding
		 * the vnode lock, a deadlock will occur if the cleaner
		 * tries to lock the vnode.
		 * (eg. lfs_markv -> lfs_fastvget -> getnewvnode -> vclean)
		 */
#endif

		if (!slept) {
			DLOG((DLOG_AVAIL, "lfs_reserve: waiting for %ld (bfree = %d,"
			      " est_bfree = %d)\n",
			      fsb + fs->lfs_ravail + fs->lfs_favail,
			      fs->lfs_bfree, LFS_EST_BFREE(fs)));
		}
		++slept;

		/* Wake up the cleaner */
		LFS_CLEANERINFO(cip, fs, bp);
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
		lfs_wakeup_cleaner(fs);

		simple_lock(&fs->lfs_interlock);
		/* Cleaner might have run while we were reading, check again */
		if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
			break;

		error = ltsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
		    0, &fs->lfs_interlock);
#if 0
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
		vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
#endif
		if (error) {
			simple_unlock(&fs->lfs_interlock);
			return error;
		}
	}
#ifdef DEBUG
	if (slept) {
		DLOG((DLOG_AVAIL, "lfs_reserve: woke up\n"));
	}
#endif
	fs->lfs_ravail += fsb;
	simple_unlock(&fs->lfs_interlock);

	return 0;
}

#ifdef DIAGNOSTIC
int lfs_rescount;
int lfs_rescountdirop;
#endif

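/*
 * Reserve fsb file system blocks (and a correspondingly estimated number
 * of buffer headers and bytes) on behalf of vp and optionally vp2; a
 * negative fsb releases the reservation again.  Callers typically
 * bracket a metadata-dirtying operation with a positive and then a
 * negative call, along these (illustrative, not verbatim) lines:
 *
 *	if ((error = lfs_reserve(fs, dvp, vp, nblks)) != 0)
 *		return error;
 *	... do the work that dirties metadata ...
 *	lfs_reserve(fs, dvp, vp, -nblks);
 */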
int
lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	int error;
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);
	if (vp2) {
		/* Make sure we're not in the process of reclaiming vp2 */
		simple_lock(&fs->lfs_interlock);
		while (fs->lfs_flags & LFS_UNDIROP) {
			ltsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
			    &fs->lfs_interlock);
		}
		simple_unlock(&fs->lfs_interlock);
	}

	KASSERT(fsb < 0 || VOP_ISLOCKED(vp));
	KASSERT(vp2 == NULL || fsb < 0 || VOP_ISLOCKED(vp2));
	KASSERT(vp2 == NULL || !(VTOI(vp2)->i_flag & IN_ADIROP));
	KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);

	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
#ifdef DIAGNOSTIC
	if (cantwait) {
		if (fsb > 0)
			lfs_rescountdirop++;
		else if (fsb < 0)
			lfs_rescountdirop--;
		if (lfs_rescountdirop < 0)
			panic("lfs_rescountdirop");
	} else {
		if (fsb > 0)
			lfs_rescount++;
		else if (fsb < 0)
			lfs_rescount--;
		if (lfs_rescount < 0)
			panic("lfs_rescount");
	}
#endif
	if (cantwait)
		return 0;

	/*
	 * XXX
	 * vref vnodes here so that cleaner doesn't try to reuse them.
	 * (see XXX comment in lfs_reserveavail)
	 */
	lfs_vref(vp);
	if (vp2 != NULL) {
		lfs_vref(vp2);
	}

	error = lfs_reserveavail(fs, vp, vp2, fsb);
	if (error)
		goto done;

	/*
	 * XXX just a guess. should be more precise.
	 */
	error = lfs_reservebuf(fs, vp, vp2,
	    fragstoblks(fs, fsb), fsbtob(fs, fsb));
	if (error)
		lfs_reserveavail(fs, vp, vp2, -fsb);

done:
	lfs_vunref(vp);
	if (vp2 != NULL) {
		lfs_vunref(vp2);
	}

	return error;
}

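/*
 * VOP_BWRITE entry point.  LFS never issues plain asynchronous writes of
 * dirty buffers (they must go through the segment writer), so on a
 * writable file system a B_ASYNC buffer arriving here indicates a caller
 * bug.
 */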
int
lfs_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;

#ifdef DIAGNOSTIC
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly == 0 && (bp->b_flags & B_ASYNC)) {
		panic("bawrite LFS buffer");
	}
#endif /* DIAGNOSTIC */
	return lfs_bwrite_ext(bp, 0);
}

/*
 * Determine if there is enough room currently available to write fsb
 * blocks.  We need enough blocks for the new blocks, the current
 * inode blocks (including potentially the ifile inode), a summary block,
 * and the segment usage table, plus an ifile block.
 */
int
lfs_fits(struct lfs *fs, int fsb)
{
	int needed;

	ASSERT_NO_SEGLOCK(fs);
	needed = fsb + btofsb(fs, fs->lfs_sumsize) +
	    ((howmany(fs->lfs_uinodes + 1, INOPB(fs)) + fs->lfs_segtabsz +
	      1) << (fs->lfs_blktodb - fs->lfs_fsbtodb));

	if (needed >= fs->lfs_avail) {
#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_fits: no fit: fsb = %ld, uinodes = %ld, "
		      "needed = %ld, avail = %ld\n",
		      (long)fsb, (long)fs->lfs_uinodes, (long)needed,
		      (long)fs->lfs_avail));
#endif
		return 0;
	}
	return 1;
}

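/*
 * Sleep until lfs_fits() reports room for fsb blocks, waking the cleaner
 * on each pass around the loop.  Writes issued under the segment lock on
 * behalf of the cleaner or a forced checkpoint are let through
 * unconditionally, since blocking them would keep space from ever being
 * freed.  Must not otherwise be entered with the segment lock held, or
 * the wait for the cleaner can deadlock.
 */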
int
lfs_availwait(struct lfs *fs, int fsb)
{
	int error;
	CLEANERINFO *cip;
	struct buf *cbp;

	ASSERT_NO_SEGLOCK(fs);
	/* Push cleaner blocks through regardless */
	simple_lock(&fs->lfs_interlock);
	if (LFS_SEGLOCK_HELD(fs) &&
	    fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
		simple_unlock(&fs->lfs_interlock);
		return 0;
	}
	simple_unlock(&fs->lfs_interlock);

	while (!lfs_fits(fs, fsb)) {
		/*
		 * Out of space, need cleaner to run.
		 * Update the cleaner info, then wake it up.
		 * Note the cleanerinfo block is on the ifile
		 * so it CANT_WAIT.
		 */
		LFS_CLEANERINFO(cip, fs, cbp);
		LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);

#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_availwait: out of available space, "
		      "waiting on cleaner\n"));
#endif

		lfs_wakeup_cleaner(fs);
#ifdef DIAGNOSTIC
		if (LFS_SEGLOCK_HELD(fs))
			panic("lfs_availwait: deadlock");
#endif
		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "cleaner", 0);
		if (error)
			return (error);
	}
	return 0;
}

int
lfs_bwrite_ext(struct buf *bp, int flags)
{
	struct lfs *fs;
	struct inode *ip;
	int fsb, s;

	fs = VFSTOUFS(bp->b_vp->v_mount)->um_lfs;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(bp->b_flags & B_BUSY);
	KASSERT(flags & BW_CLEAN || !LFS_IS_MALLOC_BUF(bp));
	KASSERT((bp->b_flags & (B_DELWRI|B_LOCKED)) != B_DELWRI);
	KASSERT((bp->b_flags & (B_DELWRI|B_LOCKED)) != B_LOCKED);

	/*
	 * Don't write *any* blocks if we're mounted read-only, or
	 * if we are "already unmounted".
	 *
	 * In particular the cleaner can't write blocks either.
	 */
	if (fs->lfs_ronly || (fs->lfs_pflags & LFS_PF_CLEAN)) {
		bp->b_flags &= ~(B_DELWRI | B_READ | B_ERROR);
		LFS_UNLOCK_BUF(bp);
		if (LFS_IS_MALLOC_BUF(bp))
			bp->b_flags &= ~B_BUSY;
		else
			brelse(bp);
		return (fs->lfs_ronly ? EROFS : 0);
	}

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if (!(bp->b_flags & B_LOCKED)) {
		fsb = fragstofsb(fs, numfrags(fs, bp->b_bcount));

		ip = VTOI(bp->b_vp);
		if (flags & BW_CLEAN) {
			LFS_SET_UINO(ip, IN_CLEANING);
		} else {
			LFS_SET_UINO(ip, IN_MODIFIED);
		}
		fs->lfs_avail -= fsb;
		bp->b_flags |= B_DELWRI;

		LFS_LOCK_BUF(bp);
		bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}

	if (bp->b_flags & B_CALL)
		bp->b_flags &= ~B_BUSY;
	else
		brelse(bp);

	return (0);
}

/*
 * Called and returns with the lfs_interlock held, but no other simple_locks
 * held.
 */
void
lfs_flush_fs(struct lfs *fs, int flags)
{
	ASSERT_NO_SEGLOCK(fs);
	LOCK_ASSERT(simple_lock_held(&fs->lfs_interlock));
	LOCK_ASSERT(!simple_lock_held(&lfs_subsys_lock));
	if (fs->lfs_ronly)
		return;

	simple_lock(&lfs_subsys_lock);
	if (lfs_dostats)
		++lfs_stats.flush_invoked;
	simple_unlock(&lfs_subsys_lock);

	simple_unlock(&fs->lfs_interlock);
	lfs_writer_enter(fs, "fldirop");
	lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
	lfs_writer_leave(fs);
	simple_lock(&fs->lfs_interlock);
	fs->lfs_favail = 0; /* XXX */
}

/*
 * This routine initiates segment writes when LFS is consuming too many
 * resources.  Ideally the pageout daemon would be able to direct LFS
 * more subtly.
 * XXX We have one static count of locked buffers;
 * XXX need to think more about the multiple filesystem case.
 *
 * Called and returns with lfs_subsys_lock held.
 * If fs != NULL, the caller must not hold the segment lock for fs
 * (see the KDASSERT below).
 */
void
lfs_flush(struct lfs *fs, int flags, int only_onefs)
{
	extern u_int64_t locked_fakequeue_count;
	struct mount *mp, *nmp;
	struct lfs *tfs;

	LOCK_ASSERT(simple_lock_held(&lfs_subsys_lock));
	KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));

	if (lfs_dostats)
		++lfs_stats.write_exceeded;
	/* XXX should we include SEGM_CKP here? */
	if (lfs_writing && !(flags & SEGM_SYNC)) {
		DLOG((DLOG_FLUSH, "lfs_flush: not flushing because another flush is active\n"));
		return;
	}
	while (lfs_writing)
		ltsleep(&lfs_writing, PRIBIO + 1, "lfsflush", 0,
		    &lfs_subsys_lock);
	lfs_writing = 1;

	simple_unlock(&lfs_subsys_lock);

	if (only_onefs) {
		KASSERT(fs != NULL);
		if (vfs_busy(fs->lfs_ivnode->v_mount, LK_NOWAIT,
		    &mountlist_slock))
			goto errout;
		simple_lock(&fs->lfs_interlock);
		lfs_flush_fs(fs, flags);
		simple_unlock(&fs->lfs_interlock);
		vfs_unbusy(fs->lfs_ivnode->v_mount);
	} else {
		locked_fakequeue_count = 0;
		simple_lock(&mountlist_slock);
		for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
		     mp = nmp) {
			if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
				DLOG((DLOG_FLUSH, "lfs_flush: fs vfs_busy\n"));
				nmp = CIRCLEQ_NEXT(mp, mnt_list);
				continue;
			}
			if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS,
			    MFSNAMELEN) == 0) {
				tfs = VFSTOUFS(mp)->um_lfs;
				simple_lock(&tfs->lfs_interlock);
				lfs_flush_fs(tfs, flags);
				simple_unlock(&tfs->lfs_interlock);
			}
			simple_lock(&mountlist_slock);
			nmp = CIRCLEQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		simple_unlock(&mountlist_slock);
	}
	LFS_DEBUG_COUNTLOCKED("flush");
	wakeup(&lfs_subsys_pages);

errout:
	simple_lock(&lfs_subsys_lock);
	KASSERT(lfs_writing);
	lfs_writing = 0;
	wakeup(&lfs_writing);
}

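/*
 * Estimate the buffer-header and byte cost of writing out the currently
 * dirty inodes: INOCOUNT is the number of inode blocks they would occupy,
 * INOBYTES the total size of the dinodes themselves.
 */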
#define INOCOUNT(fs) howmany((fs)->lfs_uinodes, INOPB(fs))
#define INOBYTES(fs) ((fs)->lfs_uinodes * sizeof (struct ufs1_dinode))

/*
 * make sure that we don't have too many locked buffers.
 * flush buffers if needed.
 */
int
lfs_check(struct vnode *vp, daddr_t blkno, int flags)
{
	int error;
	struct lfs *fs;
	struct inode *ip;
	extern pid_t lfs_writer_daemon;

	error = 0;
	ip = VTOI(vp);

	/* If out of buffers, wait on writer */
	/* XXX KS - if it's the Ifile, we're probably the cleaner! */
	if (ip->i_number == LFS_IFILE_INUM)
		return 0;
	/* If we're being called from inside a dirop, don't sleep */
	if (ip->i_flag & IN_ADIROP)
		return 0;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);
	LOCK_ASSERT(!simple_lock_held(&fs->lfs_interlock));

	/*
	 * If we would flush below, but dirops are active, sleep.
	 * Note that a dirop cannot ever reach this code!
	 */
	simple_lock(&fs->lfs_interlock);
	simple_lock(&lfs_subsys_lock);
	while (fs->lfs_dirops > 0 &&
	       (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		lfs_subsys_pages > LFS_MAX_PAGES ||
		fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
	{
		simple_unlock(&lfs_subsys_lock);
		++fs->lfs_diropwait;
		ltsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
		    &fs->lfs_interlock);
		--fs->lfs_diropwait;
		simple_lock(&lfs_subsys_lock);
	}

#ifdef DEBUG
	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
		DLOG((DLOG_FLUSH, "lfs_check: lqc = %d, max %d\n",
		      locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS));
	if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
		DLOG((DLOG_FLUSH, "lfs_check: lqb = %ld, max %ld\n",
		      locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
	if (lfs_subsys_pages > LFS_MAX_PAGES)
		DLOG((DLOG_FLUSH, "lfs_check: lssp = %d, max %d\n",
		      lfs_subsys_pages, LFS_MAX_PAGES));
	if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
		DLOG((DLOG_FLUSH, "lfs_check: fssp = %d, trip at %d\n",
		      fs->lfs_pages, lfs_fs_pagetrip));
	if (lfs_dirvcount > LFS_MAX_DIROP)
		DLOG((DLOG_FLUSH, "lfs_check: ldvc = %d, max %d\n",
		      lfs_dirvcount, LFS_MAX_DIROP));
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs))
		DLOG((DLOG_FLUSH, "lfs_check: lfdvc = %d, max %d\n",
		      fs->lfs_dirvcount, LFS_MAX_FSDIROP(fs)));
	if (fs->lfs_diropwait > 0)
		DLOG((DLOG_FLUSH, "lfs_check: ldvw = %d\n",
		      fs->lfs_diropwait));
#endif

	/* If there are too many pending dirops, we have to flush them. */
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		flags |= SEGM_CKP;
	}

	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
	    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
	    lfs_subsys_pages > LFS_MAX_PAGES ||
	    fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		simple_unlock(&fs->lfs_interlock);
		lfs_flush(fs, flags, 0);
	} else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
		/*
		 * If we didn't flush the whole thing, some filesystems
		 * still might want to be flushed.
		 */
		++fs->lfs_pdflush;
		wakeup(&lfs_writer_daemon);
		simple_unlock(&fs->lfs_interlock);
	} else
		simple_unlock(&fs->lfs_interlock);

	while (locked_queue_count + INOCOUNT(fs) > LFS_WAIT_BUFS ||
	       locked_queue_bytes + INOBYTES(fs) > LFS_WAIT_BYTES ||
	       lfs_subsys_pages > LFS_WAIT_PAGES ||
	       fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	       lfs_dirvcount > LFS_MAX_DIROP) {

		if (lfs_dostats)
			++lfs_stats.wait_exceeded;
		DLOG((DLOG_AVAIL, "lfs_check: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		error = ltsleep(&locked_queue_count, PCATCH | PUSER,
		    "buffers", hz * LFS_BUFWAIT, &lfs_subsys_lock);
		if (error != EWOULDBLOCK)
			break;

		/*
		 * lfs_flush might not flush all the buffers, if some of the
		 * inodes were locked or if most of them were Ifile blocks
		 * and we weren't asked to checkpoint.  Try flushing again
		 * to keep us from blocking indefinitely.
		 */
		if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES) {
			lfs_flush(fs, flags | SEGM_CKP, 0);
		}
	}
	simple_unlock(&lfs_subsys_lock);
	return (error);
}

/*
 * Allocate a new buffer header.
 */
struct buf *
lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
{
	struct buf *bp;
	size_t nbytes;
	int s;

	ASSERT_MAYBE_SEGLOCK(fs);
	nbytes = roundup(size, fsbtob(fs, 1));

	bp = getiobuf();
	if (nbytes) {
		bp->b_data = lfs_malloc(fs, nbytes, type);
		/* memset(bp->b_data, 0, nbytes); */
	}
#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vp is NULL in lfs_newbuf");
	if (bp == NULL)
		panic("bp is NULL after malloc in lfs_newbuf");
#endif
	bp->b_vp = NULL;
	s = splbio();
	bgetvp(vp, bp);
	splx(s);

	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_iodone = lfs_callback;
	bp->b_flags = B_BUSY | B_CALL | B_NOCACHE;
	bp->b_private = fs;

	return (bp);
}

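/*
 * Release a buffer obtained from lfs_newbuf, returning its data area to
 * the LFS-private pool unless B_INVAL marks it as a "fake" buffer whose
 * storage belongs to someone else.
 */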
void
lfs_freebuf(struct lfs *fs, struct buf *bp)
{
	int s;

	s = splbio();
	if (bp->b_vp)
		brelvp(bp);
	if (!(bp->b_flags & B_INVAL)) { /* B_INVAL indicates a "fake" buffer */
		lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
		bp->b_data = NULL;
	}
	splx(s);
	putiobuf(bp);
}

/*
 * Definitions for the buffer free lists.
 */
#define BQUEUES		4		/* number of free buffer queues */

#define BQ_LOCKED	0		/* super-blocks &c */
#define BQ_LRU		1		/* lru, useful buffers */
#define BQ_AGE		2		/* rubbish */
#define BQ_EMPTY	3		/* buffer headers with no memory */

extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
extern struct simplelock bqueue_slock;

/*
 * Count buffers on the "locked" queue, and compare it to a pro-forma count.
 * Don't count malloced buffers, since they don't detract from the total.
 */
void
lfs_countlocked(int *count, long *bytes, const char *msg)
{
	struct buf *bp;
	int n = 0;
	long int size = 0L;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist) {
		KASSERT(!(bp->b_flags & B_CALL));
		n++;
		size += bp->b_bufsize;
#ifdef DIAGNOSTIC
		if (n > nbuf)
			panic("lfs_countlocked: this can't happen: more"
			      " buffers locked than exist");
#endif
	}
	/*
	 * Theoretically this function never really does anything.
	 * Give a warning if we have to fix the accounting.
	 */
	if (n != *count) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted buf count"
		      " from %d to %d\n", msg, *count, n));
	}
	if (size != *bytes) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted byte count"
		      " from %ld to %ld\n", msg, *bytes, size));
	}
	*count = n;
	*bytes = size;
	simple_unlock(&bqueue_slock);
	splx(s);
	return;
}

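/*
 * Page watermarks: LFS_WAIT_RESOURCE/LFS_MAX_RESOURCE scale the total
 * pageable memory (active + inactive + free pages) down to the points at
 * which page writes should block or should trigger a flush, respectively.
 */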
int
lfs_wait_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_WAIT_RESOURCE(active + inactive + uvmexp.free, 1);
}

int
lfs_max_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_MAX_RESOURCE(active + inactive + uvmexp.free, 1);
}