
Lines matching refs: fs (struct lfs *), from the NetBSD LFS buffer and free-space accounting code, sys/ufs/lfs/lfs_bio.c. Each match is prefixed with its line number in the source file.

98 int	lfs_fs_pagetrip	     = 0;	/* # of pages to trip per-fs write */
124 lfs_fits_buf(struct lfs *fs, int n, int bytes)
128 ASSERT_NO_SEGLOCK(fs);
154 lfs_reservebuf(struct lfs *fs, struct vnode *vp,
159 ASSERT_MAYBE_SEGLOCK(fs);
163 cantwait = (VTOI(vp)->i_state & IN_ADIROP) || fs->lfs_unlockvp == vp;
165 while (!cantwait && n > 0 && !lfs_fits_buf(fs, n, bytes)) {
168 DLOG((DLOG_FLUSH, "lfs_reservebuf: flush filesystem %p with checkpoint\n", fs));
169 lfs_flush(fs, SEGM_CKP, 0);
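
The loop at lines 165-169 is the backpressure path for buffer reservations: while the requested buffers do not fit under the locked-queue limits, flush the filesystem with a checkpoint and wait for space. A minimal sketch of the whole loop, with the sleep step (not among the matches above) filled in as an assumption:

	while (!cantwait && n > 0 && !lfs_fits_buf(fs, n, bytes)) {
		lfs_flush(fs, SEGM_CKP, 0);	/* checkpoint frees locked bufs */
		/* Assumed wait step: sleep until the locked queue drains. */
		error = mtsleep(&locked_queue_count, PCATCH | PUSER,
		    "lfsresbuf", 0, &lfs_lock);
		if (error)
			return error;
	}
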
209 lfs_reserveavail(struct lfs *fs, struct vnode *vp,
217 ASSERT_MAYBE_SEGLOCK(fs);
220 cantwait = (VTOI(vp)->i_state & IN_ADIROP) || fs->lfs_unlockvp == vp;
222 !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
228 fsb + fs->lfs_ravail + fs->lfs_favail,
229 (intmax_t)lfs_sb_getbfree(fs),
230 (intmax_t)LFS_EST_BFREE(fs)));
235 LFS_CLEANERINFO(cip, fs, bp);
236 LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
237 lfs_wakeup_cleaner(fs);
241 if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
244 error = mtsleep(&fs->lfs_availsleep, PCATCH | PUSER,
256 fs->lfs_ravail += fsb;
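
Lines 235-244 are the "ask the cleaner for space" pattern, which reappears in lfs_availwait below: publish the current state to the cleaner info block, wake the cleaner, and sleep on lfs_availsleep until segments are reclaimed. A hedged sketch of the sequence (the mutex argument to mtsleep is an assumption):

	LFS_CLEANERINFO(cip, fs, bp);		/* map the cleaner info block */
	LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);	/* push it to disk, no wait */
	lfs_wakeup_cleaner(fs);
	error = mtsleep(&fs->lfs_availsleep, PCATCH | PUSER,
	    "lfs_reserve", 0, &lfs_lock);	/* wait for reclaimed space */
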
268 lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
272 ASSERT_MAYBE_SEGLOCK(fs);
276 while (fs->lfs_flags & LFS_UNDIROP) {
277 mtsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
285 KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);
298 error = lfs_reserveavail(fs, vp, vp2, fsb);
305 error = lfs_reservebuf(fs, vp, vp2, fsb, lfs_fsbtob(fs, fsb));
307 lfs_reserveavail(fs, vp, vp2, -fsb);
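
lfs_reserve is the public entry point for this machinery: a positive fsb reserves space for an operation on vp (and optionally vp2), and the same magnitude negated releases it, exactly as the error-unwind call on line 307 does. A hedged usage sketch (the worst-case size expression is an assumption modeled on typical callers):

	int fsb, error;

	fsb = lfs_btofsb(fs, (ULFS_NIADDR + 2) << lfs_sb_getbshift(fs));
	error = lfs_reserve(fs, vp, NULL, fsb);	/* may sleep for space */
	if (error)
		return error;
	/* ... dirty metadata / allocate blocks on vp ... */
	lfs_reserve(fs, vp, NULL, -fsb);	/* release the reservation */
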
348 lfs_fits(struct lfs *fs, int fsb)
352 ASSERT_NO_SEGLOCK(fs);
353 needed = fsb + lfs_btofsb(fs, lfs_sb_getsumsize(fs)) +
354 ((howmany(lfs_sb_getuinodes(fs) + 1, LFS_INOPB(fs)) +
355 lfs_sb_getsegtabsz(fs) +
356 1) << (lfs_sb_getbshift(fs) - lfs_sb_getffshift(fs)));
358 if (needed >= lfs_sb_getavail(fs)) {
362 (long)fsb, (long)lfs_sb_getuinodes(fs), (intmax_t)needed,
363 (intmax_t)lfs_sb_getavail(fs)));
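
The needed total at lines 353-356 is the caller's request plus the frags a checkpoint would consume: the summary block, enough inode blocks to write every dirty inode (uinodes, plus one for slack), the segment usage table, and one more block, with the parenthesized block count converted to frags by the shift of (bshift - ffshift). As a worked example under assumed geometry: with 8 KiB blocks and 1 KiB frags (bshift 13, ffshift 10), one block is 1 << 3 = 8 frags; with 200 dirty inodes and 64 inodes per block, howmany(200 + 1, 64) = 4 inode blocks, so the overhead term is (4 + segtabsz + 1) * 8 frags on top of fsb and the summary block.
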
371 lfs_availwait(struct lfs *fs, int fsb)
377 ASSERT_NO_SEGLOCK(fs);
380 if (LFS_SEGLOCK_HELD(fs) &&
381 fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
387 while (!lfs_fits(fs, fsb)) {
394 LFS_CLEANERINFO(cip, fs, cbp);
395 LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);
402 lfs_wakeup_cleaner(fs);
403 KASSERTMSG(!LFS_SEGLOCK_HELD(fs), "lfs_availwait: deadlock");
404 error = tsleep(&fs->lfs_availsleep, PCATCH | PUSER,
415 struct lfs *fs;
421 fs = VFSTOULFS(vp->v_mount)->um_lfs;
423 ASSERT_MAYBE_SEGLOCK(fs);
434 if (fs->lfs_ronly || (lfs_sb_getpflags(fs) & LFS_PF_CLEAN)) {
445 return (fs->lfs_ronly ? EROFS : 0);
463 fsb = lfs_numfrags(fs, bp->b_bcount);
473 lfs_sb_subavail(fs, fsb);
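
Lines 463 and 473 are the two halves of the space accounting for a freshly dirtied buffer: size it in frags, then debit the superblock's avail estimate, with lfs_availwait between them so the debit can never drive avail negative. A hedged sketch of how the pieces combine (locking and the cleaner special case omitted):

	fsb = lfs_numfrags(fs, bp->b_bcount);	/* buffer size in frags */
	error = lfs_availwait(fs, fsb);		/* sleep until it fits */
	if (error)
		return error;
	lfs_sb_subavail(fs, fsb);		/* debit the free-space count */
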
500 lfs_flush_fs(struct lfs *fs, int flags)
502 ASSERT_NO_SEGLOCK(fs);
504 if (fs->lfs_ronly)
510 fs->lfs_pdflush = 0;
512 lfs_writer_enter(fs, "fldirop");
513 lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
514 lfs_writer_leave(fs);
516 fs->lfs_favail = 0; /* XXX */
527  * If fs != NULL, the segment lock for fs must not be held on entry
530 lfs_flush(struct lfs *fs, int flags, int only_onefs)
538 KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));
539 KASSERT(!(fs == NULL && only_onefs));
553 if (fs != NULL) {
554 if (!(fs->lfs_flags & LFS_NOTYET)
555 && vfs_busy(fs->lfs_ivnode->v_mount))
558 lfs_flush_fs(fs, flags);
560 if (!(fs->lfs_flags & LFS_NOTYET))
561 vfs_unbusy(fs->lfs_ivnode->v_mount);
570 if (tfs == fs)
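
When only_onefs is false, lfs_flush falls through to flushing every mounted LFS, and the tfs == fs test at line 570 distinguishes the filesystem passed in (assumed here to be skipped, since it was flushed above). A hedged sketch of that walk; the mountlist iteration style and the fstype test are assumptions, and newer kernels use mount iterators instead:

	struct mount *mp;
	struct lfs *tfs;

	TAILQ_FOREACH(mp, &mountlist, mnt_list) {
		if (strcmp(mp->mnt_stat.f_fstypename, MOUNT_LFS) != 0)
			continue;		/* not an LFS mount */
		tfs = VFSTOULFS(mp)->um_lfs;
		if (tfs == fs)
			continue;		/* flushed above already */
		if (vfs_busy(mp))
			continue;
		lfs_flush_fs(tfs, flags);
		vfs_unbusy(mp);
	}
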
588 #define INOCOUNT(fs) howmany(lfs_sb_getuinodes(fs), LFS_INOPB(fs))
589 #define INOBYTES(fs) (lfs_sb_getuinodes(fs) * DINOSIZE(fs))
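
These two macros price the dirty-inode backlog into every threshold test below: INOCOUNT(fs) is the number of inode blocks it would take to write out all uinodes dirty inodes, and INOBYTES(fs) is the same backlog in bytes, so they are added to locked_queue_count and locked_queue_bytes respectively. For example, with 200 dirty inodes, 64 inodes per block, and a 128-byte on-disk inode (assumed geometry), INOCOUNT(fs) = howmany(200, 64) = 4 blocks and INOBYTES(fs) = 200 * 128 = 25600 bytes.
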
599 struct lfs *fs;
614 fs = ip->i_lfs;
616 ASSERT_NO_SEGLOCK(fs);
623 while (fs->lfs_dirops > 0 &&
624 (locked_queue_count + INOCOUNT(fs) > LFS_WAIT_BUFS ||
625 locked_queue_bytes + INOBYTES(fs) > LFS_WAIT_BYTES ||
627 fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
628 lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
630 ++fs->lfs_diropwait;
631 mtsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
633 --fs->lfs_diropwait;
637 if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
639 locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS));
640 if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
642 locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
646 if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
648 fs->lfs_pages, lfs_fs_pagetrip));
652 if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs))
654 fs->lfs_dirvcount, LFS_MAX_FSDIROP(fs)));
655 if (fs->lfs_diropwait > 0)
657 fs->lfs_diropwait));
661 if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
662 lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
663 KASSERT(fs->lfs_dirops == 0);
664 fs->lfs_writer++;
666 lfs_flush_dirops(fs);
668 if (--fs->lfs_writer == 0)
669 cv_broadcast(&fs->lfs_diropscv);
670 KASSERT(fs->lfs_dirops == 0);
671 } else if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
672 locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
674 fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
675 lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
676 lfs_flush(fs, flags, 0);
677 } else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
682 ++fs->lfs_pdflush;
686 while (locked_queue_count + INOCOUNT(fs) >= LFS_WAIT_BUFS ||
687 locked_queue_bytes + INOBYTES(fs) >= LFS_WAIT_BYTES ||
689 fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
710 if (locked_queue_count + INOCOUNT(fs) >= LFS_MAX_BUFS ||
711 locked_queue_bytes + INOBYTES(fs) >= LFS_MAX_BYTES) {
712 lfs_flush(fs, flags | SEGM_CKP, 0);
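
Taken together, lines 661-712 form lfs_check's escalation ladder. A hedged summary in comment form (locking, the pagetrip/pdflush path, and wakeups omitted):

	/*
	 * if (dirop backlog too large)        write dirops synchronously
	 *                                     (lfs_writer++ / lfs_flush_dirops);
	 * else if (over LFS_MAX_* limits)     start a regular lfs_flush();
	 * then while (over LFS_WAIT_* limits) sleep until the flusher catches
	 *                                     up, escalating to a checkpoint
	 *                                     flush (SEGM_CKP) if the
	 *                                     LFS_MAX_* limits are still
	 *                                     exceeded.
	 */
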
723 lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
728 ASSERT_MAYBE_SEGLOCK(fs);
729 nbytes = roundup(size, lfs_fsbtob(fs, 1));
733 bp->b_data = lfs_malloc(fs, nbytes, type);
747 bp->b_private = fs;
759 lfs_freebuf(struct lfs *fs, struct buf *bp)
771 lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
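
lfs_newbuf builds a buffer whose data area comes from the LFS private allocator (lfs_malloc, line 733) rather than the buffer cache, so it must be released with lfs_freebuf, which returns the memory via lfs_free (line 771), never with brelse. A hedged usage sketch; the buffer class and size here are assumptions:

	struct buf *bp;

	bp = lfs_newbuf(fs, fs->lfs_ivnode, daddr,
	    lfs_sb_getsumsize(fs), LFS_NB_SUMMARY);
	/* ... fill in the segment summary ... */
	lfs_freebuf(fs, bp);	/* hands bp->b_data back to lfs_free() */
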