Lines Matching refs:fs
98 int lfs_fs_pagetrip = 0; /* # of pages to trip per-fs write */
124 lfs_fits_buf(struct lfs *fs, int n, int bytes)
128 ASSERT_NO_SEGLOCK(fs);
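The two matches above give lfs_fits_buf's signature and its lock assertion. A minimal sketch of the full predicate, assuming the reserve counters locked_queue_rcount/locked_queue_rbytes and a caller-held lfs_lock; the LFS_WAIT_* thresholds are the ones matched at lines 623-625 below:

static int
lfs_fits_buf(struct lfs *fs, int n, int bytes)
{
    int count_fit, bytes_fit;

    ASSERT_NO_SEGLOCK(fs);
    KASSERT(mutex_owned(&lfs_lock));    /* assumed: caller holds lfs_lock */

    /* Would n more locked buffers and bytes more data stay under the caps? */
    count_fit =
        (locked_queue_count + locked_queue_rcount + n <= LFS_WAIT_BUFS);
    bytes_fit =
        (locked_queue_bytes + locked_queue_rbytes + bytes <= LFS_WAIT_BYTES);

    return (count_fit && bytes_fit);
}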
154 lfs_reservebuf(struct lfs *fs, struct vnode *vp,
159 ASSERT_MAYBE_SEGLOCK(fs);
163 cantwait = (VTOI(vp)->i_state & IN_ADIROP) || fs->lfs_unlockvp == vp;
165 while (!cantwait && n > 0 && !lfs_fits_buf(fs, n, bytes)) {
168 DLOG((DLOG_FLUSH, "lfs_reservebuf: flush filesystem %p with checkpoint\n", fs));
169 lfs_flush(fs, SEGM_CKP, 0);
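Lines 154-169 show the interesting parts of lfs_reservebuf: a caller inside a dirop, or one writing the vnode being unlocked, must not block, while everyone else loops, checkpointing and sleeping until the request fits. A sketch of that loop; the sleep channel and wmesg are guesses, and the final reserve-counter bookkeeping is elided:

int
lfs_reservebuf(struct lfs *fs, struct vnode *vp, struct vnode *vp2,
    int n, int bytes)
{
    int error = 0, cantwait;

    ASSERT_MAYBE_SEGLOCK(fs);
    mutex_enter(&lfs_lock);

    /* Dirops and the vnode being unlocked must never wait here. */
    cantwait = (VTOI(vp)->i_state & IN_ADIROP) || fs->lfs_unlockvp == vp;

    while (!cantwait && n > 0 && !lfs_fits_buf(fs, n, bytes)) {
        /* Checkpoint to drain the locked queue, then wait and retry. */
        DLOG((DLOG_FLUSH, "lfs_reservebuf: flush filesystem %p with checkpoint\n", fs));
        lfs_flush(fs, SEGM_CKP, 0);
        error = mtsleep(&locked_queue_count, PCATCH | PUSER,
            "lfsresbuf", 0, &lfs_lock);    /* channel and wmesg assumed */
        if (error)
            break;
    }
    /* (reserve-counter adjustment by n and bytes elided) */
    mutex_exit(&lfs_lock);
    return error;
}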
209 lfs_reserveavail(struct lfs *fs, struct vnode *vp,
217 ASSERT_MAYBE_SEGLOCK(fs);
220 cantwait = (VTOI(vp)->i_state & IN_ADIROP) || fs->lfs_unlockvp == vp;
222 !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
228 fsb + fs->lfs_ravail + fs->lfs_favail,
229 (intmax_t)lfs_sb_getbfree(fs),
230 (intmax_t)LFS_EST_BFREE(fs)));
235 LFS_CLEANERINFO(cip, fs, bp);
236 LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
237 lfs_wakeup_cleaner(fs);
241 if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
244 error = mtsleep(&fs->lfs_availsleep, PCATCH | PUSER,
256 fs->lfs_ravail += fsb;
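Lines 209-256 sketch lfs_reserveavail: the same cantwait rule, but the resource reserved is on-disk space, so the loop pokes the cleaner through the CLEANERINFO block and sleeps on lfs_availsleep until lfs_fits() passes for the request plus both outstanding reserves. A reconstruction, with the wmesg and the error unwinding assumed:

int
lfs_reserveavail(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
    CLEANERINFO *cip;
    struct buf *bp;
    int error, cantwait;

    ASSERT_MAYBE_SEGLOCK(fs);
    mutex_enter(&lfs_lock);
    cantwait = (VTOI(vp)->i_state & IN_ADIROP) || fs->lfs_unlockvp == vp;
    while (!cantwait &&
        !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
        /* Ask the cleaner for more free segments, then wait. */
        LFS_CLEANERINFO(cip, fs, bp);
        LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
        lfs_wakeup_cleaner(fs);
        if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
            break;    /* space appeared while we were signalling */
        error = mtsleep(&fs->lfs_availsleep, PCATCH | PUSER,
            "lfsresavail", 0, &lfs_lock);    /* wmesg assumed */
        if (error) {
            mutex_exit(&lfs_lock);
            return error;
        }
    }
    fs->lfs_ravail += fsb;    /* record the reservation */
    mutex_exit(&lfs_lock);
    return 0;
}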
268 lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
272 ASSERT_MAYBE_SEGLOCK(fs);
276 while (fs->lfs_flags & LFS_UNDIROP) {

277 mtsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
285 KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);
298 error = lfs_reserveavail(fs, vp, vp2, fsb);
305 error = lfs_reservebuf(fs, vp, vp2, fsb, lfs_fsbtob(fs, fsb));
307 lfs_reserveavail(fs, vp, vp2, -fsb);
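Lines 268-307 show lfs_reserve itself almost completely: wait out any dirop unwind, then take the two reservations in order and release the first if the second fails (a negative fsb releases). A sketch filling in only the control flow between the matched lines:

int
lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
    int error;

    ASSERT_MAYBE_SEGLOCK(fs);

    /* Wait for any dirop unwind in progress to finish. */
    mutex_enter(&lfs_lock);
    while (fs->lfs_flags & LFS_UNDIROP) {
        mtsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
            &lfs_lock);
    }
    mutex_exit(&lfs_lock);

    KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);

    /* Reserve disk space first, then buffer headroom; back out on failure. */
    error = lfs_reserveavail(fs, vp, vp2, fsb);
    if (error)
        return error;
    error = lfs_reservebuf(fs, vp, vp2, fsb, lfs_fsbtob(fs, fsb));
    if (error)
        lfs_reserveavail(fs, vp, vp2, -fsb);    /* negative fsb releases */
    return error;
}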
348 lfs_fits(struct lfs *fs, int fsb)
352 ASSERT_NO_SEGLOCK(fs);
353 needed = fsb + lfs_btofsb(fs, lfs_sb_getsumsize(fs)) +
354 ((howmany(lfs_sb_getuinodes(fs) + 1, LFS_INOPB(fs)) +
355 lfs_sb_getsegtabsz(fs) +
356 1) << (lfs_sb_getbshift(fs) - lfs_sb_getffshift(fs)));
358 if (needed >= lfs_sb_getavail(fs)) {
362 (long)fsb, (long)lfs_sb_getuinodes(fs), (intmax_t)needed,
363 (intmax_t)lfs_sb_getavail(fs)));
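Lines 348-363 contain nearly all of lfs_fits: the worst-case estimate adds one summary block, enough blocks for every dirty inode plus one, and a segment-table copy, shifted from blocks into frags, and compares the total against avail. The full function plausibly reads as below; only the return statements and the DLOG format string are filled in:

int
lfs_fits(struct lfs *fs, int fsb)
{
    int64_t needed;

    ASSERT_NO_SEGLOCK(fs);
    /*
     * Worst-case overhead on top of the request: one summary block,
     * blocks for every dirty inode plus one, and a fresh segment
     * table copy, all converted from blocks into frags.
     */
    needed = fsb + lfs_btofsb(fs, lfs_sb_getsumsize(fs)) +
        ((howmany(lfs_sb_getuinodes(fs) + 1, LFS_INOPB(fs)) +
          lfs_sb_getsegtabsz(fs) +
          1) << (lfs_sb_getbshift(fs) - lfs_sb_getffshift(fs)));

    if (needed >= lfs_sb_getavail(fs)) {
        DLOG((DLOG_AVAIL, "lfs_fits: no fit: fsb %ld uinodes %ld "
            "needed %jd avail %jd\n",    /* format string assumed */
            (long)fsb, (long)lfs_sb_getuinodes(fs), (intmax_t)needed,
            (intmax_t)lfs_sb_getavail(fs)));
        return 0;
    }
    return 1;
}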
371 lfs_availwait(struct lfs *fs, int fsb)
377 ASSERT_NO_SEGLOCK(fs);
380 if (LFS_SEGLOCK_HELD(fs) &&
381 fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
387 while (!lfs_fits(fs, fsb)) {
394 LFS_CLEANERINFO(cip, fs, cbp);
395 LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);
402 lfs_wakeup_cleaner(fs);
403 KASSERTMSG(!LFS_SEGLOCK_HELD(fs), "lfs_availwait: deadlock");
404 error = tsleep(&fs->lfs_availsleep, PCATCH | PUSER,
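Lines 371-404 cover lfs_availwait, the blocking counterpart of lfs_fits: cleaner and forced-checkpoint writes go through regardless; everyone else wakes the cleaner and sleeps until space appears. A reconstruction, with the tsleep wmesg and error handling assumed:

int
lfs_availwait(struct lfs *fs, int fsb)
{
    CLEANERINFO *cip;
    struct buf *cbp;
    int error;

    ASSERT_NO_SEGLOCK(fs);

    /* Push cleaner blocks through regardless. */
    if (LFS_SEGLOCK_HELD(fs) &&
        fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
        return 0;
    }

    while (!lfs_fits(fs, fsb)) {
        /* Out of space: nudge the cleaner, then sleep until it frees some. */
        LFS_CLEANERINFO(cip, fs, cbp);
        LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);
        lfs_wakeup_cleaner(fs);
        KASSERTMSG(!LFS_SEGLOCK_HELD(fs), "lfs_availwait: deadlock");
        error = tsleep(&fs->lfs_availsleep, PCATCH | PUSER,
            "cleaner", 0);    /* wmesg assumed */
        if (error)
            return error;
    }
    return 0;
}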
415 struct lfs *fs;
421 fs = VFSTOULFS(vp->v_mount)->um_lfs;
423 ASSERT_MAYBE_SEGLOCK(fs);
427 KASSERT(!(flags & BW_CLEAN) || lfs_cleanerlock_held(fs));
436 if (fs->lfs_ronly || (lfs_sb_getpflags(fs) & LFS_PF_CLEAN)) {
447 return (fs->lfs_ronly ? EROFS : 0);
465 fsb = lfs_numfrags(fs, bp->b_bcount);
469 lfs_setclean(fs, vp);
476 lfs_sb_subavail(fs, fsb);
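Lines 415-476 do not include the function header, but the shape (map the vnode to its fs, short-circuit read-only or mid-cleaning filesystems, then charge the buffer's frags against avail) matches this file's extended buffer-write path, lfs_bwrite_ext. A hedged reconstruction of just those steps, with the buffer invalidation elided:

    /* Locate the fs behind the vnode being written. */
    fs = VFSTOULFS(vp->v_mount)->um_lfs;
    ASSERT_MAYBE_SEGLOCK(fs);
    KASSERT(!(flags & BW_CLEAN) || lfs_cleanerlock_held(fs));

    /*
     * A read-only or cleaning filesystem takes no new writes:
     * drop the buffer and report EROFS where appropriate.
     */
    if (fs->lfs_ronly || (lfs_sb_getpflags(fs) & LFS_PF_CLEAN)) {
        /* assumed: invalidate and release bp here */
        return (fs->lfs_ronly ? EROFS : 0);
    }

    /* Charge the buffer against the free-space estimate. */
    fsb = lfs_numfrags(fs, bp->b_bcount);
    lfs_setclean(fs, vp);
    lfs_sb_subavail(fs, fsb);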
503 lfs_flush_fs(struct lfs *fs, int flags)
505 ASSERT_NO_SEGLOCK(fs);
507 if (fs->lfs_ronly)
513 fs->lfs_pdflush = 0;
515 lfs_writer_enter(fs, "fldirop");
516 lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
517 lfs_writer_leave(fs);
519 fs->lfs_favail = 0; /* XXX */
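Lines 503-519 cover essentially all of lfs_flush_fs: nothing to do on a read-only fs; otherwise clear the pending pagedaemon-flush flag, exclude dirops for the duration of a whole-filesystem segment write, and reset favail. Assembled, with only the braces and early return assumed:

static void
lfs_flush_fs(struct lfs *fs, int flags)
{
    ASSERT_NO_SEGLOCK(fs);

    if (fs->lfs_ronly)
        return;

    fs->lfs_pdflush = 0;    /* this write satisfies any pending request */

    /* Exclude dirops while the whole filesystem is written out. */
    lfs_writer_enter(fs, "fldirop");
    lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
    lfs_writer_leave(fs);

    fs->lfs_favail = 0;    /* XXX */
}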
530 * If fs != NULL, we must not hold the segment lock for fs.
533 lfs_flush(struct lfs *fs, int flags, int only_onefs)
541 KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));
542 KASSERT(!(fs == NULL && only_onefs));
556 if (fs != NULL) {
557 if (!(fs->lfs_flags & LFS_NOTYET)
558 && vfs_busy(fs->lfs_ivnode->v_mount))
561 lfs_flush_fs(fs, flags);
563 if (!(fs->lfs_flags & LFS_NOTYET))
564 vfs_unbusy(fs->lfs_ivnode->v_mount);
573 if (tfs == fs)
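Lines 530-573 outline lfs_flush. The single-filesystem path (lines 556-564) is shown almost verbatim; the all-filesystems path is reduced in the listing to its one matched line, the tfs == fs skip, so the mount-list walk around it is assumed here:

void
lfs_flush(struct lfs *fs, int flags, int only_onefs)
{
    KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));
    KASSERT(!(fs == NULL && only_onefs));

    if (only_onefs) {
        if (fs != NULL) {
            if (!(fs->lfs_flags & LFS_NOTYET)
                && vfs_busy(fs->lfs_ivnode->v_mount))
                return;    /* assumed: bail if the mount is going away */
            lfs_flush_fs(fs, flags);
            if (!(fs->lfs_flags & LFS_NOTYET))
                vfs_unbusy(fs->lfs_ivnode->v_mount);
        }
    } else {
        /*
         * Assumed shape: walk the mount list and run lfs_flush_fs()
         * on every mounted LFS; the matched "if (tfs == fs)" at line
         * 573 suggests the caller's own fs is skipped or special-cased
         * during the walk.
         */
    }
}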
591 #define INOCOUNT(fs) howmany(lfs_sb_getuinodes(fs), LFS_INOPB(fs))
592 #define INOBYTES(fs) (lfs_sb_getuinodes(fs) * DINOSIZE(fs))
599 lfs_needsflush(struct lfs *fs)
601 if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
603 if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
607 if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs))
611 if (fs->lfs_diropwait > 0)
621 lfs_needswait(struct lfs *fs)
623 if (locked_queue_count + INOCOUNT(fs) > LFS_WAIT_BUFS)
625 if (locked_queue_bytes + INOBYTES(fs) > LFS_WAIT_BYTES)
629 if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs)) /* XXX */
633 if (fs->lfs_diropwait > 0)
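INOCOUNT and INOBYTES (lines 591-592) fold the dirty-inode backlog into the locked-queue totals: uinodes rounded up to whole inode blocks, and uinodes times the on-disk inode size. lfs_needsflush and lfs_needswait then apply the same four tests, differing only in thresholds: LFS_MAX_* starts a flush, LFS_WAIT_* makes the writer block. A sketch of the first, with the return values assumed:

static int
lfs_needsflush(struct lfs *fs)
{
    /* Locked buffers, locked bytes, per-fs dirops, waiting dirops. */
    if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
        return 1;
    if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
        return 1;
    if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs))
        return 1;
    if (fs->lfs_diropwait > 0)
        return 1;
    return 0;
}

/* lfs_needswait is identical except it tests LFS_WAIT_BUFS/LFS_WAIT_BYTES. */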
648 struct lfs *fs;
663 fs = ip->i_lfs;
665 ASSERT_NO_SEGLOCK(fs);
672 while (fs->lfs_dirops > 0 && lfs_needswait(fs)) {
673 ++fs->lfs_diropwait;
674 mtsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
676 --fs->lfs_diropwait;
680 if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
682 locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS));
683 if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
685 locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
689 if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
691 fs->lfs_pages, lfs_fs_pagetrip));
695 if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs))
697 fs->lfs_dirvcount, LFS_MAX_FSDIROP(fs)));
698 if (fs->lfs_diropwait > 0)
700 fs->lfs_diropwait));
704 if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
705 lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
706 KASSERT(fs->lfs_dirops == 0);
707 fs->lfs_writer++;
709 lfs_flush_dirops(fs);
711 if (--fs->lfs_writer == 0)
712 cv_broadcast(&fs->lfs_diropscv);
713 KASSERT(fs->lfs_dirops == 0);
714 } else if (lfs_needsflush(fs)) {
715 lfs_flush(fs, flags, 0);
716 } else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
721 ++fs->lfs_pdflush;
725 while (lfs_needswait(fs)) {
744 if (locked_queue_count + INOCOUNT(fs) >= LFS_MAX_BUFS ||
745 locked_queue_bytes + INOBYTES(fs) >= LFS_MAX_BYTES) {
746 lfs_flush(fs, flags | SEGM_CKP, 0);
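Lines 648-746 come from this file's admission-control routine (lfs_check in NetBSD's lfs_bio.c), which runs before a write is accepted. Condensed and reconstructed, the decision ladder is: dirop pressure is relieved by writing dirops directly; general pressure by a whole-fs flush; a page count above the lfs_fs_pagetrip tunable (line 98) by flagging a per-fs pagedaemon-style flush; and if the WAIT thresholds are still exceeded, the caller blocks, checkpointing while the queue stays pinned at its maximum. Locking, statistics, and the DLOG messages are elided:

    if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
        lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
        /* Too many directory operations: flush them directly. */
        KASSERT(fs->lfs_dirops == 0);
        fs->lfs_writer++;
        lfs_flush_dirops(fs);
        if (--fs->lfs_writer == 0)
            cv_broadcast(&fs->lfs_diropscv);
        KASSERT(fs->lfs_dirops == 0);
    } else if (lfs_needsflush(fs)) {
        /* Over a MAX_* threshold: flush the whole filesystem. */
        lfs_flush(fs, flags, 0);
    } else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
        /* Too many dirty pages: request a per-fs pagedaemon flush. */
        ++fs->lfs_pdflush;
    }

    while (lfs_needswait(fs)) {
        /* assumed: sleep here until the writer makes progress */
        if (locked_queue_count + INOCOUNT(fs) >= LFS_MAX_BUFS ||
            locked_queue_bytes + INOBYTES(fs) >= LFS_MAX_BYTES) {
            /* Still pinned at the limit: force a checkpoint. */
            lfs_flush(fs, flags | SEGM_CKP, 0);
        }
    }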
757 lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
762 ASSERT_MAYBE_SEGLOCK(fs);
763 nbytes = roundup(size, lfs_fsbtob(fs, 1));
767 bp->b_data = lfs_malloc(fs, nbytes, type);
781 bp->b_private = fs;
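lfs_newbuf (lines 757-781) builds a buffer whose data area comes from the LFS-private allocator rather than the buffer cache; stashing fs in b_private lets lfs_freebuf find the right allocator later. A sketch, with the getiobuf() header allocation and the field setup assumed:

struct buf *
lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
{
    struct buf *bp;
    size_t nbytes;

    ASSERT_MAYBE_SEGLOCK(fs);

    /* Round the request up to a whole number of frags. */
    nbytes = roundup(size, lfs_fsbtob(fs, 1));

    bp = getiobuf(NULL, true);    /* assumed allocator; real code may differ */
    if (nbytes)
        bp->b_data = lfs_malloc(fs, nbytes, type);
    bp->b_bufsize = nbytes;
    bp->b_bcount = nbytes;
    bp->b_lblkno = bp->b_blkno = daddr;    /* assumed */
    bp->b_private = fs;    /* lets lfs_freebuf find the allocator */
    /* assumed: associate bp with vp here */
    return bp;
}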
793 lfs_freebuf(struct lfs *fs, struct buf *bp)
805 lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
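lfs_freebuf (lines 793-805) is the counterpart: return the data area through lfs_free (LFS_NB_UNKNOWN lets the allocator work out which pool it came from), then release the header. A sketch, assuming a putiobuf() pairing with the allocation above:

void
lfs_freebuf(struct lfs *fs, struct buf *bp)
{
    /* Release the data area back to the LFS allocator, then the header. */
    if (bp->b_data != NULL)
        lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
    bp->b_data = NULL;
    putiobuf(bp);    /* assumed: matches the allocation in lfs_newbuf */
}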