/*	$NetBSD: lfs_bio.c,v 1.137 2017/04/01 17:34:21 maya Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.137 2017/04/01 17:34:21 maya Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_accessors.h>
#include <ufs/lfs/lfs_extern.h>
#include <ufs/lfs/lfs_kernel.h>

#include <uvm/uvm.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 *
 * protected by lfs_lock.
 */
int	locked_queue_count   = 0;	/* Count of locked-down buffers. */
long	locked_queue_bytes   = 0L;	/* Total size of locked buffers. */
int	lfs_subsys_pages     = 0;	/* Total number of LFS-written pages */
int	lfs_fs_pagetrip	     = 0;	/* # of pages to trip per-fs write */
int	lfs_writing	     = 0;	/* Set if already kicked off a writer
					   because of buffer space */
int	locked_queue_waiters = 0;	/* Number of processes waiting on lq */

/* Lock and condition variables for above. */
kcondvar_t	locked_queue_cv;
kcondvar_t	lfs_writing_cv;
kmutex_t	lfs_lock;

extern int lfs_dostats;

/*
 * reserved number/bytes of locked buffers
 */
int locked_queue_rcount = 0;
long locked_queue_rbytes = 0L;

static int lfs_fits_buf(struct lfs *, int, int);
static int lfs_reservebuf(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int, int);
static int lfs_reserveavail(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int);

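/*
 * In outline, reservation here is two-level: lfs_reserveavail() reserves
 * filesystem blocks (tracked in fs->lfs_ravail, checked against lfs_avail
 * by lfs_fits()), while lfs_reservebuf() reserves headroom on the locked
 * buffer queue (locked_queue_rcount/rbytes, checked against LFS_WAIT_BUFS
 * and LFS_WAIT_BYTES).  lfs_fits_buf() below is the fit test for the
 * latter; both the count and the byte limit must be satisfied.
 */
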
static int
lfs_fits_buf(struct lfs *fs, int n, int bytes)
{
	int count_fit, bytes_fit;

	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));

	count_fit =
	    (locked_queue_count + locked_queue_rcount + n <= LFS_WAIT_BUFS);
	bytes_fit =
	    (locked_queue_bytes + locked_queue_rbytes + bytes <= LFS_WAIT_BYTES);

#ifdef DEBUG
	if (!count_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit count: %d + %d + %d >= %d\n",
		      locked_queue_count, locked_queue_rcount,
		      n, LFS_WAIT_BUFS));
	}
	if (!bytes_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit bytes: %ld + %ld + %d >= %ld\n",
		      locked_queue_bytes, locked_queue_rbytes,
		      bytes, LFS_WAIT_BYTES));
	}
#endif /* DEBUG */

	return (count_fit && bytes_fit);
}

/* ARGSUSED */
static int
lfs_reservebuf(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int n, int bytes)
{
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
	mutex_enter(&lfs_lock);
	while (!cantwait && n > 0 && !lfs_fits_buf(fs, n, bytes)) {
		int error;

		lfs_flush(fs, 0, 0);

		DLOG((DLOG_AVAIL, "lfs_reservebuf: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		++locked_queue_waiters;
		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		--locked_queue_waiters;
		if (error && error != EWOULDBLOCK) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}

	locked_queue_rcount += n;
	locked_queue_rbytes += bytes;

	if (n < 0 && locked_queue_waiters > 0) {
		DLOG((DLOG_AVAIL, "lfs_reservebuf: broadcast: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		cv_broadcast(&locked_queue_cv);
	}

	mutex_exit(&lfs_lock);

	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	return 0;
}
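
/*
 * Note that lfs_reservebuf() is also called with negative n and bytes to
 * release a prior reservation; in that case the broadcast above wakes any
 * threads throttled on locked-queue space so they can retry.
 */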

/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored.  If there is not enough space, wait
 * for the space to become available.
 *
 * Called with vp locked.  (Note however that if fsb < 0, vp is ignored.)
 */
static int
lfs_reserveavail(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int fsb)
{
	CLEANERINFO *cip;
	struct buf *bp;
	int error, slept;
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);
	slept = 0;
	mutex_enter(&lfs_lock);
	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
	while (!cantwait && fsb > 0 &&
	       !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
		mutex_exit(&lfs_lock);

		if (!slept) {
			DLOG((DLOG_AVAIL, "lfs_reserve: waiting for %ld (bfree = %jd,"
			      " est_bfree = %jd)\n",
			      fsb + fs->lfs_ravail + fs->lfs_favail,
			      (intmax_t)lfs_sb_getbfree(fs),
			      (intmax_t)LFS_EST_BFREE(fs)));
		}
		++slept;

		/* Wake up the cleaner */
		LFS_CLEANERINFO(cip, fs, bp);
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
		lfs_wakeup_cleaner(fs);

		mutex_enter(&lfs_lock);
		/* Cleaner might have run while we were reading, check again */
		if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
			break;

		error = mtsleep(&fs->lfs_availsleep, PCATCH | PUSER,
				"lfs_reserve", 0, &lfs_lock);
		if (error) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}
#ifdef DEBUG
	if (slept) {
		DLOG((DLOG_AVAIL, "lfs_reserve: woke up\n"));
	}
#endif
	fs->lfs_ravail += fsb;
	mutex_exit(&lfs_lock);

	return 0;
}
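
/*
 * fs->lfs_ravail thus accumulates blocks that are reserved but not yet
 * consumed; the fit test asks lfs_fits() for the caller's fsb on top of
 * all outstanding reservations (lfs_ravail + lfs_favail), so concurrent
 * reservers cannot jointly oversubscribe lfs_avail.
 */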

#ifdef DIAGNOSTIC
int lfs_rescount;
int lfs_rescountdirop;
#endif

int
lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	int error;

	ASSERT_MAYBE_SEGLOCK(fs);
	if (vp2) {
		/* Make sure we're not in the process of reclaiming vp2 */
		mutex_enter(&lfs_lock);
		while (fs->lfs_flags & LFS_UNDIROP) {
			mtsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
			    &lfs_lock);
		}
		mutex_exit(&lfs_lock);
	}

	KASSERT(fsb < 0 || VOP_ISLOCKED(vp));
	KASSERT(vp2 == NULL || fsb < 0 || VOP_ISLOCKED(vp2));
	KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);

#ifdef DIAGNOSTIC
	mutex_enter(&lfs_lock);
	if (fsb > 0)
		lfs_rescount++;
	else if (fsb < 0)
		lfs_rescount--;
	if (lfs_rescount < 0)
		panic("lfs_rescount");
	mutex_exit(&lfs_lock);
#endif

	error = lfs_reserveavail(fs, vp, vp2, fsb);
	if (error)
		return error;

	/*
	 * XXX just a guess. should be more precise.
	 */
	error = lfs_reservebuf(fs, vp, vp2, fsb, lfs_fsbtob(fs, fsb));
	if (error)
		lfs_reserveavail(fs, vp, vp2, -fsb);

	return error;
}
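
/*
 * Callers typically pair these operations: reserve with a positive fsb
 * before an operation that must not block on free space, then release by
 * calling lfs_reserve() again with the negated fsb.  A sketch:
 *
 *	if ((error = lfs_reserve(fs, vp, NULL, fsb)) != 0)
 *		return error;
 *	... operation that dirties metadata ...
 *	lfs_reserve(fs, vp, NULL, -fsb);
 */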

int
lfs_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;

	KASSERTMSG((VTOI(bp->b_vp)->i_lfs->lfs_ronly ||
		!(bp->b_flags & B_ASYNC)),
	    "bawrite LFS buffer");
	return lfs_bwrite_ext(bp, 0);
}

/*
 * Determine if there is enough room currently available to write fsb
 * blocks.  We need enough blocks for the new blocks, the current
 * inode blocks (including potentially the ifile inode), a summary block,
 * and the segment usage table, plus an ifile block.
 */
int
lfs_fits(struct lfs *fs, int fsb)
{
	int64_t needed;

	ASSERT_NO_SEGLOCK(fs);
	needed = fsb + lfs_btofsb(fs, lfs_sb_getsumsize(fs)) +
		 ((howmany(lfs_sb_getuinodes(fs) + 1, LFS_INOPB(fs)) +
		   lfs_sb_getsegtabsz(fs) +
		   1) << (lfs_sb_getbshift(fs) - lfs_sb_getffshift(fs)));

	if (needed >= lfs_sb_getavail(fs)) {
#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_fits: no fit: fsb = %ld, uinodes = %ld, "
		      "needed = %jd, avail = %jd\n",
		      (long)fsb, (long)lfs_sb_getuinodes(fs), (intmax_t)needed,
		      (intmax_t)lfs_sb_getavail(fs)));
#endif
		return 0;
	}
	return 1;
}
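
/*
 * Roughly, "needed" above is: the requested fsb, plus the summary block
 * converted to frags, plus (inode blocks for the lfs_uinodes dirty inodes
 * and the ifile inode, the segment usage table, and one ifile block),
 * with the shift by (bshift - ffshift) converting those whole blocks
 * into frag-sized units to match lfs_avail.
 */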

int
lfs_availwait(struct lfs *fs, int fsb)
{
	int error;
	CLEANERINFO *cip;
	struct buf *cbp;

	ASSERT_NO_SEGLOCK(fs);
	/* Push cleaner blocks through regardless */
	mutex_enter(&lfs_lock);
	if (LFS_SEGLOCK_HELD(fs) &&
	    fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
		mutex_exit(&lfs_lock);
		return 0;
	}
	mutex_exit(&lfs_lock);

	while (!lfs_fits(fs, fsb)) {
		/*
		 * Out of space, need cleaner to run.
		 * Update the cleaner info, then wake it up.
		 * Note the cleanerinfo block is on the ifile
		 * so it CANT_WAIT.
		 */
		LFS_CLEANERINFO(cip, fs, cbp);
		LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);

#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_availwait: out of available space, "
		      "waiting on cleaner\n"));
#endif

		lfs_wakeup_cleaner(fs);
		KASSERTMSG(!LFS_SEGLOCK_HELD(fs), "lfs_availwait: deadlock");
		error = tsleep(&fs->lfs_availsleep, PCATCH | PUSER,
			       "cleaner", 0);
		if (error)
			return (error);
	}
	return 0;
}

int
lfs_bwrite_ext(struct buf *bp, int flags)
{
	struct lfs *fs;
	struct inode *ip;
	struct vnode *vp;
	int fsb;

	vp = bp->b_vp;
	fs = VFSTOULFS(vp->v_mount)->um_lfs;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(flags & BW_CLEAN || !LFS_IS_MALLOC_BUF(bp));
	KASSERT(((bp->b_oflags | bp->b_flags) & (BO_DELWRI|B_LOCKED))
	    != BO_DELWRI);

	/*
	 * Don't write *any* blocks if we're mounted read-only, or
	 * if we are "already unmounted".
	 *
	 * In particular the cleaner can't write blocks either.
	 */
	if (fs->lfs_ronly || (lfs_sb_getpflags(fs) & LFS_PF_CLEAN)) {
		bp->b_oflags &= ~BO_DELWRI;
		bp->b_flags |= B_READ; /* XXX is this right? --ks */
		bp->b_error = 0;
		mutex_enter(&bufcache_lock);
		LFS_UNLOCK_BUF(bp);
		if (LFS_IS_MALLOC_BUF(bp))
			bp->b_cflags &= ~BC_BUSY;
		else
			brelsel(bp, 0);
		mutex_exit(&bufcache_lock);
		return (fs->lfs_ronly ? EROFS : 0);
	}

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if ((bp->b_flags & B_LOCKED) == 0) {
		fsb = lfs_numfrags(fs, bp->b_bcount);

		ip = VTOI(vp);
		mutex_enter(&lfs_lock);
		if (flags & BW_CLEAN) {
			LFS_SET_UINO(ip, IN_CLEANING);
		} else {
			LFS_SET_UINO(ip, IN_MODIFIED);
		}
		mutex_exit(&lfs_lock);
		lfs_sb_subavail(fs, fsb);

		mutex_enter(&bufcache_lock);
		mutex_enter(vp->v_interlock);
		bp->b_oflags = (bp->b_oflags | BO_DELWRI) & ~BO_DONE;
		LFS_LOCK_BUF(bp);
		bp->b_flags &= ~B_READ;
		bp->b_error = 0;
		reassignbuf(bp, bp->b_vp);
		mutex_exit(vp->v_interlock);
	} else {
		mutex_enter(&bufcache_lock);
	}

	if (bp->b_iodone != NULL)
		bp->b_cflags &= ~BC_BUSY;
	else
		brelsel(bp, 0);
	mutex_exit(&bufcache_lock);

	return (0);
}
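
/*
 * Buffers marked B_LOCKED by LFS_LOCK_BUF() above end up on the BQ_LOCKED
 * free list and are charged to locked_queue_count and locked_queue_bytes;
 * that accounting is what lfs_fits_buf() and lfs_check() throttle against.
 */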

/*
 * Called and returns with the lfs_lock held.
 */
void
lfs_flush_fs(struct lfs *fs, int flags)
{
	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));
	if (fs->lfs_ronly)
		return;

	if (lfs_dostats)
		++lfs_stats.flush_invoked;

	fs->lfs_pdflush = 0;
	mutex_exit(&lfs_lock);
	lfs_writer_enter(fs, "fldirop");
	lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
	lfs_writer_leave(fs);
	mutex_enter(&lfs_lock);
	fs->lfs_favail = 0; /* XXX */
}
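
/*
 * Note that lfs_flush_fs() drops lfs_lock around the lfs_segwrite() call,
 * so "called and returns with the lfs_lock held" does not mean the lock
 * is held continuously throughout.
 */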

/*
 * This routine initiates segment writes when LFS is consuming too many
 * resources.  Ideally the pageout daemon would be able to direct LFS
 * more subtly.
 * XXX We have one static count of locked buffers;
 * XXX need to think more about the multiple filesystem case.
 *
 * Called and returns with lfs_lock held.
 * If fs != NULL, the caller must not hold the segment lock for fs
 * (see the KDASSERT below).
 */
void
lfs_flush(struct lfs *fs, int flags, int only_onefs)
{
	extern u_int64_t locked_fakequeue_count;
	struct mount *mp, *nmp;
	struct lfs *tfs;

	KASSERT(mutex_owned(&lfs_lock));
	KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));

	if (lfs_dostats)
		++lfs_stats.write_exceeded;
	/* XXX should we include SEGM_CKP here? */
	if (lfs_writing && !(flags & SEGM_SYNC)) {
		DLOG((DLOG_FLUSH, "lfs_flush: not flushing because another flush is active\n"));
		return;
	}
	while (lfs_writing)
		cv_wait(&lfs_writing_cv, &lfs_lock);
	lfs_writing = 1;

	mutex_exit(&lfs_lock);

	if (only_onefs) {
		KASSERT(fs != NULL);
		if (vfs_busy(fs->lfs_ivnode->v_mount, NULL))
			goto errout;
		mutex_enter(&lfs_lock);
		lfs_flush_fs(fs, flags);
		mutex_exit(&lfs_lock);
		vfs_unbusy(fs->lfs_ivnode->v_mount, false, NULL);
	} else {
		locked_fakequeue_count = 0;
		mutex_enter(&mountlist_lock);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			if (vfs_busy(mp, &nmp)) {
				DLOG((DLOG_FLUSH, "lfs_flush: fs vfs_busy\n"));
				continue;
			}
			if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS,
			    sizeof(mp->mnt_stat.f_fstypename)) == 0) {
				tfs = VFSTOULFS(mp)->um_lfs;
				mutex_enter(&lfs_lock);
				lfs_flush_fs(tfs, flags);
				mutex_exit(&lfs_lock);
			}
			vfs_unbusy(mp, false, &nmp);
		}
		mutex_exit(&mountlist_lock);
	}
	LFS_DEBUG_COUNTLOCKED("flush");
	wakeup(&lfs_subsys_pages);

    errout:
	mutex_enter(&lfs_lock);
	KASSERT(lfs_writing);
	lfs_writing = 0;
	wakeup(&lfs_writing);
}

#define INOCOUNT(fs) howmany(lfs_sb_getuinodes(fs), LFS_INOPB(fs))
#define INOBYTES(fs) (lfs_sb_getuinodes(fs) * DINOSIZE(fs))
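
/*
 * INOCOUNT and INOBYTES estimate how many buffers, and how many bytes,
 * it would take to write out the lfs_uinodes dirty inodes: inode blocks
 * at LFS_INOPB() on-disk inodes per block, DINOSIZE() bytes per inode.
 */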

/*
 * Make sure that we don't have too many locked buffers.
 * Flush buffers if needed.
 */
int
lfs_check(struct vnode *vp, daddr_t blkno, int flags)
{
	int error;
	struct lfs *fs;
	struct inode *ip;
	extern kcondvar_t lfs_writerd_cv;

	error = 0;
	ip = VTOI(vp);

	/* If out of buffers, wait on writer */
	/* XXX KS - if it's the Ifile, we're probably the cleaner! */
	if (ip->i_number == LFS_IFILE_INUM)
		return 0;
	/* If we're being called from inside a dirop, don't sleep */
	if (ip->i_flag & IN_ADIROP)
		return 0;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);

	/*
	 * If we would flush below, but dirops are active, sleep.
	 * Note that a dirop cannot ever reach this code!
	 */
	mutex_enter(&lfs_lock);
	while (fs->lfs_dirops > 0 &&
	       (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		lfs_subsys_pages > LFS_MAX_PAGES ||
		fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
	{
		++fs->lfs_diropwait;
		mtsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
			&lfs_lock);
		--fs->lfs_diropwait;
	}

#ifdef DEBUG
	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
		DLOG((DLOG_FLUSH, "lfs_check: lqc = %d, max %d\n",
		      locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS));
	if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
		DLOG((DLOG_FLUSH, "lfs_check: lqb = %ld, max %ld\n",
		      locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
	if (lfs_subsys_pages > LFS_MAX_PAGES)
		DLOG((DLOG_FLUSH, "lfs_check: lssp = %d, max %d\n",
		      lfs_subsys_pages, LFS_MAX_PAGES));
	if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
		DLOG((DLOG_FLUSH, "lfs_check: fssp = %d, trip at %d\n",
		      fs->lfs_pages, lfs_fs_pagetrip));
	if (lfs_dirvcount > LFS_MAX_DIROP)
		DLOG((DLOG_FLUSH, "lfs_check: ldvc = %d, max %d\n",
		      lfs_dirvcount, LFS_MAX_DIROP));
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs))
		DLOG((DLOG_FLUSH, "lfs_check: lfdvc = %d, max %d\n",
		      fs->lfs_dirvcount, LFS_MAX_FSDIROP(fs)));
	if (fs->lfs_diropwait > 0)
		DLOG((DLOG_FLUSH, "lfs_check: ldvw = %d\n",
		      fs->lfs_diropwait));
#endif

	/* If there are too many pending dirops, we have to flush them. */
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		mutex_exit(&lfs_lock);
		lfs_flush_dirops(fs);
		mutex_enter(&lfs_lock);
	} else if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
	    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
	    lfs_subsys_pages > LFS_MAX_PAGES ||
	    fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		lfs_flush(fs, flags, 0);
	} else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
		/*
		 * If we didn't flush the whole thing, some filesystems
		 * still might want to be flushed.
		 */
		++fs->lfs_pdflush;
		cv_broadcast(&lfs_writerd_cv);
	}

	while (locked_queue_count + INOCOUNT(fs) >= LFS_WAIT_BUFS ||
		locked_queue_bytes + INOBYTES(fs) >= LFS_WAIT_BYTES ||
		lfs_subsys_pages > LFS_WAIT_PAGES ||
		fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		lfs_dirvcount > LFS_MAX_DIROP) {

		if (lfs_dostats)
			++lfs_stats.wait_exceeded;
		DLOG((DLOG_AVAIL, "lfs_check: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		++locked_queue_waiters;
		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		--locked_queue_waiters;
		if (error != EWOULDBLOCK)
			break;

		/*
		 * lfs_flush might not flush all the buffers, if some of the
		 * inodes were locked or if most of them were Ifile blocks
		 * and we weren't asked to checkpoint.  Try flushing again
		 * to keep us from blocking indefinitely.
		 */
		if (locked_queue_count + INOCOUNT(fs) >= LFS_MAX_BUFS ||
		    locked_queue_bytes + INOBYTES(fs) >= LFS_MAX_BYTES) {
			lfs_flush(fs, flags | SEGM_CKP, 0);
		}
	}
	mutex_exit(&lfs_lock);
	return (error);
}

/*
 * Allocate a new buffer header.
 */
struct buf *
lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
{
	struct buf *bp;
	size_t nbytes;

	ASSERT_MAYBE_SEGLOCK(fs);
	nbytes = roundup(size, lfs_fsbtob(fs, 1));

	bp = getiobuf(NULL, true);
	if (nbytes) {
		bp->b_data = lfs_malloc(fs, nbytes, type);
		/* memset(bp->b_data, 0, nbytes); */
	}
	KASSERT(vp != NULL);
	KASSERT(bp != NULL);

	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_iodone = lfs_callback;
	bp->b_cflags = BC_BUSY | BC_NOCACHE;
	bp->b_private = fs;

	mutex_enter(&bufcache_lock);
	mutex_enter(vp->v_interlock);
	bgetvp(vp, bp);
	mutex_exit(vp->v_interlock);
	mutex_exit(&bufcache_lock);

	return (bp);
}
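
/*
 * Buffers from lfs_newbuf() get their data area from lfs_malloc() rather
 * than the buffer cache, and carry lfs_callback as their iodone hook;
 * they are released with lfs_freebuf() below, not with brelse().
 */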

void
lfs_freebuf(struct lfs *fs, struct buf *bp)
{
	struct vnode *vp;

	if ((vp = bp->b_vp) != NULL) {
		mutex_enter(&bufcache_lock);
		mutex_enter(vp->v_interlock);
		brelvp(bp);
		mutex_exit(vp->v_interlock);
		mutex_exit(&bufcache_lock);
	}
	if (!(bp->b_cflags & BC_INVAL)) { /* BC_INVAL indicates a "fake" buffer */
		lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
		bp->b_data = NULL;
	}
	putiobuf(bp);
}

/*
 * Count buffers on the "locked" queue, and compare it to a pro-forma count.
 * Don't count malloced buffers, since they don't detract from the total.
 */
void
lfs_countlocked(int *count, long *bytes, const char *msg)
{
	struct buf *bp;
	int n = 0;
	long int size = 0L;

	mutex_enter(&bufcache_lock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist) {
		KASSERT(bp->b_iodone == NULL);
		n++;
		size += bp->b_bufsize;
		KASSERTMSG((n <= nbuf),
		    "lfs_countlocked: this can't happen: more"
		    " buffers locked than exist");
	}
	/*
	 * Theoretically this function never really does anything.
	 * Give a warning if we have to fix the accounting.
	 */
	if (n != *count) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted buf count"
		      " from %d to %d\n", msg, *count, n));
	}
	if (size != *bytes) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted byte count"
		      " from %ld to %ld\n", msg, *bytes, size));
	}
	*count = n;
	*bytes = size;
	mutex_exit(&bufcache_lock);
	return;
}

int
lfs_wait_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_WAIT_RESOURCE(active + inactive + uvmexp.free, 1);
}

int
lfs_max_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_MAX_RESOURCE(active + inactive + uvmexp.free, 1);
}
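
/*
 * lfs_wait_pages() and lfs_max_pages() derive the page thresholds used by
 * lfs_check() from the pageable memory currently in the system: crossing
 * LFS_MAX_RESOURCE triggers a flush, while crossing LFS_WAIT_RESOURCE
 * makes writers block until the flush catches up.
 */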