/*	$NetBSD: lfs_bio.c,v 1.113 2008/04/30 12:49:17 ad Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2008 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant (at) hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.113 2008/04/30 12:49:17 ad Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>
#include <sys/kauth.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 *
 * The following counters are protected by lfs_lock.
 */
int	locked_queue_count   = 0;	/* Count of locked-down buffers. */
long	locked_queue_bytes   = 0L;	/* Total size of locked buffers. */
int	lfs_subsys_pages     = 0;	/* Total number of LFS-written pages */
int	lfs_fs_pagetrip	     = 0;	/* # of pages to trip per-fs write */
int	lfs_writing	     = 0;	/* Set if already kicked off a writer
					   because of buffer space */

/* Lock and condition variables for above. */
kcondvar_t	locked_queue_cv;
kcondvar_t	lfs_writing_cv;
kmutex_t	lfs_lock;

extern int lfs_dostats;

/*
 * reserved number/bytes of locked buffers
 */
int locked_queue_rcount = 0;
long locked_queue_rbytes = 0L;

int lfs_fits_buf(struct lfs *, int, int);
int lfs_reservebuf(struct lfs *, struct vnode *vp, struct vnode *vp2,
    int, int);
int lfs_reserveavail(struct lfs *, struct vnode *vp, struct vnode *vp2, int);

int
lfs_fits_buf(struct lfs *fs, int n, int bytes)
{
	int count_fit, bytes_fit;

	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));

	count_fit =
	    (locked_queue_count + locked_queue_rcount + n < LFS_WAIT_BUFS);
	bytes_fit =
	    (locked_queue_bytes + locked_queue_rbytes + bytes < LFS_WAIT_BYTES);

#ifdef DEBUG
	if (!count_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit count: %d + %d + %d >= %d\n",
		      locked_queue_count, locked_queue_rcount,
		      n, LFS_WAIT_BUFS));
	}
	if (!bytes_fit) {
		DLOG((DLOG_AVAIL, "lfs_fits_buf: no fit bytes: %ld + %ld + %d >= %ld\n",
		      locked_queue_bytes, locked_queue_rbytes,
		      bytes, LFS_WAIT_BYTES));
	}
#endif /* DEBUG */

	return (count_fit && bytes_fit);
}
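
/*
 * Worked example (illustrative only, with assumed limits): if
 * LFS_WAIT_BUFS were 1000 and LFS_WAIT_BYTES were 8 MB, then with 900
 * buffers already locked, 50 reserved, and a request of n == 60,
 * count_fit is false (900 + 50 + 60 >= 1000), so lfs_fits_buf()
 * returns 0 even if the byte totals still fit.
 */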

/* ARGSUSED */
int
lfs_reservebuf(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int n, int bytes)
{
	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	mutex_enter(&lfs_lock);
	while (n > 0 && !lfs_fits_buf(fs, n, bytes)) {
		int error;

		lfs_flush(fs, 0, 0);

		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		if (error && error != EWOULDBLOCK) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}

	locked_queue_rcount += n;
	locked_queue_rbytes += bytes;

	mutex_exit(&lfs_lock);

	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	return 0;
}

/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored.  If there is not enough space, give
 * up the vnode lock temporarily and wait for the space to become available.
 *
 * Called with vp locked.  (Note, however, that if fsb < 0, vp is ignored.)
 *
 * XXX YAMT - it isn't safe to unlock vp here
 * because the node might be modified while we sleep.
 * (eg. cached states like i_offset might be stale,
 *  the vnode might be truncated, etc..)
 * maybe we should have a way to restart the vnodeop (EVOPRESTART?)
 * or rearrange the vnodeop interface to leave vnode locking to file system
 * specific code so that each file system can have its own vnode locking and
 * vnode re-using strategies.
 */
int
lfs_reserveavail(struct lfs *fs, struct vnode *vp,
    struct vnode *vp2, int fsb)
{
	CLEANERINFO *cip;
	struct buf *bp;
	int error, slept;

	ASSERT_MAYBE_SEGLOCK(fs);
	slept = 0;
	mutex_enter(&lfs_lock);
	while (fsb > 0 && !lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail)) {
		mutex_exit(&lfs_lock);
#if 0
		/*
		 * XXX ideally, we should unlock the vnodes here
		 * because we might sleep for a very long time.
		 */
		VOP_UNLOCK(vp, 0);
		if (vp2 != NULL) {
			VOP_UNLOCK(vp2, 0);
		}
#else
		/*
		 * XXX since we'll sleep waiting for the cleaner while
		 * holding the vnode lock, a deadlock will occur if the
		 * cleaner tries to lock the vnode.
		 * (eg. lfs_markv -> lfs_fastvget -> getnewvnode -> vclean)
		 */
#endif

		if (!slept) {
			DLOG((DLOG_AVAIL, "lfs_reserve: waiting for %ld (bfree = %d,"
			      " est_bfree = %d)\n",
			      (long)(fsb + fs->lfs_ravail + fs->lfs_favail),
			      fs->lfs_bfree, LFS_EST_BFREE(fs)));
		}
		++slept;

		/* Wake up the cleaner */
		LFS_CLEANERINFO(cip, fs, bp);
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
		lfs_wakeup_cleaner(fs);

		mutex_enter(&lfs_lock);
		/* Cleaner might have run while we were reading, check again */
		if (lfs_fits(fs, fsb + fs->lfs_ravail + fs->lfs_favail))
			break;

		error = mtsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
				0, &lfs_lock);
#if 0
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
		vn_lock(vp2, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
#endif
		if (error) {
			mutex_exit(&lfs_lock);
			return error;
		}
	}
#ifdef DEBUG
	if (slept) {
		DLOG((DLOG_AVAIL, "lfs_reserve: woke up\n"));
	}
#endif
	fs->lfs_ravail += fsb;
	mutex_exit(&lfs_lock);

	return 0;
}

#ifdef DIAGNOSTIC
int lfs_rescount;
int lfs_rescountdirop;
#endif

int
lfs_reserve(struct lfs *fs, struct vnode *vp, struct vnode *vp2, int fsb)
{
	int error;
	int cantwait;

	ASSERT_MAYBE_SEGLOCK(fs);
	if (vp2) {
		/* Make sure we're not in the process of reclaiming vp2 */
		mutex_enter(&lfs_lock);
		while (fs->lfs_flags & LFS_UNDIROP) {
			mtsleep(&fs->lfs_flags, PRIBIO + 1, "lfsrundirop", 0,
			    &lfs_lock);
		}
		mutex_exit(&lfs_lock);
	}

	KASSERT(fsb < 0 || VOP_ISLOCKED(vp));
	KASSERT(vp2 == NULL || fsb < 0 || VOP_ISLOCKED(vp2));
	KASSERT(vp2 == NULL || !(VTOI(vp2)->i_flag & IN_ADIROP));
	KASSERT(vp2 == NULL || vp2 != fs->lfs_unlockvp);

	cantwait = (VTOI(vp)->i_flag & IN_ADIROP) || fs->lfs_unlockvp == vp;
#ifdef DIAGNOSTIC
	if (cantwait) {
		if (fsb > 0)
			lfs_rescountdirop++;
		else if (fsb < 0)
			lfs_rescountdirop--;
		if (lfs_rescountdirop < 0)
			panic("lfs_rescountdirop");
	} else {
		if (fsb > 0)
			lfs_rescount++;
		else if (fsb < 0)
			lfs_rescount--;
		if (lfs_rescount < 0)
			panic("lfs_rescount");
	}
#endif
	if (cantwait)
		return 0;

	/*
	 * XXX
	 * vref vnodes here so that cleaner doesn't try to reuse them.
	 * (see XXX comment in lfs_reserveavail)
	 */
	mutex_enter(&vp->v_interlock);
	lfs_vref(vp);
	if (vp2 != NULL) {
		mutex_enter(&vp2->v_interlock);
		lfs_vref(vp2);
	}

	error = lfs_reserveavail(fs, vp, vp2, fsb);
	if (error)
		goto done;

	/*
	 * XXX just a guess. should be more precise.
	 */
	error = lfs_reservebuf(fs, vp, vp2,
	    fragstoblks(fs, fsb), fsbtob(fs, fsb));
	if (error)
		lfs_reserveavail(fs, vp, vp2, -fsb);

done:
	lfs_vunref(vp);
	if (vp2 != NULL) {
		lfs_vunref(vp2);
	}

	return error;
}
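
#if 0
/*
 * Illustrative sketch only (not part of the original file): callers
 * that are about to dirty metadata reserve an estimated number of
 * filesystem blocks with lfs_reserve() and release the reservation
 * with a negative fsb when done.  The helper name, "nblocks", and the
 * error handling below are assumptions for illustration.
 */
static int
example_reserved_operation(struct lfs *fs, struct vnode *vp, int nblocks)
{
	int error;

	/* May sleep waiting for the cleaner; vp must be locked. */
	error = lfs_reserve(fs, vp, NULL, nblocks);
	if (error)
		return error;

	/* ... perform the operation that dirties up to nblocks ... */

	/* A negative fsb releases the reservation and never sleeps. */
	lfs_reserve(fs, vp, NULL, -nblocks);
	return 0;
}
#endif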

int
lfs_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;

#ifdef DIAGNOSTIC
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly == 0 && (bp->b_flags & B_ASYNC)) {
		panic("bawrite LFS buffer");
	}
#endif /* DIAGNOSTIC */
	return lfs_bwrite_ext(bp, 0);
}

/*
 * Determine if there is enough room currently available to write fsb
 * blocks.  We need enough blocks for the new blocks, the current
 * inode blocks (including potentially the ifile inode), a summary block,
 * and the segment usage table, plus an ifile block.
 */
int
lfs_fits(struct lfs *fs, int fsb)
{
	int needed;

	ASSERT_NO_SEGLOCK(fs);
	needed = fsb + btofsb(fs, fs->lfs_sumsize) +
		 ((howmany(fs->lfs_uinodes + 1, INOPB(fs)) + fs->lfs_segtabsz +
		   1) << (fs->lfs_blktodb - fs->lfs_fsbtodb));

	if (needed >= fs->lfs_avail) {
#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_fits: no fit: fsb = %ld, uinodes = %ld, "
		      "needed = %ld, avail = %ld\n",
		      (long)fsb, (long)fs->lfs_uinodes, (long)needed,
		      (long)fs->lfs_avail));
#endif
		return 0;
	}
	return 1;
}
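
/*
 * Worked example (illustrative, with assumed parameters): on a
 * hypothetical filesystem with 8KB blocks and 1KB fragments
 * (lfs_blktodb - lfs_fsbtodb == 3, so a block is 8 fsb), with a
 * one-fsb summary block, lfs_segtabsz == 2 blocks, and lfs_uinodes ==
 * 10 dirty inodes at INOPB == 64 per block, a request of fsb == 100
 * yields needed == 100 + 1 + ((howmany(11, 64) + 2 + 1) << 3) == 133,
 * so lfs_fits() reports a fit only if lfs_avail exceeds 133.
 */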

int
lfs_availwait(struct lfs *fs, int fsb)
{
	int error;
	CLEANERINFO *cip;
	struct buf *cbp;

	ASSERT_NO_SEGLOCK(fs);
	/* Push cleaner blocks through regardless */
	mutex_enter(&lfs_lock);
	if (LFS_SEGLOCK_HELD(fs) &&
	    fs->lfs_sp->seg_flags & (SEGM_CLEAN | SEGM_FORCE_CKP)) {
		mutex_exit(&lfs_lock);
		return 0;
	}
	mutex_exit(&lfs_lock);

	while (!lfs_fits(fs, fsb)) {
		/*
		 * Out of space, need cleaner to run.
		 * Update the cleaner info, then wake it up.
		 * Note the cleanerinfo block is on the ifile
		 * so it CANT_WAIT.
		 */
		LFS_CLEANERINFO(cip, fs, cbp);
		LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);

#ifdef DEBUG
		DLOG((DLOG_AVAIL, "lfs_availwait: out of available space, "
		      "waiting on cleaner\n"));
#endif

		lfs_wakeup_cleaner(fs);
#ifdef DIAGNOSTIC
		if (LFS_SEGLOCK_HELD(fs))
			panic("lfs_availwait: deadlock");
#endif
		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "cleaner", 0);
		if (error)
			return (error);
	}
	return 0;
}

int
lfs_bwrite_ext(struct buf *bp, int flags)
{
	struct lfs *fs;
	struct inode *ip;
	struct vnode *vp;
	int fsb;

	vp = bp->b_vp;
	fs = VFSTOUFS(vp->v_mount)->um_lfs;

	ASSERT_MAYBE_SEGLOCK(fs);
	KASSERT(bp->b_cflags & BC_BUSY);
	KASSERT(flags & BW_CLEAN || !LFS_IS_MALLOC_BUF(bp));
	KASSERT(((bp->b_oflags | bp->b_flags) & (BO_DELWRI|B_LOCKED))
	    != BO_DELWRI);

	/*
	 * Don't write *any* blocks if we're mounted read-only, or
	 * if we are "already unmounted".
	 *
	 * In particular the cleaner can't write blocks either.
	 */
	if (fs->lfs_ronly || (fs->lfs_pflags & LFS_PF_CLEAN)) {
		bp->b_oflags &= ~BO_DELWRI;
		bp->b_flags |= B_READ;
		bp->b_error = 0;
		mutex_enter(&bufcache_lock);
		LFS_UNLOCK_BUF(bp);
		if (LFS_IS_MALLOC_BUF(bp))
			bp->b_cflags &= ~BC_BUSY;
		else
			brelsel(bp, 0);
		mutex_exit(&bufcache_lock);
		return (fs->lfs_ronly ? EROFS : 0);
	}

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if ((bp->b_flags & B_LOCKED) == 0) {
		fsb = fragstofsb(fs, numfrags(fs, bp->b_bcount));

		ip = VTOI(vp);
		mutex_enter(&lfs_lock);
		if (flags & BW_CLEAN) {
			LFS_SET_UINO(ip, IN_CLEANING);
		} else {
			LFS_SET_UINO(ip, IN_MODIFIED);
		}
		mutex_exit(&lfs_lock);
		fs->lfs_avail -= fsb;

		mutex_enter(&bufcache_lock);
		mutex_enter(&vp->v_interlock);
		bp->b_oflags = (bp->b_oflags | BO_DELWRI) & ~BO_DONE;
		LFS_LOCK_BUF(bp);
		bp->b_flags &= ~B_READ;
		bp->b_error = 0;
		reassignbuf(bp, bp->b_vp);
		mutex_exit(&vp->v_interlock);
	} else {
		mutex_enter(&bufcache_lock);
	}

	if (bp->b_iodone != NULL)
		bp->b_cflags &= ~BC_BUSY;
	else
		brelsel(bp, 0);
	mutex_exit(&bufcache_lock);

	return (0);
}
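
#if 0
/*
 * Illustrative sketch only: a typical path into lfs_bwrite().  File
 * data goes through the buffer cache, and a completed buffer is handed
 * to VOP_BWRITE(), which resolves to lfs_bwrite() for LFS vnodes; the
 * buffer is then accounted and queued (B_LOCKED) for the segment
 * writer instead of being written synchronously.  This helper is
 * hypothetical and elides the uiomove() work of a real write path;
 * getblk() arguments may differ across releases.
 */
static int
example_delayed_write(struct vnode *vp, daddr_t lbn, int size)
{
	struct buf *bp;

	bp = getblk(vp, lbn, size, 0, 0);
	if (bp == NULL)
		return ENOBUFS;
	/* ... fill bp->b_data ... */
	return VOP_BWRITE(bp);	/* ends up in lfs_bwrite() */
}
#endif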

/*
 * Called and returns with the lfs_lock held.
 */
void
lfs_flush_fs(struct lfs *fs, int flags)
{
	ASSERT_NO_SEGLOCK(fs);
	KASSERT(mutex_owned(&lfs_lock));
	if (fs->lfs_ronly)
		return;

	if (lfs_dostats)
		++lfs_stats.flush_invoked;

	mutex_exit(&lfs_lock);
	lfs_writer_enter(fs, "fldirop");
	lfs_segwrite(fs->lfs_ivnode->v_mount, flags);
	lfs_writer_leave(fs);
	mutex_enter(&lfs_lock);
	fs->lfs_favail = 0; /* XXX */
}

/*
 * This routine initiates segment writes when LFS is consuming too many
 * resources.  Ideally the pageout daemon would be able to direct LFS
 * more subtly.
 * XXX We have one static count of locked buffers;
 * XXX need to think more about the multiple filesystem case.
 *
 * Called and returns with lfs_lock held.
 * If fs != NULL, the caller must not hold the segment lock for fs
 * (see the KDASSERT below).
 */
void
lfs_flush(struct lfs *fs, int flags, int only_onefs)
{
	extern u_int64_t locked_fakequeue_count;
	struct mount *mp, *nmp;
	struct lfs *tfs;

	KASSERT(mutex_owned(&lfs_lock));
	KDASSERT(fs == NULL || !LFS_SEGLOCK_HELD(fs));

	if (lfs_dostats)
		++lfs_stats.write_exceeded;
	/* XXX should we include SEGM_CKP here? */
	if (lfs_writing && !(flags & SEGM_SYNC)) {
		DLOG((DLOG_FLUSH, "lfs_flush: not flushing because another flush is active\n"));
		return;
	}
	while (lfs_writing)
		cv_wait(&lfs_writing_cv, &lfs_lock);
	lfs_writing = 1;

	mutex_exit(&lfs_lock);

	if (only_onefs) {
		KASSERT(fs != NULL);
		if (vfs_trybusy(fs->lfs_ivnode->v_mount, RW_READER, NULL))
			goto errout;
		mutex_enter(&lfs_lock);
		lfs_flush_fs(fs, flags);
		mutex_exit(&lfs_lock);
		vfs_unbusy(fs->lfs_ivnode->v_mount, false, NULL);
	} else {
		locked_fakequeue_count = 0;
		mutex_enter(&mountlist_lock);
		for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
		     mp = nmp) {
			if (vfs_trybusy(mp, RW_READER, &nmp)) {
				DLOG((DLOG_FLUSH, "lfs_flush: fs vfs_busy\n"));
				continue;
			}
			if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS,
			    sizeof(mp->mnt_stat.f_fstypename)) == 0) {
				tfs = VFSTOUFS(mp)->um_lfs;
				mutex_enter(&lfs_lock);
				lfs_flush_fs(tfs, flags);
				mutex_exit(&lfs_lock);
			}
			mutex_enter(&mountlist_lock);
			vfs_unbusy(mp, false, &nmp);
		}
		mutex_exit(&mountlist_lock);
	}
	LFS_DEBUG_COUNTLOCKED("flush");
	wakeup(&lfs_subsys_pages);

    errout:
	mutex_enter(&lfs_lock);
	KASSERT(lfs_writing);
	lfs_writing = 0;
	wakeup(&lfs_writing);
}

#define INOCOUNT(fs) howmany((fs)->lfs_uinodes, INOPB(fs))
#define INOBYTES(fs) ((fs)->lfs_uinodes * sizeof (struct ufs1_dinode))

/*
 * Make sure that we don't have too many locked buffers.
 * Flush buffers if needed.
 */
int
lfs_check(struct vnode *vp, daddr_t blkno, int flags)
{
	int error;
	struct lfs *fs;
	struct inode *ip;
	extern pid_t lfs_writer_daemon;

	error = 0;
	ip = VTOI(vp);

	/* If out of buffers, wait on writer */
	/* XXX KS - if it's the Ifile, we're probably the cleaner! */
	if (ip->i_number == LFS_IFILE_INUM)
		return 0;
	/* If we're being called from inside a dirop, don't sleep */
	if (ip->i_flag & IN_ADIROP)
		return 0;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);

	/*
	 * If we would flush below, but dirops are active, sleep.
	 * Note that a dirop cannot ever reach this code!
	 */
	mutex_enter(&lfs_lock);
	while (fs->lfs_dirops > 0 &&
	       (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		lfs_subsys_pages > LFS_MAX_PAGES ||
		fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0))
	{
		++fs->lfs_diropwait;
		mtsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0,
			&lfs_lock);
		--fs->lfs_diropwait;
	}

#ifdef DEBUG
	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS)
		DLOG((DLOG_FLUSH, "lfs_check: lqc = %d, max %d\n",
		      locked_queue_count + INOCOUNT(fs), LFS_MAX_BUFS));
	if (locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
		DLOG((DLOG_FLUSH, "lfs_check: lqb = %ld, max %ld\n",
		      locked_queue_bytes + INOBYTES(fs), LFS_MAX_BYTES));
	if (lfs_subsys_pages > LFS_MAX_PAGES)
		DLOG((DLOG_FLUSH, "lfs_check: lssp = %d, max %d\n",
		      lfs_subsys_pages, LFS_MAX_PAGES));
	if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip)
		DLOG((DLOG_FLUSH, "lfs_check: fssp = %d, trip at %d\n",
		      fs->lfs_pages, lfs_fs_pagetrip));
	if (lfs_dirvcount > LFS_MAX_DIROP)
		DLOG((DLOG_FLUSH, "lfs_check: ldvc = %d, max %d\n",
		      lfs_dirvcount, LFS_MAX_DIROP));
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs))
		DLOG((DLOG_FLUSH, "lfs_check: lfdvc = %d, max %d\n",
		      fs->lfs_dirvcount, LFS_MAX_FSDIROP(fs)));
	if (fs->lfs_diropwait > 0)
		DLOG((DLOG_FLUSH, "lfs_check: ldvw = %d\n",
		      fs->lfs_diropwait));
#endif

	/* If there are too many pending dirops, we have to flush them. */
	if (fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		flags |= SEGM_CKP;
	}

	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
	    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
	    lfs_subsys_pages > LFS_MAX_PAGES ||
	    fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
	    lfs_dirvcount > LFS_MAX_DIROP || fs->lfs_diropwait > 0) {
		lfs_flush(fs, flags, 0);
	} else if (lfs_fs_pagetrip && fs->lfs_pages > lfs_fs_pagetrip) {
		/*
		 * If we didn't flush the whole thing, some filesystems
		 * still might want to be flushed.
		 */
		++fs->lfs_pdflush;
		wakeup(&lfs_writer_daemon);
	}

	while (locked_queue_count + INOCOUNT(fs) > LFS_WAIT_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_WAIT_BYTES ||
		lfs_subsys_pages > LFS_WAIT_PAGES ||
		fs->lfs_dirvcount > LFS_MAX_FSDIROP(fs) ||
		lfs_dirvcount > LFS_MAX_DIROP) {

		if (lfs_dostats)
			++lfs_stats.wait_exceeded;
		DLOG((DLOG_AVAIL, "lfs_check: waiting: count=%d, bytes=%ld\n",
		      locked_queue_count, locked_queue_bytes));
		error = cv_timedwait_sig(&locked_queue_cv, &lfs_lock,
		    hz * LFS_BUFWAIT);
		if (error != EWOULDBLOCK)
			break;

		/*
		 * lfs_flush might not flush all the buffers, if some of the
		 * inodes were locked or if most of them were Ifile blocks
		 * and we weren't asked to checkpoint.  Try flushing again
		 * to keep us from blocking indefinitely.
		 */
		if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES) {
			lfs_flush(fs, flags | SEGM_CKP, 0);
		}
	}
	mutex_exit(&lfs_lock);
	return (error);
}

/*
 * Allocate a new buffer header.
 */
struct buf *
lfs_newbuf(struct lfs *fs, struct vnode *vp, daddr_t daddr, size_t size, int type)
{
	struct buf *bp;
	size_t nbytes;

	ASSERT_MAYBE_SEGLOCK(fs);
	nbytes = roundup(size, fsbtob(fs, 1));

	bp = getiobuf(NULL, true);
#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vp is NULL in lfs_newbuf");
	if (bp == NULL)
		panic("bp is NULL after malloc in lfs_newbuf");
#endif
	if (nbytes) {
		bp->b_data = lfs_malloc(fs, nbytes, type);
		/* memset(bp->b_data, 0, nbytes); */
	}

	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_iodone = lfs_callback;
	bp->b_cflags = BC_BUSY | BC_NOCACHE;
	bp->b_private = fs;

	mutex_enter(&bufcache_lock);
	mutex_enter(&vp->v_interlock);
	bgetvp(vp, bp);
	mutex_exit(&vp->v_interlock);
	mutex_exit(&bufcache_lock);

	return (bp);
}

void
lfs_freebuf(struct lfs *fs, struct buf *bp)
{
	struct vnode *vp;

	if ((vp = bp->b_vp) != NULL) {
		mutex_enter(&bufcache_lock);
		mutex_enter(&vp->v_interlock);
		brelvp(bp);
		mutex_exit(&vp->v_interlock);
		mutex_exit(&bufcache_lock);
	}
	if (!(bp->b_cflags & BC_INVAL)) { /* BC_INVAL indicates a "fake" buffer */
		lfs_free(fs, bp->b_data, LFS_NB_UNKNOWN);
		bp->b_data = NULL;
	}
	putiobuf(bp);
}
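
#if 0
/*
 * Illustrative sketch only: lfs_newbuf() and lfs_freebuf() are used in
 * pairs for buffers LFS fabricates itself (segment summaries, cleaner
 * buffers, and the like) rather than obtains from the buffer cache.
 * The size and the LFS_NB_CLEAN accounting type below are assumptions
 * for illustration.
 */
static void
example_scratch_buffer(struct lfs *fs, struct vnode *vp, daddr_t daddr)
{
	struct buf *bp;

	bp = lfs_newbuf(fs, vp, daddr, fs->lfs_bsize, LFS_NB_CLEAN);
	/* ... fill bp->b_data, hand it to the segment writer, or: */
	lfs_freebuf(fs, bp);
}
#endif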

/*
 * Count buffers on the "locked" queue, and compare it to a pro-forma count.
 * Don't count malloced buffers, since they don't detract from the total.
 */
void
lfs_countlocked(int *count, long *bytes, const char *msg)
{
	struct buf *bp;
	int n = 0;
	long size = 0L;

	mutex_enter(&bufcache_lock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED].bq_queue, b_freelist) {
		KASSERT(bp->b_iodone == NULL);
		n++;
		size += bp->b_bufsize;
#ifdef DIAGNOSTIC
		if (n > nbuf)
			panic("lfs_countlocked: this can't happen: more"
			      " buffers locked than exist");
#endif
	}
	/*
	 * Theoretically this function never really does anything.
	 * Give a warning if we have to fix the accounting.
	 */
	if (n != *count) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted buf count"
		      " from %d to %d\n", msg, *count, n));
	}
	if (size != *bytes) {
		DLOG((DLOG_LLIST, "lfs_countlocked: %s: adjusted byte count"
		      " from %ld to %ld\n", msg, *bytes, size));
	}
	*count = n;
	*bytes = size;
	mutex_exit(&bufcache_lock);
}

int
lfs_wait_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_WAIT_RESOURCE(active + inactive + uvmexp.free, 1);
}

int
lfs_max_pages(void)
{
	int active, inactive;

	uvm_estimatepageable(&active, &inactive);
	return LFS_MAX_RESOURCE(active + inactive + uvmexp.free, 1);
}
    858