/*	$NetBSD: lfs_balloc.c,v 1.56 2005/04/19 20:59:05 perseant Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant (at) hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1989, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_balloc.c	8.4 (Berkeley) 5/8/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_balloc.c,v 1.56 2005/04/19 20:59:05 perseant Exp $");

#if defined(_KERNEL_OPT)
#include "opt_quota.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/resourcevar.h>
#include <sys/tree.h>
#include <sys/trace.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>

int lfs_fragextend(struct vnode *, int, int, daddr_t, struct buf **, struct ucred *);

u_int64_t locked_fakequeue_count;

/*
 * Allocate a block, and do inode and filesystem block accounting for it
 * and for any indirect blocks that may need to be created in order for
 * this block to be created.
 *
 * Blocks which have never been accounted for (i.e., which "do not exist")
 * have disk address 0, which is translated by ufs_bmap to the special value
 * UNASSIGNED == -1, as in the historical UFS.
 *
 * Blocks which have been accounted for but which have not yet been written
 * to disk are given the new special disk address UNWRITTEN == -2, so that
 * they can be differentiated from completely new blocks.
 */
/* VOP_BWRITE NIADDR+2 times */
int
lfs_balloc(void *v)
{
	struct vop_balloc_args /* {
		struct vnode *a_vp;
		off_t a_startoffset;
		int a_size;
		struct ucred *a_cred;
		int a_flags;
		struct buf **a_bpp;
	} */ *ap = v;
	struct vnode *vp;
	int offset;
	u_long iosize;
	daddr_t daddr, idaddr;
	struct buf *ibp, *bp, **bpp;
	struct inode *ip;
	struct lfs *fs;
	struct indir indirs[NIADDR+2], *idp;
	daddr_t	lbn, lastblock;
	int bb, bcount;
	int error, frags, i, nsize, osize, num;

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_lfs;
	offset = blkoff(fs, ap->a_startoffset);
	iosize = ap->a_size;
	KASSERT(iosize <= fs->lfs_bsize);
	lbn = lblkno(fs, ap->a_startoffset);
	/* (void)lfs_check(vp, lbn, 0); */
	bpp = ap->a_bpp;

	ASSERT_MAYBE_SEGLOCK(fs);

	/*
	 * Three cases: it's a block beyond the end of the file, it's a block
	 * in the file that may or may not have been assigned a disk address,
	 * or we're writing an entire block.
	 *
	 * Note, if the daddr is UNWRITTEN, the block already exists in
	 * the cache (it was read or written earlier).  If so, make sure
	 * we don't count it as a new block and don't zero out its contents.
	 * If it does not yet exist, make sure we allocate any necessary
	 * indirect blocks.
	 *
	 * If we are writing a block beyond the end of the file, we need to
	 * check whether the old last block was a fragment.  If it was, we
	 * need to rewrite it.
	 */

	if (bpp)
		*bpp = NULL;

	/* Check for block beyond end of file and fragment extension needed. */
	lastblock = lblkno(fs, ip->i_size);
	if (lastblock < NDADDR && lastblock < lbn) {
		osize = blksize(fs, ip, lastblock);
		if (osize < fs->lfs_bsize && osize > 0) {
			if ((error = lfs_fragextend(vp, osize, fs->lfs_bsize,
						    lastblock,
						    (bpp ? &bp : NULL),
						    ap->a_cred)))
				return (error);
			ip->i_ffs1_size = ip->i_size =
			    (lastblock + 1) * fs->lfs_bsize;
			uvm_vnp_setsize(vp, ip->i_size);
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (bpp)
				(void) VOP_BWRITE(bp);
		}
	}
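
	/*
	 * If the old last block was a fragment, it has now been extended
	 * to a full block, so the write below never leaves a fragment in
	 * the middle of the file.
	 */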

	/*
	 * If the block we are writing is a direct block, it's the last
	 * block in the file, and offset + iosize is less than a full
	 * block, we can write one or more fragments.  There are two cases:
	 * either the block is brand new and we should allocate it at the
	 * correct size, or it already exists, contains some fragments,
	 * and may need to be extended.
	 */
	if (lbn < NDADDR && lblkno(fs, ip->i_size) <= lbn) {
		osize = blksize(fs, ip, lbn);
		nsize = fragroundup(fs, offset + iosize);
		if (lblktosize(fs, lbn) >= ip->i_size) {
			/* Brand new block or fragment */
			frags = numfrags(fs, nsize);
			bb = fragstofsb(fs, frags);
			if (!ISSPACE(fs, bb, ap->a_cred))
				return ENOSPC;
			if (bpp) {
				*ap->a_bpp = bp = getblk(vp, lbn, nsize, 0, 0);
				bp->b_blkno = UNWRITTEN;
				if (ap->a_flags & B_CLRBUF)
					clrbuf(bp);
			}
			ip->i_lfs_effnblks += bb;
			simple_lock(&fs->lfs_interlock);
			fs->lfs_bfree -= bb;
			simple_unlock(&fs->lfs_interlock);
			ip->i_ffs1_db[lbn] = UNWRITTEN;
		} else {
			if (nsize <= osize) {
				/* No need to extend */
				if (bpp && (error = bread(vp, lbn, osize, NOCRED, &bp)))
					return error;
			} else {
				/* Extend existing block */
				if ((error =
				     lfs_fragextend(vp, osize, nsize, lbn,
						    (bpp ? &bp : NULL),
						    ap->a_cred)))
					return error;
			}
			if (bpp)
				*bpp = bp;
		}
		return 0;
	}

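	/*
	 * The write is to a full-sized block that may require indirect
	 * blocks.  Ask ufs_bmaparray for the block's current disk address
	 * and for the chain of indirect blocks leading to it.
	 */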
	error = ufs_bmaparray(vp, lbn, &daddr, &indirs[0], &num, NULL, NULL);
	if (error)
		return (error);

	daddr = (daddr_t)((int32_t)daddr); /* XXX ondisk32 */
	KASSERT(daddr <= LFS_MAX_DADDR);

	/*
	 * Do byte accounting all at once, so we can gracefully fail *before*
	 * we start assigning blocks.
	 */
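	/*
	 * Count one block's worth of space for the data block itself if it
	 * has never been assigned, plus one for each indirect block on the
	 * path that does not yet exist.
	 */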
	bb = VFSTOUFS(vp->v_mount)->um_seqinc;
	bcount = 0;
	if (daddr == UNASSIGNED) {
		bcount = bb;
	}
	for (i = 1; i < num; ++i) {
		if (!indirs[i].in_exists) {
			bcount += bb;
		}
	}
	if (ISSPACE(fs, bcount, ap->a_cred)) {
		simple_lock(&fs->lfs_interlock);
		fs->lfs_bfree -= bcount;
		simple_unlock(&fs->lfs_interlock);
		ip->i_lfs_effnblks += bcount;
	} else {
		return ENOSPC;
	}

	if (daddr == UNASSIGNED) {
		if (num > 0 && ip->i_ffs1_ib[indirs[0].in_off] == 0) {
			ip->i_ffs1_ib[indirs[0].in_off] = UNWRITTEN;
		}

		/*
		 * Create new indirect blocks if necessary
		 */
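		/*
		 * Walk down the chain of indirect blocks.  A missing
		 * indirect block is created in the cache and marked
		 * UNWRITTEN; an existing one is read in if it is not
		 * already valid, and the entry that points to the next
		 * level is marked UNWRITTEN if it is still zero.
		 */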
		if (num > 1) {
			idaddr = ip->i_ffs1_ib[indirs[0].in_off];
			for (i = 1; i < num; ++i) {
				ibp = getblk(vp, indirs[i].in_lbn,
				    fs->lfs_bsize, 0, 0);
				if (!indirs[i].in_exists) {
					clrbuf(ibp);
					ibp->b_blkno = UNWRITTEN;
				} else if (!(ibp->b_flags & (B_DELWRI | B_DONE))) {
					ibp->b_blkno = fsbtodb(fs, idaddr);
					ibp->b_flags |= B_READ;
					VOP_STRATEGY(vp, ibp);
					biowait(ibp);
				}
				/*
				 * This block exists, but the next one may not.
				 * If that is the case mark it UNWRITTEN to keep
				 * the accounting straight.
				 */
				/* XXX ondisk32 */
				if (((int32_t *)ibp->b_data)[indirs[i].in_off] == 0)
					((int32_t *)ibp->b_data)[indirs[i].in_off] =
						UNWRITTEN;
				/* XXX ondisk32 */
				idaddr = ((int32_t *)ibp->b_data)[indirs[i].in_off];
#ifdef DEBUG
				if (vp == fs->lfs_ivnode) {
					LFS_ENTER_LOG("balloc", __FILE__,
						__LINE__, indirs[i].in_lbn,
						ibp->b_flags, curproc->p_pid);
				}
#endif
				if ((error = VOP_BWRITE(ibp)))
					return error;
			}
		}
	}


	/*
	 * Get the existing block from the cache, if requested.
	 */
	frags = fsbtofrags(fs, bb);
	if (bpp)
		*bpp = bp = getblk(vp, lbn, blksize(fs, ip, lbn), 0, 0);

	/*
	 * Do accounting on blocks that represent pages.
	 */
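	/*
	 * With no buffer requested, the data will go through the page
	 * cache instead, so remember the lbn as write-pending (see
	 * lfs_register_block below).
	 */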
	if (!bpp)
		lfs_register_block(vp, lbn);

	/*
	 * The block we are writing may be a brand new block, in which
	 * case we need to do accounting.
	 *
	 * We can tell a truly new block because ufs_bmaparray will say
	 * it is UNASSIGNED.  Once we allocate it we will assign it the
	 * disk address UNWRITTEN.
	 */
	if (daddr == UNASSIGNED) {
		if (bpp) {
			if (ap->a_flags & B_CLRBUF)
				clrbuf(bp);

			/* Note the new address */
			bp->b_blkno = UNWRITTEN;
		}

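		/*
		 * Store UNWRITTEN in whichever pointer refers to this
		 * block: a direct pointer, an indirect pointer in the
		 * inode, or an entry in the deepest indirect block on
		 * the path.
		 */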
		switch (num) {
		    case 0:
			ip->i_ffs1_db[lbn] = UNWRITTEN;
			break;
		    case 1:
			ip->i_ffs1_ib[indirs[0].in_off] = UNWRITTEN;
			break;
		    default:
			idp = &indirs[num - 1];
			if (bread(vp, idp->in_lbn, fs->lfs_bsize, NOCRED,
				  &ibp))
				panic("lfs_balloc: bread bno %lld",
				    (long long)idp->in_lbn);
			/* XXX ondisk32 */
			((int32_t *)ibp->b_data)[idp->in_off] = UNWRITTEN;
#ifdef DEBUG
			if (vp == fs->lfs_ivnode) {
				LFS_ENTER_LOG("balloc", __FILE__,
					__LINE__, idp->in_lbn,
					ibp->b_flags, curproc->p_pid);
			}
#endif
			VOP_BWRITE(ibp);
		}
	} else if (bpp && !(bp->b_flags & (B_DONE|B_DELWRI))) {
		/*
		 * Not a brand new block, also not in the cache;
		 * read it in from disk.
		 */
		if (iosize == fs->lfs_bsize)
			/* Optimization: I/O is unnecessary. */
			bp->b_blkno = daddr;
		else {
			/*
			 * We need to read the block to preserve the
			 * existing bytes.
			 */
			bp->b_blkno = daddr;
			bp->b_flags |= B_READ;
			VOP_STRATEGY(vp, bp);
			return (biowait(bp));
		}
	}

	return (0);
}

/* VOP_BWRITE 1 time */
int
lfs_fragextend(struct vnode *vp, int osize, int nsize, daddr_t lbn, struct buf **bpp, struct ucred *cred)
{
	struct inode *ip;
	struct lfs *fs;
	long bb;
	int error;
	extern long locked_queue_bytes;
	size_t obufsize;

	ip = VTOI(vp);
	fs = ip->i_lfs;
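	/* Number of fsb added by growing the fragment from osize to nsize */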
	bb = (long)fragstofsb(fs, numfrags(fs, nsize - osize));
	error = 0;

	ASSERT_DUNNO_SEGLOCK(fs);

	/*
	 * Take the shared fragment lock so we don't enlarge blocks while a
	 * segment is being written.  If we're called with bpp==NULL, though,
	 * we are only pretending to change a buffer, so we don't have to
	 * lock.
	 */
    top:
	if (bpp) {
		lockmgr(&fs->lfs_fraglock, LK_SHARED, 0);
		LFS_DEBUG_COUNTLOCKED("frag");
	}

	if (!ISSPACE(fs, bb, cred)) {
		error = ENOSPC;
		goto out;
	}

	/*
	 * If we are not asked to actually return the block, all we need
	 * to do is allocate space for it.  UBC will handle dirtying the
	 * appropriate things and making sure it all goes to disk.
	 * Don't bother to read in that case.
	 */
	if (bpp && (error = bread(vp, lbn, osize, NOCRED, bpp))) {
		brelse(*bpp);
		goto out;
	}
#ifdef QUOTA
	if ((error = chkdq(ip, bb, cred, 0))) {
		if (bpp)
			brelse(*bpp);
		goto out;
	}
#endif
	/*
	 * Adjust accounting for lfs_avail.  If there's not enough room,
	 * we will have to wait for the cleaner, which we can't do while
	 * holding a block busy or while holding the fragment lock.  In
	 * that case, release both and start over after waiting.
	 */

	if (bpp && ((*bpp)->b_flags & B_DELWRI)) {
		if (!lfs_fits(fs, bb)) {
			if (bpp)
				brelse(*bpp);
#ifdef QUOTA
			chkdq(ip, -bb, cred, 0);
#endif
			lockmgr(&fs->lfs_fraglock, LK_RELEASE, 0);
			lfs_availwait(fs, bb);
			goto top;
		}
		fs->lfs_avail -= bb;
	}

	simple_lock(&fs->lfs_interlock);
	fs->lfs_bfree -= bb;
	simple_unlock(&fs->lfs_interlock);
	ip->i_lfs_effnblks += bb;
	ip->i_flag |= IN_CHANGE | IN_UPDATE;

	if (bpp) {
		obufsize = (*bpp)->b_bufsize;
		allocbuf(*bpp, nsize, 1);

		/* Adjust locked-list accounting */
		if (((*bpp)->b_flags & (B_LOCKED | B_CALL)) == B_LOCKED) {
			simple_lock(&lfs_subsys_lock);
			locked_queue_bytes += (*bpp)->b_bufsize - obufsize;
			simple_unlock(&lfs_subsys_lock);
		}

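		/* Zero the newly added part of the buffer */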
		bzero((char *)((*bpp)->b_data) + osize, (u_int)(nsize - osize));
	}

    out:
	if (bpp) {
		lockmgr(&fs->lfs_fraglock, LK_RELEASE, 0);
	}
	return (error);
}

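/*
 * Per-inode splay tree of write-pending logical block numbers,
 * ordered by lbn.
 */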
static __inline int
lge(struct lbnentry *a, struct lbnentry *b)
{
	/* Compare explicitly: subtracting 64-bit lbns may overflow an int. */
	if (a->lbn < b->lbn)
		return -1;
	return a->lbn > b->lbn;
}

SPLAY_PROTOTYPE(lfs_splay, lbnentry, entry, lge);

SPLAY_GENERATE(lfs_splay, lbnentry, entry, lge);

/*
 * Record this lbn as being "write pending".  We used to have this information
 * on the buffer headers, but since pages don't have buffer headers we
 * record it here instead.
 */
void
lfs_register_block(struct vnode *vp, daddr_t lbn)
{
	struct lfs *fs;
	struct inode *ip;
	struct lbnentry *lbp;

	ip = VTOI(vp);

	/* Don't count metadata */
	if (lbn < 0 || vp->v_type != VREG || ip->i_number == LFS_IFILE_INUM)
		return;

	fs = ip->i_lfs;

	ASSERT_NO_SEGLOCK(fs);

	/* If no space, wait for the cleaner */
	lfs_availwait(fs, btofsb(fs, 1 << fs->lfs_bshift));

	lbp = (struct lbnentry *)pool_get(&lfs_lbnentry_pool, PR_WAITOK);
	lbp->lbn = lbn;
	if (SPLAY_INSERT(lfs_splay, &ip->i_lfs_lbtree, lbp) != NULL) {
		/* Already there */
		pool_put(&lfs_lbnentry_pool, lbp);
		return;
	}

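	/* New entry: charge it against lfs_favail and the locked-page counters */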
	++ip->i_lfs_nbtree;
	simple_lock(&fs->lfs_interlock);
	fs->lfs_favail += btofsb(fs, (1 << fs->lfs_bshift));
	fs->lfs_pages += fs->lfs_bsize >> PAGE_SHIFT;
	simple_lock(&lfs_subsys_lock);
	++locked_fakequeue_count;
	lfs_subsys_pages += fs->lfs_bsize >> PAGE_SHIFT;
	simple_unlock(&lfs_subsys_lock);
	simple_unlock(&fs->lfs_interlock);
}

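/*
 * Undo the accounting done by lfs_register_block and release the
 * tree entry.
 */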
static void
lfs_do_deregister(struct lfs *fs, struct inode *ip, struct lbnentry *lbp)
{
	ASSERT_MAYBE_SEGLOCK(fs);

	--ip->i_lfs_nbtree;
	SPLAY_REMOVE(lfs_splay, &ip->i_lfs_lbtree, lbp);
	pool_put(&lfs_lbnentry_pool, lbp);
	simple_lock(&fs->lfs_interlock);
	if (fs->lfs_favail > btofsb(fs, (1 << fs->lfs_bshift)))
		fs->lfs_favail -= btofsb(fs, (1 << fs->lfs_bshift));
	fs->lfs_pages -= fs->lfs_bsize >> PAGE_SHIFT;
	simple_lock(&lfs_subsys_lock);
	if (locked_fakequeue_count > 0)
		--locked_fakequeue_count;
	lfs_subsys_pages -= fs->lfs_bsize >> PAGE_SHIFT;
	simple_unlock(&lfs_subsys_lock);
	simple_unlock(&fs->lfs_interlock);
}

void
lfs_deregister_block(struct vnode *vp, daddr_t lbn)
{
	struct lfs *fs;
	struct inode *ip;
	struct lbnentry *lbp;
	struct lbnentry tmp;

	ip = VTOI(vp);

	/* Don't count metadata */
	if (lbn < 0 || vp->v_type != VREG || ip->i_number == LFS_IFILE_INUM)
		return;

	fs = ip->i_lfs;
	tmp.lbn = lbn;
	lbp = SPLAY_FIND(lfs_splay, &ip->i_lfs_lbtree, &tmp);
	if (lbp == NULL)
		return;

	lfs_do_deregister(fs, ip, lbp);
}

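/*
 * Remove every write-pending entry for this vnode.
 */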
void
lfs_deregister_all(struct vnode *vp)
{
	struct lbnentry *lbp, *nlbp;
	struct lfs_splay *hd;
	struct lfs *fs;
	struct inode *ip;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	hd = &ip->i_lfs_lbtree;

	for (lbp = SPLAY_MIN(lfs_splay, hd); lbp != NULL; lbp = nlbp) {
		nlbp = SPLAY_NEXT(lfs_splay, hd, lbp);
		lfs_do_deregister(fs, ip, lbp);
	}
}
    604