ffs_balloc.c revision 1.44.8.1
      1 /*	$NetBSD: ffs_balloc.c,v 1.44.8.1 2007/07/11 20:12:41 mjf Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 2002 Networks Associates Technology, Inc.
      5  * All rights reserved.
      6  *
      7  * This software was developed for the FreeBSD Project by Marshall
      8  * Kirk McKusick and Network Associates Laboratories, the Security
      9  * Research Division of Network Associates, Inc. under DARPA/SPAWAR
     10  * contract N66001-01-C-8035 ("CBOSS"), as part of the DARPA CHATS
     11  * research program
     12  *
     13  * Copyright (c) 1982, 1986, 1989, 1993
     14  *	The Regents of the University of California.  All rights reserved.
     15  *
     16  * Redistribution and use in source and binary forms, with or without
     17  * modification, are permitted provided that the following conditions
     18  * are met:
     19  * 1. Redistributions of source code must retain the above copyright
     20  *    notice, this list of conditions and the following disclaimer.
     21  * 2. Redistributions in binary form must reproduce the above copyright
     22  *    notice, this list of conditions and the following disclaimer in the
     23  *    documentation and/or other materials provided with the distribution.
     24  * 3. Neither the name of the University nor the names of its contributors
     25  *    may be used to endorse or promote products derived from this software
     26  *    without specific prior written permission.
     27  *
     28  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     29  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     30  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     31  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     32  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     33  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     34  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     35  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     36  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     37  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     38  * SUCH DAMAGE.
     39  *
     40  *	@(#)ffs_balloc.c	8.8 (Berkeley) 6/16/95
     41  */
     42 
     43 #include <sys/cdefs.h>
     44 __KERNEL_RCSID(0, "$NetBSD: ffs_balloc.c,v 1.44.8.1 2007/07/11 20:12:41 mjf Exp $");
     45 
     46 #if defined(_KERNEL_OPT)
     47 #include "opt_quota.h"
     48 #endif
     49 
     50 #include <sys/param.h>
     51 #include <sys/systm.h>
     52 #include <sys/buf.h>
     53 #include <sys/file.h>
     54 #include <sys/mount.h>
     55 #include <sys/vnode.h>
     56 #include <sys/kauth.h>
     57 
     58 #include <ufs/ufs/quota.h>
     59 #include <ufs/ufs/ufsmount.h>
     60 #include <ufs/ufs/inode.h>
     61 #include <ufs/ufs/ufs_extern.h>
     62 #include <ufs/ufs/ufs_bswap.h>
     63 
     64 #include <ufs/ffs/fs.h>
     65 #include <ufs/ffs/ffs_extern.h>
     66 
     67 #include <uvm/uvm.h>
     68 
     69 static int ffs_balloc_ufs1(struct vnode *, off_t, int, kauth_cred_t, int,
     70     struct buf **);
     71 static int ffs_balloc_ufs2(struct vnode *, off_t, int, kauth_cred_t, int,
     72     struct buf **);
     73 
     74 /*
     75  * Balloc defines the structure of file system storage
     76  * by allocating the physical blocks on a device given
     77  * the inode and the logical block number in a file.
     78  */
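         /*
          * A sketch of typical use (hypothetical caller, not taken from
          * this file): the write path allocates backing store for each
          * logical block before copying data into it, e.g.
          *
          *	error = ffs_balloc(vp, uio->uio_offset, xfersize, cred,
          *	    B_CLRBUF, &bp);
          *
          * The byte range must lie within a single logical block (the code
          * below panics otherwise).  If "bpp" is non-NULL, a buffer for the
          * block is returned through it: B_CLRBUF asks for valid contents
          * (read in or zeroed), B_SYNC makes the metadata writes done here
          * synchronous, and B_METAONLY returns the last-level indirect
          * block instead of the data block.
          */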
     79 
     80 int
     81 ffs_balloc(struct vnode *vp, off_t off, int size, kauth_cred_t cred, int flags,
     82     struct buf **bpp)
     83 {
     84 
     85 	if (VTOI(vp)->i_fs->fs_magic == FS_UFS2_MAGIC)
     86 		return ffs_balloc_ufs2(vp, off, size, cred, flags, bpp);
     87 	else
     88 		return ffs_balloc_ufs1(vp, off, size, cred, flags, bpp);
     89 }
     90 
     91 static int
     92 ffs_balloc_ufs1(struct vnode *vp, off_t off, int size, kauth_cred_t cred,
     93     int flags, struct buf **bpp)
     94 {
     95 	daddr_t lbn, lastlbn;
     96 	struct buf *bp, *nbp;
     97 	struct inode *ip = VTOI(vp);
     98 	struct fs *fs = ip->i_fs;
     99 	struct indir indirs[NIADDR + 2];
    100 	daddr_t newb, pref, nb;
    101 	int32_t *bap;	/* XXX ondisk32 */
    102 	int deallocated, osize, nsize, num, i, error;
    103 	int32_t *blkp, *allocblk, allociblk[NIADDR + 1];
    104 	int32_t *allocib;
    105 	int unwindidx = -1;
    106 #ifdef FFS_EI
    107 	const int needswap = UFS_FSNEEDSWAP(fs);
    108 #endif
    109 	UVMHIST_FUNC("ffs_balloc"); UVMHIST_CALLED(ubchist);
    110 
    111 	lbn = lblkno(fs, off);
    112 	size = blkoff(fs, off) + size;
    113 	if (size > fs->fs_bsize)
    114 		panic("ffs_balloc: blk too big");
    115 	if (bpp != NULL) {
    116 		*bpp = NULL;
    117 	}
     118 	UVMHIST_LOG(ubchist, "vp %p lbn 0x%x size 0x%x", vp, lbn, size, 0);
    119 
    120 	if (lbn < 0)
    121 		return (EFBIG);
    122 
    123 	/*
    124 	 * If the next write will extend the file into a new block,
     125 	 * and the file is currently composed of a fragment,
    126 	 * this fragment has to be extended to be a full block.
    127 	 */
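         	/*
         	 * Concrete example (assuming an 8192/1024 byte block/fragment
         	 * file system): a 6000 byte file ends in six 1K fragments.
         	 * A write starting in logical block 1 or beyond first grows
         	 * that partial block to a full 8192 bytes via ffs_realloccg()
         	 * below, so a fragment is never left in the middle of a file.
         	 */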
    128 
    129 	lastlbn = lblkno(fs, ip->i_size);
    130 	if (lastlbn < NDADDR && lastlbn < lbn) {
    131 		nb = lastlbn;
    132 		osize = blksize(fs, ip, nb);
    133 		if (osize < fs->fs_bsize && osize > 0) {
    134 			error = ffs_realloccg(ip, nb,
    135 				    ffs_blkpref_ufs1(ip, lastlbn, nb,
    136 					&ip->i_ffs1_db[0]),
    137 				    osize, (int)fs->fs_bsize, cred, bpp, &newb);
    138 			if (error)
    139 				return (error);
    140 			if (DOINGSOFTDEP(vp))
    141 				softdep_setup_allocdirect(ip, nb, newb,
    142 				    ufs_rw32(ip->i_ffs1_db[nb], needswap),
    143 				    fs->fs_bsize, osize, bpp ? *bpp : NULL);
    144 			ip->i_size = lblktosize(fs, nb + 1);
    145 			ip->i_ffs1_size = ip->i_size;
    146 			uvm_vnp_setsize(vp, ip->i_ffs1_size);
    147 			ip->i_ffs1_db[nb] = ufs_rw32((u_int32_t)newb, needswap);
    148 			ip->i_flag |= IN_CHANGE | IN_UPDATE;
    149 			if (bpp && *bpp) {
    150 				if (flags & B_SYNC)
    151 					bwrite(*bpp);
    152 				else
    153 					bawrite(*bpp);
    154 			}
    155 		}
    156 	}
    157 
    158 	/*
    159 	 * The first NDADDR blocks are direct blocks
    160 	 */
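         	/*
         	 * (NDADDR is 12, so with 8 KB blocks the direct blocks cover
         	 * the first 96 KB of a file; anything beyond that goes through
         	 * the indirect-block code further down.)
         	 */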
    161 
    162 	if (lbn < NDADDR) {
    163 		nb = ufs_rw32(ip->i_ffs1_db[lbn], needswap);
    164 		if (nb != 0 && ip->i_size >= lblktosize(fs, lbn + 1)) {
    165 
    166 			/*
    167 			 * The block is an already-allocated direct block
    168 			 * and the file already extends past this block,
    169 			 * thus this must be a whole block.
    170 			 * Just read the block (if requested).
    171 			 */
    172 
    173 			if (bpp != NULL) {
    174 				error = bread(vp, lbn, fs->fs_bsize, NOCRED,
    175 					      bpp);
    176 				if (error) {
    177 					brelse(*bpp);
    178 					return (error);
    179 				}
    180 			}
    181 			return (0);
    182 		}
    183 		if (nb != 0) {
    184 
    185 			/*
     186 			 * Consider the need to reallocate a fragment.
    187 			 */
    188 
    189 			osize = fragroundup(fs, blkoff(fs, ip->i_size));
    190 			nsize = fragroundup(fs, size);
    191 			if (nsize <= osize) {
    192 
    193 				/*
    194 				 * The existing block is already
    195 				 * at least as big as we want.
    196 				 * Just read the block (if requested).
    197 				 */
    198 
    199 				if (bpp != NULL) {
    200 					error = bread(vp, lbn, osize, NOCRED,
    201 						      bpp);
    202 					if (error) {
    203 						brelse(*bpp);
    204 						return (error);
    205 					}
    206 				}
    207 				return 0;
    208 			} else {
    209 
    210 				/*
    211 				 * The existing block is smaller than we want,
    212 				 * grow it.
    213 				 */
    214 
    215 				error = ffs_realloccg(ip, lbn,
    216 				    ffs_blkpref_ufs1(ip, lbn, (int)lbn,
    217 					&ip->i_ffs1_db[0]), osize, nsize, cred,
    218 					bpp, &newb);
    219 				if (error)
    220 					return (error);
    221 				if (DOINGSOFTDEP(vp))
    222 					softdep_setup_allocdirect(ip, lbn,
    223 					    newb, nb, nsize, osize,
    224 					    bpp ? *bpp : NULL);
    225 			}
    226 		} else {
    227 
    228 			/*
     229 			 * The block was not previously allocated;
     230 			 * allocate a new block or fragment.
    231 			 */
    232 
    233 			if (ip->i_size < lblktosize(fs, lbn + 1))
    234 				nsize = fragroundup(fs, size);
    235 			else
    236 				nsize = fs->fs_bsize;
    237 			error = ffs_alloc(ip, lbn,
    238 			    ffs_blkpref_ufs1(ip, lbn, (int)lbn,
    239 				&ip->i_ffs1_db[0]),
    240 				nsize, cred, &newb);
    241 			if (error)
    242 				return (error);
    243 			if (bpp != NULL) {
    244 				bp = getblk(vp, lbn, nsize, 0, 0);
    245 				bp->b_blkno = fsbtodb(fs, newb);
    246 				if (flags & B_CLRBUF)
    247 					clrbuf(bp);
    248 				*bpp = bp;
    249 			}
    250 			if (DOINGSOFTDEP(vp)) {
    251 				softdep_setup_allocdirect(ip, lbn, newb, 0,
    252 				    nsize, 0, bpp ? *bpp : NULL);
    253 			}
    254 		}
    255 		ip->i_ffs1_db[lbn] = ufs_rw32((u_int32_t)newb, needswap);
    256 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
    257 		return (0);
    258 	}
    259 
    260 	/*
    261 	 * Determine the number of levels of indirection.
    262 	 */
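         	/*
         	 * ufs_getlbns() fills indirs[] with the path from the inode to
         	 * lbn: indirs[0].in_off is the slot in the inode's indirect
         	 * pointer array, and each later entry gives the logical block
         	 * number and slot of one indirect block on the way down, the
         	 * last one holding the pointer to the data block itself.
         	 * After the "--num" below, num is the number of indirect
         	 * levels on that path.
         	 */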
    263 
    264 	pref = 0;
    265 	if ((error = ufs_getlbns(vp, lbn, indirs, &num)) != 0)
    266 		return (error);
    267 
    268 	/*
     269 	 * Fetch the first indirect block, allocating if necessary.
    270 	 */
    271 
    272 	--num;
    273 	nb = ufs_rw32(ip->i_ffs1_ib[indirs[0].in_off], needswap);
    274 	allocib = NULL;
    275 	allocblk = allociblk;
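         	/*
         	 * Every block allocated from here on is recorded in
         	 * allociblk[] (through allocblk), and allocib/unwindidx
         	 * remember which on-disk pointer was set first, so that
         	 * "fail:" below can undo a partial allocation.
         	 */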
    276 	if (nb == 0) {
    277 		pref = ffs_blkpref_ufs1(ip, lbn, 0, (int32_t *)0);
    278 		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred,
    279 		    &newb);
    280 		if (error)
    281 			goto fail;
    282 		nb = newb;
    283 		*allocblk++ = nb;
    284 		bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0);
    285 		bp->b_blkno = fsbtodb(fs, nb);
    286 		clrbuf(bp);
    287 		if (DOINGSOFTDEP(vp)) {
    288 			softdep_setup_allocdirect(ip, NDADDR + indirs[0].in_off,
    289 			    newb, 0, fs->fs_bsize, 0, bp);
    290 			bdwrite(bp);
    291 		} else {
    292 
    293 			/*
    294 			 * Write synchronously so that indirect blocks
    295 			 * never point at garbage.
    296 			 */
    297 
    298 			if ((error = bwrite(bp)) != 0)
    299 				goto fail;
    300 		}
    301 		unwindidx = 0;
    302 		allocib = &ip->i_ffs1_ib[indirs[0].in_off];
    303 		*allocib = ufs_rw32(nb, needswap);
    304 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
    305 	}
    306 
    307 	/*
    308 	 * Fetch through the indirect blocks, allocating as necessary.
    309 	 */
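         	/*
         	 * In the loop below, "bp" is the indirect block currently
         	 * being examined and "nb" the entry it holds for the next
         	 * level down; an entry of 0 means that block does not exist
         	 * yet and is allocated, zeroed and written here.  The loop
         	 * exits with bp holding the last-level indirect block and nb
         	 * the (possibly zero) data block address.
         	 */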
    310 
    311 	for (i = 1;;) {
    312 		error = bread(vp,
    313 		    indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
    314 		if (error) {
    315 			brelse(bp);
    316 			goto fail;
    317 		}
    318 		bap = (int32_t *)bp->b_data;	/* XXX ondisk32 */
    319 		nb = ufs_rw32(bap[indirs[i].in_off], needswap);
    320 		if (i == num)
    321 			break;
    322 		i++;
    323 		if (nb != 0) {
    324 			brelse(bp);
    325 			continue;
    326 		}
    327 		if (pref == 0)
    328 			pref = ffs_blkpref_ufs1(ip, lbn, 0, (int32_t *)0);
    329 		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred,
    330 		    &newb);
    331 		if (error) {
    332 			brelse(bp);
    333 			goto fail;
    334 		}
    335 		nb = newb;
    336 		*allocblk++ = nb;
    337 		nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0);
    338 		nbp->b_blkno = fsbtodb(fs, nb);
    339 		clrbuf(nbp);
    340 		if (DOINGSOFTDEP(vp)) {
    341 			softdep_setup_allocindir_meta(nbp, ip, bp,
    342 			    indirs[i - 1].in_off, nb);
    343 			bdwrite(nbp);
    344 		} else {
    345 
    346 			/*
    347 			 * Write synchronously so that indirect blocks
    348 			 * never point at garbage.
    349 			 */
    350 
    351 			if ((error = bwrite(nbp)) != 0) {
    352 				brelse(bp);
    353 				goto fail;
    354 			}
    355 		}
    356 		if (unwindidx < 0)
    357 			unwindidx = i - 1;
    358 		bap[indirs[i - 1].in_off] = ufs_rw32(nb, needswap);
    359 
    360 		/*
    361 		 * If required, write synchronously, otherwise use
    362 		 * delayed write.
    363 		 */
    364 
    365 		if (flags & B_SYNC) {
    366 			bwrite(bp);
    367 		} else {
    368 			bdwrite(bp);
    369 		}
    370 	}
    371 
    372 	if (flags & B_METAONLY) {
    373 		KASSERT(bpp != NULL);
    374 		*bpp = bp;
    375 		return (0);
    376 	}
    377 
    378 	/*
    379 	 * Get the data block, allocating if necessary.
    380 	 */
    381 
    382 	if (nb == 0) {
    383 		pref = ffs_blkpref_ufs1(ip, lbn, indirs[num].in_off, &bap[0]);
    384 		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred,
    385 		    &newb);
    386 		if (error) {
    387 			brelse(bp);
    388 			goto fail;
    389 		}
    390 		nb = newb;
    391 		*allocblk++ = nb;
    392 		if (bpp != NULL) {
    393 			nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
    394 			nbp->b_blkno = fsbtodb(fs, nb);
    395 			if (flags & B_CLRBUF)
    396 				clrbuf(nbp);
    397 			*bpp = nbp;
    398 		}
    399 		if (DOINGSOFTDEP(vp))
    400 			softdep_setup_allocindir_page(ip, lbn, bp,
    401 			    indirs[num].in_off, nb, 0, bpp ? *bpp : NULL);
    402 		bap[indirs[num].in_off] = ufs_rw32(nb, needswap);
    403 		if (allocib == NULL && unwindidx < 0) {
    404 			unwindidx = i - 1;
    405 		}
    406 
    407 		/*
    408 		 * If required, write synchronously, otherwise use
    409 		 * delayed write.
    410 		 */
    411 
    412 		if (flags & B_SYNC) {
    413 			bwrite(bp);
    414 		} else {
    415 			bdwrite(bp);
    416 		}
    417 		return (0);
    418 	}
    419 	brelse(bp);
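         	/*
         	 * The data block already exists.  With B_CLRBUF the caller
         	 * presumably overwrites only part of the block, so read its
         	 * current contents; otherwise hand back a zeroed buffer for
         	 * the caller to fill.
         	 */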
    420 	if (bpp != NULL) {
    421 		if (flags & B_CLRBUF) {
    422 			error = bread(vp, lbn, (int)fs->fs_bsize, NOCRED, &nbp);
    423 			if (error) {
    424 				brelse(nbp);
    425 				goto fail;
    426 			}
    427 		} else {
    428 			nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
    429 			nbp->b_blkno = fsbtodb(fs, nb);
    430 			clrbuf(nbp);
    431 		}
    432 		*bpp = nbp;
    433 	}
    434 	return (0);
    435 
    436 fail:
    437 	/*
    438 	 * If we have failed part way through block allocation, we
    439 	 * have to deallocate any indirect blocks that we have allocated.
    440 	 */
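         	/*
         	 * allociblk[] holds every block number handed out above so it
         	 * can be returned to ffs_blkfree(), and unwindidx is the
         	 * shallowest level whose on-disk pointer was set and must now
         	 * be cleared (0 meaning the pointer in the inode itself, via
         	 * allocib).
         	 */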
    441 
    442 	if (unwindidx >= 0) {
    443 
    444 		/*
    445 		 * First write out any buffers we've created to resolve their
    446 		 * softdeps.  This must be done in reverse order of creation
    447 		 * so that we resolve the dependencies in one pass.
    448 		 * Write the cylinder group buffers for these buffers too.
    449 		 */
    450 
    451 		for (i = num; i >= unwindidx; i--) {
    452 			if (i == 0) {
    453 				break;
    454 			}
    455 			bp = getblk(vp, indirs[i].in_lbn, (int)fs->fs_bsize, 0,
    456 			    0);
    457 			if (bp->b_flags & B_DELWRI) {
    458 				nb = fsbtodb(fs, cgtod(fs, dtog(fs,
    459 				    dbtofsb(fs, bp->b_blkno))));
    460 				bwrite(bp);
    461 				bp = getblk(ip->i_devvp, nb, (int)fs->fs_cgsize,
    462 				    0, 0);
    463 				if (bp->b_flags & B_DELWRI) {
    464 					bwrite(bp);
    465 				} else {
    466 					bp->b_flags |= B_INVAL;
    467 					brelse(bp);
    468 				}
    469 			} else {
    470 				bp->b_flags |= B_INVAL;
    471 				brelse(bp);
    472 			}
    473 		}
    474 		if (DOINGSOFTDEP(vp) && unwindidx == 0) {
    475 			ip->i_flag |= IN_CHANGE | IN_UPDATE;
    476 			ffs_update(vp, NULL, NULL, UPDATE_WAIT);
    477 		}
    478 
    479 		/*
    480 		 * Now that any dependencies that we created have been
    481 		 * resolved, we can undo the partial allocation.
    482 		 */
    483 
    484 		if (unwindidx == 0) {
    485 			*allocib = 0;
    486 			ip->i_flag |= IN_CHANGE | IN_UPDATE;
    487 			if (DOINGSOFTDEP(vp))
    488 				ffs_update(vp, NULL, NULL, UPDATE_WAIT);
    489 		} else {
    490 			int r;
    491 
    492 			r = bread(vp, indirs[unwindidx].in_lbn,
    493 			    (int)fs->fs_bsize, NOCRED, &bp);
    494 			if (r) {
    495 				panic("Could not unwind indirect block, error %d", r);
    496 				brelse(bp);
    497 			} else {
    498 				bap = (int32_t *)bp->b_data; /* XXX ondisk32 */
    499 				bap[indirs[unwindidx].in_off] = 0;
    500 				bwrite(bp);
    501 			}
    502 		}
    503 		for (i = unwindidx + 1; i <= num; i++) {
    504 			bp = getblk(vp, indirs[i].in_lbn, (int)fs->fs_bsize, 0,
    505 			    0);
    506 			bp->b_flags |= B_INVAL;
    507 			brelse(bp);
    508 		}
    509 	}
    510 	for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
    511 		ffs_blkfree(fs, ip->i_devvp, *blkp, fs->fs_bsize, ip->i_number);
    512 		deallocated += fs->fs_bsize;
    513 	}
    514 	if (deallocated) {
    515 #ifdef QUOTA
    516 		/*
    517 		 * Restore user's disk quota because allocation failed.
    518 		 */
    519 		(void)chkdq(ip, -btodb(deallocated), cred, FORCE);
    520 #endif
    521 		ip->i_ffs1_blocks -= btodb(deallocated);
    522 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
    523 	}
    524 	return (error);
    525 }
    526 
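         /*
          * The UFS2 variant below follows the same structure as
          * ffs_balloc_ufs1() above; the differences are the 64-bit on-disk
          * block pointers (int64_t entries, ufs_rw64()) and the i_ffs2_*
          * inode fields, plus the compiled-out extended-attribute code
          * under "#ifdef notyet".
          */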
    527 static int
    528 ffs_balloc_ufs2(struct vnode *vp, off_t off, int size, kauth_cred_t cred,
    529     int flags, struct buf **bpp)
    530 {
    531 	daddr_t lbn, lastlbn;
    532 	struct buf *bp, *nbp;
    533 	struct inode *ip = VTOI(vp);
    534 	struct fs *fs = ip->i_fs;
    535 	struct indir indirs[NIADDR + 2];
    536 	daddr_t newb, pref, nb;
    537 	int64_t *bap;
    538 	int deallocated, osize, nsize, num, i, error;
    539 	daddr_t *blkp, *allocblk, allociblk[NIADDR + 1];
    540 	int64_t *allocib;
    541 	int unwindidx = -1;
    542 #ifdef FFS_EI
    543 	const int needswap = UFS_FSNEEDSWAP(fs);
    544 #endif
    545 	UVMHIST_FUNC("ffs_balloc"); UVMHIST_CALLED(ubchist);
    546 
    547 	lbn = lblkno(fs, off);
    548 	size = blkoff(fs, off) + size;
    549 	if (size > fs->fs_bsize)
    550 		panic("ffs_balloc: blk too big");
    551 	if (bpp != NULL) {
    552 		*bpp = NULL;
    553 	}
     554 	UVMHIST_LOG(ubchist, "vp %p lbn 0x%x size 0x%x", vp, lbn, size, 0);
    555 
    556 	if (lbn < 0)
    557 		return (EFBIG);
    558 
    559 #ifdef notyet
    560 	/*
    561 	 * Check for allocating external data.
    562 	 */
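         	/*
         	 * (This IO_EXT branch is compiled out by the "#ifdef notyet"
         	 * above; it appears to be the FreeBSD extended-attribute
         	 * allocation path and refers to names such as dp->di_extb and
         	 * BA_CLRBUF that are not defined in this function.)
         	 */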
    563 	if (flags & IO_EXT) {
    564 		if (lbn >= NXADDR)
    565 			return (EFBIG);
    566 		/*
    567 		 * If the next write will extend the data into a new block,
     568 		 * and the data is currently composed of a fragment,
    569 		 * this fragment has to be extended to be a full block.
    570 		 */
    571 		lastlbn = lblkno(fs, dp->di_extsize);
    572 		if (lastlbn < lbn) {
    573 			nb = lastlbn;
    574 			osize = sblksize(fs, dp->di_extsize, nb);
    575 			if (osize < fs->fs_bsize && osize > 0) {
    576 				error = ffs_realloccg(ip, -1 - nb,
    577 				    dp->di_extb[nb],
    578 				    ffs_blkpref_ufs2(ip, lastlbn, (int)nb,
    579 				    &dp->di_extb[0]), osize,
    580 				    (int)fs->fs_bsize, cred, &bp);
    581 				if (error)
    582 					return (error);
    583 				if (DOINGSOFTDEP(vp))
    584 					softdep_setup_allocext(ip, nb,
    585 					    dbtofsb(fs, bp->b_blkno),
    586 					    dp->di_extb[nb],
    587 					    fs->fs_bsize, osize, bp);
    588 				dp->di_extsize = smalllblktosize(fs, nb + 1);
    589 				dp->di_extb[nb] = dbtofsb(fs, bp->b_blkno);
    590 				bp->b_xflags |= BX_ALTDATA;
    591 				ip->i_flag |= IN_CHANGE | IN_UPDATE;
    592 				if (flags & IO_SYNC)
    593 					bwrite(bp);
    594 				else
    595 					bawrite(bp);
    596 			}
    597 		}
    598 		/*
    599 		 * All blocks are direct blocks
    600 		 */
    601 		if (flags & BA_METAONLY)
    602 			panic("ffs_balloc_ufs2: BA_METAONLY for ext block");
    603 		nb = dp->di_extb[lbn];
    604 		if (nb != 0 && dp->di_extsize >= smalllblktosize(fs, lbn + 1)) {
    605 			error = bread(vp, -1 - lbn, fs->fs_bsize, NOCRED, &bp);
    606 			if (error) {
    607 				brelse(bp);
    608 				return (error);
    609 			}
    610 			bp->b_blkno = fsbtodb(fs, nb);
    611 			bp->b_xflags |= BX_ALTDATA;
    612 			*bpp = bp;
    613 			return (0);
    614 		}
    615 		if (nb != 0) {
    616 			/*
     617 			 * Consider the need to reallocate a fragment.
    618 			 */
    619 			osize = fragroundup(fs, blkoff(fs, dp->di_extsize));
    620 			nsize = fragroundup(fs, size);
    621 			if (nsize <= osize) {
    622 				error = bread(vp, -1 - lbn, osize, NOCRED, &bp);
    623 				if (error) {
    624 					brelse(bp);
    625 					return (error);
    626 				}
    627 				bp->b_blkno = fsbtodb(fs, nb);
    628 				bp->b_xflags |= BX_ALTDATA;
    629 			} else {
    630 				error = ffs_realloccg(ip, -1 - lbn,
    631 				    dp->di_extb[lbn],
    632 				    ffs_blkpref_ufs2(ip, lbn, (int)lbn,
    633 				    &dp->di_extb[0]), osize, nsize, cred, &bp);
    634 				if (error)
    635 					return (error);
    636 				bp->b_xflags |= BX_ALTDATA;
    637 				if (DOINGSOFTDEP(vp))
    638 					softdep_setup_allocext(ip, lbn,
    639 					    dbtofsb(fs, bp->b_blkno), nb,
    640 					    nsize, osize, bp);
    641 			}
    642 		} else {
    643 			if (dp->di_extsize < smalllblktosize(fs, lbn + 1))
    644 				nsize = fragroundup(fs, size);
    645 			else
    646 				nsize = fs->fs_bsize;
    647 			error = ffs_alloc(ip, lbn,
    648 			   ffs_blkpref_ufs2(ip, lbn, (int)lbn, &dp->di_extb[0]),
    649 			   nsize, cred, &newb);
    650 			if (error)
    651 				return (error);
    652 			bp = getblk(vp, -1 - lbn, nsize, 0, 0);
    653 			bp->b_blkno = fsbtodb(fs, newb);
    654 			bp->b_xflags |= BX_ALTDATA;
    655 			if (flags & BA_CLRBUF)
    656 				vfs_bio_clrbuf(bp);
    657 			if (DOINGSOFTDEP(vp))
    658 				softdep_setup_allocext(ip, lbn, newb, 0,
    659 				    nsize, 0, bp);
    660 		}
    661 		dp->di_extb[lbn] = dbtofsb(fs, bp->b_blkno);
    662 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
    663 		*bpp = bp;
    664 		return (0);
    665 	}
    666 #endif
    667 	/*
    668 	 * If the next write will extend the file into a new block,
     669 	 * and the file is currently composed of a fragment,
    670 	 * this fragment has to be extended to be a full block.
    671 	 */
    672 
    673 	lastlbn = lblkno(fs, ip->i_size);
    674 	if (lastlbn < NDADDR && lastlbn < lbn) {
    675 		nb = lastlbn;
    676 		osize = blksize(fs, ip, nb);
    677 		if (osize < fs->fs_bsize && osize > 0) {
    678 			error = ffs_realloccg(ip, nb,
    679 				    ffs_blkpref_ufs2(ip, lastlbn, nb,
    680 					&ip->i_ffs2_db[0]),
    681 				    osize, (int)fs->fs_bsize, cred, bpp, &newb);
    682 			if (error)
    683 				return (error);
    684 			if (DOINGSOFTDEP(vp))
    685 				softdep_setup_allocdirect(ip, nb, newb,
    686 				    ufs_rw64(ip->i_ffs2_db[nb], needswap),
    687 				    fs->fs_bsize, osize, bpp ? *bpp : NULL);
    688 			ip->i_size = lblktosize(fs, nb + 1);
    689 			ip->i_ffs2_size = ip->i_size;
    690 			uvm_vnp_setsize(vp, ip->i_size);
    691 			ip->i_ffs2_db[nb] = ufs_rw64(newb, needswap);
    692 			ip->i_flag |= IN_CHANGE | IN_UPDATE;
    693 			if (bpp) {
    694 				if (flags & B_SYNC)
    695 					bwrite(*bpp);
    696 				else
    697 					bawrite(*bpp);
    698 			}
    699 		}
    700 	}
    701 
    702 	/*
    703 	 * The first NDADDR blocks are direct blocks
    704 	 */
    705 
    706 	if (lbn < NDADDR) {
    707 		nb = ufs_rw64(ip->i_ffs2_db[lbn], needswap);
    708 		if (nb != 0 && ip->i_size >= lblktosize(fs, lbn + 1)) {
    709 
    710 			/*
    711 			 * The block is an already-allocated direct block
    712 			 * and the file already extends past this block,
    713 			 * thus this must be a whole block.
    714 			 * Just read the block (if requested).
    715 			 */
    716 
    717 			if (bpp != NULL) {
    718 				error = bread(vp, lbn, fs->fs_bsize, NOCRED,
    719 					      bpp);
    720 				if (error) {
    721 					brelse(*bpp);
    722 					return (error);
    723 				}
    724 			}
    725 			return (0);
    726 		}
    727 		if (nb != 0) {
    728 
    729 			/*
     730 			 * Consider the need to reallocate a fragment.
    731 			 */
    732 
    733 			osize = fragroundup(fs, blkoff(fs, ip->i_size));
    734 			nsize = fragroundup(fs, size);
    735 			if (nsize <= osize) {
    736 
    737 				/*
    738 				 * The existing block is already
    739 				 * at least as big as we want.
    740 				 * Just read the block (if requested).
    741 				 */
    742 
    743 				if (bpp != NULL) {
    744 					error = bread(vp, lbn, osize, NOCRED,
    745 						      bpp);
    746 					if (error) {
    747 						brelse(*bpp);
    748 						return (error);
    749 					}
    750 				}
    751 				return 0;
    752 			} else {
    753 
    754 				/*
    755 				 * The existing block is smaller than we want,
    756 				 * grow it.
    757 				 */
    758 
    759 				error = ffs_realloccg(ip, lbn,
    760 				    ffs_blkpref_ufs2(ip, lbn, (int)lbn,
    761 					&ip->i_ffs2_db[0]), osize, nsize, cred,
    762 					bpp, &newb);
    763 				if (error)
    764 					return (error);
    765 				if (DOINGSOFTDEP(vp))
    766 					softdep_setup_allocdirect(ip, lbn,
    767 					    newb, nb, nsize, osize,
    768 					    bpp ? *bpp : NULL);
    769 			}
    770 		} else {
    771 
    772 			/*
     773 			 * The block was not previously allocated;
     774 			 * allocate a new block or fragment.
    775 			 */
    776 
    777 			if (ip->i_size < lblktosize(fs, lbn + 1))
    778 				nsize = fragroundup(fs, size);
    779 			else
    780 				nsize = fs->fs_bsize;
    781 			error = ffs_alloc(ip, lbn,
    782 			    ffs_blkpref_ufs2(ip, lbn, (int)lbn,
    783 				&ip->i_ffs2_db[0]), nsize, cred, &newb);
    784 			if (error)
    785 				return (error);
    786 			if (bpp != NULL) {
    787 				bp = getblk(vp, lbn, nsize, 0, 0);
    788 				bp->b_blkno = fsbtodb(fs, newb);
    789 				if (flags & B_CLRBUF)
    790 					clrbuf(bp);
    791 				*bpp = bp;
    792 			}
    793 			if (DOINGSOFTDEP(vp)) {
    794 				softdep_setup_allocdirect(ip, lbn, newb, 0,
    795 				    nsize, 0, bpp ? *bpp : NULL);
    796 			}
    797 		}
    798 		ip->i_ffs2_db[lbn] = ufs_rw64(newb, needswap);
    799 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
    800 		return (0);
    801 	}
    802 
    803 	/*
    804 	 * Determine the number of levels of indirection.
    805 	 */
    806 
    807 	pref = 0;
    808 	if ((error = ufs_getlbns(vp, lbn, indirs, &num)) != 0)
    809 		return (error);
    810 
    811 	/*
     812 	 * Fetch the first indirect block, allocating if necessary.
    813 	 */
    814 
    815 	--num;
    816 	nb = ufs_rw64(ip->i_ffs2_ib[indirs[0].in_off], needswap);
    817 	allocib = NULL;
    818 	allocblk = allociblk;
    819 	if (nb == 0) {
    820 		pref = ffs_blkpref_ufs2(ip, lbn, 0, (int64_t *)0);
    821 		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred,
    822 		    &newb);
    823 		if (error)
    824 			goto fail;
    825 		nb = newb;
    826 		*allocblk++ = nb;
    827 		bp = getblk(vp, indirs[1].in_lbn, fs->fs_bsize, 0, 0);
    828 		bp->b_blkno = fsbtodb(fs, nb);
    829 		clrbuf(bp);
    830 		if (DOINGSOFTDEP(vp)) {
    831 			softdep_setup_allocdirect(ip, NDADDR + indirs[0].in_off,
    832 			    newb, 0, fs->fs_bsize, 0, bp);
    833 			bdwrite(bp);
    834 		} else {
    835 
    836 			/*
    837 			 * Write synchronously so that indirect blocks
    838 			 * never point at garbage.
    839 			 */
    840 
    841 			if ((error = bwrite(bp)) != 0)
    842 				goto fail;
    843 		}
    844 		unwindidx = 0;
    845 		allocib = &ip->i_ffs2_ib[indirs[0].in_off];
    846 		*allocib = ufs_rw64(nb, needswap);
    847 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
    848 	}
    849 
    850 	/*
    851 	 * Fetch through the indirect blocks, allocating as necessary.
    852 	 */
    853 
    854 	for (i = 1;;) {
    855 		error = bread(vp,
    856 		    indirs[i].in_lbn, (int)fs->fs_bsize, NOCRED, &bp);
    857 		if (error) {
    858 			brelse(bp);
    859 			goto fail;
    860 		}
    861 		bap = (int64_t *)bp->b_data;
    862 		nb = ufs_rw64(bap[indirs[i].in_off], needswap);
    863 		if (i == num)
    864 			break;
    865 		i++;
    866 		if (nb != 0) {
    867 			brelse(bp);
    868 			continue;
    869 		}
    870 		if (pref == 0)
    871 			pref = ffs_blkpref_ufs2(ip, lbn, 0, (int64_t *)0);
    872 		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred,
    873 		    &newb);
    874 		if (error) {
    875 			brelse(bp);
    876 			goto fail;
    877 		}
    878 		nb = newb;
    879 		*allocblk++ = nb;
    880 		nbp = getblk(vp, indirs[i].in_lbn, fs->fs_bsize, 0, 0);
    881 		nbp->b_blkno = fsbtodb(fs, nb);
    882 		clrbuf(nbp);
    883 		if (DOINGSOFTDEP(vp)) {
    884 			softdep_setup_allocindir_meta(nbp, ip, bp,
    885 			    indirs[i - 1].in_off, nb);
    886 			bdwrite(nbp);
    887 		} else {
    888 
    889 			/*
    890 			 * Write synchronously so that indirect blocks
    891 			 * never point at garbage.
    892 			 */
    893 
    894 			if ((error = bwrite(nbp)) != 0) {
    895 				brelse(bp);
    896 				goto fail;
    897 			}
    898 		}
    899 		if (unwindidx < 0)
    900 			unwindidx = i - 1;
    901 		bap[indirs[i - 1].in_off] = ufs_rw64(nb, needswap);
    902 
    903 		/*
    904 		 * If required, write synchronously, otherwise use
    905 		 * delayed write.
    906 		 */
    907 
    908 		if (flags & B_SYNC) {
    909 			bwrite(bp);
    910 		} else {
    911 			bdwrite(bp);
    912 		}
    913 	}
    914 
    915 	if (flags & B_METAONLY) {
    916 		KASSERT(bpp != NULL);
    917 		*bpp = bp;
    918 		return (0);
    919 	}
    920 
    921 	/*
    922 	 * Get the data block, allocating if necessary.
    923 	 */
    924 
    925 	if (nb == 0) {
    926 		pref = ffs_blkpref_ufs2(ip, lbn, indirs[num].in_off, &bap[0]);
    927 		error = ffs_alloc(ip, lbn, pref, (int)fs->fs_bsize, cred,
    928 		    &newb);
    929 		if (error) {
    930 			brelse(bp);
    931 			goto fail;
    932 		}
    933 		nb = newb;
    934 		*allocblk++ = nb;
    935 		if (bpp != NULL) {
    936 			nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
    937 			nbp->b_blkno = fsbtodb(fs, nb);
    938 			if (flags & B_CLRBUF)
    939 				clrbuf(nbp);
    940 			*bpp = nbp;
    941 		}
    942 		if (DOINGSOFTDEP(vp))
    943 			softdep_setup_allocindir_page(ip, lbn, bp,
    944 			    indirs[num].in_off, nb, 0, bpp ? *bpp : NULL);
    945 		bap[indirs[num].in_off] = ufs_rw64(nb, needswap);
    946 		if (allocib == NULL && unwindidx < 0) {
    947 			unwindidx = i - 1;
    948 		}
    949 
    950 		/*
    951 		 * If required, write synchronously, otherwise use
    952 		 * delayed write.
    953 		 */
    954 
    955 		if (flags & B_SYNC) {
    956 			bwrite(bp);
    957 		} else {
    958 			bdwrite(bp);
    959 		}
    960 		return (0);
    961 	}
    962 	brelse(bp);
    963 	if (bpp != NULL) {
    964 		if (flags & B_CLRBUF) {
    965 			error = bread(vp, lbn, (int)fs->fs_bsize, NOCRED, &nbp);
    966 			if (error) {
    967 				brelse(nbp);
    968 				goto fail;
    969 			}
    970 		} else {
    971 			nbp = getblk(vp, lbn, fs->fs_bsize, 0, 0);
    972 			nbp->b_blkno = fsbtodb(fs, nb);
    973 			clrbuf(nbp);
    974 		}
    975 		*bpp = nbp;
    976 	}
    977 	return (0);
    978 
    979 fail:
    980 	/*
    981 	 * If we have failed part way through block allocation, we
    982 	 * have to deallocate any indirect blocks that we have allocated.
    983 	 */
    984 
    985 	if (unwindidx >= 0) {
    986 
    987 		/*
    988 		 * First write out any buffers we've created to resolve their
    989 		 * softdeps.  This must be done in reverse order of creation
    990 		 * so that we resolve the dependencies in one pass.
    991 		 * Write the cylinder group buffers for these buffers too.
    992 		 */
    993 
    994 		for (i = num; i >= unwindidx; i--) {
    995 			if (i == 0) {
    996 				break;
    997 			}
    998 			bp = getblk(vp, indirs[i].in_lbn, (int)fs->fs_bsize, 0,
    999 			    0);
   1000 			if (bp->b_flags & B_DELWRI) {
   1001 				nb = fsbtodb(fs, cgtod(fs, dtog(fs,
   1002 				    dbtofsb(fs, bp->b_blkno))));
   1003 				bwrite(bp);
   1004 				bp = getblk(ip->i_devvp, nb, (int)fs->fs_cgsize,
   1005 				    0, 0);
   1006 				if (bp->b_flags & B_DELWRI) {
   1007 					bwrite(bp);
   1008 				} else {
   1009 					bp->b_flags |= B_INVAL;
   1010 					brelse(bp);
   1011 				}
   1012 			} else {
   1013 				bp->b_flags |= B_INVAL;
   1014 				brelse(bp);
   1015 			}
   1016 		}
   1017 		if (DOINGSOFTDEP(vp) && unwindidx == 0) {
   1018 			ip->i_flag |= IN_CHANGE | IN_UPDATE;
   1019 			ffs_update(vp, NULL, NULL, UPDATE_WAIT);
   1020 		}
   1021 
   1022 		/*
   1023 		 * Now that any dependencies that we created have been
   1024 		 * resolved, we can undo the partial allocation.
   1025 		 */
   1026 
   1027 		if (unwindidx == 0) {
   1028 			*allocib = 0;
   1029 			ip->i_flag |= IN_CHANGE | IN_UPDATE;
   1030 			if (DOINGSOFTDEP(vp))
   1031 				ffs_update(vp, NULL, NULL, UPDATE_WAIT);
   1032 		} else {
   1033 			int r;
   1034 
   1035 			r = bread(vp, indirs[unwindidx].in_lbn,
   1036 			    (int)fs->fs_bsize, NOCRED, &bp);
   1037 			if (r) {
   1038 				panic("Could not unwind indirect block, error %d", r);
   1039 				brelse(bp);
   1040 			} else {
   1041 				bap = (int64_t *)bp->b_data;
   1042 				bap[indirs[unwindidx].in_off] = 0;
   1043 				bwrite(bp);
   1044 			}
   1045 		}
   1046 		for (i = unwindidx + 1; i <= num; i++) {
   1047 			bp = getblk(vp, indirs[i].in_lbn, (int)fs->fs_bsize, 0,
   1048 			    0);
   1049 			bp->b_flags |= B_INVAL;
   1050 			brelse(bp);
   1051 		}
   1052 	}
   1053 	for (deallocated = 0, blkp = allociblk; blkp < allocblk; blkp++) {
   1054 		ffs_blkfree(fs, ip->i_devvp, *blkp, fs->fs_bsize, ip->i_number);
   1055 		deallocated += fs->fs_bsize;
   1056 	}
   1057 	if (deallocated) {
   1058 #ifdef QUOTA
   1059 		/*
   1060 		 * Restore user's disk quota because allocation failed.
   1061 		 */
   1062 		(void)chkdq(ip, -btodb(deallocated), cred, FORCE);
   1063 #endif
   1064 		ip->i_ffs2_blocks -= btodb(deallocated);
   1065 		ip->i_flag |= IN_CHANGE | IN_UPDATE;
   1066 	}
   1067 	return (error);
   1068 }
   1069