/*	$NetBSD: lfs_segment.c,v 1.163 2005/04/23 19:47:51 perseant Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant (at) hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_segment.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_segment.c,v 1.163 2005/04/23 19:47:51 perseant Exp $");

#ifdef DEBUG
# define vndebug(vp, str) do {						\
	if (VTOI(vp)->i_flag & IN_CLEANING)				\
		DLOG((DLOG_WVNODE, "not writing ino %d because %s (op %d)\n", \
		     VTOI(vp)->i_number, (str), op));			\
} while(0)
#else
# define vndebug(vp, str)
#endif
#define ivndebug(vp, str) \
	DLOG((DLOG_WVNODE, "ino %d: %s\n", VTOI(vp)->i_number, (str)))

#if defined(_KERNEL_OPT)
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

MALLOC_DEFINE(M_SEGMENT, "LFS segment", "Segment for LFS");

extern int count_lock_queue(void);
extern struct simplelock vnode_free_list_slock;		/* XXX */
extern struct simplelock bqueue_slock;			/* XXX */

static void lfs_generic_callback(struct buf *, void (*)(struct buf *));
static void lfs_super_aiodone(struct buf *);
static void lfs_cluster_aiodone(struct buf *);
static void lfs_cluster_callback(struct buf *);

/*
 * Determine if it's OK to start a partial in this segment, or if we need
 * to go on to a new segment.
 */
#define	LFS_PARTIAL_FITS(fs) \
	((fs)->lfs_fsbpseg - ((fs)->lfs_offset - (fs)->lfs_curseg) > \
	fragstofsb((fs), (fs)->lfs_frag))
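
/*
 * Illustrative reading only (hypothetical numbers, not from any real
 * filesystem): with lfs_fsbpseg = 128 fsb per segment and
 * lfs_offset - lfs_curseg = 120 fsb already consumed, the remaining
 * 128 - 120 = 8 fsb is not greater than a full block's
 * fragstofsb() of, say, 8 fsb, so LFS_PARTIAL_FITS() is false and
 * lfs_initseg() advances to a fresh segment rather than starting
 * another partial here.
 */
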
int	 lfs_match_fake(struct lfs *, struct buf *);
void	 lfs_newseg(struct lfs *);
/* XXX ondisk32 */
void	 lfs_shellsort(struct buf **, int32_t *, int, int);
void	 lfs_supercallback(struct buf *);
void	 lfs_updatemeta(struct segment *);
void	 lfs_writesuper(struct lfs *, daddr_t);
int	 lfs_writevnodes(struct lfs *fs, struct mount *mp,
	    struct segment *sp, int dirops);

int	lfs_allclean_wakeup;		/* Cleaner wakeup address. */
int	lfs_writeindir = 1;		/* whether to flush indir on non-ckp */
int	lfs_clean_vnhead = 0;		/* Allow freeing to head of vn list */
int	lfs_dirvcount = 0;		/* # active dirops */

/* Statistics Counters */
int lfs_dostats = 1;
struct lfs_stats lfs_stats;

/* op values to lfs_writevnodes */
#define	VN_REG		0
#define	VN_DIROP	1
#define	VN_EMPTY	2
#define VN_CLEAN	3
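
/*
 * As lfs_writevnodes() below applies them: VN_REG writes regular
 * (non-dirop) vnodes, VN_DIROP only vnodes flagged VDIROP, VN_EMPTY
 * only vnodes with no dirty buffers, and VN_CLEAN only vnodes being
 * cleaned (IN_CLEANING set).
 */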

/*
 * XXX KS - Set modification time on the Ifile, so the cleaner can
 * read the fs mod time from it.  We don't set IN_UPDATE here,
 * since we don't really need this to be flushed to disk (and in any
 * case that wouldn't happen to the Ifile until we checkpoint).
 */
void
lfs_imtime(struct lfs *fs)
{
	struct timespec ts;
	struct inode *ip;

	ASSERT_MAYBE_SEGLOCK(fs);
	TIMEVAL_TO_TIMESPEC(&time, &ts);
	ip = VTOI(fs->lfs_ivnode);
	ip->i_ffs1_mtime = ts.tv_sec;
	ip->i_ffs1_mtimensec = ts.tv_nsec;
}

/*
 * Ifile and meta data blocks are not marked busy, so segment writes MUST be
 * single threaded.  Currently, there are two paths into lfs_segwrite, sync()
 * and getnewbuf().  They both mark the file system busy.  Lfs_vflush()
 * explicitly marks the file system busy.  So lfs_segwrite is safe.  I think.
 */

#define SET_FLUSHING(fs,vp) (fs)->lfs_flushvp = (vp)
#define IS_FLUSHING(fs,vp)  ((fs)->lfs_flushvp == (vp))
#define CLR_FLUSHING(fs,vp) (fs)->lfs_flushvp = NULL
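
/*
 * Sketch of the flushing protocol as used by lfs_vflush() below:
 * SET_FLUSHING(fs, vp) is called before the vnode's blocks are
 * written and CLR_FLUSHING(fs, vp) afterwards; lfs_writefile()
 * consults IS_FLUSHING() to decide whether *all* of the vnode's
 * blocks, indirect blocks included, must go out now.
 */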

int
lfs_vflush(struct vnode *vp)
{
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	struct buf *bp, *nbp, *tbp, *tnbp;
	int error, s;
	int flushed;
#if 0
	int redo;
#endif

	ip = VTOI(vp);
	fs = VFSTOUFS(vp->v_mount)->um_lfs;

	ASSERT_NO_SEGLOCK(fs);
	if (ip->i_flag & IN_CLEANING) {
		ivndebug(vp,"vflush/in_cleaning");
		LFS_CLR_UINO(ip, IN_CLEANING);
		LFS_SET_UINO(ip, IN_MODIFIED);

		/*
		 * Toss any cleaning buffers that have real counterparts
		 * to avoid losing new data.
		 */
		s = splbio();
		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (!LFS_IS_MALLOC_BUF(bp))
				continue;
			/*
			 * Look for pages matching the range covered
			 * by cleaning blocks.  It's okay if more dirty
			 * pages appear, so long as none disappear out
			 * from under us.
			 */
			if (bp->b_lblkno > 0 && vp->v_type == VREG &&
			    vp != fs->lfs_ivnode) {
				struct vm_page *pg;
				voff_t off;

				simple_lock(&vp->v_interlock);
				for (off = lblktosize(fs, bp->b_lblkno);
				     off < lblktosize(fs, bp->b_lblkno + 1);
				     off += PAGE_SIZE) {
					pg = uvm_pagelookup(&vp->v_uobj, off);
					if (pg == NULL)
						continue;
					if ((pg->flags & PG_CLEAN) == 0 ||
					    pmap_is_modified(pg)) {
						fs->lfs_avail += btofsb(fs,
							bp->b_bcount);
						wakeup(&fs->lfs_avail);
						lfs_freebuf(fs, bp);
						bp = NULL;
						goto nextbp;
					}
				}
				simple_unlock(&vp->v_interlock);
			}
			for (tbp = LIST_FIRST(&vp->v_dirtyblkhd); tbp;
			    tbp = tnbp)
			{
				tnbp = LIST_NEXT(tbp, b_vnbufs);
				if (tbp->b_vp == bp->b_vp
				   && tbp->b_lblkno == bp->b_lblkno
				   && tbp != bp)
				{
					fs->lfs_avail += btofsb(fs,
						bp->b_bcount);
					wakeup(&fs->lfs_avail);
					lfs_freebuf(fs, bp);
					bp = NULL;
					break;
				}
			}
		    nextbp:
			;
		}
		splx(s);
	}

	/* If the node is being written, wait until that is done */
	simple_lock(&vp->v_interlock);
	s = splbio();
	if (WRITEINPROG(vp)) {
		ivndebug(vp,"vflush/writeinprog");
		ltsleep(vp, (PRIBIO+1), "lfs_vw", 0, &vp->v_interlock);
	}
	splx(s);
	simple_unlock(&vp->v_interlock);

	/* Protect against VXLOCK deadlock in vinvalbuf() */
	lfs_seglock(fs, SEGM_SYNC);

	/* If we're supposed to flush a freed inode, just toss it */
	/* XXX - seglock, so these buffers can't be gathered, right? */
	if (ip->i_mode == 0) {
		DLOG((DLOG_VNODE, "lfs_vflush: ino %d freed, not flushing\n",
		      ip->i_number));
		s = splbio();
		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (bp->b_flags & B_DELWRI) { /* XXX always true? */
				fs->lfs_avail += btofsb(fs, bp->b_bcount);
				wakeup(&fs->lfs_avail);
			}
			/* Copied from lfs_writeseg */
			if (bp->b_flags & B_CALL) {
				biodone(bp);
			} else {
				bremfree(bp);
				LFS_UNLOCK_BUF(bp);
				bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
					 B_GATHERED);
				bp->b_flags |= B_DONE;
				reassignbuf(bp, vp);
				brelse(bp);
			}
		}
		splx(s);
		LFS_CLR_UINO(ip, IN_CLEANING);
		LFS_CLR_UINO(ip, IN_MODIFIED | IN_ACCESSED);
		ip->i_flag &= ~IN_ALLMOD;
		DLOG((DLOG_VNODE, "lfs_vflush: done not flushing ino %d\n",
		      ip->i_number));
		lfs_segunlock(fs);
		return 0;
	}

	SET_FLUSHING(fs,vp);
	if (fs->lfs_nactive > LFS_MAX_ACTIVE ||
	    (fs->lfs_sp->seg_flags & SEGM_CKP)) {
		error = lfs_segwrite(vp->v_mount, SEGM_CKP | SEGM_SYNC);
		CLR_FLUSHING(fs,vp);
		lfs_segunlock(fs);
		return error;
	}
	sp = fs->lfs_sp;

	flushed = 0;
	if (VPISEMPTY(vp)) {
		lfs_writevnodes(fs, vp->v_mount, sp, VN_EMPTY);
		++flushed;
	} else if ((ip->i_flag & IN_CLEANING) &&
		  (fs->lfs_sp->seg_flags & SEGM_CLEAN)) {
		ivndebug(vp,"vflush/clean");
		lfs_writevnodes(fs, vp->v_mount, sp, VN_CLEAN);
		++flushed;
	} else if (lfs_dostats) {
		if (!VPISEMPTY(vp) || (VTOI(vp)->i_flag & IN_ALLMOD))
			++lfs_stats.vflush_invoked;
		ivndebug(vp,"vflush");
	}

#ifdef DIAGNOSTIC
	if (vp->v_flag & VDIROP) {
		DLOG((DLOG_VNODE, "lfs_vflush: flushing VDIROP\n"));
		/* panic("lfs_vflush: VDIROP being flushed...this can\'t happen"); */
	}
	if (vp->v_usecount < 0) {
		printf("usecount=%ld\n", (long)vp->v_usecount);
		panic("lfs_vflush: usecount<0");
	}
#endif

#if 1
	do {
		do {
			if (LIST_FIRST(&vp->v_dirtyblkhd) != NULL)
				lfs_writefile(fs, sp, vp);
		} while (lfs_writeinode(fs, sp, ip));
	} while (lfs_writeseg(fs, sp) && ip->i_number == LFS_IFILE_INUM);
#else
	if (flushed && vp != fs->lfs_ivnode)
		lfs_writeseg(fs, sp);
	else do {
		simple_lock(&fs->lfs_interlock);
		fs->lfs_flags &= ~LFS_IFDIRTY;
		simple_unlock(&fs->lfs_interlock);
		lfs_writefile(fs, sp, vp);
		redo = lfs_writeinode(fs, sp, ip);
		redo += lfs_writeseg(fs, sp);
		simple_lock(&fs->lfs_interlock);
		redo += (fs->lfs_flags & LFS_IFDIRTY);
		simple_unlock(&fs->lfs_interlock);
	} while (redo && vp == fs->lfs_ivnode);
#endif
	if (lfs_dostats) {
		++lfs_stats.nwrites;
		if (sp->seg_flags & SEGM_SYNC)
			++lfs_stats.nsync_writes;
		if (sp->seg_flags & SEGM_CKP)
			++lfs_stats.ncheckpoints;
	}
	/*
	 * If we were called from a caller that already holds the seglock
	 * (e.g., lfs_markv()), lfs_segunlock() will not wait for
	 * the write to complete, because we are still locked.
	 * Since lfs_vflush() must return the vnode with no dirty buffers,
	 * in that case we must wait explicitly here.
	 *
	 * We compare the iocount against 1, not 0, because it is
	 * artificially incremented by lfs_seglock().
	 */
	simple_lock(&fs->lfs_interlock);
	if (fs->lfs_seglock > 1) {
		while (fs->lfs_iocount > 1)
			(void)ltsleep(&fs->lfs_iocount, PRIBIO + 1,
				     "lfs_vflush", 0, &fs->lfs_interlock);
	}
	simple_unlock(&fs->lfs_interlock);

	lfs_segunlock(fs);

	/* Wait for these buffers to be recovered by aiodoned */
	s = splbio();
	simple_lock(&global_v_numoutput_slock);
	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		ltsleep(&vp->v_numoutput, PRIBIO + 1, "lfs_vf2", 0,
			&global_v_numoutput_slock);
	}
	simple_unlock(&global_v_numoutput_slock);
	splx(s);

	CLR_FLUSHING(fs,vp);
	return (0);
}

int
lfs_writevnodes(struct lfs *fs, struct mount *mp, struct segment *sp, int op)
{
	struct inode *ip;
	struct vnode *vp, *nvp;
	int inodes_written = 0, only_cleaning;

	ASSERT_SEGLOCK(fs);
#ifndef LFS_NO_BACKVP_HACK
	/* BEGIN HACK */
#define	VN_OFFSET	\
	(((caddr_t)&LIST_NEXT(vp, v_mntvnodes)) - (caddr_t)vp)
#define	BACK_VP(VP)	\
	((struct vnode *)(((caddr_t)(VP)->v_mntvnodes.le_prev) - VN_OFFSET))
#define	BEG_OF_VLIST	\
	((struct vnode *)(((caddr_t)&LIST_FIRST(&mp->mnt_vnodelist)) \
	- VN_OFFSET))
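
	/*
	 * BACK_VP recovers the previous element of the list: le_prev
	 * points at the previous element's le_next field (or at the
	 * list head's first-element pointer), so subtracting VN_OFFSET,
	 * the offset of the v_mntvnodes link within struct vnode, turns
	 * that field pointer back into a vnode pointer.  This is what
	 * lets the loop below walk the mount's vnode list backwards.
	 */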

	/* Find last vnode. */
 loop:	for (vp = LIST_FIRST(&mp->mnt_vnodelist);
	     vp && LIST_NEXT(vp, v_mntvnodes) != NULL;
	     vp = LIST_NEXT(vp, v_mntvnodes));
	for (; vp && vp != BEG_OF_VLIST; vp = nvp) {
		nvp = BACK_VP(vp);
#else
	loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
		nvp = LIST_NEXT(vp, v_mntvnodes);
#endif
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp) {
			DLOG((DLOG_VNODE, "lfs_writevnodes: starting over\n"));
			/*
			 * After this, pages might be busy
			 * due to our own previous putpages.
			 * Start actual segment write here to avoid deadlock.
			 */
			(void)lfs_writeseg(fs, sp);
			goto loop;
		}

		if (vp->v_type == VNON) {
			continue;
		}

		ip = VTOI(vp);
		if ((op == VN_DIROP && !(vp->v_flag & VDIROP)) ||
		    (op != VN_DIROP && op != VN_CLEAN &&
		    (vp->v_flag & VDIROP))) {
			vndebug(vp,"dirop");
			continue;
		}

		if (op == VN_EMPTY && !VPISEMPTY(vp)) {
			vndebug(vp,"empty");
			continue;
		}

		if (op == VN_CLEAN && ip->i_number != LFS_IFILE_INUM
		   && vp != fs->lfs_flushvp
		   && !(ip->i_flag & IN_CLEANING)) {
			vndebug(vp,"cleaning");
			continue;
		}

		if (lfs_vref(vp)) {
			vndebug(vp,"vref");
			continue;
		}

		only_cleaning = 0;
		/*
		 * Write the inode/file if dirty and it's not the IFILE.
		 */
		if ((ip->i_flag & IN_ALLMOD) || !VPISEMPTY(vp)) {
			only_cleaning =
			    ((ip->i_flag & IN_ALLMOD) == IN_CLEANING);

			if (ip->i_number != LFS_IFILE_INUM) {
				lfs_writefile(fs, sp, vp);
				if (!VPISEMPTY(vp)) {
					if (WRITEINPROG(vp)) {
						ivndebug(vp,"writevnodes/write2");
					} else if (!(ip->i_flag & IN_ALLMOD)) {
						LFS_SET_UINO(ip, IN_MODIFIED);
					}
				}
				(void) lfs_writeinode(fs, sp, ip);
				inodes_written++;
			}
		}

		if (lfs_clean_vnhead && only_cleaning)
			lfs_vunref_head(vp);
		else
			lfs_vunref(vp);
	}
	return inodes_written;
}

/*
 * Do a checkpoint.
 */
int
lfs_segwrite(struct mount *mp, int flags)
{
	struct buf *bp;
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	struct vnode *vp;
	SEGUSE *segusep;
	int do_ckp, did_ckp, error, s;
	unsigned n, segleft, maxseg, sn, i, curseg;
	int writer_set = 0;
	int dirty;
	int redo;

	fs = VFSTOUFS(mp)->um_lfs;
	ASSERT_MAYBE_SEGLOCK(fs);

	if (fs->lfs_ronly)
		return EROFS;

	lfs_imtime(fs);

	/*
	 * Allocate a segment structure and enough space to hold pointers to
	 * the maximum possible number of buffers which can be described in a
	 * single summary block.
	 */
	do_ckp = (flags & SEGM_CKP) || fs->lfs_nactive > LFS_MAX_ACTIVE;
	lfs_seglock(fs, flags | (do_ckp ? SEGM_CKP : 0));
	sp = fs->lfs_sp;

	/*
	 * If lfs_flushvp is non-NULL, we are called from lfs_vflush,
	 * in which case we have to flush *all* buffers off of this vnode.
	 * We don't care about other nodes, but write any non-dirop nodes
	 * anyway in anticipation of another getnewvnode().
	 *
	 * If we're cleaning we only write cleaning and ifile blocks, and
	 * no dirops, since otherwise we'd risk corruption in a crash.
	 */
	if (sp->seg_flags & SEGM_CLEAN)
		lfs_writevnodes(fs, mp, sp, VN_CLEAN);
	else if (!(sp->seg_flags & SEGM_FORCE_CKP)) {
		lfs_writevnodes(fs, mp, sp, VN_REG);
		if (!fs->lfs_dirops || !fs->lfs_flushvp) {
			error = lfs_writer_enter(fs, "lfs writer");
			if (error) {
				DLOG((DLOG_SEG, "segwrite mysterious error\n"));
				/* XXX why not segunlock? */
				pool_put(&fs->lfs_bpppool, sp->bpp);
				sp->bpp = NULL;
				pool_put(&fs->lfs_segpool, sp);
				sp = fs->lfs_sp = NULL;
				return (error);
			}
			writer_set = 1;
			lfs_writevnodes(fs, mp, sp, VN_DIROP);
			((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
		}
	}

	/*
	 * If we are doing a checkpoint, mark everything since the
	 * last checkpoint as no longer ACTIVE.
	 */
	if (do_ckp) {
		segleft = fs->lfs_nseg;
		curseg = 0;
		for (n = 0; n < fs->lfs_segtabsz; n++) {
			dirty = 0;
			if (bread(fs->lfs_ivnode,
			    fs->lfs_cleansz + n, fs->lfs_bsize, NOCRED, &bp))
				panic("lfs_segwrite: ifile read");
			segusep = (SEGUSE *)bp->b_data;
			maxseg = min(segleft, fs->lfs_sepb);
			for (i = 0; i < maxseg; i++) {
				sn = curseg + i;
				if (sn != dtosn(fs, fs->lfs_curseg) &&
				    segusep->su_flags & SEGUSE_ACTIVE) {
					segusep->su_flags &= ~SEGUSE_ACTIVE;
					--fs->lfs_nactive;
					++dirty;
				}
				fs->lfs_suflags[fs->lfs_activesb][sn] =
					segusep->su_flags;
				if (fs->lfs_version > 1)
					++segusep;
				else
					segusep = (SEGUSE *)
						((SEGUSE_V1 *)segusep + 1);
			}

			if (dirty)
				error = LFS_BWRITE_LOG(bp); /* Ifile */
			else
				brelse(bp);
			segleft -= fs->lfs_sepb;
			curseg += fs->lfs_sepb;
		}
	}

	LOCK_ASSERT(LFS_SEGLOCK_HELD(fs));

	did_ckp = 0;
	if (do_ckp || fs->lfs_doifile) {
		vp = fs->lfs_ivnode;
		vn_lock(vp, LK_EXCLUSIVE);
		do {
#ifdef DEBUG
			LFS_ENTER_LOG("pretend", __FILE__, __LINE__, 0, 0, curproc->p_pid);
#endif
			simple_lock(&fs->lfs_interlock);
			fs->lfs_flags &= ~LFS_IFDIRTY;
			simple_unlock(&fs->lfs_interlock);

			ip = VTOI(vp);

			if (LIST_FIRST(&vp->v_dirtyblkhd) != NULL)
				lfs_writefile(fs, sp, vp);

			if (ip->i_flag & IN_ALLMOD)
				++did_ckp;
			redo = lfs_writeinode(fs, sp, ip);
			redo += lfs_writeseg(fs, sp);
			simple_lock(&fs->lfs_interlock);
			redo += (fs->lfs_flags & LFS_IFDIRTY);
			simple_unlock(&fs->lfs_interlock);
		} while (redo && do_ckp);

		/*
		 * Unless we are unmounting, the Ifile may continue to have
		 * dirty blocks even after a checkpoint, due to changes to
		 * inodes' atime.  If we're checkpointing, it's "impossible"
		 * for other parts of the Ifile to be dirty after the loop
		 * above, since we hold the segment lock.
		 */
		s = splbio();
		if (LIST_EMPTY(&vp->v_dirtyblkhd)) {
			LFS_CLR_UINO(ip, IN_ALLMOD);
		}
#ifdef DIAGNOSTIC
		else if (do_ckp) {
			int do_panic = 0;
			LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
				if (bp->b_lblkno < fs->lfs_cleansz +
				    fs->lfs_segtabsz &&
				    !(bp->b_flags & B_GATHERED)) {
					printf("ifile lbn %ld still dirty (flags %lx)\n",
						(long)bp->b_lblkno,
						(long)bp->b_flags);
					++do_panic;
				}
			}
			if (do_panic)
				panic("dirty blocks");
		}
#endif
		splx(s);
		VOP_UNLOCK(vp, 0);
	} else {
		(void) lfs_writeseg(fs, sp);
	}

	/* Note Ifile no longer needs to be written */
	fs->lfs_doifile = 0;
	if (writer_set)
		lfs_writer_leave(fs);

	/*
	 * If we didn't write the Ifile, we didn't really do anything.
	 * That means that (1) there is a checkpoint on disk and (2)
	 * nothing has changed since it was written.
	 *
	 * Take the flags off of the segment so that lfs_segunlock
	 * doesn't have to write the superblock either.
	 */
	if (do_ckp && !did_ckp) {
		sp->seg_flags &= ~SEGM_CKP;
	}

	if (lfs_dostats) {
		++lfs_stats.nwrites;
		if (sp->seg_flags & SEGM_SYNC)
			++lfs_stats.nsync_writes;
		if (sp->seg_flags & SEGM_CKP)
			++lfs_stats.ncheckpoints;
	}
	lfs_segunlock(fs);
	return (0);
}

/*
 * Write the dirty blocks associated with a vnode.
 */
void
lfs_writefile(struct lfs *fs, struct segment *sp, struct vnode *vp)
{
	struct buf *bp;
	struct finfo *fip;
	struct inode *ip;
	IFILE *ifp;
	int i, frag;

	ASSERT_SEGLOCK(fs);
	ip = VTOI(vp);

	if (sp->seg_bytes_left < fs->lfs_bsize ||
	    sp->sum_bytes_left < sizeof(struct finfo))
		(void) lfs_writeseg(fs, sp);

	sp->sum_bytes_left -= FINFOSIZE;
	++((SEGSUM *)(sp->segsum))->ss_nfinfo;

	if (vp->v_flag & VDIROP)
		((SEGSUM *)(sp->segsum))->ss_flags |= (SS_DIROP|SS_CONT);

	fip = sp->fip;
	fip->fi_nblocks = 0;
	fip->fi_ino = ip->i_number;
	LFS_IENTRY(ifp, fs, fip->fi_ino, bp);
	fip->fi_version = ifp->if_version;
	brelse(bp);

	if (sp->seg_flags & SEGM_CLEAN) {
		lfs_gather(fs, sp, vp, lfs_match_fake);
		/*
		 * For a file being flushed, we need to write *all* blocks.
		 * This means writing the cleaning blocks first, and then
		 * immediately following with any non-cleaning blocks.
		 * The same is true of the Ifile since checkpoints assume
		 * that all valid Ifile blocks are written.
		 */
		if (IS_FLUSHING(fs,vp) || vp == fs->lfs_ivnode) {
			lfs_gather(fs, sp, vp, lfs_match_data);
			/*
			 * Don't call VOP_PUTPAGES: if we're flushing,
			 * we've already done it, and the Ifile doesn't
			 * use the page cache.
			 */
		}
	} else {
		lfs_gather(fs, sp, vp, lfs_match_data);
		/*
		 * If we're flushing, we've already called VOP_PUTPAGES
		 * so don't do it again.  Otherwise, we want to write
		 * everything we've got.
		 */
		if (!IS_FLUSHING(fs, vp)) {
			simple_lock(&vp->v_interlock);
			VOP_PUTPAGES(vp, 0, 0,
				     PGO_CLEANIT | PGO_ALLPAGES | PGO_LOCKED);
		}
	}

	/*
	 * It may not be necessary to write the meta-data blocks at this point,
	 * as the roll-forward recovery code should be able to reconstruct the
	 * list.
	 *
	 * We have to write them anyway, though, under two conditions: (1) the
	 * vnode is being flushed (for reuse by vinvalbuf); or (2) we are
	 * checkpointing.
	 *
	 * BUT if we are cleaning, we might have indirect blocks that refer to
	 * new blocks not being written yet, in addition to fragments being
	 * moved out of a cleaned segment.  If that is the case, don't
	 * write the indirect blocks, or the finfo will have a small block
	 * in the middle of it!
	 * XXX in this case isn't the inode size wrong too?
	 */
	frag = 0;
	if (sp->seg_flags & SEGM_CLEAN) {
		for (i = 0; i < NDADDR; i++)
			if (ip->i_lfs_fragsize[i] > 0 &&
			    ip->i_lfs_fragsize[i] < fs->lfs_bsize)
				++frag;
	}
#ifdef DIAGNOSTIC
	if (frag > 1)
		panic("lfs_writefile: more than one fragment!");
#endif
	if (IS_FLUSHING(fs, vp) ||
	    (frag == 0 && (lfs_writeindir || (sp->seg_flags & SEGM_CKP)))) {
		lfs_gather(fs, sp, vp, lfs_match_indir);
		lfs_gather(fs, sp, vp, lfs_match_dindir);
		lfs_gather(fs, sp, vp, lfs_match_tindir);
	}
	fip = sp->fip;
	if (fip->fi_nblocks != 0) {
		sp->fip = (FINFO*)((caddr_t)fip + FINFOSIZE +
				   sizeof(int32_t) * (fip->fi_nblocks));
		sp->start_lbp = &sp->fip->fi_blocks[0];
	} else {
		sp->sum_bytes_left += FINFOSIZE;
		--((SEGSUM *)(sp->segsum))->ss_nfinfo;
	}
}

int
lfs_writeinode(struct lfs *fs, struct segment *sp, struct inode *ip)
{
	struct buf *bp, *ibp;
	struct ufs1_dinode *cdp;
	IFILE *ifp;
	SEGUSE *sup;
	daddr_t daddr;
	int32_t *daddrp;	/* XXX ondisk32 */
	ino_t ino;
	int error, i, ndx, fsb = 0;
	int redo_ifile = 0;
	struct timespec ts;
	int gotblk = 0;

	ASSERT_SEGLOCK(fs);
	if (!(ip->i_flag & IN_ALLMOD))
		return (0);

	/* Allocate a new inode block if necessary. */
	if ((ip->i_number != LFS_IFILE_INUM || sp->idp == NULL) &&
	    sp->ibp == NULL) {
		/* Allocate a new segment if necessary. */
		if (sp->seg_bytes_left < fs->lfs_ibsize ||
		    sp->sum_bytes_left < sizeof(int32_t))
			(void) lfs_writeseg(fs, sp);

		/* Get next inode block. */
		daddr = fs->lfs_offset;
		fs->lfs_offset += btofsb(fs, fs->lfs_ibsize);
		sp->ibp = *sp->cbpp++ =
			getblk(VTOI(fs->lfs_ivnode)->i_devvp,
			    fsbtodb(fs, daddr), fs->lfs_ibsize, 0, 0);
		gotblk++;

		/* Zero out inode numbers */
		for (i = 0; i < INOPB(fs); ++i)
			((struct ufs1_dinode *)sp->ibp->b_data)[i].di_inumber =
			    0;

		++sp->start_bpp;
		fs->lfs_avail -= btofsb(fs, fs->lfs_ibsize);
		/* Set remaining space counters. */
		sp->seg_bytes_left -= fs->lfs_ibsize;
		sp->sum_bytes_left -= sizeof(int32_t);
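		/*
		 * Note the summary block layout: FINFO records grow
		 * forward from just after the SEGSUM header, while the
		 * int32_t daddrs of inode blocks (like the one stored
		 * below) grow backward from the end of the block;
		 * sum_bytes_left is the gap remaining between the two.
		 */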
		ndx = fs->lfs_sumsize / sizeof(int32_t) -
			sp->ninodes / INOPB(fs) - 1;
		((int32_t *)(sp->segsum))[ndx] = daddr;
	}

	/* Update the inode times and copy the inode onto the inode page. */
	TIMEVAL_TO_TIMESPEC(&time, &ts);
	/* XXX kludge --- don't redirty the ifile just to put times on it */
	if (ip->i_number != LFS_IFILE_INUM)
		LFS_ITIMES(ip, &ts, &ts, &ts);

	/*
	 * If this is the Ifile, and we've already written the Ifile in this
	 * partial segment, just overwrite it (it's not on disk yet) and
	 * continue.
	 *
	 * XXX we know that the bp that we get the second time around has
	 * already been gathered.
	 */
	if (ip->i_number == LFS_IFILE_INUM && sp->idp) {
		*(sp->idp) = *ip->i_din.ffs1_din;
		ip->i_lfs_osize = ip->i_size;
		return 0;
	}

	bp = sp->ibp;
	cdp = ((struct ufs1_dinode *)bp->b_data) + (sp->ninodes % INOPB(fs));
	*cdp = *ip->i_din.ffs1_din;

	/*
	 * If we are cleaning, ensure that we don't write UNWRITTEN disk
	 * addresses to disk; possibly change the on-disk record of
	 * the inode size, either by reverting to the previous size
	 * (in the case of cleaning) or by verifying the inode's block
	 * holdings (in the case of files being allocated as they are being
	 * written).
	 * XXX By not writing UNWRITTEN blocks, we are making the lfs_avail
	 * XXX count on disk wrong by the same amount.	We should be
	 * XXX able to "borrow" from lfs_avail and return it after the
	 * XXX Ifile is written.  See also in lfs_writeseg.
	 */

	/* Check file size based on highest allocated block */
	if (((ip->i_ffs1_mode & IFMT) == IFREG ||
	     (ip->i_ffs1_mode & IFMT) == IFDIR) &&
	    ip->i_size > ((ip->i_lfs_hiblk + 1) << fs->lfs_bshift)) {
		cdp->di_size = (ip->i_lfs_hiblk + 1) << fs->lfs_bshift;
		DLOG((DLOG_SEG, "lfs_writeinode: ino %d size %" PRId64 " -> %"
		      PRId64 "\n", (int)ip->i_number, ip->i_size, cdp->di_size));
	}
	if (ip->i_lfs_effnblks != ip->i_ffs1_blocks) {
		if (ip->i_flag & IN_CLEANING)
			cdp->di_size = ip->i_lfs_osize;
		DLOG((DLOG_SEG, "lfs_writeinode: cleansing ino %d eff %d != nblk %d)"
		      " at %x\n", ip->i_number, ip->i_lfs_effnblks,
		      ip->i_ffs1_blocks, fs->lfs_offset));
		for (daddrp = cdp->di_db; daddrp < cdp->di_ib + NIADDR;
		     daddrp++) {
			if (*daddrp == UNWRITTEN) {
				DLOG((DLOG_SEG, "lfs_writeinode: wiping UNWRITTEN\n"));
				*daddrp = 0;
			}
		}
	} else {
		/* If all blocks are going to disk, update "size on disk" */
		ip->i_lfs_osize = ip->i_size;
	}

#ifdef DIAGNOSTIC
	/*
	 * Check dinode held blocks against dinode size.
	 * This should be identical to the check in lfs_vget().
	 */
	for (i = (cdp->di_size + fs->lfs_bsize - 1) >> fs->lfs_bshift;
	     i < NDADDR; i++) {
		KASSERT(i >= 0);
		if ((cdp->di_mode & IFMT) == IFLNK)
			continue;
		if (((cdp->di_mode & IFMT) == IFBLK ||
		     (cdp->di_mode & IFMT) == IFCHR) && i == 0)
			continue;
		if (cdp->di_db[i] != 0) {
# ifdef DEBUG
			lfs_dump_dinode(cdp);
# endif
			panic("writing inconsistent inode");
		}
	}
#endif /* DIAGNOSTIC */

	if (ip->i_flag & IN_CLEANING)
		LFS_CLR_UINO(ip, IN_CLEANING);
	else {
		/* XXX IN_ALLMOD */
		LFS_CLR_UINO(ip, IN_ACCESSED | IN_ACCESS | IN_CHANGE |
			     IN_UPDATE | IN_MODIFY);
		if (ip->i_lfs_effnblks == ip->i_ffs1_blocks)
			LFS_CLR_UINO(ip, IN_MODIFIED);
		else
			DLOG((DLOG_VNODE, "lfs_writeinode: ino %d: real blks=%d, "
			      "eff=%d\n", ip->i_number, ip->i_ffs1_blocks,
			      ip->i_lfs_effnblks));
	}

	if (ip->i_number == LFS_IFILE_INUM) /* We know sp->idp == NULL */
		sp->idp = ((struct ufs1_dinode *)bp->b_data) +
			(sp->ninodes % INOPB(fs));
	if (gotblk) {
		LFS_LOCK_BUF(bp);
		brelse(bp);
	}

	/* Increment inode count in segment summary block. */
	++((SEGSUM *)(sp->segsum))->ss_ninos;

	/* If this page is full, set flag to allocate a new page. */
	if (++sp->ninodes % INOPB(fs) == 0)
		sp->ibp = NULL;

	/*
	 * If updating the ifile, update the super-block.  Update the disk
	 * address and access times for this inode in the ifile.
	 */
	ino = ip->i_number;
	if (ino == LFS_IFILE_INUM) {
		daddr = fs->lfs_idaddr;
		fs->lfs_idaddr = dbtofsb(fs, bp->b_blkno);
	} else {
		LFS_IENTRY(ifp, fs, ino, ibp);
		daddr = ifp->if_daddr;
		ifp->if_daddr = dbtofsb(fs, bp->b_blkno) + fsb;
		error = LFS_BWRITE_LOG(ibp); /* Ifile */
	}

	/*
	 * The inode's last address should not be in the current partial
	 * segment, except under exceptional circumstances (lfs_writevnodes
	 * had to start over, and in the meantime more blocks were written
	 * to a vnode).  Both copies of the inode will be accounted to this
	 * segment in lfs_writeseg, so we need to subtract the earlier
	 * version here anyway.  The segment byte count can temporarily dip
	 * below zero here; keep track of how many duplicates we have in
	 * sp->ndupino so we don't panic below.
	 */
	if (daddr >= fs->lfs_lastpseg && daddr <= dbtofsb(fs, bp->b_blkno)) {
		++sp->ndupino;
		DLOG((DLOG_SEG, "lfs_writeinode: last inode addr in current pseg "
		      "(ino %d daddr 0x%llx) ndupino=%d\n", ino,
		      (long long)daddr, sp->ndupino));
	}
	/*
	 * Account the inode: it no longer belongs to its former segment,
	 * though it will not belong to the new segment until that segment
	 * is actually written.
	 */
	if (daddr != LFS_UNUSED_DADDR) {
		u_int32_t oldsn = dtosn(fs, daddr);
#ifdef DIAGNOSTIC
		int ndupino = (sp->seg_number == oldsn) ? sp->ndupino : 0;
#endif
		LFS_SEGENTRY(sup, fs, oldsn, bp);
#ifdef DIAGNOSTIC
		if (sup->su_nbytes +
		    sizeof (struct ufs1_dinode) * ndupino
		      < sizeof (struct ufs1_dinode)) {
			printf("lfs_writeinode: negative bytes "
			       "(segment %" PRIu32 " short by %d, "
			       "oldsn=%" PRIu32 ", cursn=%" PRIu32
			       ", daddr=%" PRId64 ", su_nbytes=%u, "
			       "ndupino=%d)\n",
			       dtosn(fs, daddr),
			       (int)sizeof (struct ufs1_dinode) *
				   (1 - sp->ndupino) - sup->su_nbytes,
			       oldsn, sp->seg_number, daddr,
			       (unsigned int)sup->su_nbytes,
			       sp->ndupino);
			panic("lfs_writeinode: negative bytes");
			sup->su_nbytes = sizeof (struct ufs1_dinode);
		}
#endif
		DLOG((DLOG_SU, "seg %d -= %d for ino %d inode\n",
		      dtosn(fs, daddr), sizeof (struct ufs1_dinode), ino));
		sup->su_nbytes -= sizeof (struct ufs1_dinode);
		redo_ifile =
			(ino == LFS_IFILE_INUM && !(bp->b_flags & B_GATHERED));
		if (redo_ifile) {
			simple_lock(&fs->lfs_interlock);
			fs->lfs_flags |= LFS_IFDIRTY;
			simple_unlock(&fs->lfs_interlock);
		}
		LFS_WRITESEGENTRY(sup, fs, oldsn, bp); /* Ifile */
	}
	return (redo_ifile);
}

int
lfs_gatherblock(struct segment *sp, struct buf *bp, int *sptr)
{
	struct lfs *fs;
	int version;
	int j, blksinblk;

	ASSERT_SEGLOCK(sp->fs);
	/*
	 * If full, finish this segment.  We may be doing I/O, so
	 * release and reacquire the splbio().
	 */
#ifdef DIAGNOSTIC
	if (sp->vp == NULL)
		panic ("lfs_gatherblock: Null vp in segment");
#endif
	fs = sp->fs;
	blksinblk = howmany(bp->b_bcount, fs->lfs_bsize);
	if (sp->sum_bytes_left < sizeof(int32_t) * blksinblk ||
	    sp->seg_bytes_left < bp->b_bcount) {
		if (sptr)
			splx(*sptr);
		lfs_updatemeta(sp);

		version = sp->fip->fi_version;
		(void) lfs_writeseg(fs, sp);

		sp->fip->fi_version = version;
		sp->fip->fi_ino = VTOI(sp->vp)->i_number;
		/* Add the current file to the segment summary. */
		++((SEGSUM *)(sp->segsum))->ss_nfinfo;
		sp->sum_bytes_left -= FINFOSIZE;

		if (sptr)
			*sptr = splbio();
		return (1);
	}

	if (bp->b_flags & B_GATHERED) {
		DLOG((DLOG_SEG, "lfs_gatherblock: already gathered! Ino %d,"
		      " lbn %" PRId64 "\n",
		      sp->fip->fi_ino, bp->b_lblkno));
		return (0);
	}

	/* Insert into the buffer list, update the FINFO block. */
	bp->b_flags |= B_GATHERED;

	*sp->cbpp++ = bp;
	for (j = 0; j < blksinblk; j++) {
		sp->fip->fi_blocks[sp->fip->fi_nblocks++] = bp->b_lblkno + j;
		/* This block's accounting moves from lfs_favail to lfs_avail */
		lfs_deregister_block(sp->vp, bp->b_lblkno + j);
	}

	sp->sum_bytes_left -= sizeof(int32_t) * blksinblk;
	sp->seg_bytes_left -= bp->b_bcount;
	return (0);
}

int
lfs_gather(struct lfs *fs, struct segment *sp, struct vnode *vp,
    int (*match)(struct lfs *, struct buf *))
{
	struct buf *bp, *nbp;
	int s, count = 0;

	ASSERT_SEGLOCK(fs);
	KASSERT(sp->vp == NULL);
	sp->vp = vp;
	s = splbio();

#ifndef LFS_NO_BACKBUF_HACK
/* This is a hack to see if ordering the blocks in LFS makes a difference. */
# define	BUF_OFFSET	\
	(((caddr_t)&LIST_NEXT(bp, b_vnbufs)) - (caddr_t)bp)
# define	BACK_BUF(BP)	\
	((struct buf *)(((caddr_t)(BP)->b_vnbufs.le_prev) - BUF_OFFSET))
# define	BEG_OF_LIST	\
	((struct buf *)(((caddr_t)&LIST_FIRST(&vp->v_dirtyblkhd)) - BUF_OFFSET))
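
/*
 * The same back-pointer trick as BACK_VP in lfs_writevnodes() above,
 * here applied to the vnode's dirty buffer list.
 */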

loop:
	/* Find last buffer. */
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
	     bp && LIST_NEXT(bp, b_vnbufs) != NULL;
	     bp = LIST_NEXT(bp, b_vnbufs))
		/* nothing */;
	for (; bp && bp != BEG_OF_LIST; bp = nbp) {
		nbp = BACK_BUF(bp);
#else /* LFS_NO_BACKBUF_HACK */
loop:
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
#endif /* LFS_NO_BACKBUF_HACK */
		if ((bp->b_flags & (B_BUSY|B_GATHERED)) || !match(fs, bp)) {
#ifdef DEBUG
			if (vp == fs->lfs_ivnode &&
			    (bp->b_flags & (B_BUSY|B_GATHERED)) == B_BUSY)
				DLOG((DLOG_SEG, "lfs_gather: ifile lbn %"
				      PRId64 " busy (%x)",
				      bp->b_lblkno, bp->b_flags));
#endif
			continue;
		}
		if (vp->v_type == VBLK) {
			/* For block devices, just write the blocks. */
			/* XXX Do we even need to do this? */
			/*
			 * Get the block before bwrite,
			 * so we don't corrupt the free list
			 */
			bp->b_flags |= B_BUSY;
			bremfree(bp);
			bwrite(bp);
		} else {
#ifdef DIAGNOSTIC
# ifdef LFS_USE_B_INVAL
			if ((bp->b_flags & (B_CALL|B_INVAL)) == B_INVAL) {
				DLOG((DLOG_SEG, "lfs_gather: lbn %" PRId64
				      " is B_INVAL\n", bp->b_lblkno));
				VOP_PRINT(bp->b_vp);
			}
# endif /* LFS_USE_B_INVAL */
			if (!(bp->b_flags & B_DELWRI))
				panic("lfs_gather: bp not B_DELWRI");
			if (!(bp->b_flags & B_LOCKED)) {
				DLOG((DLOG_SEG, "lfs_gather: lbn %" PRId64
				      " blk %" PRId64 " not B_LOCKED\n",
				      bp->b_lblkno,
				      dbtofsb(fs, bp->b_blkno)));
				VOP_PRINT(bp->b_vp);
				panic("lfs_gather: bp not B_LOCKED");
			}
#endif
			if (lfs_gatherblock(sp, bp, &s)) {
				goto loop;
			}
		}
		count++;
	}
	splx(s);
	lfs_updatemeta(sp);
	KASSERT(sp->vp == vp);
	sp->vp = NULL;
	return count;
}

#if DEBUG
# define DEBUG_OOFF(n) do {						\
	if (ooff == 0) {						\
		DLOG((DLOG_SEG, "lfs_updatemeta[%d]: warning: writing " \
			"ino %d lbn %" PRId64 " at 0x%" PRIx32		\
			", was 0x0 (or %" PRId64 ")\n",			\
			(n), ip->i_number, lbn, ndaddr, daddr));	\
	}								\
} while (0)
#else
# define DEBUG_OOFF(n)
#endif

/*
 * Change the given block's address to ndaddr, finding its previous
 * location using ufs_bmaparray().
 *
 * Account for this change in the segment table.
 *
 * called with sp == NULL by roll-forwarding code.
 */
void
lfs_update_single(struct lfs *fs, struct segment *sp, struct vnode *vp,
    daddr_t lbn, int32_t ndaddr, int size)
{
	SEGUSE *sup;
	struct buf *bp;
	struct indir a[NIADDR + 2], *ap;
	struct inode *ip;
	daddr_t daddr, ooff;
	int num, error;
	int bb, osize, obb;

	ASSERT_SEGLOCK(fs);
	KASSERT(sp == NULL || sp->vp == vp);
	ip = VTOI(vp);

	error = ufs_bmaparray(vp, lbn, &daddr, a, &num, NULL, NULL);
	if (error)
		panic("lfs_updatemeta: ufs_bmaparray returned %d", error);

	daddr = (daddr_t)((int32_t)daddr); /* XXX ondisk32 */
	KASSERT(daddr <= LFS_MAX_DADDR);
	if (daddr > 0)
		daddr = dbtofsb(fs, daddr);

	bb = fragstofsb(fs, numfrags(fs, size));
	switch (num) {
	    case 0:
		    ooff = ip->i_ffs1_db[lbn];
		    DEBUG_OOFF(0);
		    if (ooff == UNWRITTEN)
			    ip->i_ffs1_blocks += bb;
		    else {
			    /* possible fragment truncation or extension */
			    obb = btofsb(fs, ip->i_lfs_fragsize[lbn]);
			    ip->i_ffs1_blocks += (bb - obb);
		    }
		    ip->i_ffs1_db[lbn] = ndaddr;
		    break;
	    case 1:
		    ooff = ip->i_ffs1_ib[a[0].in_off];
		    DEBUG_OOFF(1);
		    if (ooff == UNWRITTEN)
			    ip->i_ffs1_blocks += bb;
		    ip->i_ffs1_ib[a[0].in_off] = ndaddr;
		    break;
	    default:
		    ap = &a[num - 1];
		    if (bread(vp, ap->in_lbn, fs->lfs_bsize, NOCRED, &bp))
			    panic("lfs_updatemeta: bread bno %" PRId64,
				  ap->in_lbn);

		    /* XXX ondisk32 */
		    ooff = ((int32_t *)bp->b_data)[ap->in_off];
		    DEBUG_OOFF(num);
		    if (ooff == UNWRITTEN)
			    ip->i_ffs1_blocks += bb;
		    /* XXX ondisk32 */
		    ((int32_t *)bp->b_data)[ap->in_off] = ndaddr;
		    (void) VOP_BWRITE(bp);
	}

	KASSERT(ooff == 0 || ooff == UNWRITTEN || ooff == daddr);

	/* Update hiblk when extending the file */
	if (lbn > ip->i_lfs_hiblk)
		ip->i_lfs_hiblk = lbn;

	/*
	 * Though we'd rather it couldn't, this *can* happen right now
	 * if cleaning blocks and regular blocks coexist.
	 */
	/* KASSERT(daddr < fs->lfs_lastpseg || daddr > ndaddr); */

	/*
	 * Update segment usage information, based on old size
	 * and location.
	 */
	if (daddr > 0) {
		u_int32_t oldsn = dtosn(fs, daddr);
#ifdef DIAGNOSTIC
		int ndupino;

		if (sp && sp->seg_number == oldsn) {
			ndupino = sp->ndupino;
		} else {
			ndupino = 0;
		}
#endif
		KASSERT(oldsn >= 0 && oldsn < fs->lfs_nseg);
		if (lbn >= 0 && lbn < NDADDR)
			osize = ip->i_lfs_fragsize[lbn];
		else
			osize = fs->lfs_bsize;
		LFS_SEGENTRY(sup, fs, oldsn, bp);
#ifdef DIAGNOSTIC
		if (sup->su_nbytes + sizeof (struct ufs1_dinode) * ndupino
		    < osize) {
			printf("lfs_updatemeta: negative bytes "
			       "(segment %" PRIu32 " short by %" PRId64
			       ")\n", dtosn(fs, daddr),
			       (int64_t)osize -
			       (sizeof (struct ufs1_dinode) * ndupino +
				sup->su_nbytes));
			printf("lfs_updatemeta: ino %d, lbn %" PRId64
			       ", addr = 0x%" PRIx64 "\n",
			       ip->i_number, lbn, daddr);
			printf("lfs_updatemeta: ndupino=%d\n", ndupino);
			panic("lfs_updatemeta: negative bytes");
			sup->su_nbytes = osize -
			    sizeof (struct ufs1_dinode) * ndupino;
		}
#endif
		DLOG((DLOG_SU, "seg %" PRIu32 " -= %d for ino %d lbn %" PRId64
		      " db 0x%" PRIx64 "\n",
		      dtosn(fs, daddr), osize,
		      ip->i_number, lbn, daddr));
		sup->su_nbytes -= osize;
		if (!(bp->b_flags & B_GATHERED)) {
			simple_lock(&fs->lfs_interlock);
			fs->lfs_flags |= LFS_IFDIRTY;
			simple_unlock(&fs->lfs_interlock);
		}
		LFS_WRITESEGENTRY(sup, fs, oldsn, bp);
	}
	/*
	 * Now that this block has a new address, and its old
	 * segment no longer owns it, we can forget about its
	 * old size.
	 */
	if (lbn >= 0 && lbn < NDADDR)
		ip->i_lfs_fragsize[lbn] = size;
}

/*
 * Update the metadata that points to the blocks listed in the FINFO
 * array.
 */
void
lfs_updatemeta(struct segment *sp)
{
	struct buf *sbp;
	struct lfs *fs;
	struct vnode *vp;
	daddr_t lbn;
	int i, nblocks, num;
	int bb;
	int bytesleft, size;

	ASSERT_SEGLOCK(sp->fs);
	vp = sp->vp;
	nblocks = &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp;
	KASSERT(nblocks >= 0);
	KASSERT(vp != NULL);
	if (nblocks == 0)
		return;

	/*
	 * This count may be high due to oversize blocks from lfs_gop_write.
	 * Correct for this. (XXX we should be able to keep track of these.)
	 */
	fs = sp->fs;
	for (i = 0; i < nblocks; i++) {
		if (sp->start_bpp[i] == NULL) {
			DLOG((DLOG_SEG, "lfs_updatemeta: nblocks = %d, not %d\n", i, nblocks));
			nblocks = i;
			break;
		}
		num = howmany(sp->start_bpp[i]->b_bcount, fs->lfs_bsize);
		KASSERT(sp->start_bpp[i]->b_lblkno >= 0 || num == 1);
		nblocks -= num - 1;
	}

	KASSERT(vp->v_type == VREG ||
	   nblocks == &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp);
	KASSERT(nblocks == sp->cbpp - sp->start_bpp);

	/*
	 * Sort the blocks.
	 *
	 * We have to sort even if the blocks come from the
	 * cleaner, because there might be other pending blocks on the
	 * same inode...and if we don't sort, and there are fragments
	 * present, blocks may be written in the wrong place.
	 */
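	/* lfs_shellsort() reorders the buffer and lbn arrays in tandem. */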
	lfs_shellsort(sp->start_bpp, sp->start_lbp, nblocks, fs->lfs_bsize);

	/*
	 * Record the length of the last block in case it's a fragment.
	 * If there are indirect blocks present, they sort last.  An
	 * indirect block will be lfs_bsize and its presence indicates
	 * that you cannot have fragments.
	 *
	 * XXX This last is a lie.  A cleaned fragment can coexist with
	 * XXX a later indirect block.	This will continue to be
	 * XXX true until lfs_markv is fixed to do everything with
	 * XXX fake blocks (including fake inodes and fake indirect blocks).
	 */
	sp->fip->fi_lastlength = ((sp->start_bpp[nblocks - 1]->b_bcount - 1) &
		fs->lfs_bmask) + 1;
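
	/*
	 * A worked example with hypothetical sizes: if lfs_bsize is 8192
	 * (so lfs_bmask is 8191), a 2048-byte final fragment yields
	 * ((2048 - 1) & 8191) + 1 == 2048, while a full block yields
	 * ((8192 - 1) & 8191) + 1 == 8192; i.e., the expression is
	 * b_bcount reduced modulo the block size, with 0 mapped to
	 * lfs_bsize.
	 */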

	/*
	 * Assign disk addresses, and update references to the logical
	 * block and the segment usage information.
	 */
	for (i = nblocks; i--; ++sp->start_bpp) {
		sbp = *sp->start_bpp;
		lbn = *sp->start_lbp;
		KASSERT(sbp->b_lblkno == lbn);

		sbp->b_blkno = fsbtodb(fs, fs->lfs_offset);

		/*
		 * If we write a frag in the wrong place, the cleaner won't
		 * be able to correctly identify its size later, and the
		 * segment will be uncleanable.	 (Even worse, it will assume
		 * that the indirect block that actually ends the list
		 * is of a smaller size!)
		 */
		if ((sbp->b_bcount & fs->lfs_bmask) && i != 0)
			panic("lfs_updatemeta: fragment is not last block");

		/*
		 * For each subblock in this possibly oversized block,
		 * update its address on disk.
		 */
		KASSERT(lbn >= 0 || sbp->b_bcount == fs->lfs_bsize);
		KASSERT(vp == sbp->b_vp);
		for (bytesleft = sbp->b_bcount; bytesleft > 0;
		     bytesleft -= fs->lfs_bsize) {
			size = MIN(bytesleft, fs->lfs_bsize);
			bb = fragstofsb(fs, numfrags(fs, size));
			lbn = *sp->start_lbp++;
			lfs_update_single(fs, sp, sp->vp, lbn, fs->lfs_offset,
			    size);
			fs->lfs_offset += bb;
		}

	}
}

/*
 * Move lfs_offset to a segment earlier than sn.
 */
int
lfs_rewind(struct lfs *fs, int newsn)
{
	int sn, osn, isdirty;
	struct buf *bp;
	SEGUSE *sup;

	ASSERT_SEGLOCK(fs);

	osn = dtosn(fs, fs->lfs_offset);
	if (osn < newsn)
		return 0;

	/* lfs_avail eats the remaining space in this segment */
	fs->lfs_avail -= fs->lfs_fsbpseg - (fs->lfs_offset - fs->lfs_curseg);

	/* Find a low-numbered segment */
	for (sn = 0; sn < fs->lfs_nseg; ++sn) {
		LFS_SEGENTRY(sup, fs, sn, bp);
		isdirty = sup->su_flags & SEGUSE_DIRTY;
		brelse(bp);

		if (!isdirty)
			break;
	}
	if (sn == fs->lfs_nseg)
		panic("lfs_rewind: no clean segments");
	if (sn >= newsn)
		return ENOENT;
	fs->lfs_nextseg = sn;
	lfs_newseg(fs);
	fs->lfs_offset = fs->lfs_curseg;

	return 0;
}

/*
 * Start a new partial segment.
 *
 * Return 1 if we advanced to a new segment;
 * otherwise, return 0.
   1517  */
   1518 int
   1519 lfs_initseg(struct lfs *fs)
   1520 {
   1521 	struct segment *sp = fs->lfs_sp;
   1522 	SEGSUM *ssp;
   1523 	struct buf *sbp;	/* buffer for SEGSUM */
   1524 	int repeat = 0;		/* return value */
   1525 
   1526 	ASSERT_SEGLOCK(fs);
   1527 	/* Advance to the next segment. */
   1528 	if (!LFS_PARTIAL_FITS(fs)) {
   1529 		SEGUSE *sup;
   1530 		struct buf *bp;
   1531 
   1532 		/* lfs_avail eats the remaining space */
   1533 		fs->lfs_avail -= fs->lfs_fsbpseg - (fs->lfs_offset -
   1534 						   fs->lfs_curseg);
   1535 		/* Wake up any cleaning procs waiting on this file system. */
   1536 		wakeup(&lfs_allclean_wakeup);
   1537 		wakeup(&fs->lfs_nextseg);
   1538 		lfs_newseg(fs);
   1539 		repeat = 1;
   1540 		fs->lfs_offset = fs->lfs_curseg;
   1541 
   1542 		sp->seg_number = dtosn(fs, fs->lfs_curseg);
   1543 		sp->seg_bytes_left = fsbtob(fs, fs->lfs_fsbpseg);
   1544 
   1545 		/*
   1546 		 * If the segment contains a superblock, update the offset
   1547 		 * and summary address to skip over it.
   1548 		 */
   1549 		LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
   1550 		if (sup->su_flags & SEGUSE_SUPERBLOCK) {
   1551 			fs->lfs_offset += btofsb(fs, LFS_SBPAD);
   1552 			sp->seg_bytes_left -= LFS_SBPAD;
   1553 		}
   1554 		brelse(bp);
   1555 		/* Segment zero could also contain the labelpad */
   1556 		if (fs->lfs_version > 1 && sp->seg_number == 0 &&
   1557 		    fs->lfs_start < btofsb(fs, LFS_LABELPAD)) {
   1558 			fs->lfs_offset +=
   1559 			    btofsb(fs, LFS_LABELPAD) - fs->lfs_start;
   1560 			sp->seg_bytes_left -=
   1561 			    LFS_LABELPAD - fsbtob(fs, fs->lfs_start);
   1562 		}
   1563 	} else {
   1564 		sp->seg_number = dtosn(fs, fs->lfs_curseg);
   1565 		sp->seg_bytes_left = fsbtob(fs, fs->lfs_fsbpseg -
   1566 				      (fs->lfs_offset - fs->lfs_curseg));
   1567 	}
   1568 	fs->lfs_lastpseg = fs->lfs_offset;
   1569 
   1570 	/* Record first address of this partial segment */
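	/*
	 * lfs_cleanint[] appears to record the start addresses of the
	 * partial segments written on behalf of the cleaner.  When the
	 * table fills, wait for pending writes to drain (lfs_iocount
	 * dropping to the seglock's own reference) before wrapping
	 * around to slot 0.
	 */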
   1571 	if (sp->seg_flags & SEGM_CLEAN) {
   1572 		fs->lfs_cleanint[fs->lfs_cleanind] = fs->lfs_offset;
   1573 		if (++fs->lfs_cleanind >= LFS_MAX_CLEANIND) {
			/* The "1" is the artificial increment of lfs_iocount in lfs_seglock */
   1575 			simple_lock(&fs->lfs_interlock);
   1576 			while (fs->lfs_iocount > 1) {
   1577 				ltsleep(&fs->lfs_iocount, PRIBIO + 1,
   1578 				    "lfs_initseg", 0, &fs->lfs_interlock);
   1579 			}
   1580 			simple_unlock(&fs->lfs_interlock);
   1581 			fs->lfs_cleanind = 0;
   1582 		}
   1583 	}
   1584 
   1585 	sp->fs = fs;
   1586 	sp->ibp = NULL;
   1587 	sp->idp = NULL;
   1588 	sp->ninodes = 0;
   1589 	sp->ndupino = 0;
   1590 
   1591 	sp->cbpp = sp->bpp;
   1592 
   1593 	/* Get a new buffer for SEGSUM */
   1594 	sbp = lfs_newbuf(fs, VTOI(fs->lfs_ivnode)->i_devvp,
   1595 	    fsbtodb(fs, fs->lfs_offset), fs->lfs_sumsize, LFS_NB_SUMMARY);
   1596 
   1597 	/* ... and enter it into the buffer list. */
   1598 	*sp->cbpp = sbp;
   1599 	sp->cbpp++;
   1600 	fs->lfs_offset += btofsb(fs, fs->lfs_sumsize);
   1601 
   1602 	sp->start_bpp = sp->cbpp;
   1603 
	/* Set pointer to SEGSUM, initialize it. */
   1605 	ssp = sp->segsum = sbp->b_data;
   1606 	memset(ssp, 0, fs->lfs_sumsize);
   1607 	ssp->ss_next = fs->lfs_nextseg;
   1608 	ssp->ss_nfinfo = ssp->ss_ninos = 0;
   1609 	ssp->ss_magic = SS_MAGIC;
   1610 
   1611 	/* Set pointer to first FINFO, initialize it. */
   1612 	sp->fip = (struct finfo *)((caddr_t)sp->segsum + SEGSUM_SIZE(fs));
   1613 	sp->fip->fi_nblocks = 0;
   1614 	sp->start_lbp = &sp->fip->fi_blocks[0];
   1615 	sp->fip->fi_lastlength = 0;
   1616 
   1617 	sp->seg_bytes_left -= fs->lfs_sumsize;
   1618 	sp->sum_bytes_left = fs->lfs_sumsize - SEGSUM_SIZE(fs);
   1619 
   1620 	return (repeat);
   1621 }
   1622 
   1623 /*
   1624  * Remove SEGUSE_INVAL from all segments.
   1625  */
   1626 void
   1627 lfs_unset_inval_all(struct lfs *fs)
   1628 {
   1629 	SEGUSE *sup;
   1630 	struct buf *bp;
   1631 	int i;
   1632 
   1633 	for (i = 0; i < fs->lfs_nseg; i++) {
   1634 		LFS_SEGENTRY(sup, fs, i, bp);
   1635 		if (sup->su_flags & SEGUSE_INVAL) {
   1636 			sup->su_flags &= ~SEGUSE_INVAL;
   1637 			VOP_BWRITE(bp);
   1638 		} else
   1639 			brelse(bp);
   1640 	}
   1641 }
   1642 
/*
 * Advance to the previously chosen next segment, and select a new
 * clean segment to be lfs_nextseg for the write after this one.
 */
   1646 void
   1647 lfs_newseg(struct lfs *fs)
   1648 {
   1649 	CLEANERINFO *cip;
   1650 	SEGUSE *sup;
   1651 	struct buf *bp;
   1652 	int curseg, isdirty, sn, skip_inval;
   1653 
   1654 	ASSERT_SEGLOCK(fs);
   1655 	LFS_SEGENTRY(sup, fs, dtosn(fs, fs->lfs_nextseg), bp);
   1656 	DLOG((DLOG_SU, "lfs_newseg: seg %d := 0 in newseg\n",
   1657 	      dtosn(fs, fs->lfs_nextseg)));
   1658 	sup->su_flags |= SEGUSE_DIRTY | SEGUSE_ACTIVE;
   1659 	sup->su_nbytes = 0;
   1660 	sup->su_nsums = 0;
   1661 	sup->su_ninos = 0;
   1662 	LFS_WRITESEGENTRY(sup, fs, dtosn(fs, fs->lfs_nextseg), bp);
   1663 
   1664 	LFS_CLEANERINFO(cip, fs, bp);
   1665 	--cip->clean;
   1666 	++cip->dirty;
   1667 	fs->lfs_nclean = cip->clean;
   1668 	LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);
   1669 
   1670 	fs->lfs_lastseg = fs->lfs_curseg;
   1671 	fs->lfs_curseg = fs->lfs_nextseg;
   1672 	skip_inval = 1;
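	/*
	 * Search for the next clean segment, starting lfs_interleave
	 * segments past the new current segment and wrapping around.
	 * On the first pass, SEGUSE_INVAL segments are treated as dirty;
	 * if a full pass finds nothing, a second pass accepts them, and
	 * lfs_unset_inval_all clears the INVAL flags below.
	 */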
   1673 	for (sn = curseg = dtosn(fs, fs->lfs_curseg) + fs->lfs_interleave;;) {
   1674 		sn = (sn + 1) % fs->lfs_nseg;
   1675 		if (sn == curseg) {
   1676 			if (skip_inval)
   1677 				skip_inval = 0;
   1678 			else
				panic("lfs_newseg: no clean segments");
   1680 		}
   1681 		LFS_SEGENTRY(sup, fs, sn, bp);
   1682 		isdirty = sup->su_flags & (SEGUSE_DIRTY | (skip_inval ? SEGUSE_INVAL : 0));
   1683 		/* Check SEGUSE_EMPTY as we go along */
   1684 		if (isdirty && sup->su_nbytes == 0 &&
   1685 		    !(sup->su_flags & SEGUSE_EMPTY))
   1686 			LFS_WRITESEGENTRY(sup, fs, sn, bp);
   1687 		else
   1688 			brelse(bp);
   1689 
   1690 		if (!isdirty)
   1691 			break;
   1692 	}
   1693 	if (skip_inval == 0)
   1694 		lfs_unset_inval_all(fs);
   1695 
   1696 	++fs->lfs_nactive;
   1697 	fs->lfs_nextseg = sntod(fs, sn);
   1698 	if (lfs_dostats) {
   1699 		++lfs_stats.segsused;
   1700 	}
   1701 }
   1702 
   1703 static struct buf *
   1704 lfs_newclusterbuf(struct lfs *fs, struct vnode *vp, daddr_t addr, int n)
   1705 {
   1706 	struct lfs_cluster *cl;
   1707 	struct buf **bpp, *bp;
   1708 	int s;
   1709 
   1710 	ASSERT_SEGLOCK(fs);
   1711 	cl = (struct lfs_cluster *)pool_get(&fs->lfs_clpool, PR_WAITOK);
   1712 	bpp = (struct buf **)pool_get(&fs->lfs_bpppool, PR_WAITOK);
   1713 	memset(cl, 0, sizeof(*cl));
   1714 	cl->fs = fs;
   1715 	cl->bpp = bpp;
   1716 	cl->bufcount = 0;
   1717 	cl->bufsize = 0;
   1718 
   1719 	/* If this segment is being written synchronously, note that */
   1720 	if (fs->lfs_sp->seg_flags & SEGM_SYNC) {
   1721 		cl->flags |= LFS_CL_SYNC;
   1722 		cl->seg = fs->lfs_sp;
   1723 		++cl->seg->seg_iocount;
   1724 	}
   1725 
	/* Get a buffer header from the pool; it may be recycled, so clear it below */
   1727 	s = splbio();
   1728 	bp = pool_get(&bufpool, PR_WAITOK); /* XXX should use lfs_malloc? */
   1729 	splx(s);
   1730 	memset(bp, 0, sizeof(*bp));
   1731 	BUF_INIT(bp);
   1732 
   1733 	bp->b_flags = B_BUSY | B_CALL;
   1734 	bp->b_dev = NODEV;
   1735 	bp->b_blkno = bp->b_lblkno = addr;
   1736 	bp->b_iodone = lfs_cluster_callback;
   1737 	bp->b_private = cl;
   1738 	bp->b_vp = vp;
   1739 
   1740 	return bp;
   1741 }
   1742 
   1743 int
   1744 lfs_writeseg(struct lfs *fs, struct segment *sp)
   1745 {
   1746 	struct buf **bpp, *bp, *cbp, *newbp;
   1747 	SEGUSE *sup;
   1748 	SEGSUM *ssp;
   1749 	int i, s;
   1750 	int do_again, nblocks, byteoffset;
   1751 	size_t el_size;
   1752 	struct lfs_cluster *cl;
   1753 	u_short ninos;
   1754 	struct vnode *devvp;
   1755 	char *p = NULL;
   1756 	struct vnode *vp;
   1757 	int32_t *daddrp;	/* XXX ondisk32 */
   1758 	int changed;
   1759 	u_int32_t sum;
   1760 
   1761 	ASSERT_SEGLOCK(fs);
   1762 	/*
   1763 	 * If there are no buffers other than the segment summary to write
   1764 	 * and it is not a checkpoint, don't do anything.  On a checkpoint,
   1765 	 * even if there aren't any buffers, you need to write the superblock.
   1766 	 */
   1767 	if ((nblocks = sp->cbpp - sp->bpp) == 1)
   1768 		return (0);
   1769 
   1770 	devvp = VTOI(fs->lfs_ivnode)->i_devvp;
   1771 
   1772 	/* Update the segment usage information. */
   1773 	LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
   1774 
   1775 	/* Loop through all blocks, except the segment summary. */
   1776 	for (bpp = sp->bpp; ++bpp < sp->cbpp; ) {
   1777 		if ((*bpp)->b_vp != devvp) {
   1778 			sup->su_nbytes += (*bpp)->b_bcount;
   1779 			DLOG((DLOG_SU, "seg %" PRIu32 " += %ld for ino %d"
   1780 			      " lbn %" PRId64 " db 0x%" PRIx64 "\n",
   1781 			      sp->seg_number, (*bpp)->b_bcount,
   1782 			      VTOI((*bpp)->b_vp)->i_number, (*bpp)->b_lblkno,
   1783 			      (*bpp)->b_blkno));
   1784 		}
   1785 	}
   1786 
   1787 	ssp = (SEGSUM *)sp->segsum;
   1788 
   1789 	ninos = (ssp->ss_ninos + INOPB(fs) - 1) / INOPB(fs);
   1790 	DLOG((DLOG_SU, "seg %d += %d for %d inodes\n",
   1791 	      sp->seg_number, ssp->ss_ninos * sizeof (struct ufs1_dinode),
   1792 	      ssp->ss_ninos));
   1793 	sup->su_nbytes += ssp->ss_ninos * sizeof (struct ufs1_dinode);
   1794 	/* sup->su_nbytes += fs->lfs_sumsize; */
   1795 	if (fs->lfs_version == 1)
   1796 		sup->su_olastmod = time.tv_sec;
   1797 	else
   1798 		sup->su_lastmod = time.tv_sec;
   1799 	sup->su_ninos += ninos;
   1800 	++sup->su_nsums;
   1801 	fs->lfs_dmeta += (btofsb(fs, fs->lfs_sumsize) + btofsb(fs, ninos *
   1802 							 fs->lfs_ibsize));
   1803 	fs->lfs_avail -= btofsb(fs, fs->lfs_sumsize);
   1804 
   1805 	do_again = !(bp->b_flags & B_GATHERED);
   1806 	LFS_WRITESEGENTRY(sup, fs, sp->seg_number, bp); /* Ifile */
   1807 
   1808 	/*
	 * Mark blocks B_BUSY, to prevent them from being changed between
   1810 	 * the checksum computation and the actual write.
   1811 	 *
   1812 	 * If we are cleaning, check indirect blocks for UNWRITTEN, and if
   1813 	 * there are any, replace them with copies that have UNASSIGNED
   1814 	 * instead.
   1815 	 */
   1816 	for (bpp = sp->bpp, i = nblocks - 1; i--;) {
   1817 		++bpp;
   1818 		bp = *bpp;
   1819 		if (bp->b_flags & B_CALL) { /* UBC or malloced buffer */
   1820 			bp->b_flags |= B_BUSY;
   1821 			continue;
   1822 		}
   1823 
   1824 		simple_lock(&bp->b_interlock);
   1825 		s = splbio();
   1826 		while (bp->b_flags & B_BUSY) {
   1827 			DLOG((DLOG_SEG, "lfs_writeseg: avoiding potential"
   1828 			      " data summary corruption for ino %d, lbn %"
   1829 			      PRId64 "\n",
   1830 			      VTOI(bp->b_vp)->i_number, bp->b_lblkno));
   1831 			bp->b_flags |= B_WANTED;
   1832 			ltsleep(bp, (PRIBIO + 1), "lfs_writeseg", 0,
   1833 				&bp->b_interlock);
   1834 			splx(s);
   1835 			s = splbio();
   1836 		}
   1837 		bp->b_flags |= B_BUSY;
   1838 		splx(s);
   1839 		simple_unlock(&bp->b_interlock);
   1840 
   1841 		/*
   1842 		 * Check and replace indirect block UNWRITTEN bogosity.
   1843 		 * XXX See comment in lfs_writefile.
   1844 		 */
   1845 		if (bp->b_lblkno < 0 && bp->b_vp != devvp && bp->b_vp &&
   1846 		   VTOI(bp->b_vp)->i_ffs1_blocks !=
   1847 		   VTOI(bp->b_vp)->i_lfs_effnblks) {
   1848 			DLOG((DLOG_VNODE, "lfs_writeseg: cleansing ino %d (%d != %d)\n",
   1849 			      VTOI(bp->b_vp)->i_number,
   1850 			      VTOI(bp->b_vp)->i_lfs_effnblks,
   1851 			      VTOI(bp->b_vp)->i_ffs1_blocks));
   1852 			/* Make a copy we'll make changes to */
   1853 			newbp = lfs_newbuf(fs, bp->b_vp, bp->b_lblkno,
   1854 					   bp->b_bcount, LFS_NB_IBLOCK);
   1855 			newbp->b_blkno = bp->b_blkno;
   1856 			memcpy(newbp->b_data, bp->b_data,
   1857 			       newbp->b_bcount);
   1858 
   1859 			changed = 0;
   1860 			/* XXX ondisk32 */
   1861 			for (daddrp = (int32_t *)(newbp->b_data);
   1862 			     daddrp < (int32_t *)(newbp->b_data +
   1863 						  newbp->b_bcount); daddrp++) {
   1864 				if (*daddrp == UNWRITTEN) {
   1865 					++changed;
   1866 					*daddrp = 0;
   1867 				}
   1868 			}
   1869 			/*
   1870 			 * Get rid of the old buffer.  Don't mark it clean,
   1871 			 * though, if it still has dirty data on it.
   1872 			 */
   1873 			if (changed) {
   1874 				DLOG((DLOG_SEG, "lfs_writeseg: replacing UNWRITTEN(%d):"
   1875 				      " bp = %p newbp = %p\n", changed, bp,
   1876 				      newbp));
   1877 				*bpp = newbp;
   1878 				bp->b_flags &= ~(B_ERROR | B_GATHERED);
   1879 				if (bp->b_flags & B_CALL) {
   1880 					DLOG((DLOG_SEG, "lfs_writeseg: "
   1881 					      "indir bp should not be B_CALL\n"));
   1882 					s = splbio();
   1883 					biodone(bp);
   1884 					splx(s);
   1885 					bp = NULL;
   1886 				} else {
   1887 					/* Still on free list, leave it there */
   1888 					s = splbio();
   1889 					bp->b_flags &= ~B_BUSY;
   1890 					if (bp->b_flags & B_WANTED)
   1891 						wakeup(bp);
   1892 					splx(s);
   1893 					/*
   1894 					 * We have to re-decrement lfs_avail
   1895 					 * since this block is going to come
   1896 					 * back around to us in the next
   1897 					 * segment.
   1898 					 */
   1899 					fs->lfs_avail -=
   1900 					    btofsb(fs, bp->b_bcount);
   1901 				}
   1902 			} else {
   1903 				lfs_freebuf(fs, newbp);
   1904 			}
   1905 		}
   1906 	}
   1907 	/*
   1908 	 * Compute checksum across data and then across summary; the first
   1909 	 * block (the summary block) is skipped.  Set the create time here
   1910 	 * so that it's guaranteed to be later than the inode mod times.
   1911 	 */
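	/*
	 * Note that only the first el_size bytes of each lfs_bsize-sized
	 * piece of each buffer are folded into ss_datasum: the data
	 * checksum samples one word per disk block rather than covering
	 * every byte.
	 */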
   1912 	sum = 0;
   1913 	if (fs->lfs_version == 1)
   1914 		el_size = sizeof(u_long);
   1915 	else
   1916 		el_size = sizeof(u_int32_t);
   1917 	for (bpp = sp->bpp, i = nblocks - 1; i--; ) {
   1918 		++bpp;
   1919 		/* Loop through gop_write cluster blocks */
   1920 		for (byteoffset = 0; byteoffset < (*bpp)->b_bcount;
   1921 		     byteoffset += fs->lfs_bsize) {
   1922 #ifdef LFS_USE_B_INVAL
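			/*
			 * XXX "dp" is not declared anywhere in this
			 * function, so this branch would seem not to
			 * compile if LFS_USE_B_INVAL were defined.
			 */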
   1923 			if (((*bpp)->b_flags & (B_CALL | B_INVAL)) ==
   1924 			    (B_CALL | B_INVAL)) {
   1925 				if (copyin((caddr_t)(*bpp)->b_saveaddr +
   1926 					   byteoffset, dp, el_size)) {
   1927 					panic("lfs_writeseg: copyin failed [1]:"
   1928 						" ino %d blk %" PRId64,
   1929 						VTOI((*bpp)->b_vp)->i_number,
   1930 						(*bpp)->b_lblkno);
   1931 				}
   1932 			} else
   1933 #endif /* LFS_USE_B_INVAL */
   1934 			{
   1935 				sum = lfs_cksum_part(
   1936 				    (*bpp)->b_data + byteoffset, el_size, sum);
   1937 			}
   1938 		}
   1939 	}
   1940 	if (fs->lfs_version == 1)
   1941 		ssp->ss_ocreate = time.tv_sec;
   1942 	else {
   1943 		ssp->ss_create = time.tv_sec;
   1944 		ssp->ss_serial = ++fs->lfs_serial;
   1945 		ssp->ss_ident  = fs->lfs_ident;
   1946 	}
   1947 	ssp->ss_datasum = lfs_cksum_fold(sum);
   1948 	ssp->ss_sumsum = cksum(&ssp->ss_datasum,
   1949 	    fs->lfs_sumsize - sizeof(ssp->ss_sumsum));
   1950 
   1951 	simple_lock(&fs->lfs_interlock);
   1952 	fs->lfs_bfree -= (btofsb(fs, ninos * fs->lfs_ibsize) +
   1953 			  btofsb(fs, fs->lfs_sumsize));
   1954 	simple_unlock(&fs->lfs_interlock);
   1955 
	/*
	 * When we simply write the blocks we lose a rotation for every block
	 * written.  To avoid this problem, we cluster the buffers into a
	 * chunk and write the chunk.  MAXPHYS is the largest size I/O
	 * devices can handle; use that for the size of the chunks.
	 *
	 * However, blocks that are already clusters (from GOP_WRITE) are
	 * not copied into other clusters.
	 */
   1965 
   1966 #define CHUNKSIZE MAXPHYS
   1967 
   1968 	if (devvp == NULL)
   1969 		panic("devvp is NULL");
   1970 	for (bpp = sp->bpp, i = nblocks; i;) {
   1971 		cbp = lfs_newclusterbuf(fs, devvp, (*bpp)->b_blkno, i);
   1972 		cl = cbp->b_private;
   1973 
   1974 		cbp->b_flags |= B_ASYNC | B_BUSY;
   1975 		cbp->b_bcount = 0;
   1976 
   1977 #if defined(DEBUG) && defined(DIAGNOSTIC)
   1978 		if (bpp - sp->bpp > (fs->lfs_sumsize - SEGSUM_SIZE(fs))
   1979 		    / sizeof(int32_t)) {
   1980 			panic("lfs_writeseg: real bpp overwrite");
   1981 		}
   1982 		if (bpp - sp->bpp > segsize(fs) / fs->lfs_fsize) {
   1983 			panic("lfs_writeseg: theoretical bpp overwrite");
   1984 		}
   1985 #endif
   1986 
   1987 		/*
   1988 		 * Construct the cluster.
   1989 		 */
   1990 		simple_lock(&fs->lfs_interlock);
   1991 		++fs->lfs_iocount;
   1992 		simple_unlock(&fs->lfs_interlock);
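		/*
		 * Pack buffers into the cluster until CHUNKSIZE is
		 * reached.  A buffer that would overflow the chunk, or
		 * an already-clustered GOP_WRITE buffer mixed in with
		 * ordinary buffers, ends this cluster and starts a new
		 * one.
		 */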
   1993 		while (i && cbp->b_bcount < CHUNKSIZE) {
   1994 			bp = *bpp;
   1995 
   1996 			if (bp->b_bcount > (CHUNKSIZE - cbp->b_bcount))
   1997 				break;
   1998 			if (cbp->b_bcount > 0 && !(cl->flags & LFS_CL_MALLOC))
   1999 				break;
   2000 
   2001 			/* Clusters from GOP_WRITE are expedited */
   2002 			if (bp->b_bcount > fs->lfs_bsize) {
   2003 				if (cbp->b_bcount > 0)
   2004 					/* Put in its own buffer */
   2005 					break;
   2006 				else {
   2007 					cbp->b_data = bp->b_data;
   2008 				}
   2009 			} else if (cbp->b_bcount == 0) {
   2010 				p = cbp->b_data = lfs_malloc(fs, CHUNKSIZE,
   2011 							     LFS_NB_CLUSTER);
   2012 				cl->flags |= LFS_CL_MALLOC;
   2013 			}
   2014 #ifdef DIAGNOSTIC
   2015 			if (dtosn(fs, dbtofsb(fs, bp->b_blkno +
   2016 					      btodb(bp->b_bcount - 1))) !=
   2017 			    sp->seg_number) {
   2018 				printf("blk size %d daddr %" PRIx64
   2019 				    " not in seg %d\n",
   2020 				    bp->b_bcount, bp->b_blkno,
   2021 				    sp->seg_number);
   2022 				panic("segment overwrite");
   2023 			}
   2024 #endif
   2025 
   2026 #ifdef LFS_USE_B_INVAL
   2027 			/*
   2028 			 * Fake buffers from the cleaner are marked as B_INVAL.
   2029 			 * We need to copy the data from user space rather than
   2030 			 * from the buffer indicated.
			 * XXX -- what do I do on an error?
   2032 			 */
   2033 			if ((bp->b_flags & (B_CALL|B_INVAL)) ==
   2034 			    (B_CALL|B_INVAL)) {
   2035 				if (copyin(bp->b_saveaddr, p, bp->b_bcount))
   2036 					panic("lfs_writeseg: "
   2037 					    "copyin failed [2]");
   2038 			} else
   2039 #endif /* LFS_USE_B_INVAL */
   2040 			if (cl->flags & LFS_CL_MALLOC) {
   2041 				/* copy data into our cluster. */
   2042 				memcpy(p, bp->b_data, bp->b_bcount);
   2043 				p += bp->b_bcount;
   2044 			}
   2045 
   2046 			cbp->b_bcount += bp->b_bcount;
   2047 			cl->bufsize += bp->b_bcount;
   2048 
   2049 			bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI | B_DONE);
   2050 			cl->bpp[cl->bufcount++] = bp;
   2051 			vp = bp->b_vp;
   2052 			s = splbio();
   2053 			reassignbuf(bp, vp);
   2054 			V_INCR_NUMOUTPUT(vp);
   2055 			splx(s);
   2056 
   2057 			bpp++;
   2058 			i--;
   2059 		}
   2060 		if (fs->lfs_sp->seg_flags & SEGM_SYNC)
   2061 			BIO_SETPRIO(cbp, BPRIO_TIMECRITICAL);
   2062 		else
   2063 			BIO_SETPRIO(cbp, BPRIO_TIMELIMITED);
   2064 		s = splbio();
   2065 		V_INCR_NUMOUTPUT(devvp);
   2066 		splx(s);
   2067 		VOP_STRATEGY(devvp, cbp);
   2068 		curproc->p_stats->p_ru.ru_oublock++;
   2069 	}
   2070 
   2071 	if (lfs_dostats) {
   2072 		++lfs_stats.psegwrites;
   2073 		lfs_stats.blocktot += nblocks - 1;
   2074 		if (fs->lfs_sp->seg_flags & SEGM_SYNC)
   2075 			++lfs_stats.psyncwrites;
   2076 		if (fs->lfs_sp->seg_flags & SEGM_CLEAN) {
   2077 			++lfs_stats.pcleanwrites;
   2078 			lfs_stats.cleanblocks += nblocks - 1;
   2079 		}
   2080 	}
   2081 	return (lfs_initseg(fs) || do_again);
   2082 }
   2083 
   2084 void
   2085 lfs_writesuper(struct lfs *fs, daddr_t daddr)
   2086 {
   2087 	struct buf *bp;
   2088 	int s;
   2089 	struct vnode *devvp = VTOI(fs->lfs_ivnode)->i_devvp;
   2090 
   2091 	ASSERT_MAYBE_SEGLOCK(fs);
   2092 #ifdef DIAGNOSTIC
   2093 	KASSERT(fs->lfs_magic == LFS_MAGIC);
   2094 #endif
	/*
	 * If we allowed one superblock write to begin while another was
	 * still in progress, we would risk not having a complete
	 * checkpoint if we crashed.  So, block here if a superblock
	 * write is in progress.
	 */
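	/*
	 * (Checkpoints alternate between two superblock locations, so
	 * the previous checkpoint remains intact while the new one is
	 * being written; cf. the lfs_activesb handling in lfs_segwrite.)
	 */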
   2100 	simple_lock(&fs->lfs_interlock);
   2101 	s = splbio();
   2102 	while (fs->lfs_sbactive) {
   2103 		ltsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs sb", 0,
   2104 			&fs->lfs_interlock);
   2105 	}
   2106 	fs->lfs_sbactive = daddr;
   2107 	splx(s);
   2108 	simple_unlock(&fs->lfs_interlock);
   2109 
   2110 	/* Set timestamp of this version of the superblock */
   2111 	if (fs->lfs_version == 1)
   2112 		fs->lfs_otstamp = time.tv_sec;
   2113 	fs->lfs_tstamp = time.tv_sec;
   2114 
   2115 	/* Checksum the superblock and copy it into a buffer. */
   2116 	fs->lfs_cksum = lfs_sb_cksum(&(fs->lfs_dlfs));
   2117 	bp = lfs_newbuf(fs, devvp,
   2118 	    fsbtodb(fs, daddr), LFS_SBPAD, LFS_NB_SBLOCK);
   2119 	memset(bp->b_data + sizeof(struct dlfs), 0,
   2120 	    LFS_SBPAD - sizeof(struct dlfs));
   2121 	*(struct dlfs *)bp->b_data = fs->lfs_dlfs;
   2122 
   2123 	bp->b_flags |= B_BUSY | B_CALL | B_ASYNC;
   2124 	bp->b_flags &= ~(B_DONE | B_ERROR | B_READ | B_DELWRI);
   2125 	bp->b_iodone = lfs_supercallback;
   2126 
   2127 	if (fs->lfs_sp != NULL && fs->lfs_sp->seg_flags & SEGM_SYNC)
   2128 		BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
   2129 	else
   2130 		BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
   2131 	curproc->p_stats->p_ru.ru_oublock++;
   2132 	s = splbio();
   2133 	V_INCR_NUMOUTPUT(bp->b_vp);
   2134 	splx(s);
   2135 	simple_lock(&fs->lfs_interlock);
   2136 	++fs->lfs_iocount;
   2137 	simple_unlock(&fs->lfs_interlock);
   2138 	VOP_STRATEGY(devvp, bp);
   2139 }
   2140 
   2141 /*
   2142  * Logical block number match routines used when traversing the dirty block
   2143  * chain.
   2144  */
   2145 int
   2146 lfs_match_fake(struct lfs *fs, struct buf *bp)
   2147 {
   2148 
   2149 	ASSERT_SEGLOCK(fs);
   2150 	return LFS_IS_MALLOC_BUF(bp);
   2151 }
   2152 
   2153 #if 0
   2154 int
   2155 lfs_match_real(struct lfs *fs, struct buf *bp)
   2156 {
   2157 
   2158 	ASSERT_SEGLOCK(fs);
   2159 	return (lfs_match_data(fs, bp) && !lfs_match_fake(fs, bp));
   2160 }
   2161 #endif
   2162 
   2163 int
   2164 lfs_match_data(struct lfs *fs, struct buf *bp)
   2165 {
   2166 
   2167 	ASSERT_SEGLOCK(fs);
   2168 	return (bp->b_lblkno >= 0);
   2169 }
   2170 
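/*
 * Indirect blocks carry negative logical block numbers; the value of
 * (-lbn - NDADDR) modulo NINDIR(fs) distinguishes the level of
 * indirection (0 = single, 1 = double, 2 = triple), as tested below.
 */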
   2171 int
   2172 lfs_match_indir(struct lfs *fs, struct buf *bp)
   2173 {
   2174 	daddr_t lbn;
   2175 
   2176 	ASSERT_SEGLOCK(fs);
   2177 	lbn = bp->b_lblkno;
   2178 	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 0);
   2179 }
   2180 
   2181 int
   2182 lfs_match_dindir(struct lfs *fs, struct buf *bp)
   2183 {
   2184 	daddr_t lbn;
   2185 
   2186 	ASSERT_SEGLOCK(fs);
   2187 	lbn = bp->b_lblkno;
   2188 	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 1);
   2189 }
   2190 
   2191 int
   2192 lfs_match_tindir(struct lfs *fs, struct buf *bp)
   2193 {
   2194 	daddr_t lbn;
   2195 
   2196 	ASSERT_SEGLOCK(fs);
   2197 	lbn = bp->b_lblkno;
   2198 	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 2);
   2199 }
   2200 
   2201 /*
   2202  * XXX - The only buffers that are going to hit these functions are the
   2203  * segment write blocks, or the segment summaries, or the superblocks.
   2204  *
   2205  * All of the above are created by lfs_newbuf, and so do not need to be
   2206  * released via brelse.
   2207  */
   2208 void
   2209 lfs_callback(struct buf *bp)
   2210 {
   2211 	struct lfs *fs;
   2212 
   2213 	fs = bp->b_private;
   2214 	ASSERT_NO_SEGLOCK(fs);
   2215 	lfs_freebuf(fs, bp);
   2216 }
   2217 
   2218 static void
   2219 lfs_super_aiodone(struct buf *bp)
   2220 {
   2221 	struct lfs *fs;
   2222 
   2223 	fs = bp->b_private;
   2224 	ASSERT_NO_SEGLOCK(fs);
   2225 	simple_lock(&fs->lfs_interlock);
   2226 	fs->lfs_sbactive = 0;
   2227 	if (--fs->lfs_iocount <= 1)
   2228 		wakeup(&fs->lfs_iocount);
   2229 	simple_unlock(&fs->lfs_interlock);
   2230 	wakeup(&fs->lfs_sbactive);
   2231 	lfs_freebuf(fs, bp);
   2232 }
   2233 
   2234 static void
   2235 lfs_cluster_aiodone(struct buf *bp)
   2236 {
   2237 	struct lfs_cluster *cl;
   2238 	struct lfs *fs;
   2239 	struct buf *tbp, *fbp;
   2240 	struct vnode *vp, *devvp;
   2241 	struct inode *ip;
   2242 	int s, error=0;
   2243 
   2244 	if (bp->b_flags & B_ERROR)
   2245 		error = bp->b_error;
   2246 
   2247 	cl = bp->b_private;
   2248 	fs = cl->fs;
   2249 	devvp = VTOI(fs->lfs_ivnode)->i_devvp;
   2250 	ASSERT_NO_SEGLOCK(fs);
   2251 
   2252 	/* Put the pages back, and release the buffer */
   2253 	while (cl->bufcount--) {
   2254 		tbp = cl->bpp[cl->bufcount];
   2255 		KASSERT(tbp->b_flags & B_BUSY);
   2256 		if (error) {
   2257 			tbp->b_flags |= B_ERROR;
   2258 			tbp->b_error = error;
   2259 		}
   2260 
   2261 		/*
   2262 		 * We're done with tbp.	 If it has not been re-dirtied since
   2263 		 * the cluster was written, free it.  Otherwise, keep it on
   2264 		 * the locked list to be written again.
   2265 		 */
   2266 		vp = tbp->b_vp;
   2267 
   2268 		tbp->b_flags &= ~B_GATHERED;
   2269 
   2270 		LFS_BCLEAN_LOG(fs, tbp);
   2271 
   2272 		if (!(tbp->b_flags & B_CALL)) {
   2273 			KASSERT(tbp->b_flags & B_LOCKED);
   2274 			s = splbio();
   2275 			simple_lock(&bqueue_slock);
   2276 			bremfree(tbp);
   2277 			simple_unlock(&bqueue_slock);
   2278 			if (vp)
   2279 				reassignbuf(tbp, vp);
   2280 			splx(s);
   2281 			tbp->b_flags |= B_ASYNC; /* for biodone */
   2282 		}
   2283 
   2284 		if ((tbp->b_flags & (B_LOCKED | B_DELWRI)) == B_LOCKED)
   2285 			LFS_UNLOCK_BUF(tbp);
   2286 
   2287 		if (tbp->b_flags & B_DONE) {
   2288 			DLOG((DLOG_SEG, "blk %d biodone already (flags %lx)\n",
   2289 				cl->bufcount, (long)tbp->b_flags));
   2290 		}
   2291 
   2292 		if ((tbp->b_flags & B_CALL) && !LFS_IS_MALLOC_BUF(tbp)) {
   2293 			/*
   2294 			 * A buffer from the page daemon.
   2295 			 * We use the same iodone as it does,
   2296 			 * so we must manually disassociate its
   2297 			 * buffers from the vp.
   2298 			 */
   2299 			if (tbp->b_vp) {
				/* brelvp() clears b_vp, so restore it below */
   2301 				s = splbio();
   2302 				brelvp(tbp);
   2303 				tbp->b_vp = vp;
   2304 				splx(s);
   2305 			}
   2306 			/* Put it back the way it was */
   2307 			tbp->b_flags |= B_ASYNC;
   2308 			/* Master buffers have B_AGE */
   2309 			if (tbp->b_private == tbp)
   2310 				tbp->b_flags |= B_AGE;
   2311 		}
   2312 		s = splbio();
   2313 		biodone(tbp);
   2314 
   2315 		/*
   2316 		 * If this is the last block for this vnode, but
   2317 		 * there are other blocks on its dirty list,
   2318 		 * set IN_MODIFIED/IN_CLEANING depending on what
		 * sort of block it is.  Only do this for our mount point,
   2320 		 * not for, e.g., inode blocks that are attached to
   2321 		 * the devvp.
   2322 		 * XXX KS - Shouldn't we set *both* if both types
   2323 		 * of blocks are present (traverse the dirty list?)
   2324 		 */
   2325 		simple_lock(&global_v_numoutput_slock);
   2326 		if (vp != devvp && vp->v_numoutput == 0 &&
   2327 		    (fbp = LIST_FIRST(&vp->v_dirtyblkhd)) != NULL) {
   2328 			ip = VTOI(vp);
   2329 			DLOG((DLOG_SEG, "lfs_cluster_aiodone: mark ino %d\n",
   2330 			       ip->i_number));
   2331 			if (LFS_IS_MALLOC_BUF(fbp))
   2332 				LFS_SET_UINO(ip, IN_CLEANING);
   2333 			else
   2334 				LFS_SET_UINO(ip, IN_MODIFIED);
   2335 		}
   2336 		simple_unlock(&global_v_numoutput_slock);
   2337 		splx(s);
   2338 		wakeup(vp);
   2339 	}
   2340 
   2341 	/* Fix up the cluster buffer, and release it */
   2342 	if (cl->flags & LFS_CL_MALLOC)
   2343 		lfs_free(fs, bp->b_data, LFS_NB_CLUSTER);
   2344 	s = splbio();
   2345 	pool_put(&bufpool, bp); /* XXX should use lfs_free? */
   2346 	splx(s);
   2347 
   2348 	/* Note i/o done */
   2349 	if (cl->flags & LFS_CL_SYNC) {
   2350 		if (--cl->seg->seg_iocount == 0)
   2351 			wakeup(&cl->seg->seg_iocount);
   2352 	}
   2353 	simple_lock(&fs->lfs_interlock);
   2354 #ifdef DIAGNOSTIC
   2355 	if (fs->lfs_iocount == 0)
   2356 		panic("lfs_cluster_aiodone: zero iocount");
   2357 #endif
   2358 	if (--fs->lfs_iocount <= 1)
   2359 		wakeup(&fs->lfs_iocount);
   2360 	simple_unlock(&fs->lfs_interlock);
   2361 
   2362 	pool_put(&fs->lfs_bpppool, cl->bpp);
   2363 	cl->bpp = NULL;
   2364 	pool_put(&fs->lfs_clpool, cl);
   2365 }
   2366 
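/*
 * Hand the buffer off to the aiodone daemon: queue it on uvm.aio_done
 * and wake the daemon, which will call the given aiodone routine in
 * thread (rather than interrupt) context.
 */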
   2367 static void
   2368 lfs_generic_callback(struct buf *bp, void (*aiodone)(struct buf *))
   2369 {
   2370 	/* reset b_iodone for when this is a single-buf i/o. */
   2371 	bp->b_iodone = aiodone;
   2372 
   2373 	simple_lock(&uvm.aiodoned_lock);	/* locks uvm.aio_done */
   2374 	TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
   2375 	wakeup(&uvm.aiodoned);
   2376 	simple_unlock(&uvm.aiodoned_lock);
   2377 }
   2378 
   2379 static void
   2380 lfs_cluster_callback(struct buf *bp)
   2381 {
   2382 
   2383 	lfs_generic_callback(bp, lfs_cluster_aiodone);
   2384 }
   2385 
   2386 void
   2387 lfs_supercallback(struct buf *bp)
   2388 {
   2389 
   2390 	lfs_generic_callback(bp, lfs_super_aiodone);
   2391 }
   2392 
   2393 /*
   2394  * Shellsort (diminishing increment sort) from Data Structures and
 * Algorithms, Aho, Hopcroft and Ullman, 1983 Edition, page 290;
   2396  * see also Knuth Vol. 3, page 84.  The increments are selected from
   2397  * formula (8), page 95.  Roughly O(N^3/2).
   2398  */
   2399 /*
   2400  * This is our own private copy of shellsort because we want to sort
   2401  * two parallel arrays (the array of buffer pointers and the array of
   2402  * logical block numbers) simultaneously.  Note that we cast the array
 * of logical block numbers to an unsigned type in this routine so that
 * the negative block numbers (metadata blocks) sort AFTER the data blocks.
   2405  */
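/*
 * For example, with 32-bit logical block numbers, metadata block -1
 * compares as 0xffffffff and therefore sorts after every data block.
 */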
   2406 
   2407 void
   2408 lfs_shellsort(struct buf **bp_array, int32_t *lb_array, int nmemb, int size)
   2409 {
   2410 	static int __rsshell_increments[] = { 4, 1, 0 };
   2411 	int incr, *incrp, t1, t2;
   2412 	struct buf *bp_temp;
   2413 
   2414 #ifdef DEBUG
   2415 	incr = 0;
   2416 	for (t1 = 0; t1 < nmemb; t1++) {
   2417 		for (t2 = 0; t2 * size < bp_array[t1]->b_bcount; t2++) {
   2418 			if (lb_array[incr++] != bp_array[t1]->b_lblkno + t2) {
   2419 				/* dump before panic */
   2420 				printf("lfs_shellsort: nmemb=%d, size=%d\n",
   2421 				    nmemb, size);
   2422 				incr = 0;
   2423 				for (t1 = 0; t1 < nmemb; t1++) {
   2424 					const struct buf *bp = bp_array[t1];
   2425 
					printf("bp[%d]: lbn=%" PRIu64 ", size=%"
					    PRIu64 "\n", t1,
					    (uint64_t)bp->b_lblkno,
					    (uint64_t)bp->b_bcount);
   2430 					printf("lbns:");
   2431 					for (t2 = 0; t2 * size < bp->b_bcount;
   2432 					    t2++) {
   2433 						printf(" %" PRId32,
   2434 						    lb_array[incr++]);
   2435 					}
   2436 					printf("\n");
   2437 				}
   2438 				panic("lfs_shellsort: inconsistent input");
   2439 			}
   2440 		}
   2441 	}
   2442 #endif
   2443 
   2444 	for (incrp = __rsshell_increments; (incr = *incrp++) != 0;)
   2445 		for (t1 = incr; t1 < nmemb; ++t1)
   2446 			for (t2 = t1 - incr; t2 >= 0;)
   2447 				if ((u_int32_t)bp_array[t2]->b_lblkno >
   2448 				    (u_int32_t)bp_array[t2 + incr]->b_lblkno) {
   2449 					bp_temp = bp_array[t2];
   2450 					bp_array[t2] = bp_array[t2 + incr];
   2451 					bp_array[t2 + incr] = bp_temp;
   2452 					t2 -= incr;
   2453 				} else
   2454 					break;
   2455 
   2456 	/* Reform the list of logical blocks */
   2457 	incr = 0;
   2458 	for (t1 = 0; t1 < nmemb; t1++) {
   2459 		for (t2 = 0; t2 * size < bp_array[t1]->b_bcount; t2++) {
   2460 			lb_array[incr++] = bp_array[t1]->b_lblkno + t2;
   2461 		}
   2462 	}
   2463 }
   2464 
   2465 /*
   2466  * Check VXLOCK.  Return 1 if the vnode is locked.  Otherwise, vget it.
   2467  */
   2468 int
   2469 lfs_vref(struct vnode *vp)
   2470 {
   2471 	ASSERT_MAYBE_SEGLOCK(VTOI(vp)->i_lfs);
   2472 	/*
   2473 	 * If we return 1 here during a flush, we risk vinvalbuf() not
   2474 	 * being able to flush all of the pages from this vnode, which
   2475 	 * will cause it to panic.  So, return 0 if a flush is in progress.
   2476 	 */
   2477 	if (vp->v_flag & VXLOCK) {
   2478 		if (IS_FLUSHING(VTOI(vp)->i_lfs, vp)) {
   2479 			return 0;
   2480 		}
   2481 		return (1);
   2482 	}
   2483 	return (vget(vp, 0));
   2484 }
   2485 
   2486 /*
   2487  * This is vrele except that we do not want to VOP_INACTIVE this vnode. We
   2488  * inline vrele here to avoid the vn_lock and VOP_INACTIVE call at the end.
   2489  */
   2490 void
   2491 lfs_vunref(struct vnode *vp)
   2492 {
   2493 	ASSERT_MAYBE_SEGLOCK(VTOI(vp)->i_lfs);
   2494 	/*
   2495 	 * Analogous to lfs_vref, if the node is flushing, fake it.
   2496 	 */
   2497 	if ((vp->v_flag & VXLOCK) && IS_FLUSHING(VTOI(vp)->i_lfs, vp)) {
   2498 		return;
   2499 	}
   2500 
   2501 	simple_lock(&vp->v_interlock);
   2502 #ifdef DIAGNOSTIC
   2503 	if (vp->v_usecount <= 0) {
   2504 		printf("lfs_vunref: inum is %d\n", VTOI(vp)->i_number);
   2505 		printf("lfs_vunref: flags are 0x%lx\n", (u_long)vp->v_flag);
   2506 		printf("lfs_vunref: usecount = %ld\n", (long)vp->v_usecount);
		panic("lfs_vunref: v_usecount <= 0");
   2508 	}
   2509 #endif
   2510 	vp->v_usecount--;
   2511 	if (vp->v_usecount > 0) {
   2512 		simple_unlock(&vp->v_interlock);
   2513 		return;
   2514 	}
   2515 	/*
   2516 	 * insert at tail of LRU list
   2517 	 */
   2518 	simple_lock(&vnode_free_list_slock);
   2519 	if (vp->v_holdcnt > 0)
   2520 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
   2521 	else
   2522 		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
   2523 	simple_unlock(&vnode_free_list_slock);
   2524 	simple_unlock(&vp->v_interlock);
   2525 }
   2526 
   2527 /*
   2528  * We use this when we have vnodes that were loaded in solely for cleaning.
   2529  * There is no reason to believe that these vnodes will be referenced again
   2530  * soon, since the cleaning process is unrelated to normal filesystem
   2531  * activity.  Putting cleaned vnodes at the tail of the list has the effect
   2532  * of flushing the vnode LRU.  So, put vnodes that were loaded only for
   2533  * cleaning at the head of the list, instead.
   2534  */
   2535 void
   2536 lfs_vunref_head(struct vnode *vp)
   2537 {
   2538 
   2539 	ASSERT_SEGLOCK(VTOI(vp)->i_lfs);
   2540 	simple_lock(&vp->v_interlock);
   2541 #ifdef DIAGNOSTIC
   2542 	if (vp->v_usecount == 0) {
		panic("lfs_vunref_head: v_usecount == 0");
   2544 	}
   2545 #endif
   2546 	vp->v_usecount--;
   2547 	if (vp->v_usecount > 0) {
   2548 		simple_unlock(&vp->v_interlock);
   2549 		return;
   2550 	}
   2551 	/*
	 * insert at head of the free LRU list (a vnode with holds goes
	 * to the tail of the hold list instead)
   2553 	 */
   2554 	simple_lock(&vnode_free_list_slock);
   2555 	if (vp->v_holdcnt > 0)
   2556 		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
   2557 	else
   2558 		TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
   2559 	simple_unlock(&vnode_free_list_slock);
   2560 	simple_unlock(&vp->v_interlock);
   2561 }
   2562 
   2563