/*	$NetBSD: lfs_syscalls.c,v 1.159 2015/05/31 15:45:18 hannken Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2007, 2008
 *    The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*-
 * Copyright (c) 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_syscalls.c	8.10 (Berkeley) 5/14/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_syscalls.c,v 1.159 2015/05/31 15:45:18 hannken Exp $");

#ifndef LFS
# define LFS		/* for prototypes in syscallargs.h */
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/kernel.h>
#include <sys/kauth.h>
#include <sys/syscallargs.h>

#include <ufs/lfs/ulfs_inode.h>
#include <ufs/lfs/ulfsmount.h>
#include <ufs/lfs/ulfs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_kernel.h>
#include <ufs/lfs/lfs_extern.h>

struct buf *lfs_fakebuf(struct lfs *, struct vnode *, int, size_t, void *);
int lfs_fasthashget(dev_t, ino_t, struct vnode **);
int lfs_fastvget(struct mount *, ino_t, BLOCK_INFO *, int, struct vnode **);

pid_t lfs_cleaner_pid = 0;

/*
 * sys_lfs_markv:
 *
 * This will mark inodes and blocks dirty, so they are written into the log.
 * It will block until all the blocks have been written.  The segment create
 * time passed in the block_info and inode_info structures is used to decide
 * if the data is valid for each block (in case some process dirtied a block
 * or inode that is being cleaned between the determination that a block is
 * live and the lfs_markv call).
 *
 *  0 on success
 * -1/errno is returned on error.
 */
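/*
 * Illustrative sketch (not part of this file): how a userland cleaner
 * might drive this syscall.  The bi_* field names come from the
 * BLOCK_INFO copying loops below; nlive, ino[], lbn[], daddr[], vers[],
 * data[], size[] and segcreate are hypothetical cleaner-side variables.
 *
 *	BLOCK_INFO bi[MAXLIVE];
 *	for (i = 0; i < nlive; i++) {
 *		bi[i].bi_inode     = ino[i];	// inode that owns the block
 *		bi[i].bi_lbn       = lbn[i];	// logical block number, or
 *						// LFS_UNUSED_LBN for an inode
 *		bi[i].bi_daddr     = daddr[i];	// where the cleaner found it
 *		bi[i].bi_segcreate = segcreate;	// segment creation time
 *		bi[i].bi_version   = vers[i];	// version from the ifile
 *		bi[i].bi_bp        = data[i];	// userland copy of the block
 *		bi[i].bi_size      = size[i];
 *	}
 *	if (lfs_markv(&fsid, bi, nlive) < 0 && errno == EAGAIN)
 *		// some blocks could not be cleaned; re-examine and retry
 */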
#ifdef USE_64BIT_SYSCALLS
int
sys_lfs_markv(struct lwp *l, const struct sys_lfs_markv_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_MARKV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
		return (EINVAL);

	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
			    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_markv(l->l_proc, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO));
    out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
#else
int
sys_lfs_markv(struct lwp *l, const struct sys_lfs_markv_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	BLOCK_INFO_15 *blkiov15;
	int i, blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_MARKV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
		return (EINVAL);

	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	blkiov15 = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO_15), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
			    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
		goto out;

	for (i = 0; i < blkcnt; i++) {
		blkiov[i].bi_inode     = blkiov15[i].bi_inode;
		blkiov[i].bi_lbn       = blkiov15[i].bi_lbn;
		blkiov[i].bi_daddr     = blkiov15[i].bi_daddr;
		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
		blkiov[i].bi_version   = blkiov15[i].bi_version;
		blkiov[i].bi_bp	       = blkiov15[i].bi_bp;
		blkiov[i].bi_size      = blkiov15[i].bi_size;
	}

	if ((error = lfs_markv(l->l_proc, &fsid, blkiov, blkcnt)) == 0) {
		for (i = 0; i < blkcnt; i++) {
			blkiov15[i].bi_inode	 = blkiov[i].bi_inode;
			blkiov15[i].bi_lbn	 = blkiov[i].bi_lbn;
			blkiov15[i].bi_daddr	 = blkiov[i].bi_daddr;
			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
			blkiov15[i].bi_version	 = blkiov[i].bi_version;
			blkiov15[i].bi_bp	 = blkiov[i].bi_bp;
			blkiov15[i].bi_size	 = blkiov[i].bi_size;
		}
		copyout(blkiov15, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO_15));
	}
    out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	lfs_free(fs, blkiov15, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
#endif

#define	LFS_MARKV_MAX_BLOCKS	(LFS_MAX_BUFS)

int
lfs_markv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov,
    int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct ulfsmount *ump;
	struct vnode *vp;
	ino_t lastino;
	daddr_t b_daddr;
	int cnt, error;
	int do_again = 0;
	int numrefed = 0;
	ino_t maxino;
	size_t obsize;

	/* number of blocks/inodes that we have already bwrite'ed */
	int nblkwritten, ninowritten;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	ump = VFSTOULFS(mntp);
	fs = ump->um_lfs;

	if (fs->lfs_ronly)
		return EROFS;

	maxino = (lfs_fragstoblks(fs, VTOI(fs->lfs_ivnode)->i_ffs1_blocks) -
		      fs->lfs_cleansz - fs->lfs_segtabsz) * fs->lfs_ifpb;

	cnt = blkcnt;

	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	/*
	 * Take the seglock, so that the blocks we are about to mark
	 * cannot become invalid while we might have to sleep.
	 *
	 * It is also important to note here that unless we specify SEGM_CKP,
	 * any Ifile blocks that we might be asked to clean will never get
	 * to the disk.
	 */
	lfs_seglock(fs, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);

	/* Mark blocks/inodes dirty.  */
	error = 0;

	/* these were inside the initialization for the for loop */
	vp = NULL;
	lastino = LFS_UNUSED_INUM;
	nblkwritten = ninowritten = 0;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		/* Bounds-check incoming data, avoid panic for failed VGET */
		if (blkp->bi_inode <= 0 || blkp->bi_inode >= maxino) {
			error = EINVAL;
			goto err3;
		}
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.
			 */
			if (vp != NULL) {
				VOP_UNLOCK(vp);
				lfs_vunref(vp);
				vp = NULL;
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;

			/* Get the vnode/inode. */
			error = lfs_fastvget(mntp, blkp->bi_inode, blkp,
			    LK_EXCLUSIVE | LK_NOWAIT, &vp);
			if (error) {
				DLOG((DLOG_CLEAN, "lfs_markv: lfs_fastvget"
				      " failed with %d (ino %d, segment %d)\n",
				      error, blkp->bi_inode,
				      lfs_dtosn(fs, blkp->bi_daddr)));
				/*
				 * If we got EAGAIN, that means that the
				 * Inode was locked.  This is
				 * recoverable: just clean the rest of
				 * this segment, and let the cleaner try
				 * again with another.	(When the
				 * cleaner runs again, this segment will
				 * sort high on the list, since it is
				 * now almost entirely empty.)
				 */
				if (error == EAGAIN) {
					error = 0;
					do_again++;
				} else
					KASSERT(error == ENOENT);
				KASSERT(vp == NULL);
				ip = NULL;
				continue;
			}

			ip = VTOI(vp);
			numrefed++;
			ninowritten++;
		} else if (vp == NULL) {
			/*
			 * This can only happen if the vnode is dead (or
			 * in any case we can't get it...e.g., it is
			 * locked).  Keep going.
			 */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		/* Can't clean VU_DIROP directories in case of truncation */
		/* XXX - maybe we should mark removed dirs specially? */
		if (vp->v_type == VDIR && (vp->v_uflag & VU_DIROP)) {
			do_again++;
			continue;
		}

		/* If this BLOCK_INFO didn't contain a block, keep going. */
		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/* XXX need to make sure that the inode gets written in this case */
			/* XXX but only write the inode if it's the right one */
			if (blkp->bi_inode != LFS_IFILE_INUM) {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				if (ifp->if_daddr == blkp->bi_daddr) {
					mutex_enter(&lfs_lock);
					LFS_SET_UINO(ip, IN_CLEANING);
					mutex_exit(&lfs_lock);
				}
				brelse(bp, 0);
			}
			continue;
		}

		b_daddr = 0;
		if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &b_daddr, NULL) ||
		    LFS_DBTOFSB(fs, b_daddr) != blkp->bi_daddr)
		{
			if (lfs_dtosn(fs, LFS_DBTOFSB(fs, b_daddr)) ==
			    lfs_dtosn(fs, blkp->bi_daddr))
			{
				DLOG((DLOG_CLEAN, "lfs_markv: wrong da same seg: %llx vs %llx\n",
				      (long long)blkp->bi_daddr, (long long)LFS_DBTOFSB(fs, b_daddr)));
			}
			do_again++;
			continue;
		}

		/*
		 * Check block sizes.  The blocks being cleaned come from
		 * disk, so they should have the same size as their on-disk
		 * counterparts.
		 */
		if (blkp->bi_lbn >= 0)
			obsize = lfs_blksize(fs, ip, blkp->bi_lbn);
		else
			obsize = fs->lfs_bsize;
		/* Check for fragment size change */
		if (blkp->bi_lbn >= 0 && blkp->bi_lbn < ULFS_NDADDR) {
			obsize = ip->i_lfs_fragsize[blkp->bi_lbn];
		}
		if (obsize != blkp->bi_size) {
			DLOG((DLOG_CLEAN, "lfs_markv: ino %d lbn %lld wrong"
			      " size (%ld != %d), try again\n",
			      blkp->bi_inode, (long long)blkp->bi_lbn,
			      (long) obsize, blkp->bi_size));
			do_again++;
			continue;
		}

		/*
		 * If we get to here, then we are keeping the block.  If
		 * it is an indirect block, we want to actually put it
		 * in the buffer cache so that it can be updated in the
		 * finish_meta section.	 If it's not, we need to
		 * allocate a fake buffer so that writeseg can perform
		 * the copyin and write the buffer.
		 */
		if (ip->i_number != LFS_IFILE_INUM && blkp->bi_lbn >= 0) {
			/* Data Block */
			bp = lfs_fakebuf(fs, vp, blkp->bi_lbn,
					 blkp->bi_size, blkp->bi_bp);
			/* Pretend we used bread() to get it */
			bp->b_blkno = LFS_FSBTODB(fs, blkp->bi_daddr);
		} else {
			/* Indirect block or ifile */
			if (blkp->bi_size != fs->lfs_bsize &&
			    ip->i_number != LFS_IFILE_INUM)
				panic("lfs_markv: partial indirect block?"
				    " size=%d\n", blkp->bi_size);
			bp = getblk(vp, blkp->bi_lbn, blkp->bi_size, 0, 0);
			if (!(bp->b_oflags & (BO_DONE|BO_DELWRI))) {
				/*
				 * The block in question was not found
				 * in the cache; i.e., the block that
				 * getblk() returned is empty.	So, we
				 * can (and should) copy in the
				 * contents, because we've already
				 * determined that this was the right
				 * version of this block on disk.
				 *
				 * And, it can't have changed underneath
				 * us, because we have the segment lock.
				 */
				error = copyin(blkp->bi_bp, bp->b_data, blkp->bi_size);
				if (error)
					goto err2;
			}
		}
		if ((error = lfs_bwrite_ext(bp, BW_CLEAN)) != 0)
			goto err2;

		nblkwritten++;
		/*
		 * XXX should account indirect blocks and ifile pages as well
		 */
		if (nblkwritten + lfs_lblkno(fs, ninowritten * sizeof (struct ulfs1_dinode))
		    > LFS_MARKV_MAX_BLOCKS) {
			DLOG((DLOG_CLEAN, "lfs_markv: writing %d blks %d inos\n",
			      nblkwritten, ninowritten));
			lfs_segwrite(mntp, SEGM_CLEAN);
			nblkwritten = ninowritten = 0;
		}
	}

	/*
	 * Finish the old file, if there was one
	 */
	if (vp != NULL) {
		VOP_UNLOCK(vp);
		lfs_vunref(vp);
		vp = NULL;
		numrefed--;
	}

#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_markv: numrefed=%d", numrefed);
#endif
	DLOG((DLOG_CLEAN, "lfs_markv: writing %d blks %d inos (check point)\n",
	      nblkwritten, ninowritten));

	/*
	 * The last write has to be SEGM_SYNC, because of calling semantics.
	 * It also has to be SEGM_CKP, because otherwise we could write
	 * over the newly cleaned data contained in a checkpoint, and then
	 * we'd be unhappy at recovery time.
	 */
	lfs_segwrite(mntp, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);

	lfs_segunlock(fs);

	vfs_unbusy(mntp, false, NULL);
	if (error)
		return (error);
	else if (do_again)
		return EAGAIN;

	return 0;

err2:
	DLOG((DLOG_CLEAN, "lfs_markv err2\n"));

	/*
	 * XXX we're here because copyin() failed.
	 * XXX it means that we can't trust the cleanerd.  too bad.
	 * XXX how can we recover from this?
	 */

err3:
	/*
	 * XXX should do segwrite here anyway?
	 */

	if (vp != NULL) {
		VOP_UNLOCK(vp);
		lfs_vunref(vp);
		vp = NULL;
		--numrefed;
	}

	lfs_segunlock(fs);
	vfs_unbusy(mntp, false, NULL);
#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_markv: numrefed=%d", numrefed);
#endif

	return (error);
}

/*
 * sys_lfs_bmapv:
 *
 * This will fill in the current disk address for an array of blocks.
 *
 *  0 on success
 * -1/errno is returned on error.
 */
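/*
 * Illustrative sketch (not part of this file): the cleaner typically uses
 * lfs_bmapv to decide which blocks of a candidate segment are still live
 * before handing them to lfs_markv.  The variables here (nblk, bi[],
 * found_daddr[]) are hypothetical cleaner-side state.
 *
 *	// bi[0..nblk-1] filled in with bi_inode/bi_lbn, recording in
 *	// found_daddr[] where each block was found in the segment
 *	if (lfs_bmapv(&fsid, bi, nblk) == 0) {
 *		for (i = 0; i < nblk; i++)
 *			if (bi[i].bi_daddr == found_daddr[i])
 *				// block is still live: queue it for lfs_markv
 *	}
 */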
#ifdef USE_64BIT_SYSCALLS
int
sys_lfs_bmapv(struct lwp *l, const struct sys_lfs_bmapv_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	int blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_BMAPV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((u_int) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
		return (EINVAL);
	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov,
			    blkcnt * sizeof(BLOCK_INFO))) != 0)
		goto out;

	if ((error = lfs_bmapv(l->l_proc, &fsid, blkiov, blkcnt)) == 0)
		copyout(blkiov, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO));
    out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
#else
int
sys_lfs_bmapv(struct lwp *l, const struct sys_lfs_bmapv_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct block_info *) blkiov;
		syscallarg(int) blkcnt;
	} */
	BLOCK_INFO *blkiov;
	BLOCK_INFO_15 *blkiov15;
	int i, blkcnt, error;
	fsid_t fsid;
	struct lfs *fs;
	struct mount *mntp;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_BMAPV, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);
	fs = VFSTOULFS(mntp)->um_lfs;

	blkcnt = SCARG(uap, blkcnt);
	if ((size_t) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
		return (EINVAL);
	KERNEL_LOCK(1, NULL);
	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
	blkiov15 = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO_15), LFS_NB_BLKIOV);
	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
			    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
		goto out;

	for (i = 0; i < blkcnt; i++) {
		blkiov[i].bi_inode     = blkiov15[i].bi_inode;
		blkiov[i].bi_lbn       = blkiov15[i].bi_lbn;
		blkiov[i].bi_daddr     = blkiov15[i].bi_daddr;
		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
		blkiov[i].bi_version   = blkiov15[i].bi_version;
		blkiov[i].bi_bp	       = blkiov15[i].bi_bp;
		blkiov[i].bi_size      = blkiov15[i].bi_size;
	}

	if ((error = lfs_bmapv(l->l_proc, &fsid, blkiov, blkcnt)) == 0) {
		for (i = 0; i < blkcnt; i++) {
			blkiov15[i].bi_inode	 = blkiov[i].bi_inode;
			blkiov15[i].bi_lbn	 = blkiov[i].bi_lbn;
			blkiov15[i].bi_daddr	 = blkiov[i].bi_daddr;
			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
			blkiov15[i].bi_version	 = blkiov[i].bi_version;
			blkiov15[i].bi_bp	 = blkiov[i].bi_bp;
			blkiov15[i].bi_size	 = blkiov[i].bi_size;
		}
		copyout(blkiov15, SCARG(uap, blkiov),
			blkcnt * sizeof(BLOCK_INFO_15));
	}
    out:
	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
	lfs_free(fs, blkiov15, LFS_NB_BLKIOV);
	KERNEL_UNLOCK_ONE(NULL);
	return error;
}
#endif

int
lfs_bmapv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct vnode *vp;
	ino_t lastino;
	daddr_t v_daddr;
	int cnt, error;
	int numrefed = 0;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	cnt = blkcnt;

	fs = VFSTOULFS(mntp)->um_lfs;

	error = 0;

	/* these were inside the initialization for the for loop */
	vp = NULL;
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.
			 */
			if (vp != NULL) {
				VOP_UNLOCK(vp);
				lfs_vunref(vp);
				vp = NULL;
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				v_daddr = ifp->if_daddr;
				brelse(bp, 0);
			}
			if (v_daddr == LFS_UNUSED_DADDR) {
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			error = lfs_fastvget(mntp, blkp->bi_inode, NULL,
			    LK_SHARED, &vp);
			if (error) {
				DLOG((DLOG_CLEAN, "lfs_bmapv: lfs_fastvget ino"
				      "%d failed with %d",
				      blkp->bi_inode,error));
				KASSERT(vp == NULL);
				continue;
			} else {
				KASSERT(VOP_ISLOCKED(vp));
				numrefed++;
			}
			ip = VTOI(vp);
		} else if (vp == NULL) {
			/*
			 * This can only happen if the vnode is dead.
			 * Keep going.	Note that we DO NOT set the
			 * bi_addr to anything -- if we failed to get
			 * the vnode, for example, we want to assume
			 * conservatively that all of its blocks *are*
			 * located in the segment in question.
			 * lfs_markv will throw them out if we are
			 * wrong.
			 */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/*
			 * We just want the inode address, which is
			 * conveniently in v_daddr.
			 */
			blkp->bi_daddr = v_daddr;
		} else {
			daddr_t bi_daddr;

			/* XXX ondisk32 */
			error = VOP_BMAP(vp, blkp->bi_lbn, NULL,
					 &bi_daddr, NULL);
			if (error)
			{
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			blkp->bi_daddr = LFS_DBTOFSB(fs, bi_daddr);
			/* Fill in the block size, too */
			if (blkp->bi_lbn >= 0)
				blkp->bi_size = lfs_blksize(fs, ip, blkp->bi_lbn);
			else
				blkp->bi_size = fs->lfs_bsize;
		}
	}

	/*
	 * Finish the old file, if there was one.
	 */
	if (vp != NULL) {
		VOP_UNLOCK(vp);
		lfs_vunref(vp);
		vp = NULL;
		numrefed--;
	}

#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_bmapv: numrefed=%d", numrefed);
#endif

	vfs_unbusy(mntp, false, NULL);

	return 0;
}

/*
 * sys_lfs_segclean:
 *
 * Mark the segment clean.
 *
 *  0 on success
 * -1/errno is returned on error.
 */
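/*
 * Illustrative sketch (not part of this file): after a successful lfs_markv
 * checkpoint, the cleaner can ask for each now-empty segment to be
 * reclaimed.  segnum is a hypothetical cleaner-side variable; the EBUSY
 * and EALREADY cases mirror lfs_do_segclean() below.
 *
 *	if (lfs_segclean(&fsid, segnum) < 0 && errno == EBUSY)
 *		// segment still has live bytes or is active; try again later
 */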
int
sys_lfs_segclean(struct lwp *l, const struct sys_lfs_segclean_args *uap, register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(u_long) segment;
	} */
	struct lfs *fs;
	struct mount *mntp;
	fsid_t fsid;
	int error;
	unsigned long segnum;

	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_SEGCLEAN, NULL, NULL, NULL);
	if (error)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (ENOENT);

	fs = VFSTOULFS(mntp)->um_lfs;
	segnum = SCARG(uap, segment);

	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	KERNEL_LOCK(1, NULL);
	lfs_seglock(fs, SEGM_PROT);
	error = lfs_do_segclean(fs, segnum);
	lfs_segunlock(fs);
	KERNEL_UNLOCK_ONE(NULL);
	vfs_unbusy(mntp, false, NULL);
	return error;
}

/*
 * Actually mark the segment clean.
 * Must be called with the segment lock held.
 */
int
lfs_do_segclean(struct lfs *fs, unsigned long segnum)
{
	extern int lfs_dostats;
	struct buf *bp;
	CLEANERINFO *cip;
	SEGUSE *sup;

	if (lfs_dtosn(fs, fs->lfs_curseg) == segnum) {
		return (EBUSY);
	}

	LFS_SEGENTRY(sup, fs, segnum, bp);
	if (sup->su_nbytes) {
		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
		      " %d live bytes\n", segnum, sup->su_nbytes));
		brelse(bp, 0);
		return (EBUSY);
	}
	if (sup->su_flags & SEGUSE_ACTIVE) {
		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
		      " segment is active\n", segnum));
		brelse(bp, 0);
		return (EBUSY);
	}
	if (!(sup->su_flags & SEGUSE_DIRTY)) {
		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
		      " segment is already clean\n", segnum));
		brelse(bp, 0);
		return (EALREADY);
	}

	fs->lfs_avail += lfs_segtod(fs, 1);
	if (sup->su_flags & SEGUSE_SUPERBLOCK)
		fs->lfs_avail -= lfs_btofsb(fs, LFS_SBPAD);
	if (fs->lfs_version > 1 && segnum == 0 &&
	    fs->lfs_s0addr < lfs_btofsb(fs, LFS_LABELPAD))
		fs->lfs_avail -= lfs_btofsb(fs, LFS_LABELPAD) - fs->lfs_s0addr;
	mutex_enter(&lfs_lock);
	fs->lfs_bfree += sup->su_nsums * lfs_btofsb(fs, fs->lfs_sumsize) +
		lfs_btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
	fs->lfs_dmeta -= sup->su_nsums * lfs_btofsb(fs, fs->lfs_sumsize) +
		lfs_btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
	if (fs->lfs_dmeta < 0)
		fs->lfs_dmeta = 0;
	mutex_exit(&lfs_lock);
	sup->su_flags &= ~SEGUSE_DIRTY;
	LFS_WRITESEGENTRY(sup, fs, segnum, bp);

	LFS_CLEANERINFO(cip, fs, bp);
	++cip->clean;
	--cip->dirty;
	fs->lfs_nclean = cip->clean;
	cip->bfree = fs->lfs_bfree;
	mutex_enter(&lfs_lock);
	cip->avail = fs->lfs_avail - fs->lfs_ravail - fs->lfs_favail;
	wakeup(&fs->lfs_avail);
	mutex_exit(&lfs_lock);
	(void) LFS_BWRITE_LOG(bp);

	if (lfs_dostats)
		++lfs_stats.segs_reclaimed;

	return (0);
}

/*
 * This will block until a segment in file system fsid is written.  A timeout
 * may be specified, which will wake the caller automatically when it
 * expires.  An fsid of -1 means any file system, and a timeout of 0 means
 * forever.
 */
int
lfs_segwait(fsid_t *fsidp, struct timeval *tv)
{
	struct mount *mntp;
	void *addr;
	u_long timeout;
	int error;

	KERNEL_LOCK(1, NULL);
	if (fsidp == NULL || (mntp = vfs_getvfs(fsidp)) == NULL)
		addr = &lfs_allclean_wakeup;
	else
		addr = &VFSTOULFS(mntp)->um_lfs->lfs_nextseg;
	/*
	 * XXX THIS COULD SLEEP FOREVER IF TIMEOUT IS {0,0}!
	 * XXX IS THAT WHAT IS INTENDED?
	 */
	timeout = tvtohz(tv);
	error = tsleep(addr, PCATCH | PVFS, "segment", timeout);
	KERNEL_UNLOCK_ONE(NULL);
	return (error == ERESTART ? EINTR : 0);
}

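/*
 * Illustrative sketch (not part of this file): a cleaner main loop might
 * use the segwait syscall to pace itself.  The five-second timeout is a
 * hypothetical choice, not anything this file prescribes.
 *
 *	struct timeval tv = { 5, 0 };
 *	for (;;) {
 *		lfs_segwait(&fsid, &tv);   // returns on segment write or timeout
 *		// examine segment usage, then bmapv/markv/segclean as needed
 *	}
 */
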
/*
 * sys_lfs_segwait:
 *
 * System call wrapper around lfs_segwait().
 *
 *  0 on success
 *  1 on timeout
 * -1/errno is returned on error.
 */
int
sys___lfs_segwait50(struct lwp *l, const struct sys___lfs_segwait50_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct timeval *) tv;
	} */
	struct timeval atv;
	fsid_t fsid;
	int error;

	/* XXX need we be su to segwait? */
	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
	    KAUTH_REQ_SYSTEM_LFS_SEGWAIT, NULL, NULL, NULL);
	if (error)
		return (error);
	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);

	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), &atv, sizeof(struct timeval));
		if (error)
			return (error);
		if (itimerfix(&atv))
			return (EINVAL);
	} else /* NULL or invalid */
		atv.tv_sec = atv.tv_usec = 0;
	return lfs_segwait(&fsid, &atv);
}

/*
 * VFS_VGET call specialized for the cleaner.  The cleaner already knows the
 * daddr from the ifile, so don't look it up again.  If the cleaner is
 * processing IINFO structures, it may have the ondisk inode already, so
 * don't go retrieving it again.
 *
 * We lfs_vref the vnode; it is the caller's responsibility to lfs_vunref
 * it when finished.
 */

int
lfs_fasthashget(dev_t dev, ino_t ino, struct vnode **vpp)
{
	struct vnode *vp;

	mutex_enter(&ulfs_ihash_lock);
	if ((vp = ulfs_ihashlookup(dev, ino)) != NULL) {
		mutex_enter(vp->v_interlock);
		mutex_exit(&ulfs_ihash_lock);
		if (vdead_check(vp, VDEAD_NOWAIT) != 0) {
			DLOG((DLOG_CLEAN, "lfs_fastvget: ino %d dead\n",
			      ino));
			lfs_stats.clean_vnlocked++;
			mutex_exit(vp->v_interlock);
			return EAGAIN;
		}
		if (lfs_vref(vp)) {
			DLOG((DLOG_CLEAN, "lfs_fastvget: lfs_vref failed"
			      " for ino %d\n", ino));
			lfs_stats.clean_inlocked++;
			return EAGAIN;
		}
	} else {
		mutex_exit(&ulfs_ihash_lock);
	}
	*vpp = vp;

	return (0);
}

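/*
 * Overview of lfs_fastvget() below (descriptive note, added for clarity):
 * (1) wait for the filesystem to finish mounting; (2) try the inode hash
 * via lfs_fasthashget() and, if found, lock and return that vnode;
 * (3) otherwise find the inode's disk address (the ifile daddr, or
 * LFS_IENTRY), allocate a fresh vnode, re-check the hash under
 * ulfs_hashlock, and initialize the inode either from the cleaner-supplied
 * dinode (blkp->bi_bp when bi_lbn == LFS_UNUSED_LBN) or by bread()ing the
 * inode block from disk.
 */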
int
lfs_fastvget(struct mount *mp, ino_t ino, BLOCK_INFO *blkp, int lk_flags,
    struct vnode **vpp)
{
	IFILE *ifp;
	struct inode *ip;
	struct ulfs1_dinode *dip, *dinp;
	struct vnode *vp;
	struct ulfsmount *ump;
	daddr_t daddr;
	dev_t dev;
	int error, retries;
	struct buf *bp;
	struct lfs *fs;

	ump = VFSTOULFS(mp);
	dev = ump->um_dev;
	fs = ump->um_lfs;

	/*
	 * Wait until the filesystem is fully mounted before allowing vget
	 * to complete.	 This prevents possible problems with roll-forward.
	 */
	mutex_enter(&lfs_lock);
	while (fs->lfs_flags & LFS_NOTYET) {
		mtsleep(&fs->lfs_flags, PRIBIO+1, "lfs_fnotyet", 0,
			&lfs_lock);
	}
	mutex_exit(&lfs_lock);

	/*
	 * This is playing fast and loose.  Someone may have the inode
	 * locked, in which case they are going to be distinctly unhappy
	 * if we trash something.
	 */

	error = lfs_fasthashget(dev, ino, vpp);
	if (error != 0)
		return error;
	else if (*vpp != NULL) {
		error = vn_lock(*vpp, lk_flags);
		if (error == EBUSY)
			error = EAGAIN;
		if (error) {
			lfs_vunref(*vpp);
			*vpp = NULL;
		}
		/* Locked or failed; either way we are done with this vnode. */
		return error;
	}

	if (blkp != NULL && blkp->bi_lbn == LFS_UNUSED_LBN)
		dinp = blkp->bi_bp;
	else
		dinp = NULL;

	if (ino == LFS_IFILE_INUM)
		daddr = fs->lfs_idaddr;
	else {
		LFS_IENTRY(ifp, fs, ino, bp);
		daddr = ifp->if_daddr;
		brelse(bp, 0);
	}
	if (daddr == LFS_UNUSED_DADDR)
		return ENOENT;

	/*
	 * getnewvnode(9) will call vfs_busy, which will block if the
	 * filesystem is being unmounted; but umount(9) is waiting for
	 * us because we're already holding the fs busy.
	 * XXXMP
	 */
	if (mp->mnt_iflag & IMNT_UNMOUNT) {
		*vpp = NULL;
		return EDEADLK;
	}
	error = getnewvnode(VT_LFS, mp, lfs_vnodeop_p, NULL, &vp);
	if (error) {
		*vpp = NULL;
		return (error);
	}

	mutex_enter(&ulfs_hashlock);
	error = lfs_fasthashget(dev, ino, vpp);
	if (error != 0 || *vpp != NULL) {
		mutex_exit(&ulfs_hashlock);
		ungetnewvnode(vp);
		return (error);
	}

	/* Allocate new vnode/inode. */
	lfs_vcreate(mp, ino, vp);

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ip = VTOI(vp);
	ulfs_ihashins(ip);
	mutex_exit(&ulfs_hashlock);

#ifdef notyet
	/* Not found in the cache => this vnode was loaded only for cleaning. */
	ip->i_lfs_iflags |= LFSI_BMAP;
#endif

	/*
	 * XXX
	 * This may not need to be here, logically it should go down with
	 * the i_devvp initialization.
	 * Ask Kirk.
	 */
	ip->i_lfs = fs;

	/* Read in the disk contents for the inode, copy into the inode. */
	if (dinp) {
		error = copyin(dinp, ip->i_din.ffs1_din, sizeof (struct ulfs1_dinode));
		if (error) {
			DLOG((DLOG_CLEAN, "lfs_fastvget: dinode copyin failed"
			      " for ino %d\n", ino));
			ulfs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			VOP_UNLOCK(vp);
			lfs_vunref(vp);
			*vpp = NULL;
			return (error);
		}
		if (ip->i_number != ino)
			panic("lfs_fastvget: I was fed the wrong inode!");
	} else {
		retries = 0;
	    again:
		error = bread(ump->um_devvp, LFS_FSBTODB(fs, daddr), fs->lfs_ibsize,
			      0, &bp);
		if (error) {
			DLOG((DLOG_CLEAN, "lfs_fastvget: bread failed (%d)\n",
			      error));
			/*
			 * The inode does not contain anything useful, so it
			 * would be misleading to leave it on its hash chain.
			 * Iput() will return it to the free list.
			 */
			ulfs_ihashrem(ip);

			/* Unlock and discard unneeded inode. */
			VOP_UNLOCK(vp);
			lfs_vunref(vp);
			*vpp = NULL;
			return (error);
		}
		dip = lfs_ifind(ump->um_lfs, ino, bp);
		if (dip == NULL) {
			/* Assume write has not completed yet; try again */
			brelse(bp, BC_INVAL);
			++retries;
			if (retries > LFS_IFIND_RETRIES)
				panic("lfs_fastvget: dinode not found");
			DLOG((DLOG_CLEAN, "lfs_fastvget: dinode not found,"
			      " retrying...\n"));
			goto again;
		}
		*ip->i_din.ffs1_din = *dip;
		brelse(bp, 0);
	}
	lfs_vinit(mp, &vp);

	*vpp = vp;

	KASSERT(VOP_ISLOCKED(vp));

	return (0);
}

/*
 * Make up a "fake" cleaner buffer, copy the data from userland into it.
 */
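/*
 * (Descriptive note, added for clarity: lfs_markv() above uses this for
 * data blocks, so that lfs_writeseg can write cleaned data without the
 * block ever entering the buffer cache.  On copyin failure it returns
 * NULL, which the caller above does not currently check for.)
 */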
struct buf *
lfs_fakebuf(struct lfs *fs, struct vnode *vp, int lbn, size_t size, void *uaddr)
{
	struct buf *bp;
	int error;

	KASSERT(VTOI(vp)->i_number != LFS_IFILE_INUM);

	bp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, size, LFS_NB_CLEAN);
	error = copyin(uaddr, bp->b_data, size);
	if (error) {
		lfs_freebuf(fs, bp);
		return NULL;
	}
	KDASSERT(bp->b_iodone == lfs_callback);

#if 0
	mutex_enter(&lfs_lock);
	++fs->lfs_iocount;
	mutex_exit(&lfs_lock);
#endif
	bp->b_bufsize = size;
	bp->b_bcount = size;
	return (bp);
}