      1 /*	$NetBSD: lfs_syscalls.c,v 1.156 2015/03/28 19:24:05 maxv Exp $	*/
      2 
      3 /*-
       4  * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2007, 2008
      5  *    The NetBSD Foundation, Inc.
      6  * All rights reserved.
      7  *
      8  * This code is derived from software contributed to The NetBSD Foundation
       9  * by Konrad E. Schroder <perseant@hhhh.org>.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 /*-
     33  * Copyright (c) 1991, 1993, 1994
     34  *	The Regents of the University of California.  All rights reserved.
     35  *
     36  * Redistribution and use in source and binary forms, with or without
     37  * modification, are permitted provided that the following conditions
     38  * are met:
     39  * 1. Redistributions of source code must retain the above copyright
     40  *    notice, this list of conditions and the following disclaimer.
     41  * 2. Redistributions in binary form must reproduce the above copyright
     42  *    notice, this list of conditions and the following disclaimer in the
     43  *    documentation and/or other materials provided with the distribution.
     44  * 3. Neither the name of the University nor the names of its contributors
     45  *    may be used to endorse or promote products derived from this software
     46  *    without specific prior written permission.
     47  *
     48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     58  * SUCH DAMAGE.
     59  *
     60  *	@(#)lfs_syscalls.c	8.10 (Berkeley) 5/14/95
     61  */
     62 
     63 #include <sys/cdefs.h>
     64 __KERNEL_RCSID(0, "$NetBSD: lfs_syscalls.c,v 1.156 2015/03/28 19:24:05 maxv Exp $");
     65 
     66 #ifndef LFS
     67 # define LFS		/* for prototypes in syscallargs.h */
     68 #endif
     69 
     70 #include <sys/param.h>
     71 #include <sys/systm.h>
     72 #include <sys/proc.h>
     73 #include <sys/buf.h>
     74 #include <sys/mount.h>
     75 #include <sys/vnode.h>
     76 #include <sys/kernel.h>
     77 #include <sys/kauth.h>
     78 #include <sys/syscallargs.h>
     79 
     80 #include <ufs/lfs/ulfs_inode.h>
     81 #include <ufs/lfs/ulfsmount.h>
     82 #include <ufs/lfs/ulfs_extern.h>
     83 
     84 #include <ufs/lfs/lfs.h>
     85 #include <ufs/lfs/lfs_kernel.h>
     86 #include <ufs/lfs/lfs_extern.h>
     87 
     88 struct buf *lfs_fakebuf(struct lfs *, struct vnode *, int, size_t, void *);
     89 int lfs_fasthashget(dev_t, ino_t, struct vnode **);
     90 
     91 pid_t lfs_cleaner_pid = 0;
     92 
     93 /*
     94  * sys_lfs_markv:
     95  *
     96  * This will mark inodes and blocks dirty, so they are written into the log.
     97  * It will block until all the blocks have been written.  The segment create
     98  * time passed in the block_info and inode_info structures is used to decide
     99  * if the data is valid for each block (in case some process dirtied a block
    100  * or inode that is being cleaned between the determination that a block is
    101  * live and the lfs_markv call).
    102  *
    103  *  0 on success
     104  * -1/errno is returned on error.
    105  */
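/*
 * For illustration only: a minimal sketch of how a userland cleaner might
 * drive this call.  It assumes the lfs_markv(2) wrapper and a userland
 * "struct block_info" with the fields named below (the same fields the
 * compat loop further down copies); ino, lbn, found_daddr, segcreate, vers,
 * blkcopy and blksize are hypothetical values the cleaner would have read
 * from the segment being cleaned.
 *
 *	struct block_info bi;
 *
 *	bi.bi_inode     = ino;		inode the block belongs to
 *	bi.bi_lbn       = lbn;		logical block number within that inode
 *	bi.bi_daddr     = found_daddr;	disk address where the cleaner found it
 *	bi.bi_segcreate = segcreate;	create time of the segment being cleaned
 *	bi.bi_version   = vers;		inode version from the ifile
 *	bi.bi_bp        = blkcopy;	userland copy of the block contents
 *	bi.bi_size      = blksize;	size of that copy in bytes
 *
 *	if (lfs_markv(&fsid, &bi, 1) < 0 && errno == EAGAIN)
 *		retry this segment on a later pass;
 */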
    106 #ifdef USE_64BIT_SYSCALLS
    107 int
    108 sys_lfs_markv(struct lwp *l, const struct sys_lfs_markv_args *uap, register_t *retval)
    109 {
    110 	/* {
    111 		syscallarg(fsid_t *) fsidp;
    112 		syscallarg(struct block_info *) blkiov;
    113 		syscallarg(int) blkcnt;
    114 	} */
    115 	BLOCK_INFO *blkiov;
    116 	int blkcnt, error;
    117 	fsid_t fsid;
    118 	struct lfs *fs;
    119 	struct mount *mntp;
    120 
    121 	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
    122 	    KAUTH_REQ_SYSTEM_LFS_MARKV, NULL, NULL, NULL);
    123 	if (error)
    124 		return (error);
    125 
    126 	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
    127 		return (error);
    128 
     129 	if ((mntp = vfs_getvfs(&fsid)) == NULL)
    130 		return (ENOENT);
    131 	fs = VFSTOULFS(mntp)->um_lfs;
    132 
    133 	blkcnt = SCARG(uap, blkcnt);
    134 	if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
    135 		return (EINVAL);
    136 
    137 	KERNEL_LOCK(1, NULL);
    138 	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
    139 	if ((error = copyin(SCARG(uap, blkiov), blkiov,
    140 			    blkcnt * sizeof(BLOCK_INFO))) != 0)
    141 		goto out;
    142 
     143 	if ((error = lfs_markv(l->l_proc, &fsid, blkiov, blkcnt)) == 0)
    144 		copyout(blkiov, SCARG(uap, blkiov),
    145 			blkcnt * sizeof(BLOCK_INFO));
    146     out:
    147 	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
    148 	KERNEL_UNLOCK_ONE(NULL);
    149 	return error;
    150 }
    151 #else
    152 int
    153 sys_lfs_markv(struct lwp *l, const struct sys_lfs_markv_args *uap, register_t *retval)
    154 {
    155 	/* {
    156 		syscallarg(fsid_t *) fsidp;
    157 		syscallarg(struct block_info *) blkiov;
    158 		syscallarg(int) blkcnt;
    159 	} */
    160 	BLOCK_INFO *blkiov;
    161 	BLOCK_INFO_15 *blkiov15;
    162 	int i, blkcnt, error;
    163 	fsid_t fsid;
    164 	struct lfs *fs;
    165 	struct mount *mntp;
    166 
    167 	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
    168 	    KAUTH_REQ_SYSTEM_LFS_MARKV, NULL, NULL, NULL);
    169 	if (error)
    170 		return (error);
    171 
    172 	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
    173 		return (error);
    174 
    175 	if ((mntp = vfs_getvfs(&fsid)) == NULL)
    176 		return (ENOENT);
    177 	fs = VFSTOULFS(mntp)->um_lfs;
    178 
    179 	blkcnt = SCARG(uap, blkcnt);
    180 	if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
    181 		return (EINVAL);
    182 
    183 	KERNEL_LOCK(1, NULL);
    184 	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
    185 	blkiov15 = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO_15), LFS_NB_BLKIOV);
    186 	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
    187 			    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
    188 		goto out;
    189 
    190 	for (i = 0; i < blkcnt; i++) {
    191 		blkiov[i].bi_inode     = blkiov15[i].bi_inode;
    192 		blkiov[i].bi_lbn       = blkiov15[i].bi_lbn;
    193 		blkiov[i].bi_daddr     = blkiov15[i].bi_daddr;
    194 		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
    195 		blkiov[i].bi_version   = blkiov15[i].bi_version;
    196 		blkiov[i].bi_bp	       = blkiov15[i].bi_bp;
    197 		blkiov[i].bi_size      = blkiov15[i].bi_size;
    198 	}
    199 
    200 	if ((error = lfs_markv(l->l_proc, &fsid, blkiov, blkcnt)) == 0) {
    201 		for (i = 0; i < blkcnt; i++) {
    202 			blkiov15[i].bi_inode	 = blkiov[i].bi_inode;
    203 			blkiov15[i].bi_lbn	 = blkiov[i].bi_lbn;
    204 			blkiov15[i].bi_daddr	 = blkiov[i].bi_daddr;
    205 			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
    206 			blkiov15[i].bi_version	 = blkiov[i].bi_version;
    207 			blkiov15[i].bi_bp	 = blkiov[i].bi_bp;
    208 			blkiov15[i].bi_size	 = blkiov[i].bi_size;
    209 		}
    210 		copyout(blkiov15, SCARG(uap, blkiov),
    211 			blkcnt * sizeof(BLOCK_INFO_15));
    212 	}
    213     out:
    214 	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
    215 	lfs_free(fs, blkiov15, LFS_NB_BLKIOV);
    216 	KERNEL_UNLOCK_ONE(NULL);
    217 	return error;
    218 }
    219 #endif
    220 
    221 #define	LFS_MARKV_MAX_BLOCKS	(LFS_MAX_BUFS)
    222 
    223 int
    224 lfs_markv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov,
    225     int blkcnt)
    226 {
    227 	BLOCK_INFO *blkp;
    228 	IFILE *ifp;
    229 	struct buf *bp;
    230 	struct inode *ip = NULL;
    231 	struct lfs *fs;
    232 	struct mount *mntp;
    233 	struct vnode *vp = NULL;
    234 	ino_t lastino;
    235 	daddr_t b_daddr, v_daddr;
    236 	int cnt, error;
    237 	int do_again = 0;
    238 	int numrefed = 0;
    239 	ino_t maxino;
    240 	size_t obsize;
    241 
    242 	/* number of blocks/inodes that we have already bwrite'ed */
    243 	int nblkwritten, ninowritten;
    244 
    245 	if ((mntp = vfs_getvfs(fsidp)) == NULL)
    246 		return (ENOENT);
    247 
    248 	fs = VFSTOULFS(mntp)->um_lfs;
    249 
    250 	if (fs->lfs_ronly)
    251 		return EROFS;
    252 
    253 	maxino = (lfs_fragstoblks(fs, VTOI(fs->lfs_ivnode)->i_ffs1_blocks) -
    254 		      fs->lfs_cleansz - fs->lfs_segtabsz) * fs->lfs_ifpb;
    255 
    256 	cnt = blkcnt;
    257 
    258 	if ((error = vfs_busy(mntp, NULL)) != 0)
    259 		return (error);
    260 
    261 	/*
     262 	 * Take the seglock so that, even if we have to sleep below,
     263 	 * our blocks cannot become invalid out from under us in the
     264 	 * meantime.
    265 	 *
    266 	 * It is also important to note here that unless we specify SEGM_CKP,
    267 	 * any Ifile blocks that we might be asked to clean will never get
    268 	 * to the disk.
    269 	 */
    270 	lfs_seglock(fs, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);
    271 
    272 	/* Mark blocks/inodes dirty.  */
    273 	error = 0;
    274 
    275 	/* these were inside the initialization for the for loop */
    276 	v_daddr = LFS_UNUSED_DADDR;
    277 	lastino = LFS_UNUSED_INUM;
    278 	nblkwritten = ninowritten = 0;
    279 	for (blkp = blkiov; cnt--; ++blkp)
    280 	{
    281 		/* Bounds-check incoming data, avoid panic for failed VGET */
    282 		if (blkp->bi_inode <= 0 || blkp->bi_inode >= maxino) {
    283 			error = EINVAL;
    284 			goto err3;
    285 		}
    286 		/*
    287 		 * Get the IFILE entry (only once) and see if the file still
    288 		 * exists.
    289 		 */
    290 		if (lastino != blkp->bi_inode) {
    291 			/*
    292 			 * Finish the old file, if there was one.  The presence
    293 			 * of a usable vnode in vp is signaled by a valid v_daddr.
    294 			 */
    295 			if (v_daddr != LFS_UNUSED_DADDR) {
    296 				lfs_vunref(vp);
    297 				numrefed--;
    298 			}
    299 
    300 			/*
    301 			 * Start a new file
    302 			 */
    303 			lastino = blkp->bi_inode;
    304 			if (blkp->bi_inode == LFS_IFILE_INUM)
    305 				v_daddr = fs->lfs_idaddr;
    306 			else {
    307 				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
    308 				/* XXX fix for force write */
    309 				v_daddr = ifp->if_daddr;
    310 				brelse(bp, 0);
    311 			}
    312 			if (v_daddr == LFS_UNUSED_DADDR)
    313 				continue;
    314 
    315 			/* Get the vnode/inode. */
    316 			error = lfs_fastvget(mntp, blkp->bi_inode, v_daddr,
    317 					   &vp,
    318 					   (blkp->bi_lbn == LFS_UNUSED_LBN
    319 					    ? blkp->bi_bp
    320 					    : NULL));
    321 
    322 			if (!error) {
    323 				numrefed++;
    324 			}
    325 			if (error) {
    326 				DLOG((DLOG_CLEAN, "lfs_markv: lfs_fastvget"
    327 				      " failed with %d (ino %d, segment %d)\n",
    328 				      error, blkp->bi_inode,
    329 				      lfs_dtosn(fs, blkp->bi_daddr)));
    330 				/*
    331 				 * If we got EAGAIN, that means that the
    332 				 * Inode was locked.  This is
    333 				 * recoverable: just clean the rest of
    334 				 * this segment, and let the cleaner try
    335 				 * again with another.	(When the
    336 				 * cleaner runs again, this segment will
    337 				 * sort high on the list, since it is
    338 				 * now almost entirely empty.) But, we
     339 				 * still set v_daddr = LFS_UNUSED_DADDR
    340 				 * so as not to test this over and over
    341 				 * again.
    342 				 */
    343 				if (error == EAGAIN) {
    344 					error = 0;
    345 					do_again++;
    346 				}
    347 #ifdef DIAGNOSTIC
    348 				else if (error != ENOENT)
    349 					panic("lfs_markv VFS_VGET FAILED");
    350 #endif
    351 				/* lastino = LFS_UNUSED_INUM; */
    352 				v_daddr = LFS_UNUSED_DADDR;
    353 				vp = NULL;
    354 				ip = NULL;
    355 				continue;
    356 			}
    357 			ip = VTOI(vp);
    358 			ninowritten++;
    359 		} else if (v_daddr == LFS_UNUSED_DADDR) {
    360 			/*
    361 			 * This can only happen if the vnode is dead (or
    362 			 * in any case we can't get it...e.g., it is
    363 			 * inlocked).  Keep going.
     364 			 * locked).  Keep going.
    365 			continue;
    366 		}
    367 
    368 		/* Past this point we are guaranteed that vp, ip are valid. */
    369 
    370 		/* Can't clean VU_DIROP directories in case of truncation */
    371 		/* XXX - maybe we should mark removed dirs specially? */
    372 		if (vp->v_type == VDIR && (vp->v_uflag & VU_DIROP)) {
    373 			do_again++;
    374 			continue;
    375 		}
    376 
    377 		/* If this BLOCK_INFO didn't contain a block, keep going. */
    378 		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
    379 			/* XXX need to make sure that the inode gets written in this case */
    380 			/* XXX but only write the inode if it's the right one */
    381 			if (blkp->bi_inode != LFS_IFILE_INUM) {
    382 				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
    383 				if (ifp->if_daddr == blkp->bi_daddr) {
    384 					mutex_enter(&lfs_lock);
    385 					LFS_SET_UINO(ip, IN_CLEANING);
    386 					mutex_exit(&lfs_lock);
    387 				}
    388 				brelse(bp, 0);
    389 			}
    390 			continue;
    391 		}
    392 
    393 		b_daddr = 0;
    394 		if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &b_daddr, NULL) ||
    395 		    LFS_DBTOFSB(fs, b_daddr) != blkp->bi_daddr)
    396 		{
    397 			if (lfs_dtosn(fs, LFS_DBTOFSB(fs, b_daddr)) ==
    398 			    lfs_dtosn(fs, blkp->bi_daddr))
    399 			{
    400 				DLOG((DLOG_CLEAN, "lfs_markv: wrong da same seg: %llx vs %llx\n",
    401 				      (long long)blkp->bi_daddr, (long long)LFS_DBTOFSB(fs, b_daddr)));
    402 			}
    403 			do_again++;
    404 			continue;
    405 		}
    406 
    407 		/*
    408 		 * Check block sizes.  The blocks being cleaned come from
    409 		 * disk, so they should have the same size as their on-disk
    410 		 * counterparts.
    411 		 */
    412 		if (blkp->bi_lbn >= 0)
    413 			obsize = lfs_blksize(fs, ip, blkp->bi_lbn);
    414 		else
    415 			obsize = fs->lfs_bsize;
    416 		/* Check for fragment size change */
    417 		if (blkp->bi_lbn >= 0 && blkp->bi_lbn < ULFS_NDADDR) {
    418 			obsize = ip->i_lfs_fragsize[blkp->bi_lbn];
    419 		}
    420 		if (obsize != blkp->bi_size) {
    421 			DLOG((DLOG_CLEAN, "lfs_markv: ino %d lbn %lld wrong"
    422 			      " size (%ld != %d), try again\n",
    423 			      blkp->bi_inode, (long long)blkp->bi_lbn,
    424 			      (long) obsize, blkp->bi_size));
    425 			do_again++;
    426 			continue;
    427 		}
    428 
    429 		/*
    430 		 * If we get to here, then we are keeping the block.  If
    431 		 * it is an indirect block, we want to actually put it
    432 		 * in the buffer cache so that it can be updated in the
    433 		 * finish_meta section.	 If it's not, we need to
    434 		 * allocate a fake buffer so that writeseg can perform
    435 		 * the copyin and write the buffer.
    436 		 */
    437 		if (ip->i_number != LFS_IFILE_INUM && blkp->bi_lbn >= 0) {
    438 			/* Data Block */
    439 			bp = lfs_fakebuf(fs, vp, blkp->bi_lbn,
    440 					 blkp->bi_size, blkp->bi_bp);
    441 			/* Pretend we used bread() to get it */
    442 			bp->b_blkno = LFS_FSBTODB(fs, blkp->bi_daddr);
    443 		} else {
    444 			/* Indirect block or ifile */
    445 			if (blkp->bi_size != fs->lfs_bsize &&
    446 			    ip->i_number != LFS_IFILE_INUM)
    447 				panic("lfs_markv: partial indirect block?"
    448 				    " size=%d\n", blkp->bi_size);
    449 			bp = getblk(vp, blkp->bi_lbn, blkp->bi_size, 0, 0);
    450 			if (!(bp->b_oflags & (BO_DONE|BO_DELWRI))) {
    451 				/*
    452 				 * The block in question was not found
    453 				 * in the cache; i.e., the block that
    454 				 * getblk() returned is empty.	So, we
    455 				 * can (and should) copy in the
    456 				 * contents, because we've already
    457 				 * determined that this was the right
    458 				 * version of this block on disk.
    459 				 *
    460 				 * And, it can't have changed underneath
    461 				 * us, because we have the segment lock.
    462 				 */
    463 				error = copyin(blkp->bi_bp, bp->b_data, blkp->bi_size);
    464 				if (error)
    465 					goto err2;
    466 			}
    467 		}
    468 		if ((error = lfs_bwrite_ext(bp, BW_CLEAN)) != 0)
    469 			goto err2;
    470 
    471 		nblkwritten++;
    472 		/*
    473 		 * XXX should account indirect blocks and ifile pages as well
    474 		 */
    475 		if (nblkwritten + lfs_lblkno(fs, ninowritten * sizeof (struct ulfs1_dinode))
    476 		    > LFS_MARKV_MAX_BLOCKS) {
    477 			DLOG((DLOG_CLEAN, "lfs_markv: writing %d blks %d inos\n",
    478 			      nblkwritten, ninowritten));
    479 			lfs_segwrite(mntp, SEGM_CLEAN);
    480 			nblkwritten = ninowritten = 0;
    481 		}
    482 	}
    483 
    484 	/*
    485 	 * Finish the old file, if there was one
    486 	 */
    487 	if (v_daddr != LFS_UNUSED_DADDR) {
    488 		lfs_vunref(vp);
    489 		numrefed--;
    490 	}
    491 
    492 #ifdef DIAGNOSTIC
    493 	if (numrefed != 0)
    494 		panic("lfs_markv: numrefed=%d", numrefed);
    495 #endif
    496 	DLOG((DLOG_CLEAN, "lfs_markv: writing %d blks %d inos (check point)\n",
    497 	      nblkwritten, ninowritten));
    498 
    499 	/*
    500 	 * The last write has to be SEGM_SYNC, because of calling semantics.
    501 	 * It also has to be SEGM_CKP, because otherwise we could write
    502 	 * over the newly cleaned data contained in a checkpoint, and then
    503 	 * we'd be unhappy at recovery time.
    504 	 */
    505 	lfs_segwrite(mntp, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);
    506 
    507 	lfs_segunlock(fs);
    508 
    509 	vfs_unbusy(mntp, false, NULL);
    510 	if (error)
    511 		return (error);
    512 	else if (do_again)
    513 		return EAGAIN;
    514 
    515 	return 0;
    516 
    517 err2:
    518 	DLOG((DLOG_CLEAN, "lfs_markv err2\n"));
    519 
    520 	/*
    521 	 * XXX we're here because copyin() failed.
    522 	 * XXX it means that we can't trust the cleanerd.  too bad.
    523 	 * XXX how can we recover from this?
    524 	 */
    525 
    526 err3:
    527 	/*
    528 	 * XXX should do segwrite here anyway?
    529 	 */
    530 
    531 	if (v_daddr != LFS_UNUSED_DADDR) {
    532 		lfs_vunref(vp);
    533 		--numrefed;
    534 	}
    535 
    536 	lfs_segunlock(fs);
    537 	vfs_unbusy(mntp, false, NULL);
    538 #ifdef DIAGNOSTIC
    539 	if (numrefed != 0)
    540 		panic("lfs_markv: numrefed=%d", numrefed);
    541 #endif
    542 
    543 	return (error);
    544 }
    545 
    546 /*
    547  * sys_lfs_bmapv:
    548  *
    549  * This will fill in the current disk address for arrays of blocks.
    550  *
    551  *  0 on success
     552  * -1/errno is returned on error.
    553  */
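/*
 * For illustration only: a hedged sketch of the liveness test a userland
 * cleaner would build on this call, assuming the lfs_bmapv(2) wrapper and
 * an array bi[] already filled in from the segment summaries (bi_inode,
 * bi_lbn and bi_daddr as found in the segment under consideration).
 *
 *	if (lfs_bmapv(&fsid, bi, nblocks) == 0) {
 *		for (i = 0; i < nblocks; i++)
 *			if (bi[i].bi_daddr == found_daddr[i])
 *				block i is still live; pass it to lfs_markv()
 *	}
 *
 * Blocks whose current address no longer matches where they were found have
 * been superseded and can simply be dropped.
 */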
    554 #ifdef USE_64BIT_SYSCALLS
    555 int
    556 sys_lfs_bmapv(struct lwp *l, const struct sys_lfs_bmapv_args *uap, register_t *retval)
    557 {
    558 	/* {
    559 		syscallarg(fsid_t *) fsidp;
    560 		syscallarg(struct block_info *) blkiov;
    561 		syscallarg(int) blkcnt;
    562 	} */
    563 	BLOCK_INFO *blkiov;
    564 	int blkcnt, error;
    565 	fsid_t fsid;
    566 	struct lfs *fs;
    567 	struct mount *mntp;
    568 
    569 	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
    570 	    KAUTH_REQ_SYSTEM_LFS_BMAPV, NULL, NULL, NULL);
    571 	if (error)
    572 		return (error);
    573 
    574 	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
    575 		return (error);
    576 
    577 	if ((mntp = vfs_getvfs(&fsid)) == NULL)
    578 		return (ENOENT);
    579 	fs = VFSTOULFS(mntp)->um_lfs;
    580 
    581 	blkcnt = SCARG(uap, blkcnt);
    582 	if ((u_int) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
    583 		return (EINVAL);
    584 	KERNEL_LOCK(1, NULL);
    585 	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
    586 	if ((error = copyin(SCARG(uap, blkiov), blkiov,
    587 			    blkcnt * sizeof(BLOCK_INFO))) != 0)
    588 		goto out;
    589 
     590 	if ((error = lfs_bmapv(l->l_proc, &fsid, blkiov, blkcnt)) == 0)
    591 		copyout(blkiov, SCARG(uap, blkiov),
    592 			blkcnt * sizeof(BLOCK_INFO));
    593     out:
    594 	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
    595 	KERNEL_UNLOCK_ONE(NULL);
    596 	return error;
    597 }
    598 #else
    599 int
    600 sys_lfs_bmapv(struct lwp *l, const struct sys_lfs_bmapv_args *uap, register_t *retval)
    601 {
    602 	/* {
    603 		syscallarg(fsid_t *) fsidp;
    604 		syscallarg(struct block_info *) blkiov;
    605 		syscallarg(int) blkcnt;
    606 	} */
    607 	BLOCK_INFO *blkiov;
    608 	BLOCK_INFO_15 *blkiov15;
    609 	int i, blkcnt, error;
    610 	fsid_t fsid;
    611 	struct lfs *fs;
    612 	struct mount *mntp;
    613 
    614 	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
    615 	    KAUTH_REQ_SYSTEM_LFS_BMAPV, NULL, NULL, NULL);
    616 	if (error)
    617 		return (error);
    618 
    619 	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
    620 		return (error);
    621 
    622 	if ((mntp = vfs_getvfs(&fsid)) == NULL)
    623 		return (ENOENT);
    624 	fs = VFSTOULFS(mntp)->um_lfs;
    625 
    626 	blkcnt = SCARG(uap, blkcnt);
    627 	if ((size_t) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
    628 		return (EINVAL);
    629 	KERNEL_LOCK(1, NULL);
    630 	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
    631 	blkiov15 = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO_15), LFS_NB_BLKIOV);
    632 	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
    633 			    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
    634 		goto out;
    635 
    636 	for (i = 0; i < blkcnt; i++) {
    637 		blkiov[i].bi_inode     = blkiov15[i].bi_inode;
    638 		blkiov[i].bi_lbn       = blkiov15[i].bi_lbn;
    639 		blkiov[i].bi_daddr     = blkiov15[i].bi_daddr;
    640 		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
    641 		blkiov[i].bi_version   = blkiov15[i].bi_version;
    642 		blkiov[i].bi_bp	       = blkiov15[i].bi_bp;
    643 		blkiov[i].bi_size      = blkiov15[i].bi_size;
    644 	}
    645 
    646 	if ((error = lfs_bmapv(l->l_proc, &fsid, blkiov, blkcnt)) == 0) {
    647 		for (i = 0; i < blkcnt; i++) {
    648 			blkiov15[i].bi_inode	 = blkiov[i].bi_inode;
    649 			blkiov15[i].bi_lbn	 = blkiov[i].bi_lbn;
    650 			blkiov15[i].bi_daddr	 = blkiov[i].bi_daddr;
    651 			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
    652 			blkiov15[i].bi_version	 = blkiov[i].bi_version;
    653 			blkiov15[i].bi_bp	 = blkiov[i].bi_bp;
    654 			blkiov15[i].bi_size	 = blkiov[i].bi_size;
    655 		}
    656 		copyout(blkiov15, SCARG(uap, blkiov),
    657 			blkcnt * sizeof(BLOCK_INFO_15));
    658 	}
    659     out:
    660 	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
    661 	lfs_free(fs, blkiov15, LFS_NB_BLKIOV);
    662 	KERNEL_UNLOCK_ONE(NULL);
    663 	return error;
    664 }
    665 #endif
    666 
    667 int
    668 lfs_bmapv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
    669 {
    670 	BLOCK_INFO *blkp;
    671 	IFILE *ifp;
    672 	struct buf *bp;
    673 	struct inode *ip = NULL;
    674 	struct lfs *fs;
    675 	struct mount *mntp;
    676 	struct ulfsmount *ump;
    677 	struct vnode *vp;
    678 	ino_t lastino;
    679 	daddr_t v_daddr;
    680 	int cnt, error;
    681 	int numrefed = 0;
    682 
    683 	lfs_cleaner_pid = p->p_pid;
    684 
    685 	if ((mntp = vfs_getvfs(fsidp)) == NULL)
    686 		return (ENOENT);
    687 
    688 	ump = VFSTOULFS(mntp);
    689 	if ((error = vfs_busy(mntp, NULL)) != 0)
    690 		return (error);
    691 
    692 	cnt = blkcnt;
    693 
    694 	fs = VFSTOULFS(mntp)->um_lfs;
    695 
    696 	error = 0;
    697 
    698 	/* these were inside the initialization for the for loop */
    699 	v_daddr = LFS_UNUSED_DADDR;
    700 	lastino = LFS_UNUSED_INUM;
    701 	for (blkp = blkiov; cnt--; ++blkp)
    702 	{
    703 		/*
    704 		 * Get the IFILE entry (only once) and see if the file still
    705 		 * exists.
    706 		 */
    707 		if (lastino != blkp->bi_inode) {
    708 			/*
    709 			 * Finish the old file, if there was one.  The presence
    710 			 * of a usable vnode in vp is signaled by a valid
    711 			 * v_daddr.
    712 			 */
    713 			if (v_daddr != LFS_UNUSED_DADDR) {
    714 				lfs_vunref(vp);
    715 				if (VTOI(vp)->i_lfs_iflags & LFSI_BMAP) {
    716 					mutex_enter(vp->v_interlock);
    717 					if (vget(vp, LK_NOWAIT) == 0) {
    718 						if (! vrecycle(vp))
    719 							vrele(vp);
    720 					}
    721 				}
    722 				numrefed--;
    723 			}
    724 
    725 			/*
    726 			 * Start a new file
    727 			 */
    728 			lastino = blkp->bi_inode;
    729 			if (blkp->bi_inode == LFS_IFILE_INUM)
    730 				v_daddr = fs->lfs_idaddr;
    731 			else {
    732 				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
    733 				v_daddr = ifp->if_daddr;
    734 				brelse(bp, 0);
    735 			}
    736 			if (v_daddr == LFS_UNUSED_DADDR) {
    737 				blkp->bi_daddr = LFS_UNUSED_DADDR;
    738 				continue;
    739 			}
    740 			/*
    741 			 * A regular call to VFS_VGET could deadlock
    742 			 * here.  Instead, we try an unlocked access.
    743 			 */
    744 			mutex_enter(&ulfs_ihash_lock);
    745 			vp = ulfs_ihashlookup(ump->um_dev, blkp->bi_inode);
    746 			if (vp != NULL)
    747 				mutex_enter(vp->v_interlock);
    748 			if (vp != NULL && vdead_check(vp, VDEAD_NOWAIT) == 0) {
    749 				ip = VTOI(vp);
    750 				mutex_exit(&ulfs_ihash_lock);
    751 				if (lfs_vref(vp)) {
    752 					v_daddr = LFS_UNUSED_DADDR;
    753 					continue;
    754 				}
    755 				numrefed++;
    756 			} else {
    757 				if (vp != NULL)
    758 					mutex_exit(vp->v_interlock);
    759 				mutex_exit(&ulfs_ihash_lock);
    760 				/*
    761 				 * Don't VFS_VGET if we're being unmounted,
    762 				 * since we hold vfs_busy().
    763 				 */
    764 				if (mntp->mnt_iflag & IMNT_UNMOUNT) {
    765 					v_daddr = LFS_UNUSED_DADDR;
    766 					continue;
    767 				}
    768 				error = VFS_VGET(mntp, blkp->bi_inode, &vp);
    769 				if (error) {
    770 					DLOG((DLOG_CLEAN, "lfs_bmapv: vget ino"
     771 					      " %d failed with %d",
     772 					      blkp->bi_inode, error));
    773 					v_daddr = LFS_UNUSED_DADDR;
    774 					continue;
    775 				} else {
    776 					KASSERT(VOP_ISLOCKED(vp));
    777 					VTOI(vp)->i_lfs_iflags |= LFSI_BMAP;
    778 					VOP_UNLOCK(vp);
    779 					numrefed++;
    780 				}
    781 			}
    782 			ip = VTOI(vp);
    783 		} else if (v_daddr == LFS_UNUSED_DADDR) {
    784 			/*
    785 			 * This can only happen if the vnode is dead.
    786 			 * Keep going.	Note that we DO NOT set the
     787 			 * bi_daddr to anything -- if we failed to get
    788 			 * the vnode, for example, we want to assume
    789 			 * conservatively that all of its blocks *are*
    790 			 * located in the segment in question.
    791 			 * lfs_markv will throw them out if we are
    792 			 * wrong.
    793 			 */
    794 			/* blkp->bi_daddr = LFS_UNUSED_DADDR; */
    795 			continue;
    796 		}
    797 
    798 		/* Past this point we are guaranteed that vp, ip are valid. */
    799 
    800 		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
    801 			/*
    802 			 * We just want the inode address, which is
    803 			 * conveniently in v_daddr.
    804 			 */
    805 			blkp->bi_daddr = v_daddr;
    806 		} else {
    807 			daddr_t bi_daddr;
    808 
    809 			/* XXX ondisk32 */
    810 			error = VOP_BMAP(vp, blkp->bi_lbn, NULL,
    811 					 &bi_daddr, NULL);
    812 			if (error)
    813 			{
    814 				blkp->bi_daddr = LFS_UNUSED_DADDR;
    815 				continue;
    816 			}
    817 			blkp->bi_daddr = LFS_DBTOFSB(fs, bi_daddr);
    818 			/* Fill in the block size, too */
    819 			if (blkp->bi_lbn >= 0)
    820 				blkp->bi_size = lfs_blksize(fs, ip, blkp->bi_lbn);
    821 			else
    822 				blkp->bi_size = fs->lfs_bsize;
    823 		}
    824 	}
    825 
    826 	/*
    827 	 * Finish the old file, if there was one.  The presence
    828 	 * of a usable vnode in vp is signaled by a valid v_daddr.
    829 	 */
    830 	if (v_daddr != LFS_UNUSED_DADDR) {
    831 		lfs_vunref(vp);
    832 		/* Recycle as above. */
    833 		if (ip->i_lfs_iflags & LFSI_BMAP) {
    834 			mutex_enter(vp->v_interlock);
    835 			if (vget(vp, LK_NOWAIT) == 0) {
    836 				if (! vrecycle(vp))
    837 					vrele(vp);
    838 			}
    839 		}
    840 		numrefed--;
    841 	}
    842 
    843 #ifdef DIAGNOSTIC
    844 	if (numrefed != 0)
    845 		panic("lfs_bmapv: numrefed=%d", numrefed);
    846 #endif
    847 
    848 	vfs_unbusy(mntp, false, NULL);
    849 
    850 	return 0;
    851 }
    852 
    853 /*
    854  * sys_lfs_segclean:
    855  *
    856  * Mark the segment clean.
    857  *
    858  *  0 on success
     859  * -1/errno is returned on error.
    860  */
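/*
 * For illustration only: once lfs_markv() has rewritten every live block of
 * a segment (its final write is a synchronous checkpoint), a cleaner could
 * ask for the segment to be reclaimed.  This assumes the lfs_segclean(2)
 * wrapper; EBUSY below means live bytes remain or the segment is active,
 * so the segment is simply left for a later pass.
 *
 *	if (lfs_segclean(&fsid, segnum) < 0 && errno == EBUSY)
 *		try this segment again later;
 */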
    861 int
    862 sys_lfs_segclean(struct lwp *l, const struct sys_lfs_segclean_args *uap, register_t *retval)
    863 {
    864 	/* {
    865 		syscallarg(fsid_t *) fsidp;
    866 		syscallarg(u_long) segment;
    867 	} */
    868 	struct lfs *fs;
    869 	struct mount *mntp;
    870 	fsid_t fsid;
    871 	int error;
    872 	unsigned long segnum;
    873 
    874 	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
    875 	    KAUTH_REQ_SYSTEM_LFS_SEGCLEAN, NULL, NULL, NULL);
    876 	if (error)
    877 		return (error);
    878 
    879 	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
    880 		return (error);
    881 	if ((mntp = vfs_getvfs(&fsid)) == NULL)
    882 		return (ENOENT);
    883 
    884 	fs = VFSTOULFS(mntp)->um_lfs;
    885 	segnum = SCARG(uap, segment);
    886 
    887 	if ((error = vfs_busy(mntp, NULL)) != 0)
    888 		return (error);
    889 
    890 	KERNEL_LOCK(1, NULL);
    891 	lfs_seglock(fs, SEGM_PROT);
    892 	error = lfs_do_segclean(fs, segnum);
    893 	lfs_segunlock(fs);
    894 	KERNEL_UNLOCK_ONE(NULL);
    895 	vfs_unbusy(mntp, false, NULL);
    896 	return error;
    897 }
    898 
    899 /*
    900  * Actually mark the segment clean.
    901  * Must be called with the segment lock held.
    902  */
    903 int
    904 lfs_do_segclean(struct lfs *fs, unsigned long segnum)
    905 {
    906 	extern int lfs_dostats;
    907 	struct buf *bp;
    908 	CLEANERINFO *cip;
    909 	SEGUSE *sup;
    910 
    911 	if (lfs_dtosn(fs, fs->lfs_curseg) == segnum) {
    912 		return (EBUSY);
    913 	}
    914 
    915 	LFS_SEGENTRY(sup, fs, segnum, bp);
    916 	if (sup->su_nbytes) {
    917 		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
    918 		      " %d live bytes\n", segnum, sup->su_nbytes));
    919 		brelse(bp, 0);
    920 		return (EBUSY);
    921 	}
    922 	if (sup->su_flags & SEGUSE_ACTIVE) {
    923 		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
    924 		      " segment is active\n", segnum));
    925 		brelse(bp, 0);
    926 		return (EBUSY);
    927 	}
    928 	if (!(sup->su_flags & SEGUSE_DIRTY)) {
    929 		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
    930 		      " segment is already clean\n", segnum));
    931 		brelse(bp, 0);
    932 		return (EALREADY);
    933 	}
    934 
    935 	fs->lfs_avail += lfs_segtod(fs, 1);
    936 	if (sup->su_flags & SEGUSE_SUPERBLOCK)
    937 		fs->lfs_avail -= lfs_btofsb(fs, LFS_SBPAD);
    938 	if (fs->lfs_version > 1 && segnum == 0 &&
    939 	    fs->lfs_start < lfs_btofsb(fs, LFS_LABELPAD))
    940 		fs->lfs_avail -= lfs_btofsb(fs, LFS_LABELPAD) - fs->lfs_start;
    941 	mutex_enter(&lfs_lock);
    942 	fs->lfs_bfree += sup->su_nsums * lfs_btofsb(fs, fs->lfs_sumsize) +
    943 		lfs_btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
    944 	fs->lfs_dmeta -= sup->su_nsums * lfs_btofsb(fs, fs->lfs_sumsize) +
    945 		lfs_btofsb(fs, sup->su_ninos * fs->lfs_ibsize);
    946 	if (fs->lfs_dmeta < 0)
    947 		fs->lfs_dmeta = 0;
    948 	mutex_exit(&lfs_lock);
    949 	sup->su_flags &= ~SEGUSE_DIRTY;
    950 	LFS_WRITESEGENTRY(sup, fs, segnum, bp);
    951 
    952 	LFS_CLEANERINFO(cip, fs, bp);
    953 	++cip->clean;
    954 	--cip->dirty;
    955 	fs->lfs_nclean = cip->clean;
    956 	cip->bfree = fs->lfs_bfree;
    957 	mutex_enter(&lfs_lock);
    958 	cip->avail = fs->lfs_avail - fs->lfs_ravail - fs->lfs_favail;
    959 	wakeup(&fs->lfs_avail);
    960 	mutex_exit(&lfs_lock);
    961 	(void) LFS_BWRITE_LOG(bp);
    962 
    963 	if (lfs_dostats)
    964 		++lfs_stats.segs_reclaimed;
    965 
    966 	return (0);
    967 }
    968 
    969 /*
    970  * This will block until a segment in file system fsid is written.  A timeout
     971  * may be specified, which will wake the cleaner automatically when it expires.
    972  * An fsid of -1 means any file system, and a timeout of 0 means forever.
    973  */
    974 int
    975 lfs_segwait(fsid_t *fsidp, struct timeval *tv)
    976 {
    977 	struct mount *mntp;
    978 	void *addr;
    979 	u_long timeout;
    980 	int error;
    981 
    982 	KERNEL_LOCK(1, NULL);
    983 	if (fsidp == NULL || (mntp = vfs_getvfs(fsidp)) == NULL)
    984 		addr = &lfs_allclean_wakeup;
    985 	else
    986 		addr = &VFSTOULFS(mntp)->um_lfs->lfs_nextseg;
    987 	/*
    988 	 * XXX THIS COULD SLEEP FOREVER IF TIMEOUT IS {0,0}!
    989 	 * XXX IS THAT WHAT IS INTENDED?
    990 	 */
    991 	timeout = tvtohz(tv);
    992 	error = tsleep(addr, PCATCH | PVFS, "segment", timeout);
    993 	KERNEL_UNLOCK_ONE(NULL);
    994 	return (error == ERESTART ? EINTR : 0);
    995 }
    996 
    997 /*
    998  * sys_lfs_segwait:
    999  *
   1000  * System call wrapper around lfs_segwait().
   1001  *
   1002  *  0 on success
   1003  *  1 on timeout
    1004  * -1/errno is returned on error.
   1005  */
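/*
 * For illustration only: a hedged sketch of a cleaner main loop built on
 * this call, assuming the lfs_segwait(2) wrapper.  The ten-minute timeout
 * is an arbitrary example value, not anything this file prescribes.
 *
 *	struct timeval tv = { 600, 0 };
 *
 *	for (;;) {
 *		lfs_segwait(&fsid, &tv);
 *		scan the segment usage table and clean as needed;
 *	}
 */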
   1006 int
   1007 sys___lfs_segwait50(struct lwp *l, const struct sys___lfs_segwait50_args *uap,
   1008     register_t *retval)
   1009 {
   1010 	/* {
   1011 		syscallarg(fsid_t *) fsidp;
   1012 		syscallarg(struct timeval *) tv;
   1013 	} */
   1014 	struct timeval atv;
   1015 	fsid_t fsid;
   1016 	int error;
   1017 
   1018 	/* XXX need we be su to segwait? */
   1019 	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
   1020 	    KAUTH_REQ_SYSTEM_LFS_SEGWAIT, NULL, NULL, NULL);
   1021 	if (error)
   1022 		return (error);
   1023 	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
   1024 		return (error);
   1025 
   1026 	if (SCARG(uap, tv)) {
   1027 		error = copyin(SCARG(uap, tv), &atv, sizeof(struct timeval));
   1028 		if (error)
   1029 			return (error);
   1030 		if (itimerfix(&atv))
   1031 			return (EINVAL);
   1032 	} else /* NULL or invalid */
   1033 		atv.tv_sec = atv.tv_usec = 0;
   1034 	return lfs_segwait(&fsid, &atv);
   1035 }
   1036 
   1037 /*
   1038  * VFS_VGET call specialized for the cleaner.  The cleaner already knows the
   1039  * daddr from the ifile, so don't look it up again.  If the cleaner is
   1040  * processing IINFO structures, it may have the ondisk inode already, so
   1041  * don't go retrieving it again.
   1042  *
   1043  * we lfs_vref, and it is the caller's responsibility to lfs_vunref
   1044  * when finished.
   1045  */
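/*
 * For illustration, the caller contract in kernel terms (this mirrors what
 * lfs_markv() above actually does): the vnode comes back referenced but
 * unlocked, so the caller releases it with lfs_vunref() when finished.
 *
 *	struct vnode *vp;
 *
 *	if (lfs_fastvget(mntp, ino, daddr, &vp, NULL) == 0) {
 *		... work with VTOI(vp) ...
 *		lfs_vunref(vp);
 *	}
 */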
   1046 
   1047 int
   1048 lfs_fasthashget(dev_t dev, ino_t ino, struct vnode **vpp)
   1049 {
   1050 	struct vnode *vp;
   1051 
   1052 	mutex_enter(&ulfs_ihash_lock);
   1053 	if ((vp = ulfs_ihashlookup(dev, ino)) != NULL) {
   1054 		mutex_enter(vp->v_interlock);
   1055 		mutex_exit(&ulfs_ihash_lock);
   1056 		if (vdead_check(vp, VDEAD_NOWAIT) != 0) {
   1057 			DLOG((DLOG_CLEAN, "lfs_fastvget: ino %d dead\n",
   1058 			      ino));
   1059 			lfs_stats.clean_vnlocked++;
   1060 			mutex_exit(vp->v_interlock);
   1061 			return EAGAIN;
   1062 		}
   1063 		if (lfs_vref(vp)) {
   1064 			DLOG((DLOG_CLEAN, "lfs_fastvget: lfs_vref failed"
   1065 			      " for ino %d\n", ino));
   1066 			lfs_stats.clean_inlocked++;
   1067 			return EAGAIN;
   1068 		}
   1069 	} else {
   1070 		mutex_exit(&ulfs_ihash_lock);
   1071 	}
   1072 	*vpp = vp;
   1073 
   1074 	return (0);
   1075 }
   1076 
   1077 int
   1078 lfs_fastvget(struct mount *mp, ino_t ino, daddr_t daddr, struct vnode **vpp,
   1079 	     struct ulfs1_dinode *dinp)
   1080 {
   1081 	struct inode *ip;
   1082 	struct ulfs1_dinode *dip;
   1083 	struct vnode *vp;
   1084 	struct ulfsmount *ump;
   1085 	dev_t dev;
   1086 	int error, retries;
   1087 	struct buf *bp;
   1088 	struct lfs *fs;
   1089 
   1090 	ump = VFSTOULFS(mp);
   1091 	dev = ump->um_dev;
   1092 	fs = ump->um_lfs;
   1093 
   1094 	/*
   1095 	 * Wait until the filesystem is fully mounted before allowing vget
   1096 	 * to complete.	 This prevents possible problems with roll-forward.
   1097 	 */
   1098 	mutex_enter(&lfs_lock);
   1099 	while (fs->lfs_flags & LFS_NOTYET) {
   1100 		mtsleep(&fs->lfs_flags, PRIBIO+1, "lfs_fnotyet", 0,
   1101 			&lfs_lock);
   1102 	}
   1103 	mutex_exit(&lfs_lock);
   1104 
   1105 	/*
   1106 	 * This is playing fast and loose.  Someone may have the inode
   1107 	 * locked, in which case they are going to be distinctly unhappy
   1108 	 * if we trash something.
   1109 	 */
   1110 
   1111 	error = lfs_fasthashget(dev, ino, vpp);
   1112 	if (error != 0 || *vpp != NULL)
   1113 		return (error);
   1114 
   1115 	/*
   1116 	 * getnewvnode(9) will call vfs_busy, which will block if the
   1117 	 * filesystem is being unmounted; but umount(9) is waiting for
   1118 	 * us because we're already holding the fs busy.
   1119 	 * XXXMP
   1120 	 */
   1121 	if (mp->mnt_iflag & IMNT_UNMOUNT) {
   1122 		*vpp = NULL;
   1123 		return EDEADLK;
   1124 	}
   1125 	error = getnewvnode(VT_LFS, mp, lfs_vnodeop_p, NULL, &vp);
   1126 	if (error) {
   1127 		*vpp = NULL;
   1128 		return (error);
   1129 	}
   1130 
   1131 	mutex_enter(&ulfs_hashlock);
   1132 	error = lfs_fasthashget(dev, ino, vpp);
   1133 	if (error != 0 || *vpp != NULL) {
   1134 		mutex_exit(&ulfs_hashlock);
   1135 		ungetnewvnode(vp);
   1136 		return (error);
   1137 	}
   1138 
   1139 	/* Allocate new vnode/inode. */
   1140 	lfs_vcreate(mp, ino, vp);
   1141 
   1142 	/*
   1143 	 * Put it onto its hash chain and lock it so that other requests for
   1144 	 * this inode will block if they arrive while we are sleeping waiting
   1145 	 * for old data structures to be purged or for the contents of the
   1146 	 * disk portion of this inode to be read.
   1147 	 */
   1148 	ip = VTOI(vp);
   1149 	ulfs_ihashins(ip);
   1150 	mutex_exit(&ulfs_hashlock);
   1151 
   1152 #ifdef notyet
   1153 	/* Not found in the cache => this vnode was loaded only for cleaning. */
   1154 	ip->i_lfs_iflags |= LFSI_BMAP;
   1155 #endif
   1156 
   1157 	/*
   1158 	 * XXX
   1159 	 * This may not need to be here, logically it should go down with
   1160 	 * the i_devvp initialization.
   1161 	 * Ask Kirk.
   1162 	 */
   1163 	ip->i_lfs = fs;
   1164 
   1165 	/* Read in the disk contents for the inode, copy into the inode. */
   1166 	if (dinp) {
   1167 		error = copyin(dinp, ip->i_din.ffs1_din, sizeof (struct ulfs1_dinode));
   1168 		if (error) {
   1169 			DLOG((DLOG_CLEAN, "lfs_fastvget: dinode copyin failed"
   1170 			      " for ino %d\n", ino));
   1171 			ulfs_ihashrem(ip);
   1172 
   1173 			/* Unlock and discard unneeded inode. */
   1174 			VOP_UNLOCK(vp);
   1175 			lfs_vunref(vp);
   1176 			*vpp = NULL;
   1177 			return (error);
   1178 		}
   1179 		if (ip->i_number != ino)
   1180 			panic("lfs_fastvget: I was fed the wrong inode!");
   1181 	} else {
   1182 		retries = 0;
   1183 	    again:
   1184 		error = bread(ump->um_devvp, LFS_FSBTODB(fs, daddr), fs->lfs_ibsize,
   1185 			      0, &bp);
   1186 		if (error) {
   1187 			DLOG((DLOG_CLEAN, "lfs_fastvget: bread failed (%d)\n",
   1188 			      error));
   1189 			/*
   1190 			 * The inode does not contain anything useful, so it
   1191 			 * would be misleading to leave it on its hash chain.
   1192 			 * Iput() will return it to the free list.
   1193 			 */
   1194 			ulfs_ihashrem(ip);
   1195 
   1196 			/* Unlock and discard unneeded inode. */
   1197 			VOP_UNLOCK(vp);
   1198 			lfs_vunref(vp);
   1199 			*vpp = NULL;
   1200 			return (error);
   1201 		}
   1202 		dip = lfs_ifind(ump->um_lfs, ino, bp);
   1203 		if (dip == NULL) {
   1204 			/* Assume write has not completed yet; try again */
   1205 			brelse(bp, BC_INVAL);
   1206 			++retries;
   1207 			if (retries > LFS_IFIND_RETRIES)
   1208 				panic("lfs_fastvget: dinode not found");
   1209 			DLOG((DLOG_CLEAN, "lfs_fastvget: dinode not found,"
   1210 			      " retrying...\n"));
   1211 			goto again;
   1212 		}
   1213 		*ip->i_din.ffs1_din = *dip;
   1214 		brelse(bp, 0);
   1215 	}
   1216 	lfs_vinit(mp, &vp);
   1217 
   1218 	*vpp = vp;
   1219 
   1220 	KASSERT(VOP_ISLOCKED(vp));
   1221 	VOP_UNLOCK(vp);
   1222 
   1223 	return (0);
   1224 }
   1225 
   1226 /*
   1227  * Make up a "fake" cleaner buffer, copy the data from userland into it.
   1228  */
   1229 struct buf *
   1230 lfs_fakebuf(struct lfs *fs, struct vnode *vp, int lbn, size_t size, void *uaddr)
   1231 {
   1232 	struct buf *bp;
   1233 	int error;
   1234 
   1235 	KASSERT(VTOI(vp)->i_number != LFS_IFILE_INUM);
   1236 
   1237 	bp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, size, LFS_NB_CLEAN);
   1238 	error = copyin(uaddr, bp->b_data, size);
   1239 	if (error) {
   1240 		lfs_freebuf(fs, bp);
   1241 		return NULL;
   1242 	}
   1243 	KDASSERT(bp->b_iodone == lfs_callback);
   1244 
   1245 #if 0
   1246 	mutex_enter(&lfs_lock);
   1247 	++fs->lfs_iocount;
   1248 	mutex_exit(&lfs_lock);
   1249 #endif
   1250 	bp->b_bufsize = size;
   1251 	bp->b_bcount = size;
   1252 	return (bp);
   1253 }
   1254