      1 /*	$NetBSD: lfs_syscalls.c,v 1.177 2025/10/20 04:20:37 perseant Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1999, 2000, 2001, 2002, 2003, 2007, 2007, 2008
      5  *    The NetBSD Foundation, Inc.
      6  * All rights reserved.
      7  *
      8  * This code is derived from software contributed to The NetBSD Foundation
      9  * by Konrad E. Schroder <perseant (at) hhhh.org>.
     10  *
     11  * Redistribution and use in source and binary forms, with or without
     12  * modification, are permitted provided that the following conditions
     13  * are met:
     14  * 1. Redistributions of source code must retain the above copyright
     15  *    notice, this list of conditions and the following disclaimer.
     16  * 2. Redistributions in binary form must reproduce the above copyright
     17  *    notice, this list of conditions and the following disclaimer in the
     18  *    documentation and/or other materials provided with the distribution.
     19  *
     20  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     21  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     22  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     23  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     24  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     25  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     26  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     27  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     28  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     29  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     30  * POSSIBILITY OF SUCH DAMAGE.
     31  */
     32 /*-
     33  * Copyright (c) 1991, 1993, 1994
     34  *	The Regents of the University of California.  All rights reserved.
     35  *
     36  * Redistribution and use in source and binary forms, with or without
     37  * modification, are permitted provided that the following conditions
     38  * are met:
     39  * 1. Redistributions of source code must retain the above copyright
     40  *    notice, this list of conditions and the following disclaimer.
     41  * 2. Redistributions in binary form must reproduce the above copyright
     42  *    notice, this list of conditions and the following disclaimer in the
     43  *    documentation and/or other materials provided with the distribution.
     44  * 3. Neither the name of the University nor the names of its contributors
     45  *    may be used to endorse or promote products derived from this software
     46  *    without specific prior written permission.
     47  *
     48  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     49  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     50  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     51  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     52  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     53  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     54  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     55  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     56  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     57  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     58  * SUCH DAMAGE.
     59  *
     60  *	@(#)lfs_syscalls.c	8.10 (Berkeley) 5/14/95
     61  */
     62 
     63 #include <sys/cdefs.h>
     64 __KERNEL_RCSID(0, "$NetBSD: lfs_syscalls.c,v 1.177 2025/10/20 04:20:37 perseant Exp $");
     65 
     66 #ifndef LFS
     67 # define LFS		/* for prototypes in syscallargs.h */
     68 #endif
     69 
     70 #include <sys/param.h>
     71 #include <sys/systm.h>
     72 #include <sys/proc.h>
     73 #include <sys/buf.h>
     74 #include <sys/mount.h>
     75 #include <sys/vnode.h>
     76 #include <sys/kernel.h>
     77 #include <sys/kauth.h>
     78 #include <sys/syscallargs.h>
     79 
     80 #include <ufs/lfs/ulfs_inode.h>
     81 #include <ufs/lfs/ulfsmount.h>
     82 #include <ufs/lfs/ulfs_extern.h>
     83 
     84 #include <ufs/lfs/lfs.h>
     85 #include <ufs/lfs/lfs_accessors.h>
     86 #include <ufs/lfs/lfs_kernel.h>
     87 #include <ufs/lfs/lfs_extern.h>
     88 
     89 static int lfs_fastvget(struct mount *, ino_t, BLOCK_INFO *, int,
     90     struct vnode **);
     91 static struct buf *lfs_fakebuf(struct lfs *, struct vnode *, daddr_t,
     92     size_t, void *);
     93 
     94 /*
     95  * sys_lfs_markv:
     96  *
     97  * This will mark inodes and blocks dirty, so they are written into the log.
     98  * It will block until all the blocks have been written.  The segment create
     99  * time passed in the block_info and inode_info structures is used to decide
    100  * if the data is valid for each block (in case some process dirtied a block
    101  * or inode that is being cleaned between the determination that a block is
    102  * live and the lfs_markv call).
    103  *
    104  *  0 on success
    105  * -1/errno is returned on error.
    106  */
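        /*
         * A rough sketch (not part of this file) of how a userland cleaner
         * might drive this call, assuming the lfs_markv(2) stub described in
         * its manual page; the BLOCK_INFO array would normally be filled in
         * from segment summaries and vetted with lfs_bmapv(2) first:
         *
         *	fsid_t fsid;		// file system id, e.g. from statvfs(2)
         *	BLOCK_INFO bi[NBLK];	// one entry per block believed live
         *	int nblk;		// number of entries actually filled in
         *
         *	if (lfs_markv(&fsid, bi, nblk) < 0 && errno == EAGAIN) {
         *		// some blocks could not be cleaned; try again later
         *	}
         */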
    107 #ifdef USE_64BIT_SYSCALLS
    108 int
    109 sys_lfs_markv(struct lwp *l, const struct sys_lfs_markv_args *uap, register_t *retval)
    110 {
    111 	/* {
    112 		syscallarg(fsid_t *) fsidp;
    113 		syscallarg(struct block_info *) blkiov;
    114 		syscallarg(int) blkcnt;
    115 	} */
    116 	BLOCK_INFO *blkiov;
    117 	int blkcnt, error;
    118 	fsid_t fsid;
    119 	struct lfs *fs;
    120 	struct mount *mntp;
    121 
    122 	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
    123 		return (error);
    124 
    125 	if ((mntp = vfs_getvfs(&fsid)) == NULL)
    126 		return (ENOENT);
    127 	fs = VFSTOULFS(mntp)->um_lfs;
    128 
    129 	blkcnt = SCARG(uap, blkcnt);
    130 	if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
    131 		return (EINVAL);
    132 
    133 	KERNEL_LOCK(1, NULL);
    134 	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
    135 	if ((error = copyin(SCARG(uap, blkiov), blkiov,
    136 			    blkcnt * sizeof(BLOCK_INFO))) != 0)
    137 		goto out;
    138 
    139 	if ((error = lfs_markv(l, &fsid, blkiov, blkcnt)) == 0)
    140 		copyout(blkiov, SCARG(uap, blkiov),
    141 			blkcnt * sizeof(BLOCK_INFO));
    142     out:
    143 	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
    144 	KERNEL_UNLOCK_ONE(NULL);
    145 	return error;
    146 }
    147 #else
    148 int
    149 sys_lfs_markv(struct lwp *l, const struct sys_lfs_markv_args *uap, register_t *retval)
    150 {
    151 	/* {
    152 		syscallarg(fsid_t *) fsidp;
    153 		syscallarg(struct block_info *) blkiov;
    154 		syscallarg(int) blkcnt;
    155 	} */
    156 	BLOCK_INFO *blkiov;
    157 	BLOCK_INFO_15 *blkiov15;
    158 	int i, blkcnt, error;
    159 	fsid_t fsid;
    160 	struct lfs *fs;
    161 	struct mount *mntp;
    162 
    163 	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
    164 		return (error);
    165 
    166 	if ((mntp = vfs_getvfs(&fsid)) == NULL)
    167 		return (ENOENT);
    168 	fs = VFSTOULFS(mntp)->um_lfs;
    169 
    170 	blkcnt = SCARG(uap, blkcnt);
    171 	if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
    172 		return (EINVAL);
    173 
    174 	KERNEL_LOCK(1, NULL);
    175 	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
    176 	blkiov15 = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO_15), LFS_NB_BLKIOV);
    177 	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
    178 			    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
    179 		goto out;
    180 
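        	/*
        	 * Convert the userland BLOCK_INFO_15 records into the kernel's
        	 * native BLOCK_INFO layout before doing anything with them.
        	 */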
    181 	for (i = 0; i < blkcnt; i++) {
    182 		blkiov[i].bi_inode     = blkiov15[i].bi_inode;
    183 		blkiov[i].bi_lbn       = blkiov15[i].bi_lbn;
    184 		blkiov[i].bi_daddr     = blkiov15[i].bi_daddr;
    185 		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
    186 		blkiov[i].bi_version   = blkiov15[i].bi_version;
    187 		blkiov[i].bi_bp	       = blkiov15[i].bi_bp;
    188 		blkiov[i].bi_size      = blkiov15[i].bi_size;
    189 	}
    190 
    191 	if ((error = lfs_markv(l, &fsid, blkiov, blkcnt)) == 0) {
    192 		for (i = 0; i < blkcnt; i++) {
    193 			blkiov15[i].bi_inode	 = blkiov[i].bi_inode;
    194 			blkiov15[i].bi_lbn	 = blkiov[i].bi_lbn;
    195 			blkiov15[i].bi_daddr	 = blkiov[i].bi_daddr;
    196 			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
    197 			blkiov15[i].bi_version	 = blkiov[i].bi_version;
    198 			blkiov15[i].bi_bp	 = blkiov[i].bi_bp;
    199 			blkiov15[i].bi_size	 = blkiov[i].bi_size;
    200 		}
    201 		copyout(blkiov15, SCARG(uap, blkiov),
    202 			blkcnt * sizeof(BLOCK_INFO_15));
    203 	}
    204     out:
    205 	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
    206 	lfs_free(fs, blkiov15, LFS_NB_BLKIOV);
    207 	KERNEL_UNLOCK_ONE(NULL);
    208 	return error;
    209 }
    210 #endif
    211 
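        /*
         * After roughly this many buffers' worth of cleaned data has been
         * dirtied, lfs_markv() flushes a partial segment to disk so that the
         * amount of cleaner data held in memory at once stays bounded.
         */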
    212 #define	LFS_MARKV_MAX_BLOCKS	(LFS_MAX_BUFS)
    213 
    214 int
    215 lfs_markv(struct lwp *l, fsid_t *fsidp, BLOCK_INFO *blkiov,
    216     int blkcnt)
    217 {
    218 	BLOCK_INFO *blkp;
    219 	IFILE *ifp;
    220 	struct buf *bp;
    221 	struct inode *ip = NULL;
    222 	struct lfs *fs;
    223 	struct mount *mntp;
    224 	struct ulfsmount *ump;
    225 	struct vnode *vp;
    226 	ino_t lastino;
    227 	daddr_t b_daddr;
    228 	int cnt, error;
    229 	int do_again = 0;
    230 	int numrefed = 0;
    231 	ino_t maxino;
    232 	size_t obsize;
    233 
    234 	/* number of blocks/inodes that we have already bwrite'ed */
    235 	int nblkwritten, ninowritten;
    236 
    237 	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
    238 	    KAUTH_REQ_SYSTEM_LFS_MARKV, NULL, NULL, NULL);
    239 
    240 	if (error)
    241 		return (error);
    242 
    243 	if ((mntp = vfs_getvfs(fsidp)) == NULL)
    244 		return (ENOENT);
    245 
    246 	ump = VFSTOULFS(mntp);
    247 	fs = ump->um_lfs;
    248 
    249 	if (fs->lfs_ronly)
    250 		return EROFS;
    251 
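        	/*
        	 * The largest valid inode number is bounded by the Ifile: its
        	 * block count, less the cleaner-info and segment-table blocks,
        	 * times the number of inode entries per block.
        	 */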
    252 	maxino = (lfs_fragstoblks(fs, lfs_dino_getblocks(fs, VTOI(fs->lfs_ivnode)->i_din)) -
    253 		      lfs_sb_getcleansz(fs) - lfs_sb_getsegtabsz(fs)) * lfs_sb_getifpb(fs);
    254 
    255 	if ((error = vfs_busy(mntp)) != 0)
    256 		return (error);
    257 
    258 	/*
    259 	 * Reference all the vnodes we will need before we lock.
    260 	 * This prevents a reclaimed vnode from being written
    261 	 * in the same partial segment with cleaning blocks.
    262 	 */
    263 	lfs_cleanerlock(fs);
    264 	lastino = LFS_UNUSED_INUM;
    265 	for (cnt = blkcnt, blkp = blkiov; cnt--; ++blkp) {
    266 		/* Bounds-check incoming data, avoid panic for failed VGET */
    267 		if (blkp->bi_inode <= 0 || blkp->bi_inode >= maxino) {
    268 			error = EINVAL;
    269 			goto err4;
    270 		}
    271 
    272 		if (lastino != blkp->bi_inode) {
    273 			/* Load the vnode and add a cleaning reference */
    274 			error = lfs_fastvget(mntp, blkp->bi_inode, blkp,
    275 					     LK_EXCLUSIVE | LK_NOWAIT, &vp);
        			if (error != 0) {
        				/* Main loop below retries or skips it */
        				error = 0;
        				continue;
        			}
    276 			lfs_setclean(fs, vp);
    277 			vput(vp);
    278 			vp = NULL;
    279 
    280 			lastino = blkp->bi_inode;
    281 		}
    282 	}
    283 
    284 	/*
    285 	 * Take the seglock here so that, even if we have to sleep, nothing
    286 	 * else can sneak in and invalidate the blocks we are about to
    287 	 * clean.
    288 	 *
    289 	 * It is also important to note here that unless we specify SEGM_CKP,
    290 	 * any Ifile blocks that we might be asked to clean will never get
    291 	 * to the disk.
    292 	 */
    293 	lfs_seglock(fs, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);
    294 
    295 	/* Mark blocks/inodes dirty.  */
    296 	error = 0;
    297 
    298 	/* these were inside the initialization for the for loop */
    299 	vp = NULL;
    300 	lastino = LFS_UNUSED_INUM;
    301 	nblkwritten = ninowritten = 0;
    302 	cnt = blkcnt;
    303 	for (blkp = blkiov; cnt--; ++blkp)
    304 	{
    305 		/* Bounds-check incoming data, avoid panic for failed VGET */
    306 		if (blkp->bi_inode <= 0 || blkp->bi_inode >= maxino) {
    307 			error = EINVAL;
    308 			goto err3;
    309 		}
    310 		/*
    311 		 * Get the IFILE entry (only once) and see if the file still
    312 		 * exists.
    313 		 */
    314 		if (lastino != blkp->bi_inode) {
    315 			/*
    316 			 * Finish the old file, if there was one.
    317 			 */
    318 			if (vp != NULL) {
    319 				vput(vp);
    320 				vp = NULL;
    321 				numrefed--;
    322 			}
    323 
    324 			/*
    325 			 * Start a new file
    326 			 */
    327 			lastino = blkp->bi_inode;
    328 
    329 			/* Get the vnode/inode. */
    330 			error = lfs_fastvget(mntp, blkp->bi_inode, blkp,
    331 			    LK_EXCLUSIVE | LK_NOWAIT, &vp);
    332 			if (error) {
    333 				DLOG((DLOG_CLEAN, "lfs_markv: lfs_fastvget"
    334 				      " failed with %d (ino %d, segment %d)\n",
    335 				      error, blkp->bi_inode,
    336 				      lfs_dtosn(fs, blkp->bi_daddr)));
    337 				/*
    338 				 * If we got EAGAIN, that means that the
    339 				 * Inode was locked.  This is
    340 				 * recoverable: just clean the rest of
    341 				 * this segment, and let the cleaner try
    342 				 * again with another.	(When the
    343 				 * cleaner runs again, this segment will
    344 				 * sort high on the list, since it is
    345 				 * now almost entirely empty.)
    346 				 */
    347 				if (error == EAGAIN) {
    348 					error = 0;
    349 					do_again++;
    350 				} else
    351 					KASSERT(error == ENOENT);
    352 				KASSERT(vp == NULL);
    353 				ip = NULL;
    354 				continue;
    355 			}
    356 
    357 			ip = VTOI(vp);
    358 			numrefed++;
    359 			ninowritten++;
    360 		} else if (vp == NULL) {
    361 			/*
    362 			 * This can only happen if the vnode is dead (or
    363 			 * in any case we can't get it, e.g., it is
    364 			 * locked).  Keep going.
    365 			 */
    366 			continue;
    367 		}
    368 
    369 		/* Past this point we are guaranteed that vp, ip are valid. */
    370 
    371 		/* Can't clean VU_DIROP directories in case of truncation */
    372 		/* XXX - maybe we should mark removed dirs specially? */
    373 		if (vp->v_type == VDIR && (vp->v_uflag & VU_DIROP)) {
    374 			do_again++;
    375 			continue;
    376 		}
    377 
    378 		/* If this BLOCK_INFO didn't contain a block, keep going. */
    379 		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
    380 			/* XXX need to make sure that the inode gets written in this case */
    381 			/* XXX but only write the inode if it's the right one */
    382 			if (blkp->bi_inode != LFS_IFILE_INUM) {
    383 				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
    384 				if (lfs_if_getdaddr(fs, ifp) == blkp->bi_daddr) {
    385 					mutex_enter(&lfs_lock);
    386 					LFS_SET_UINO(ip, IN_CLEANING);
    387 					mutex_exit(&lfs_lock);
    388 				}
    389 				brelse(bp, 0);
    390 			}
    391 			continue;
    392 		}
    393 
    394 		b_daddr = 0;
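        		/*
        		 * Verify that the block still lives at the address where
        		 * the cleaner found it; if it has moved, a newer copy has
        		 * already been written and this one is stale, so skip it.
        		 */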
    395 		if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &b_daddr, NULL) ||
    396 		    LFS_DBTOFSB(fs, b_daddr) != blkp->bi_daddr)
    397 		{
    398 			if (lfs_dtosn(fs, LFS_DBTOFSB(fs, b_daddr)) ==
    399 			    lfs_dtosn(fs, blkp->bi_daddr))
    400 			{
    401 				DLOG((DLOG_CLEAN, "lfs_markv: wrong da same seg: %jx vs %jx\n",
    402 				      (intmax_t)blkp->bi_daddr, (intmax_t)LFS_DBTOFSB(fs, b_daddr)));
    403 			}
    404 			do_again++;
    405 			continue;
    406 		}
    407 
    408 		/*
    409 		 * Check block sizes.  The blocks being cleaned come from
    410 		 * disk, so they should have the same size as their on-disk
    411 		 * counterparts.
    412 		 */
    413 		if (blkp->bi_lbn >= 0)
    414 			obsize = lfs_blksize(fs, ip, blkp->bi_lbn);
    415 		else
    416 			obsize = lfs_sb_getbsize(fs);
    417 		/* Check for fragment size change */
    418 		if (blkp->bi_lbn >= 0 && blkp->bi_lbn < ULFS_NDADDR) {
    419 			obsize = ip->i_lfs_fragsize[blkp->bi_lbn];
    420 		}
    421 		if (obsize != blkp->bi_size) {
    422 			DLOG((DLOG_CLEAN, "lfs_markv: ino %d lbn %jd wrong"
    423 			      " size (%ld != %d), try again\n",
    424 			      blkp->bi_inode, (intmax_t)blkp->bi_lbn,
    425 			      (long) obsize, blkp->bi_size));
    426 			do_again++;
    427 			continue;
    428 		}
    429 
    430 		/*
    431 		 * If we get to here, then we are keeping the block.  If
    432 		 * it is an indirect block, we want to actually put it
    433 		 * in the buffer cache so that it can be updated in the
    434 		 * finish_meta section.	 If it's not, we need to
    435 		 * allocate a fake buffer so that writeseg can perform
    436 		 * the copyin and write the buffer.
    437 		 */
    438 		if (ip->i_number != LFS_IFILE_INUM && blkp->bi_lbn >= 0) {
    439 			/* Data Block */
    440 			bp = lfs_fakebuf(fs, vp, blkp->bi_lbn,
    441 					 blkp->bi_size, blkp->bi_bp);
    442 			/* Pretend we used bread() to get it */
    443 			bp->b_blkno = LFS_FSBTODB(fs, blkp->bi_daddr);
    444 		} else {
    445 			/* Indirect block or ifile */
    446 			if (blkp->bi_size != lfs_sb_getbsize(fs) &&
    447 			    ip->i_number != LFS_IFILE_INUM)
    448 				panic("lfs_markv: partial indirect block?"
    449 				    " size=%d\n", blkp->bi_size);
    450 			bp = getblk(vp, blkp->bi_lbn, blkp->bi_size, 0, 0);
    451 			if (!(bp->b_oflags & (BO_DONE|BO_DELWRI))) {
    452 				/*
    453 				 * The block in question was not found
    454 				 * in the cache; i.e., the block that
    455 				 * getblk() returned is empty.	So, we
    456 				 * can (and should) copy in the
    457 				 * contents, because we've already
    458 				 * determined that this was the right
    459 				 * version of this block on disk.
    460 				 *
    461 				 * And, it can't have changed underneath
    462 				 * us, because we have the segment lock.
    463 				 */
    464 				error = copyin(blkp->bi_bp, bp->b_data, blkp->bi_size);
    465 				if (error)
    466 					goto err2;
    467 			}
    468 		}
    469 		if ((error = lfs_bwrite_ext(bp, BW_CLEAN)) != 0)
    470 			goto err2;
    471 
    472 		nblkwritten++;
    473 		/*
    474 		 * XXX should account indirect blocks and ifile pages as well
    475 		 */
    476 		if (nblkwritten + lfs_lblkno(fs, ninowritten * DINOSIZE(fs))
    477 		    > LFS_MARKV_MAX_BLOCKS) {
    478 			DLOG((DLOG_CLEAN, "lfs_markv: writing %d blks %d inos\n",
    479 			      nblkwritten, ninowritten));
    480 			lfs_segwrite(mntp, SEGM_CLEAN);
    481 			nblkwritten = ninowritten = 0;
    482 		}
    483 	}
    484 
    485 	/*
    486 	 * Finish the old file, if there was one
    487 	 */
    488 	if (vp != NULL) {
    489 		vput(vp);
    490 		vp = NULL;
    491 		numrefed--;
    492 	}
    493 
    494 	KASSERTMSG((numrefed == 0), "lfs_markv: numrefed=%d", numrefed);
    495 	DLOG((DLOG_CLEAN, "lfs_markv: writing %d blks %d inos (check point)\n",
    496 	      nblkwritten, ninowritten));
    497 
    498 	/*
    499 	 * The last write has to be SEGM_SYNC, because of calling semantics.
    500 	 * It also has to be SEGM_CKP, because otherwise we could write
    501 	 * over the newly cleaned data contained in a checkpoint, and then
    502 	 * we'd be unhappy at recovery time.
    503 	 */
    504 	lfs_segwrite(mntp, SEGM_CLEAN | SEGM_CKP | SEGM_SYNC);
    505 
    506 	lfs_segunlock(fs);
    507 
    508 	vfs_unbusy(mntp);
    509 	if (error)
    510 		return (error);
    511 	else if (do_again)
    512 		return EAGAIN;
    513 
    514 	return 0;
    515 
    516 err2:
    517 	DLOG((DLOG_CLEAN, "lfs_markv err2\n"));
    518 
    519 	/*
    520 	 * XXX we're here because copyin() failed.
    521 	 * XXX it means that we can't trust the cleanerd.  too bad.
    522 	 * XXX how can we recover from this?
    523 	 */
    524 
    525 err3:
    526 	/*
    527 	 * XXX should do segwrite here anyway?
    528 	 */
    529 
    530 	if (vp != NULL) {
    531 		vput(vp);
    532 		vp = NULL;
    533 		--numrefed;
    534 	}
    535 
    536 	lfs_segunlock(fs);
    537 
    538 err4:
    539 	vfs_unbusy(mntp);
    540 	KASSERTMSG((numrefed == 0), "lfs_markv: numrefed=%d", numrefed);
    541 
    542 	return (error);
    543 }
    544 
    545 /*
    546  * sys_lfs_bmapv:
    547  *
    548  * This will fill in the current disk address for arrays of blocks.
    549  *
    550  *  0 on success
    551  * -1/errno is returned on error.
    552  */
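        /*
         * The cleaner typically calls this first to learn which of the blocks
         * it read out of a segment are still live (still mapped at the address
         * where it found them), and then hands the live ones to lfs_markv() to
         * be rewritten elsewhere.
         */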
    553 #ifdef USE_64BIT_SYSCALLS
    554 int
    555 sys_lfs_bmapv(struct lwp *l, const struct sys_lfs_bmapv_args *uap, register_t *retval)
    556 {
    557 	/* {
    558 		syscallarg(fsid_t *) fsidp;
    559 		syscallarg(struct block_info *) blkiov;
    560 		syscallarg(int) blkcnt;
    561 	} */
    562 	BLOCK_INFO *blkiov;
    563 	int blkcnt, error;
    564 	fsid_t fsid;
    565 	struct lfs *fs;
    566 	struct mount *mntp;
    567 
    568 	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
    569 		return (error);
    570 
    571 	if ((mntp = vfs_getvfs(&fsid)) == NULL)
    572 		return (ENOENT);
    573 	fs = VFSTOULFS(mntp)->um_lfs;
    574 
    575 	blkcnt = SCARG(uap, blkcnt);
    576 #if SIZE_T_MAX <= UINT_MAX
    577 	if ((u_int) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
    578 		return (EINVAL);
    579 #endif
    580 	KERNEL_LOCK(1, NULL);
    581 	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
    582 	if ((error = copyin(SCARG(uap, blkiov), blkiov,
    583 			    blkcnt * sizeof(BLOCK_INFO))) != 0)
    584 		goto out;
    585 
    586 	if ((error = lfs_bmapv(l, &fsid, blkiov, blkcnt)) == 0)
    587 		copyout(blkiov, SCARG(uap, blkiov),
    588 			blkcnt * sizeof(BLOCK_INFO));
    589     out:
    590 	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
    591 	KERNEL_UNLOCK_ONE(NULL);
    592 	return error;
    593 }
    594 #else
    595 int
    596 sys_lfs_bmapv(struct lwp *l, const struct sys_lfs_bmapv_args *uap, register_t *retval)
    597 {
    598 	/* {
    599 		syscallarg(fsid_t *) fsidp;
    600 		syscallarg(struct block_info *) blkiov;
    601 		syscallarg(int) blkcnt;
    602 	} */
    603 	BLOCK_INFO *blkiov;
    604 	BLOCK_INFO_15 *blkiov15;
    605 	int i, blkcnt, error;
    606 	fsid_t fsid;
    607 	struct lfs *fs;
    608 	struct mount *mntp;
    609 
    610 	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
    611 		return (error);
    612 
    613 	if ((mntp = vfs_getvfs(&fsid)) == NULL)
    614 		return (ENOENT);
    615 	fs = VFSTOULFS(mntp)->um_lfs;
    616 
    617 	blkcnt = SCARG(uap, blkcnt);
    618 	if ((size_t) blkcnt > SIZE_T_MAX / sizeof(BLOCK_INFO))
    619 		return (EINVAL);
    620 	KERNEL_LOCK(1, NULL);
    621 	blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
    622 	blkiov15 = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO_15), LFS_NB_BLKIOV);
    623 	if ((error = copyin(SCARG(uap, blkiov), blkiov15,
    624 			    blkcnt * sizeof(BLOCK_INFO_15))) != 0)
    625 		goto out;
    626 
    627 	for (i = 0; i < blkcnt; i++) {
    628 		blkiov[i].bi_inode     = blkiov15[i].bi_inode;
    629 		blkiov[i].bi_lbn       = blkiov15[i].bi_lbn;
    630 		blkiov[i].bi_daddr     = blkiov15[i].bi_daddr;
    631 		blkiov[i].bi_segcreate = blkiov15[i].bi_segcreate;
    632 		blkiov[i].bi_version   = blkiov15[i].bi_version;
    633 		blkiov[i].bi_bp	       = blkiov15[i].bi_bp;
    634 		blkiov[i].bi_size      = blkiov15[i].bi_size;
    635 	}
    636 
    637 	if ((error = lfs_bmapv(l, &fsid, blkiov, blkcnt)) == 0) {
    638 		for (i = 0; i < blkcnt; i++) {
    639 			blkiov15[i].bi_inode	 = blkiov[i].bi_inode;
    640 			blkiov15[i].bi_lbn	 = blkiov[i].bi_lbn;
    641 			blkiov15[i].bi_daddr	 = blkiov[i].bi_daddr;
    642 			blkiov15[i].bi_segcreate = blkiov[i].bi_segcreate;
    643 			blkiov15[i].bi_version	 = blkiov[i].bi_version;
    644 			blkiov15[i].bi_bp	 = blkiov[i].bi_bp;
    645 			blkiov15[i].bi_size	 = blkiov[i].bi_size;
    646 		}
    647 		copyout(blkiov15, SCARG(uap, blkiov),
    648 			blkcnt * sizeof(BLOCK_INFO_15));
    649 	}
    650     out:
    651 	lfs_free(fs, blkiov, LFS_NB_BLKIOV);
    652 	lfs_free(fs, blkiov15, LFS_NB_BLKIOV);
    653 	KERNEL_UNLOCK_ONE(NULL);
    654 	return error;
    655 }
    656 #endif
    657 
    658 int
    659 lfs_bmapv(struct lwp *l, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
    660 {
    661 	BLOCK_INFO *blkp;
    662 	IFILE *ifp;
    663 	struct buf *bp;
    664 	struct inode *ip = NULL;
    665 	struct lfs *fs;
    666 	struct mount *mntp;
    667 	struct ulfsmount *ump;
    668 	struct vnode *vp;
    669 	ino_t lastino;
    670 	daddr_t v_daddr;
    671 	int cnt, error;
    672 	int numrefed = 0;
    673 
    674 	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
    675 	    KAUTH_REQ_SYSTEM_LFS_BMAPV, NULL, NULL, NULL);
    676 	if (error)
    677 		return (error);
    678 
    679 	if ((mntp = vfs_getvfs(fsidp)) == NULL)
    680 		return (ENOENT);
    681 
    682 	if ((error = vfs_busy(mntp)) != 0)
    683 		return (error);
    684 
    685 	ump = VFSTOULFS(mntp);
    686 	fs = ump->um_lfs;
    687 
    688 	if (fs->lfs_cleaner_thread == NULL)
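        	/*
        	 * The first caller becomes the registered cleaner thread; bmapv
        	 * is expected to be issued only from that thread from then on.
        	 */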
    689 		fs->lfs_cleaner_thread = curlwp;
    690 	KASSERT(fs->lfs_cleaner_thread == curlwp);
    691 
    692 	cnt = blkcnt;
    693 
    694 	error = 0;
    695 
    696 	/* these were inside the initialization for the for loop */
    697 	vp = NULL;
    698 	v_daddr = LFS_UNUSED_DADDR;
    699 	lastino = LFS_UNUSED_INUM;
    700 	for (blkp = blkiov; cnt--; ++blkp)
    701 	{
    702 		/*
    703 		 * Get the IFILE entry (only once) and see if the file still
    704 		 * exists.
    705 		 */
    706 		if (lastino != blkp->bi_inode) {
    707 			/*
    708 			 * Finish the old file, if there was one.
    709 			 */
    710 			if (vp != NULL) {
    711 				vput(vp);
    712 				vp = NULL;
    713 				numrefed--;
    714 			}
    715 
    716 			/*
    717 			 * Start a new file
    718 			 */
    719 			lastino = blkp->bi_inode;
    720 			if (blkp->bi_inode == LFS_IFILE_INUM)
    721 				v_daddr = lfs_sb_getidaddr(fs);
    722 			else {
    723 				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
    724 				v_daddr = lfs_if_getdaddr(fs, ifp);
    725 				brelse(bp, 0);
    726 			}
    727 			if (v_daddr == LFS_UNUSED_DADDR) {
    728 				blkp->bi_daddr = LFS_UNUSED_DADDR;
    729 				continue;
    730 			}
    731 			error = lfs_fastvget(mntp, blkp->bi_inode, NULL,
    732 			    LK_SHARED, &vp);
    733 			if (error) {
    734 				DLOG((DLOG_CLEAN, "lfs_bmapv: lfs_fastvget ino"
    735 				      " %d failed with %d\n",
    736 				      blkp->bi_inode, error));
    737 				KASSERT(vp == NULL);
    738 				continue;
    739 			} else {
    740 				KASSERT(VOP_ISLOCKED(vp));
    741 				numrefed++;
    742 			}
    743 			ip = VTOI(vp);
    744 		} else if (vp == NULL) {
    745 			/*
    746 			 * This can only happen if the vnode is dead.
    747 			 * Keep going.	Note that we DO NOT set the
    748 			 * bi_daddr to anything -- if we failed to get
    749 			 * the vnode, for example, we want to assume
    750 			 * conservatively that all of its blocks *are*
    751 			 * located in the segment in question.
    752 			 * lfs_markv will throw them out if we are
    753 			 * wrong.
    754 			 */
    755 			continue;
    756 		}
    757 
    758 		/* Past this point we are guaranteed that vp, ip are valid. */
    759 
    760 		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
    761 			/*
    762 			 * We just want the inode address, which is
    763 			 * conveniently in v_daddr.
    764 			 */
    765 			blkp->bi_daddr = v_daddr;
    766 		} else {
    767 			daddr_t bi_daddr;
    768 
    769 			error = VOP_BMAP(vp, blkp->bi_lbn, NULL,
    770 					 &bi_daddr, NULL);
    771 			if (error)
    772 			{
    773 				blkp->bi_daddr = LFS_UNUSED_DADDR;
    774 				continue;
    775 			}
    776 			blkp->bi_daddr = LFS_DBTOFSB(fs, bi_daddr);
    777 			/* Fill in the block size, too */
    778 			if (blkp->bi_lbn >= 0)
    779 				blkp->bi_size = lfs_blksize(fs, ip, blkp->bi_lbn);
    780 			else
    781 				blkp->bi_size = lfs_sb_getbsize(fs);
    782 		}
    783 	}
    784 
    785 	/*
    786 	 * Finish the old file, if there was one.
    787 	 */
    788 	if (vp != NULL) {
    789 		vput(vp);
    790 		vp = NULL;
    791 		numrefed--;
    792 	}
    793 
    794 	KASSERTMSG((numrefed == 0), "lfs_bmapv: numrefed=%d", numrefed);
    795 
    796 	vfs_unbusy(mntp);
    797 
    798 	return 0;
    799 }
    800 
    801 /*
    802  * sys_lfs_segclean:
    803  *
    804  * Mark the segment clean.
    805  *
    806  *  0 on success
    807  * -1/errno is returned on error.
    808  */
    809 int
    810 sys_lfs_segclean(struct lwp *l, const struct sys_lfs_segclean_args *uap, register_t *retval)
    811 {
    812 	/* {
    813 		syscallarg(fsid_t *) fsidp;
    814 		syscallarg(u_long) segment;
    815 	} */
    816 	struct lfs *fs;
    817 	struct mount *mntp;
    818 	fsid_t fsid;
    819 	int error;
    820 	unsigned long segnum;
    821 
    822 	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
    823 	    KAUTH_REQ_SYSTEM_LFS_SEGCLEAN, NULL, NULL, NULL);
    824 	if (error)
    825 		return (error);
    826 
    827 	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
    828 		return (error);
    829 	if ((mntp = vfs_getvfs(&fsid)) == NULL)
    830 		return (ENOENT);
    831 
    832 	fs = VFSTOULFS(mntp)->um_lfs;
    833 	segnum = SCARG(uap, segment);
    834 
    835 	if ((error = vfs_busy(mntp)) != 0)
    836 		return (error);
    837 
    838 	KERNEL_LOCK(1, NULL);
    839 	lfs_seglock(fs, SEGM_PROT);
    840 	error = lfs_do_segclean(fs, segnum, l->l_cred, l);
    841 	lfs_segunlock(fs);
    842 	KERNEL_UNLOCK_ONE(NULL);
    843 	vfs_unbusy(mntp);
    844 	return error;
    845 }
    846 
    847 /*
    848  * Actually mark the segment clean.
    849  * Must be called with the segment lock held.
    850  */
    851 int
    852 lfs_do_segclean(struct lfs *fs, unsigned long segnum, kauth_cred_t cred, struct lwp *l)
    853 {
    854 	extern int lfs_dostats;
    855 	struct buf *bp;
    856 	CLEANERINFO *cip;
    857 	SEGUSE *sup;
    858 
    859 	if (lfs_dtosn(fs, lfs_sb_getcurseg(fs)) == segnum) {
    860 		return (EBUSY);
    861 	}
    862 
    863 	LFS_SEGENTRY(sup, fs, segnum, bp);
    864 	if (sup->su_nbytes) {
    865 		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
    866 		      " %d live bytes\n", segnum, sup->su_nbytes));
    867 		brelse(bp, 0);
    868 		return (EBUSY);
    869 	}
    870 	if (sup->su_flags & SEGUSE_ACTIVE) {
    871 		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
    872 		      " segment is active\n", segnum));
    873 		brelse(bp, 0);
    874 		return (EBUSY);
    875 	}
    876 	if (!(sup->su_flags & SEGUSE_DIRTY)) {
    877 		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
    878 		      " segment is already clean\n", segnum));
    879 		brelse(bp, 0);
    880 		return (EALREADY);
    881 	}
    882 
    883 #ifdef DEBUG
    884 	if (lfs_checkempty(fs, segnum, cred, l) == EEXIST)
    885 		panic("Live data in cleaned segment %jd\n", (intmax_t)segnum);
    886 #endif /* DEBUG */
    887 
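        	/*
        	 * Return the segment's space to the available count, minus any
        	 * room taken up by a superblock copy or, in segment 0, the disk
        	 * label.
        	 */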
    888 	lfs_sb_addavail(fs, lfs_segtod(fs, 1));
    889 	if (sup->su_flags & SEGUSE_SUPERBLOCK)
    890 		lfs_sb_subavail(fs, lfs_btofsb(fs, LFS_SBPAD));
    891 	if (lfs_sb_getversion(fs) > 1 && segnum == 0 &&
    892 	    lfs_sb_gets0addr(fs) < lfs_btofsb(fs, LFS_LABELPAD))
    893 		lfs_sb_subavail(fs, lfs_btofsb(fs, LFS_LABELPAD) - lfs_sb_gets0addr(fs));
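        	/*
        	 * The summary and inode blocks that lived here are no longer
        	 * metadata on disk: credit them back to bfree and subtract them
        	 * from the dmeta count.
        	 */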
    894 	mutex_enter(&lfs_lock);
    895 	lfs_sb_addbfree(fs, sup->su_nsums * lfs_btofsb(fs, lfs_sb_getsumsize(fs)) +
    896 		lfs_btofsb(fs, sup->su_ninos * lfs_sb_getibsize(fs)));
    897 	lfs_sb_subdmeta(fs, sup->su_nsums * lfs_btofsb(fs, lfs_sb_getsumsize(fs)) +
    898 		lfs_btofsb(fs, sup->su_ninos * lfs_sb_getibsize(fs)));
    899 	if (lfs_sb_getdmeta(fs) < 0)
    900 		lfs_sb_setdmeta(fs, 0);
    901 	mutex_exit(&lfs_lock);
    902 	sup->su_flags &= ~SEGUSE_DIRTY;
    903 	LFS_WRITESEGENTRY(sup, fs, segnum, bp);
    904 
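        	/*
        	 * Update the cleaner info block: move this segment from the
        	 * dirty count to the clean count and republish bfree and avail
        	 * for the cleaner.
        	 */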
    905 	LFS_CLEANERINFO(cip, fs, bp);
    906 	lfs_ci_shiftdirtytoclean(fs, cip, 1);
    907 	lfs_sb_setnclean(fs, lfs_ci_getclean(fs, cip));
    908 	mutex_enter(&lfs_lock);
    909 	lfs_ci_setbfree(fs, cip, lfs_sb_getbfree(fs));
    910 	lfs_ci_setavail(fs, cip, lfs_sb_getavail(fs)
    911 			- fs->lfs_ravail - fs->lfs_favail);
    912 	wakeup(&fs->lfs_availsleep);
    913 	mutex_exit(&lfs_lock);
    914 	(void) LFS_BWRITE_LOG(bp);
    915 
    916 	if (lfs_dostats)
    917 		++lfs_stats.segs_reclaimed;
    918 
    919 	return (0);
    920 }
    921 
    922 /*
    923  * This will block until a segment in file system fsid is written.  A timeout
    924  * (tv) may be specified, after which the caller is woken up regardless.
    925  * An fsid of -1 means any file system, and a timeout of 0 means forever.
    926  */
    927 int
    928 lfs_segwait(fsid_t *fsidp, struct timeval *tv)
    929 {
    930 	struct mount *mntp;
    931 	void *addr;
    932 	u_long timeout;
    933 	int error;
    934 
    935 	mutex_enter(&lfs_lock);
    936 	if (fsidp == NULL || (mntp = vfs_getvfs(fsidp)) == NULL)
    937 		addr = &lfs_allclean_wakeup;
    938 	else
    939 		addr = &VFSTOULFS(mntp)->um_lfs->lfs_nextsegsleep;
    940 	/*
    941 	 * XXX THIS COULD SLEEP FOREVER IF TIMEOUT IS {0,0}!
    942 	 * XXX IS THAT WHAT IS INTENDED?
    943 	 */
    944 	timeout = tvtohz(tv);
    945 	error = cv_timedwait_sig(addr, &lfs_lock, timeout);
    946 	mutex_exit(&lfs_lock);
    947 	return (error == ERESTART ? EINTR : 0);
    948 }
    949 
    950 /*
    951  * sys_lfs_segwait:
    952  *
    953  * System call wrapper around lfs_segwait().
    954  *
    955  *  0 on success
    956  *  1 on timeout
    957  * -1/errno is returned on error.
    958  */
    959 int
    960 sys___lfs_segwait50(struct lwp *l, const struct sys___lfs_segwait50_args *uap,
    961     register_t *retval)
    962 {
    963 	/* {
    964 		syscallarg(fsid_t *) fsidp;
    965 		syscallarg(struct timeval *) tv;
    966 	} */
    967 	struct timeval atv;
    968 	fsid_t fsid;
    969 	int error;
    970 
    971 	/* XXX need we be su to segwait? */
    972 	error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_LFS,
    973 	    KAUTH_REQ_SYSTEM_LFS_SEGWAIT, NULL, NULL, NULL);
    974 	if (error)
    975 		return (error);
    976 	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
    977 		return (error);
    978 
    979 	if (SCARG(uap, tv)) {
    980 		error = copyin(SCARG(uap, tv), &atv, sizeof(struct timeval));
    981 		if (error)
    982 			return (error);
    983 		if (itimerfix(&atv))
    984 			return (EINVAL);
    985 	} else /* NULL or invalid */
    986 		atv.tv_sec = atv.tv_usec = 0;
    987 	return lfs_segwait(&fsid, &atv);
    988 }
    989 
    990 /*
    991  * VFS_VGET call specialized for the cleaner.  If the cleaner is
    992  * processing IINFO structures, it may have the ondisk inode already, so
    993  * don't go retrieving it again.
    994  *
    995  * Return the vnode referenced and locked.
    996  */
    997 
    998 static int
    999 lfs_fastvget(struct mount *mp, ino_t ino, BLOCK_INFO *blkp, int lk_flags,
   1000     struct vnode **vpp)
   1001 {
   1002 	struct ulfsmount *ump;
   1003 	struct lfs *fs;
   1004 	int error;
   1005 
   1006 	ump = VFSTOULFS(mp);
   1007 	fs = ump->um_lfs;
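        	/*
        	 * Expose the cleaner's copy of the on-disk inode (if any) to the
        	 * inode loader so that it does not have to be read from disk
        	 * again.
        	 */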
   1008 	fs->lfs_cleaner_hint = blkp;
   1009 	error = vcache_get(mp, &ino, sizeof(ino), vpp);
   1010 	fs->lfs_cleaner_hint = NULL;
   1011 	if (error)
   1012 		return error;
   1013 	error = vn_lock(*vpp, lk_flags);
   1014 	if (error) {
   1015 		if (error == EBUSY)
   1016 			error = EAGAIN;
   1017 		vrele(*vpp);
   1018 		*vpp = NULL;
   1019 		return error;
   1020 	}
   1021 
   1022 	return 0;
   1023 }
   1024 
   1025 /*
   1026  * Make up a "fake" cleaner buffer, copy the data from userland into it.
   1027  */
   1028 static struct buf *
   1029 lfs_fakebuf(struct lfs *fs, struct vnode *vp, daddr_t lbn, size_t size, void *uaddr)
   1030 {
   1031 	struct buf *bp;
   1032 	int error;
   1033 
   1034 	KASSERT(VTOI(vp)->i_number != LFS_IFILE_INUM);
   1035 
   1036 	bp = lfs_newbuf(VTOI(vp)->i_lfs, vp, lbn, size, LFS_NB_CLEAN);
   1037 	error = copyin(uaddr, bp->b_data, size);
   1038 	if (error) {
   1039 		lfs_freebuf(fs, bp);
   1040 		return NULL;
   1041 	}
   1042 	KDASSERT(bp->b_iodone == lfs_free_aiodone);
   1043 
   1044 #if 0
   1045 	mutex_enter(&lfs_lock);
   1046 	++fs->lfs_iocount;
   1047 	mutex_exit(&lfs_lock);
   1048 #endif
   1049 	bp->b_bufsize = size;
   1050 	bp->b_bcount = size;
   1051 	return (bp);
   1052 }
   1053