Home | History | Annotate | Line # | Download | only in lfs
lfs_syscalls.c revision 1.19
      1 /*	$NetBSD: lfs_syscalls.c,v 1.19 1998/09/15 10:53:22 pk Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1991, 1993, 1994
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. All advertising materials mentioning features or use of this software
     16  *    must display the following acknowledgement:
     17  *	This product includes software developed by the University of
     18  *	California, Berkeley and its contributors.
     19  * 4. Neither the name of the University nor the names of its contributors
     20  *    may be used to endorse or promote products derived from this software
     21  *    without specific prior written permission.
     22  *
     23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     33  * SUCH DAMAGE.
     34  *
     35  *	@(#)lfs_syscalls.c	8.10 (Berkeley) 5/14/95
     36  */
     37 
     38 #if defined(_KERNEL) && !defined(_LKM)
     39 #include "fs_lfs.h"		/* for prototypes in syscallargs.h */
     40 #endif
     41 
     42 #include <sys/param.h>
     43 #include <sys/systm.h>
     44 #include <sys/proc.h>
     45 #include <sys/buf.h>
     46 #include <sys/mount.h>
     47 #include <sys/vnode.h>
     48 #include <sys/malloc.h>
     49 #include <sys/kernel.h>
     50 
     51 #include <sys/syscallargs.h>
     52 
     53 #include <ufs/ufs/quota.h>
     54 #include <ufs/ufs/inode.h>
     55 #include <ufs/ufs/ufsmount.h>
     56 #include <ufs/ufs/ufs_extern.h>
     57 
     58 #include <ufs/lfs/lfs.h>
     59 #include <ufs/lfs/lfs_extern.h>
     60 
/*
 * Advance the segment's FINFO pointer past the just-completed FINFO and
 * its block list, to where the next FINFO will be built.
 */
#define BUMP_FIP(SP) \
	(SP)->fip = (FINFO *) (&(SP)->fip->fi_blocks[(SP)->fip->fi_nblocks])

/* Bump/drop the count of FINFO structures kept in the segment summary. */
#define INC_FINFO(SP) ++((SEGSUM *)((SP)->segsum))->ss_nfinfo
#define DEC_FINFO(SP) --((SEGSUM *)((SP)->segsum))->ss_nfinfo

/*
 * Before committing to add something to a segment summary, make sure there
 * is enough room.  S is the bytes added to the summary.
 *
 * NOTE(review): the expansion is a bare if-statement rather than a
 * do { ... } while (0) block, so an "else" written after a CHECK_SEG()
 * call would bind to this hidden "if".  The caller in lfs_markv does not
 * do that, but keep it in mind when adding new uses.
 */
#define	CHECK_SEG(s)			\
if (sp->sum_bytes_left < (s)) {		\
	(void) lfs_writeseg(fs, sp);	\
}
struct buf *lfs_fakebuf __P((struct vnode *, int, size_t, caddr_t));

/*
 * Cleaner instrumentation counters: clean_vnlocked/clean_inlocked are
 * bumped in lfs_fastvget below; debug_cleaner is not referenced in this
 * file (presumably poked from ddb -- TODO confirm).
 */
int debug_cleaner = 0;
int clean_vnlocked = 0;
int clean_inlocked = 0;
     80 
     81 /*
     82  * lfs_markv:
     83  *
     84  * This will mark inodes and blocks dirty, so they are written into the log.
     85  * It will block until all the blocks have been written.  The segment create
     86  * time passed in the block_info and inode_info structures is used to decide
     87  * if the data is valid for each block (in case some process dirtied a block
     88  * or inode that is being cleaned between the determination that a block is
     89  * live and the lfs_markv call).
     90  *
     91  *  0 on success
     92  * -1/errno is return on error.
     93  */
     94 int
     95 lfs_markv(p, v, retval)
     96 	struct proc *p;
     97 	void *v;
     98 	register_t *retval;
     99 {
    100 	struct lfs_markv_args /* {
    101 		syscallarg(fsid_t *) fsidp;
    102 		syscallarg(struct block_info *) blkiov;
    103 		syscallarg(int) blkcnt;
    104 	} */ *uap = v;
    105 	struct segment *sp;
    106 	BLOCK_INFO *blkp;
    107 	IFILE *ifp;
    108 	struct buf *bp, **bpp;
    109 	struct inode *ip = NULL;
    110 	struct lfs *fs;
    111 	struct mount *mntp;
    112 	struct vnode *vp;
    113 	fsid_t fsid;
    114 	void *start;
    115 	ino_t lastino;
    116 	ufs_daddr_t b_daddr, v_daddr;
    117 	u_long bsize;
    118 	int cnt, error;
    119 
    120 	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
    121 		return (error);
    122 
    123 	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
    124 		return (error);
    125 	if ((mntp = vfs_getvfs(&fsid)) == NULL)
    126 		return (EINVAL);
    127 
    128 	cnt = SCARG(uap, blkcnt);
    129 	start = malloc(cnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
    130 	error = copyin(SCARG(uap, blkiov), start, cnt * sizeof(BLOCK_INFO));
    131 	if (error)
    132 		goto err1;
    133 
    134 	/* Mark blocks/inodes dirty.  */
    135 	fs = VFSTOUFS(mntp)->um_lfs;
    136 	bsize = fs->lfs_bsize;
    137 	error = 0;
    138 
    139 	lfs_seglock(fs, SEGM_SYNC | SEGM_CLEAN);
    140 	sp = fs->lfs_sp;
    141 	for (v_daddr = LFS_UNUSED_DADDR, lastino = LFS_UNUSED_INUM,
    142 	    blkp = start; cnt--; ++blkp) {
    143 		/*
    144 		 * Get the IFILE entry (only once) and see if the file still
    145 		 * exists.
    146 		 */
    147 		if (lastino != blkp->bi_inode) {
    148 			if (lastino != LFS_UNUSED_INUM) {
    149 				/* Finish up last file */
    150 				if (sp->fip->fi_nblocks == 0) {
    151 					DEC_FINFO(sp);
    152 					sp->sum_bytes_left +=
    153 					    sizeof(FINFO) - sizeof(ufs_daddr_t);
    154 				} else {
    155 					lfs_updatemeta(sp);
    156 					BUMP_FIP(sp);
    157 				}
    158 
    159 				lfs_writeinode(fs, sp, ip);
    160 				lfs_vunref(vp);
    161 			}
    162 
    163 			/* Start a new file */
    164 			CHECK_SEG(sizeof(FINFO));
    165 			sp->sum_bytes_left -= sizeof(FINFO) - sizeof(ufs_daddr_t);
    166 			INC_FINFO(sp);
    167 			sp->start_lbp = &sp->fip->fi_blocks[0];
    168 			sp->vp = NULL;
    169 			sp->fip->fi_version = blkp->bi_version;
    170 			sp->fip->fi_nblocks = 0;
    171 			sp->fip->fi_ino = blkp->bi_inode;
    172 			lastino = blkp->bi_inode;
    173 			if (blkp->bi_inode == LFS_IFILE_INUM)
    174 				v_daddr = fs->lfs_idaddr;
    175 			else {
    176 				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
    177 				v_daddr = ifp->if_daddr;
    178 				brelse(bp);
    179 			}
    180 			if (v_daddr == LFS_UNUSED_DADDR)
    181 				continue;
    182 
    183 			/* Get the vnode/inode. */
    184 			if (lfs_fastvget(mntp, blkp->bi_inode, v_daddr, &vp,
    185 			    blkp->bi_lbn == LFS_UNUSED_LBN ?
    186 			    blkp->bi_bp : NULL)) {
    187 #ifdef DIAGNOSTIC
    188 				printf("lfs_markv: VFS_VGET failed (%d)\n",
    189 				    blkp->bi_inode);
    190 				panic("lfs_markv VFS_VGET FAILED");
    191 #endif
    192 				lastino = LFS_UNUSED_INUM;
    193 				v_daddr = LFS_UNUSED_DADDR;
    194 				continue;
    195 			}
    196 			if(((SEGSUM *)(sp->segsum))->ss_nfinfo == 0) {
    197 				sp->sum_bytes_left -=
    198 					sizeof(FINFO) - sizeof(ufs_daddr_t);
    199 				INC_FINFO(sp);
    200 				sp->start_lbp = &sp->fip->fi_blocks[0];
    201 				sp->fip->fi_version = blkp->bi_version;
    202 				sp->fip->fi_nblocks = 0;
    203 				sp->fip->fi_ino = blkp->bi_inode;
    204 			}
    205 			sp->vp = vp;
    206 			ip = VTOI(vp);
    207 		} else if (v_daddr == LFS_UNUSED_DADDR)
    208 			continue;
    209 
    210 		/* If this BLOCK_INFO didn't contain a block, keep going. */
    211 		if (blkp->bi_lbn == LFS_UNUSED_LBN)
    212 			continue;
    213 		if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &b_daddr, NULL) ||
    214 		    b_daddr != blkp->bi_daddr)
    215 			continue;
    216 		/*
    217 		 * If we got to here, then we are keeping the block.  If it
    218 		 * is an indirect block, we want to actually put it in the
    219 		 * buffer cache so that it can be updated in the finish_meta
    220 		 * section.  If it's not, we need to allocate a fake buffer
    221 		 * so that writeseg can perform the copyin and write the buffer.
    222 		 */
    223 		if (blkp->bi_lbn >= 0)	/* Data Block */
    224 			bp = lfs_fakebuf(vp, blkp->bi_lbn, bsize,
    225 			    blkp->bi_bp);
    226 		else {
    227 			bp = getblk(vp, blkp->bi_lbn, bsize, 0, 0);
    228 			if (!(bp->b_flags & (B_DELWRI | B_DONE | B_CACHE)) &&
    229 			    (error = copyin(blkp->bi_bp, bp->b_data,
    230 			    blkp->bi_size)))
    231 				goto err2;
    232 			if ((error = VOP_BWRITE(bp)) != 0)
    233 				goto err2;
    234 		}
    235 		while (lfs_gatherblock(sp, bp, NULL));
    236 	}
    237 	if (sp->vp) {
    238 		if (sp->fip->fi_nblocks == 0) {
    239 			DEC_FINFO(sp);
    240 			sp->sum_bytes_left +=
    241 			    sizeof(FINFO) - sizeof(ufs_daddr_t);
    242 		} else
    243 			lfs_updatemeta(sp);
    244 
    245 		lfs_writeinode(fs, sp, ip);
    246 		lfs_vunref(vp);
    247 	}
    248 	(void) lfs_writeseg(fs, sp);
    249 	lfs_segunlock(fs);
    250 	free(start, M_SEGMENT);
    251 	return (error);
    252 
    253 /*
    254  * XXX
    255  * If we come in to error 2, we might have indirect blocks that were
    256  * updated and now have bad block pointers.  I don't know what to do
    257  * about this.
    258  */
    259 
    260 err2:	lfs_vunref(vp);
    261 	/* Free up fakebuffers */
    262 	for (bpp = --sp->cbpp; bpp >= sp->bpp; --bpp)
    263 		if ((*bpp)->b_flags & B_CALL) {
    264 			brelvp(*bpp);
    265 			free(*bpp, M_SEGMENT);
    266 		} else
    267 			brelse(*bpp);
    268 	lfs_segunlock(fs);
    269 err1:
    270 	free(start, M_SEGMENT);
    271 	return (error);
    272 }
    273 
    274 /*
    275  * lfs_bmapv:
    276  *
    277  * This will fill in the current disk address for arrays of blocks.
    278  *
    279  *  0 on success
    280  * -1/errno is return on error.
    281  */
    282 int
    283 lfs_bmapv(p, v, retval)
    284 	struct proc *p;
    285 	void *v;
    286 	register_t *retval;
    287 {
    288 	struct lfs_bmapv_args /* {
    289 		syscallarg(fsid_t *) fsidp;
    290 		syscallarg(struct block_info *) blkiov;
    291 		syscallarg(int) blkcnt;
    292 	} */ *uap = v;
    293 	BLOCK_INFO *blkp;
    294 	struct mount *mntp;
    295 	struct ufsmount *ump;
    296 	struct vnode *vp;
    297 	fsid_t fsid;
    298 	void *start;
    299 	ufs_daddr_t daddr;
    300 	int cnt, error, step;
    301 
    302 	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
    303 		return (error);
    304 
    305 	error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t));
    306 	if (error)
    307 		return (error);
    308 	if ((mntp = vfs_getvfs(&fsid)) == NULL)
    309 		return (EINVAL);
    310 
    311 	cnt = SCARG(uap, blkcnt);
    312 	start = blkp = malloc(cnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
    313 	error = copyin(SCARG(uap, blkiov), blkp, cnt * sizeof(BLOCK_INFO));
    314 	if (error) {
    315 		free(blkp, M_SEGMENT);
    316 		return (error);
    317 	}
    318 
    319 	for (step = cnt; step--; ++blkp) {
    320 		if (blkp->bi_lbn == LFS_UNUSED_LBN)
    321 			continue;
    322 		/*
    323 		 * A regular call to VFS_VGET could deadlock
    324 		 * here.  Instead, we try an unlocked access.
    325 		 */
    326 		ump = VFSTOUFS(mntp);
    327 		if ((vp =
    328 		    ufs_ihashlookup(ump->um_dev, blkp->bi_inode)) != NULL) {
    329 			if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &daddr, NULL))
    330 				daddr = LFS_UNUSED_DADDR;
    331 		} else if (VFS_VGET(mntp, blkp->bi_inode, &vp))
    332 			daddr = LFS_UNUSED_DADDR;
    333 		else {
    334 			if (VOP_BMAP(vp, blkp->bi_lbn, NULL, &daddr, NULL))
    335 				daddr = LFS_UNUSED_DADDR;
    336 			vput(vp);
    337 		}
    338 		blkp->bi_daddr = daddr;
    339         }
    340 	copyout(start, SCARG(uap, blkiov), cnt * sizeof(BLOCK_INFO));
    341 	free(start, M_SEGMENT);
    342 	return (0);
    343 }
    344 
/*
 * lfs_segclean:
 *
 * Mark the segment clean.
 *
 *  0 on success
 * -1/errno is return on error.
 */
int
lfs_segclean(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct lfs_segclean_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(u_long) segment;
	} */ *uap = v;
	CLEANERINFO *cip;
	SEGUSE *sup;
	struct buf *bp;
	struct mount *mntp;
	struct lfs *fs;
	fsid_t fsid;
	int error;

	/* Only the superuser (i.e. the cleaner) may call this. */
	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
		return (error);

	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		return (EINVAL);

	fs = VFSTOUFS(mntp)->um_lfs;

	/* The segment currently being written cannot be cleaned. */
	if (datosn(fs, fs->lfs_curseg) == SCARG(uap, segment))
		return (EBUSY);

	/* Nor can a segment still marked active in its SEGUSE entry. */
	LFS_SEGENTRY(sup, fs, SCARG(uap, segment), bp);
	if (sup->su_flags & SEGUSE_ACTIVE) {
		brelse(bp);
		return (EBUSY);
	}
	/*
	 * Credit the segment's space back to the free counts: the whole
	 * segment to lfs_avail, plus the summary and inode blocks the
	 * SEGUSE entry accounted, to lfs_bfree.  Then clear the dirty
	 * flag and write the SEGUSE block back before bp is reused below.
	 */
	fs->lfs_avail += fsbtodb(fs, fs->lfs_ssize) - 1;
	fs->lfs_bfree += (sup->su_nsums * LFS_SUMMARY_SIZE / DEV_BSIZE) +
	    sup->su_ninos * btodb(fs->lfs_bsize);
	sup->su_flags &= ~SEGUSE_DIRTY;
	(void) VOP_BWRITE(bp);

	/* Move one segment from the dirty count to the clean count. */
	LFS_CLEANERINFO(cip, fs, bp);
	++cip->clean;
	--cip->dirty;
	(void) VOP_BWRITE(bp);
	/* Wake anyone sleeping on lfs_avail for free space. */
	wakeup(&fs->lfs_avail);
	return (0);
}
    402 
/*
 * lfs_segwait:
 *
 * This will block until a segment in file system fsid is written.  A timeout
 * in milliseconds may be specified which will awake the cleaner automatically.
 * An fsid of -1 means any file system, and a timeout of 0 means forever.
 *
 *  0 on success
 *  1 on timeout
 * -1/errno is return on error.
 *
 * NOTE(review): as written, a tsleep() timeout falls through the final
 * return and is reported as 0, not 1 -- confirm against the cleaner's
 * expectations.
 */
int
lfs_segwait(p, v, retval)
	struct proc *p;
	void *v;
	register_t *retval;
{
	struct lfs_segwait_args /* {
		syscallarg(fsid_t *) fsidp;
		syscallarg(struct timeval *) tv;
	} */ *uap = v;
	extern int lfs_allclean_wakeup;
	struct mount *mntp;
	struct timeval atv;
	fsid_t fsid;
	void *addr;
	u_long timeout;
	int error, s;

	/* Only the superuser (i.e. the cleaner) may call this. */
	if ((error = suser(p->p_ucred, &p->p_acflag)) != 0) {
		return (error);
}
#ifdef WHEN_QUADS_WORK
	if (error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t)))
		return (error);
	if (fsid == (fsid_t)-1)
		addr = &lfs_allclean_wakeup;
	else {
		if ((mntp = vfs_getvfs(&fsid)) == NULL)
			return (EINVAL);
		addr = &VFSTOUFS(mntp)->um_lfs->lfs_nextseg;
	}
#else
	if ((error = copyin(SCARG(uap, fsidp), &fsid, sizeof(fsid_t))) != 0)
		return (error);
	/* An unknown fsid means "wait for any file system". */
	if ((mntp = vfs_getvfs(&fsid)) == NULL)
		addr = &lfs_allclean_wakeup;
	else
		addr = &VFSTOUFS(mntp)->um_lfs->lfs_nextseg;
#endif

	/* Convert the caller-supplied timeout into clock ticks for tsleep. */
	if (SCARG(uap, tv)) {
		error = copyin(SCARG(uap, tv), &atv, sizeof(struct timeval));
		if (error)
			return (error);
		if (itimerfix(&atv))
			return (EINVAL);
		s = splclock();
		timeradd(&atv, &time, &atv);
		timeout = hzto(&atv);
		splx(s);
	} else
		timeout = 0;

	/* Sleep until a segment write wakes us, a signal, or the timeout. */
	error = tsleep(addr, PCATCH | PUSER, "segment", timeout);
	return (error == ERESTART ? EINTR : 0);
}
    470 
    471 /*
    472  * VFS_VGET call specialized for the cleaner.  The cleaner already knows the
    473  * daddr from the ifile, so don't look it up again.  If the cleaner is
    474  * processing IINFO structures, it may have the ondisk inode already, so
    475  * don't go retrieving it again.
    476  */
    477 int
    478 lfs_fastvget(mp, ino, daddr, vpp, dinp)
    479 	struct mount *mp;
    480 	ino_t ino;
    481 	ufs_daddr_t daddr;
    482 	struct vnode **vpp;
    483 	struct dinode *dinp;
    484 {
    485 	register struct inode *ip;
    486 	struct vnode *vp;
    487 	struct ufsmount *ump;
    488 	struct buf *bp;
    489 	dev_t dev;
    490 	int error;
    491 
    492 	ump = VFSTOUFS(mp);
    493 	dev = ump->um_dev;
    494 	/*
    495 	 * This is playing fast and loose.  Someone may have the inode
    496 	 * locked, in which case they are going to be distinctly unhappy
    497 	 * if we trash something.
    498 	 */
    499 	if ((*vpp = ufs_ihashlookup(dev, ino)) != NULL) {
    500 		lfs_vref(*vpp);
    501 		if ((*vpp)->v_flag & VXLOCK)
    502 			clean_vnlocked++;
    503 		ip = VTOI(*vpp);
    504 		if (lockstatus(&ip->i_lock))
    505 			clean_inlocked++;
    506 		if (!(ip->i_flag & IN_MODIFIED))
    507 			++ump->um_lfs->lfs_uinodes;
    508 		ip->i_flag |= IN_MODIFIED;
    509 		return (0);
    510 	}
    511 
    512 	/* Allocate new vnode/inode. */
    513 	if ((error = lfs_vcreate(mp, ino, &vp)) != 0) {
    514 		*vpp = NULL;
    515 		return (error);
    516 	}
    517 
    518 	/*
    519 	 * Put it onto its hash chain and lock it so that other requests for
    520 	 * this inode will block if they arrive while we are sleeping waiting
    521 	 * for old data structures to be purged or for the contents of the
    522 	 * disk portion of this inode to be read.
    523 	 */
    524 	ip = VTOI(vp);
    525 	ufs_ihashins(ip);
    526 
    527 	/*
    528 	 * XXX
    529 	 * This may not need to be here, logically it should go down with
    530 	 * the i_devvp initialization.
    531 	 * Ask Kirk.
    532 	 */
    533 	ip->i_lfs = ump->um_lfs;
    534 
    535 	/* Read in the disk contents for the inode, copy into the inode. */
    536 	if (dinp) {
    537 		error = copyin(dinp, &ip->i_din.ffs_din, sizeof(struct dinode));
    538 		if (error)
    539 			return (error);
    540 	}
    541 	else {
    542 		error = bread(ump->um_devvp, daddr,
    543 			      (int)ump->um_lfs->lfs_bsize, NOCRED, &bp);
    544 		if (error) {
    545 			/*
    546 			 * The inode does not contain anything useful, so it
    547 			 * would be misleading to leave it on its hash chain.
    548 			 * Iput() will return it to the free list.
    549 			 */
    550 			ufs_ihashrem(ip);
    551 
    552 			/* Unlock and discard unneeded inode. */
    553 			lfs_vunref(vp);
    554 			brelse(bp);
    555 			*vpp = NULL;
    556 			return (error);
    557 		}
    558 		ip->i_din.ffs_din =
    559 		    *lfs_ifind(ump->um_lfs, ino, (struct dinode *)bp->b_data);
    560 		brelse(bp);
    561 	}
    562 
    563 	/*
    564 	 * Initialize the vnode from the inode, check for aliases.  In all
    565 	 * cases re-init ip, the underlying vnode/inode may have changed.
    566 	 */
    567 	error = ufs_vinit(mp, lfs_specop_p, lfs_fifoop_p, &vp);
    568 	if (error) {
    569 		lfs_vunref(vp);
    570 		*vpp = NULL;
    571 		return (error);
    572 	}
    573 	/*
    574 	 * Finish inode initialization now that aliasing has been resolved.
    575 	 */
    576 	ip->i_devvp = ump->um_devvp;
    577 	ip->i_flag |= IN_MODIFIED;
    578 	++ump->um_lfs->lfs_uinodes;
    579 	VREF(ip->i_devvp);
    580 	*vpp = vp;
    581 	return (0);
    582 }
    583 struct buf *
    584 lfs_fakebuf(vp, lbn, size, uaddr)
    585 	struct vnode *vp;
    586 	int lbn;
    587 	size_t size;
    588 	caddr_t uaddr;
    589 {
    590 	struct buf *bp;
    591 
    592 	bp = lfs_newbuf(vp, lbn, 0);
    593 	bp->b_saveaddr = uaddr;
    594 	bp->b_bufsize = size;
    595 	bp->b_bcount = size;
    596 	bp->b_flags |= B_INVAL;
    597 	return (bp);
    598 }
    599