lfs_vnops.c revision 1.144
      1 /*	$NetBSD: lfs_vnops.c,v 1.144 2005/04/16 17:28:37 perseant Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Konrad E. Schroder <perseant (at) hhhh.org>.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the NetBSD
     21  *	Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 /*
     39  * Copyright (c) 1986, 1989, 1991, 1993, 1995
     40  *	The Regents of the University of California.  All rights reserved.
     41  *
     42  * Redistribution and use in source and binary forms, with or without
     43  * modification, are permitted provided that the following conditions
     44  * are met:
     45  * 1. Redistributions of source code must retain the above copyright
     46  *    notice, this list of conditions and the following disclaimer.
     47  * 2. Redistributions in binary form must reproduce the above copyright
     48  *    notice, this list of conditions and the following disclaimer in the
     49  *    documentation and/or other materials provided with the distribution.
     50  * 3. Neither the name of the University nor the names of its contributors
     51  *    may be used to endorse or promote products derived from this software
     52  *    without specific prior written permission.
     53  *
     54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     64  * SUCH DAMAGE.
     65  *
     66  *	@(#)lfs_vnops.c	8.13 (Berkeley) 6/10/95
     67  */
     68 
     69 #include <sys/cdefs.h>
     70 __KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.144 2005/04/16 17:28:37 perseant Exp $");
     71 
     72 #include <sys/param.h>
     73 #include <sys/systm.h>
     74 #include <sys/namei.h>
     75 #include <sys/resourcevar.h>
     76 #include <sys/kernel.h>
     77 #include <sys/file.h>
     78 #include <sys/stat.h>
     79 #include <sys/buf.h>
     80 #include <sys/proc.h>
     81 #include <sys/mount.h>
     82 #include <sys/vnode.h>
     83 #include <sys/pool.h>
     84 #include <sys/signalvar.h>
     85 
     86 #include <miscfs/fifofs/fifo.h>
     87 #include <miscfs/genfs/genfs.h>
     88 #include <miscfs/specfs/specdev.h>
     89 
     90 #include <ufs/ufs/inode.h>
     91 #include <ufs/ufs/dir.h>
     92 #include <ufs/ufs/ufsmount.h>
     93 #include <ufs/ufs/ufs_extern.h>
     94 
     95 #include <uvm/uvm.h>
     96 #include <uvm/uvm_pmap.h>
     97 #include <uvm/uvm_stat.h>
     98 #include <uvm/uvm_pager.h>
     99 
    100 #include <ufs/lfs/lfs.h>
    101 #include <ufs/lfs/lfs_extern.h>
    102 
    103 extern pid_t lfs_writer_daemon;
    104 
    105 /* Global vfs data structures for lfs. */
    106 int (**lfs_vnodeop_p)(void *);
    107 const struct vnodeopv_entry_desc lfs_vnodeop_entries[] = {
    108 	{ &vop_default_desc, vn_default_error },
    109 	{ &vop_lookup_desc, ufs_lookup },		/* lookup */
    110 	{ &vop_create_desc, lfs_create },		/* create */
    111 	{ &vop_whiteout_desc, ufs_whiteout },		/* whiteout */
    112 	{ &vop_mknod_desc, lfs_mknod },			/* mknod */
    113 	{ &vop_open_desc, ufs_open },			/* open */
    114 	{ &vop_close_desc, lfs_close },			/* close */
    115 	{ &vop_access_desc, ufs_access },		/* access */
    116 	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
    117 	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
    118 	{ &vop_read_desc, lfs_read },			/* read */
    119 	{ &vop_write_desc, lfs_write },			/* write */
    120 	{ &vop_lease_desc, ufs_lease_check },		/* lease */
    121 	{ &vop_ioctl_desc, ufs_ioctl },			/* ioctl */
    122 	{ &vop_fcntl_desc, lfs_fcntl },			/* fcntl */
    123 	{ &vop_poll_desc, ufs_poll },			/* poll */
    124 	{ &vop_kqfilter_desc, genfs_kqfilter },		/* kqfilter */
    125 	{ &vop_revoke_desc, ufs_revoke },		/* revoke */
    126 	{ &vop_mmap_desc, lfs_mmap },			/* mmap */
    127 	{ &vop_fsync_desc, lfs_fsync },			/* fsync */
    128 	{ &vop_seek_desc, ufs_seek },			/* seek */
    129 	{ &vop_remove_desc, lfs_remove },		/* remove */
    130 	{ &vop_link_desc, lfs_link },			/* link */
    131 	{ &vop_rename_desc, lfs_rename },		/* rename */
    132 	{ &vop_mkdir_desc, lfs_mkdir },			/* mkdir */
    133 	{ &vop_rmdir_desc, lfs_rmdir },			/* rmdir */
    134 	{ &vop_symlink_desc, lfs_symlink },		/* symlink */
    135 	{ &vop_readdir_desc, ufs_readdir },		/* readdir */
    136 	{ &vop_readlink_desc, ufs_readlink },		/* readlink */
    137 	{ &vop_abortop_desc, ufs_abortop },		/* abortop */
    138 	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
    139 	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
    140 	{ &vop_lock_desc, ufs_lock },			/* lock */
    141 	{ &vop_unlock_desc, ufs_unlock },		/* unlock */
    142 	{ &vop_bmap_desc, ufs_bmap },			/* bmap */
    143 	{ &vop_strategy_desc, lfs_strategy },		/* strategy */
    144 	{ &vop_print_desc, ufs_print },			/* print */
    145 	{ &vop_islocked_desc, ufs_islocked },		/* islocked */
    146 	{ &vop_pathconf_desc, ufs_pathconf },		/* pathconf */
    147 	{ &vop_advlock_desc, ufs_advlock },		/* advlock */
    148 	{ &vop_blkatoff_desc, lfs_blkatoff },		/* blkatoff */
    149 	{ &vop_valloc_desc, lfs_valloc },		/* valloc */
    150 	{ &vop_balloc_desc, lfs_balloc },		/* balloc */
    151 	{ &vop_vfree_desc, lfs_vfree },			/* vfree */
    152 	{ &vop_truncate_desc, lfs_truncate },		/* truncate */
    153 	{ &vop_update_desc, lfs_update },		/* update */
    154 	{ &vop_bwrite_desc, lfs_bwrite },		/* bwrite */
    155 	{ &vop_getpages_desc, lfs_getpages },		/* getpages */
    156 	{ &vop_putpages_desc, lfs_putpages },		/* putpages */
    157 	{ NULL, NULL }
    158 };
    159 const struct vnodeopv_desc lfs_vnodeop_opv_desc =
    160 	{ &lfs_vnodeop_p, lfs_vnodeop_entries };
    161 
    162 int (**lfs_specop_p)(void *);
    163 const struct vnodeopv_entry_desc lfs_specop_entries[] = {
    164 	{ &vop_default_desc, vn_default_error },
    165 	{ &vop_lookup_desc, spec_lookup },		/* lookup */
    166 	{ &vop_create_desc, spec_create },		/* create */
    167 	{ &vop_mknod_desc, spec_mknod },		/* mknod */
    168 	{ &vop_open_desc, spec_open },			/* open */
    169 	{ &vop_close_desc, lfsspec_close },		/* close */
    170 	{ &vop_access_desc, ufs_access },		/* access */
    171 	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
    172 	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
    173 	{ &vop_read_desc, ufsspec_read },		/* read */
    174 	{ &vop_write_desc, ufsspec_write },		/* write */
    175 	{ &vop_lease_desc, spec_lease_check },		/* lease */
    176 	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
    177 	{ &vop_fcntl_desc, ufs_fcntl },			/* fcntl */
    178 	{ &vop_poll_desc, spec_poll },			/* poll */
    179 	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
    180 	{ &vop_revoke_desc, spec_revoke },		/* revoke */
    181 	{ &vop_mmap_desc, spec_mmap },			/* mmap */
    182 	{ &vop_fsync_desc, spec_fsync },		/* fsync */
    183 	{ &vop_seek_desc, spec_seek },			/* seek */
    184 	{ &vop_remove_desc, spec_remove },		/* remove */
    185 	{ &vop_link_desc, spec_link },			/* link */
    186 	{ &vop_rename_desc, spec_rename },		/* rename */
    187 	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
    188 	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
    189 	{ &vop_symlink_desc, spec_symlink },		/* symlink */
    190 	{ &vop_readdir_desc, spec_readdir },		/* readdir */
    191 	{ &vop_readlink_desc, spec_readlink },		/* readlink */
    192 	{ &vop_abortop_desc, spec_abortop },		/* abortop */
    193 	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
    194 	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
    195 	{ &vop_lock_desc, ufs_lock },			/* lock */
    196 	{ &vop_unlock_desc, ufs_unlock },		/* unlock */
    197 	{ &vop_bmap_desc, spec_bmap },			/* bmap */
    198 	{ &vop_strategy_desc, spec_strategy },		/* strategy */
    199 	{ &vop_print_desc, ufs_print },			/* print */
    200 	{ &vop_islocked_desc, ufs_islocked },		/* islocked */
    201 	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
    202 	{ &vop_advlock_desc, spec_advlock },		/* advlock */
    203 	{ &vop_blkatoff_desc, spec_blkatoff },		/* blkatoff */
    204 	{ &vop_valloc_desc, spec_valloc },		/* valloc */
    205 	{ &vop_vfree_desc, lfs_vfree },			/* vfree */
    206 	{ &vop_truncate_desc, spec_truncate },		/* truncate */
    207 	{ &vop_update_desc, lfs_update },		/* update */
    208 	{ &vop_bwrite_desc, vn_bwrite },		/* bwrite */
    209 	{ &vop_getpages_desc, spec_getpages },		/* getpages */
    210 	{ &vop_putpages_desc, spec_putpages },		/* putpages */
    211 	{ NULL, NULL }
    212 };
    213 const struct vnodeopv_desc lfs_specop_opv_desc =
    214 	{ &lfs_specop_p, lfs_specop_entries };
    215 
    216 int (**lfs_fifoop_p)(void *);
    217 const struct vnodeopv_entry_desc lfs_fifoop_entries[] = {
    218 	{ &vop_default_desc, vn_default_error },
    219 	{ &vop_lookup_desc, fifo_lookup },		/* lookup */
    220 	{ &vop_create_desc, fifo_create },		/* create */
    221 	{ &vop_mknod_desc, fifo_mknod },		/* mknod */
    222 	{ &vop_open_desc, fifo_open },			/* open */
    223 	{ &vop_close_desc, lfsfifo_close },		/* close */
    224 	{ &vop_access_desc, ufs_access },		/* access */
    225 	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
    226 	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
    227 	{ &vop_read_desc, ufsfifo_read },		/* read */
    228 	{ &vop_write_desc, ufsfifo_write },		/* write */
    229 	{ &vop_lease_desc, fifo_lease_check },		/* lease */
    230 	{ &vop_ioctl_desc, fifo_ioctl },		/* ioctl */
    231 	{ &vop_fcntl_desc, ufs_fcntl },			/* fcntl */
    232 	{ &vop_poll_desc, fifo_poll },			/* poll */
    233 	{ &vop_kqfilter_desc, fifo_kqfilter },		/* kqfilter */
    234 	{ &vop_revoke_desc, fifo_revoke },		/* revoke */
    235 	{ &vop_mmap_desc, fifo_mmap },			/* mmap */
    236 	{ &vop_fsync_desc, fifo_fsync },		/* fsync */
    237 	{ &vop_seek_desc, fifo_seek },			/* seek */
    238 	{ &vop_remove_desc, fifo_remove },		/* remove */
    239 	{ &vop_link_desc, fifo_link },			/* link */
    240 	{ &vop_rename_desc, fifo_rename },		/* rename */
    241 	{ &vop_mkdir_desc, fifo_mkdir },		/* mkdir */
    242 	{ &vop_rmdir_desc, fifo_rmdir },		/* rmdir */
    243 	{ &vop_symlink_desc, fifo_symlink },		/* symlink */
    244 	{ &vop_readdir_desc, fifo_readdir },		/* readdir */
    245 	{ &vop_readlink_desc, fifo_readlink },		/* readlink */
    246 	{ &vop_abortop_desc, fifo_abortop },		/* abortop */
    247 	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
    248 	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
    249 	{ &vop_lock_desc, ufs_lock },			/* lock */
    250 	{ &vop_unlock_desc, ufs_unlock },		/* unlock */
    251 	{ &vop_bmap_desc, fifo_bmap },			/* bmap */
    252 	{ &vop_strategy_desc, fifo_strategy },		/* strategy */
    253 	{ &vop_print_desc, ufs_print },			/* print */
    254 	{ &vop_islocked_desc, ufs_islocked },		/* islocked */
    255 	{ &vop_pathconf_desc, fifo_pathconf },		/* pathconf */
    256 	{ &vop_advlock_desc, fifo_advlock },		/* advlock */
    257 	{ &vop_blkatoff_desc, fifo_blkatoff },		/* blkatoff */
    258 	{ &vop_valloc_desc, fifo_valloc },		/* valloc */
    259 	{ &vop_vfree_desc, lfs_vfree },			/* vfree */
    260 	{ &vop_truncate_desc, fifo_truncate },		/* truncate */
    261 	{ &vop_update_desc, lfs_update },		/* update */
    262 	{ &vop_bwrite_desc, lfs_bwrite },		/* bwrite */
    263 	{ &vop_putpages_desc, fifo_putpages },		/* putpages */
    264 	{ NULL, NULL }
    265 };
    266 const struct vnodeopv_desc lfs_fifoop_opv_desc =
    267 	{ &lfs_fifoop_p, lfs_fifoop_entries };
    268 
    269 static int check_dirty(struct lfs *, struct vnode *, off_t, off_t, off_t, int, int);
    270 
    271 /*
    272  * A function version of LFS_ITIMES, for the UFS functions which call ITIMES
    273  */
    274 void
    275 lfs_itimes(struct inode *ip, struct timespec *acc, struct timespec *mod, struct timespec *cre)
    276 {
    277 	LFS_ITIMES(ip, acc, mod, cre);
    278 }
    279 
    280 #define	LFS_READWRITE
    281 #include <ufs/ufs/ufs_readwrite.c>
    282 #undef	LFS_READWRITE
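/*
 * Note: including <ufs/ufs/ufs_readwrite.c> with LFS_READWRITE defined
 * generates the LFS variants of the read/write vnode operations
 * (lfs_read() and lfs_write(), registered in lfs_vnodeop_entries above)
 * from the shared UFS implementation rather than duplicating it here.
 */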
    283 
    284 /*
    285  * Synch an open file.
    286  */
    287 /* ARGSUSED */
    288 int
    289 lfs_fsync(void *v)
    290 {
    291 	struct vop_fsync_args /* {
    292 		struct vnode *a_vp;
    293 		struct ucred *a_cred;
    294 		int a_flags;
    295 		off_t a_offlo;
    296 		off_t a_offhi;
    297 		struct proc *a_p;
    298 	} */ *ap = v;
    299 	struct vnode *vp = ap->a_vp;
    300 	int error, wait;
    301 
    302 	/*
    303 	 * Trickle sync: check whether a checkpoint is needed after possible
    304 	 * activity from the pagedaemon.
    305 	 */
    306 	if (ap->a_flags & FSYNC_LAZY) {
    307 		simple_lock(&lfs_subsys_lock);
    308 		wakeup(&lfs_writer_daemon);
    309 		simple_unlock(&lfs_subsys_lock);
    310 		return 0;
    311 	}
    312 
    313 	wait = (ap->a_flags & FSYNC_WAIT);
    314 	simple_lock(&vp->v_interlock);
    315 	error = VOP_PUTPAGES(vp, trunc_page(ap->a_offlo),
    316 			round_page(ap->a_offhi),
    317 			PGO_CLEANIT | (wait ? PGO_SYNCIO : 0));
    318 	if (error)
    319 		return error;
    320 	error = VOP_UPDATE(vp, NULL, NULL, wait ? UPDATE_WAIT : 0);
    321 	if (error == 0 && ap->a_flags & FSYNC_CACHE) {
    322 		int l = 0;
    323 		error = VOP_IOCTL(VTOI(vp)->i_devvp, DIOCCACHESYNC, &l, FWRITE,
    324 				  ap->a_p->p_ucred, ap->a_p);
    325 	}
    326 	if (wait && !VPISEMPTY(vp))
    327 		LFS_SET_UINO(VTOI(vp), IN_MODIFIED);
    328 
    329 	return error;
    330 }
    331 
    332 /*
    333  * Take IN_ADIROP off, then call ufs_inactive.
    334  */
    335 int
    336 lfs_inactive(void *v)
    337 {
    338 	struct vop_inactive_args /* {
    339 		struct vnode *a_vp;
    340 		struct proc *a_p;
    341 	} */ *ap = v;
    342 
    343 	KASSERT(VTOI(ap->a_vp)->i_nlink == VTOI(ap->a_vp)->i_ffs_effnlink);
    344 
    345 	lfs_unmark_vnode(ap->a_vp);
    346 
    347 	/*
    348 	 * The Ifile is only ever inactivated on unmount.
    349 	 * Streamline this process by not giving it more dirty blocks.
    350 	 */
    351 	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM) {
    352 		LFS_CLR_UINO(VTOI(ap->a_vp), IN_ALLMOD);
    353 		VOP_UNLOCK(ap->a_vp, 0);
    354 		return 0;
    355 	}
    356 
    357 	return ufs_inactive(v);
    358 }
    359 
    360 /*
    361  * These macros are used to bracket UFS directory ops, so that we can
    362  * identify all the pages touched during directory ops which need to
    363  * be ordered and flushed atomically, so that they may be recovered.
    364  *
    365  * Because we have to mark nodes VDIROP in order to prevent
    366  * the cache from reclaiming them while a dirop is in progress, we must
    367  * also manage the number of nodes so marked (otherwise we can run out).
    368  * We do this by setting lfs_dirvcount to the number of marked vnodes; it
    369  * is decremented during segment write, when VDIROP is taken off.
    370  */
    371 #define	MARK_VNODE(vp)			lfs_mark_vnode(vp)
    372 #define	UNMARK_VNODE(vp)		lfs_unmark_vnode(vp)
    373 #define	SET_DIROP_CREATE(dvp, vpp)	lfs_set_dirop_create((dvp), (vpp))
    374 #define	SET_DIROP_REMOVE(dvp, vp)	lfs_set_dirop((dvp), (vp))
    375 static int lfs_set_dirop_create(struct vnode *, struct vnode **);
    376 static int lfs_set_dirop(struct vnode *, struct vnode *);
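/*
 * Illustrative sketch (not part of the original source): every directory-op
 * wrapper below brackets the corresponding UFS operation the same way,
 * e.g. lfs_symlink():
 *
 *	if ((error = SET_DIROP_CREATE(dvp, vpp)) != 0) {
 *		vput(dvp);
 *		return error;
 *	}
 *	error = ufs_symlink(ap);
 *	SET_ENDOP_CREATE_AP(ap, "symlink");
 *	return error;
 *
 * The dirop is opened before calling into the shared UFS code and closed
 * afterwards whether or not the UFS operation succeeded.
 */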
    377 
    378 static int
    379 lfs_set_dirop(struct vnode *dvp, struct vnode *vp)
    380 {
    381 	struct lfs *fs;
    382 	int error;
    383 
    384 	KASSERT(VOP_ISLOCKED(dvp));
    385 	KASSERT(vp == NULL || VOP_ISLOCKED(vp));
    386 
    387 	fs = VTOI(dvp)->i_lfs;
    388 
    389 	ASSERT_NO_SEGLOCK(fs);
    390 	/*
    391 	 * LFS_NRESERVE calculates direct and indirect blocks as well
    392 	 * as an inode block; an overestimate in most cases.
    393 	 */
    394 	if ((error = lfs_reserve(fs, dvp, vp, LFS_NRESERVE(fs))) != 0)
    395 		return (error);
    396 
    397     restart:
    398 	simple_lock(&fs->lfs_interlock);
    399 	if (fs->lfs_dirops == 0) {
    400 		simple_unlock(&fs->lfs_interlock);
    401 		lfs_check(dvp, LFS_UNUSED_LBN, 0);
    402 		simple_lock(&fs->lfs_interlock);
    403 	}
    404 	while (fs->lfs_writer)
    405 		ltsleep(&fs->lfs_dirops, (PRIBIO + 1), "lfs_sdirop", 0,
    406 			&fs->lfs_interlock);
    407 	simple_lock(&lfs_subsys_lock);
    408 	if (lfs_dirvcount > LFS_MAX_DIROP && fs->lfs_dirops == 0) {
    409 		wakeup(&lfs_writer_daemon);
    410 		simple_unlock(&lfs_subsys_lock);
    411 		simple_unlock(&fs->lfs_interlock);
    412 		preempt(1);
    413 		goto restart;
    414 	}
    415 
    416 	if (lfs_dirvcount > LFS_MAX_DIROP) {
    417 		simple_unlock(&fs->lfs_interlock);
    418 		DLOG((DLOG_DIROP, "lfs_set_dirop: sleeping with dirops=%d, "
    419 		      "dirvcount=%d\n", fs->lfs_dirops, lfs_dirvcount));
    420 		if ((error = ltsleep(&lfs_dirvcount,
    421 		    PCATCH | PUSER | PNORELOCK, "lfs_maxdirop", 0,
    422 		    &lfs_subsys_lock)) != 0) {
    423 			goto unreserve;
    424 		}
    425 		goto restart;
    426 	}
    427 	simple_unlock(&lfs_subsys_lock);
    428 
    429 	++fs->lfs_dirops;
    430 	fs->lfs_doifile = 1;
    431 	simple_unlock(&fs->lfs_interlock);
    432 
    433 	/* Hold a reference so SET_ENDOP will be happy */
    434 	vref(dvp);
    435 	if (vp) {
    436 		vref(vp);
    437 		MARK_VNODE(vp);
    438 	}
    439 
    440 	MARK_VNODE(dvp);
    441 	return 0;
    442 
    443 unreserve:
    444 	lfs_reserve(fs, dvp, vp, -LFS_NRESERVE(fs));
    445 	return error;
    446 }
    447 
    448 /*
    449  * Get a new vnode *before* adjusting the dirop count, to avoid a deadlock
    450  * in getnewvnode(), if we have a stacked filesystem mounted on top
    451  * of us.
    452  *
    453  * NB: this means we have to clear the new vnodes on error.  Fortunately
    454  * SET_ENDOP is there to do that for us.
    455  */
    456 static int
    457 lfs_set_dirop_create(struct vnode *dvp, struct vnode **vpp)
    458 {
    459 	int error;
    460 	struct lfs *fs;
    461 
    462 	fs = VFSTOUFS(dvp->v_mount)->um_lfs;
    463 	ASSERT_NO_SEGLOCK(fs);
    464 	if (fs->lfs_ronly)
    465 		return EROFS;
    466 	if (vpp && (error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, vpp))) {
    467 		DLOG((DLOG_ALLOC, "lfs_set_dirop_create: dvp %p error %d\n",
    468 		      dvp, error));
    469 		return error;
    470 	}
    471 	if ((error = lfs_set_dirop(dvp, NULL)) != 0) {
    472 		if (vpp) {
    473 			ungetnewvnode(*vpp);
    474 			*vpp = NULL;
    475 		}
    476 		return error;
    477 	}
    478 	return 0;
    479 }
    480 
    481 #define	SET_ENDOP_BASE(fs, dvp, str)					\
    482 	do {								\
    483 		simple_lock(&(fs)->lfs_interlock);			\
    484 		--(fs)->lfs_dirops;					\
    485 		if (!(fs)->lfs_dirops) {				\
    486 			if ((fs)->lfs_nadirop) {			\
    487 				panic("SET_ENDOP: %s: no dirops but "	\
    488 					" nadirop=%d", (str),		\
    489 					(fs)->lfs_nadirop);		\
    490 			}						\
    491 			wakeup(&(fs)->lfs_writer);			\
    492 			simple_unlock(&(fs)->lfs_interlock);		\
    493 			lfs_check((dvp), LFS_UNUSED_LBN, 0);		\
    494 		} else							\
    495 			simple_unlock(&(fs)->lfs_interlock);		\
    496 	} while(0)
    497 #define SET_ENDOP_CREATE(fs, dvp, nvpp, str)				\
    498 	do {								\
    499 		UNMARK_VNODE(dvp);					\
    500 		if (nvpp && *nvpp)					\
    501 			UNMARK_VNODE(*nvpp);				\
    502 		/* Check for error return to stem vnode leakage */	\
    503 		if (nvpp && *nvpp && !((*nvpp)->v_flag & VDIROP))	\
    504 			ungetnewvnode(*(nvpp));				\
    505 		SET_ENDOP_BASE((fs), (dvp), (str));			\
    506 		lfs_reserve((fs), (dvp), NULL, -LFS_NRESERVE(fs));	\
    507 		vrele(dvp);						\
    508 	} while(0)
    509 #define SET_ENDOP_CREATE_AP(ap, str)					\
    510 	SET_ENDOP_CREATE(VTOI((ap)->a_dvp)->i_lfs, (ap)->a_dvp,		\
    511 			 (ap)->a_vpp, (str))
    512 #define SET_ENDOP_REMOVE(fs, dvp, ovp, str)				\
    513 	do {								\
    514 		UNMARK_VNODE(dvp);					\
    515 		if (ovp)						\
    516 			UNMARK_VNODE(ovp);				\
    517 		SET_ENDOP_BASE((fs), (dvp), (str));			\
    518 		lfs_reserve((fs), (dvp), (ovp), -LFS_NRESERVE(fs));	\
    519 		vrele(dvp);						\
    520 		if (ovp)						\
    521 			vrele(ovp);					\
    522 	} while(0)
    523 
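/*
 * Mark a vnode as participating in a directory op: on the first marking
 * take an extra reference, set VDIROP, bump lfs_dirvcount and put the
 * inode on the dirop chain; in all cases flag the inode IN_ADIROP while
 * the op is active.
 */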
    524 void
    525 lfs_mark_vnode(struct vnode *vp)
    526 {
    527 	struct inode *ip = VTOI(vp);
    528 	struct lfs *fs = ip->i_lfs;
    529 
    530 	simple_lock(&fs->lfs_interlock);
    531 	if (!(ip->i_flag & IN_ADIROP)) {
    532 		if (!(vp->v_flag & VDIROP)) {
    533 			(void)lfs_vref(vp);
    534 			simple_lock(&lfs_subsys_lock);
    535 			++lfs_dirvcount;
    536 			simple_unlock(&lfs_subsys_lock);
    537 			TAILQ_INSERT_TAIL(&fs->lfs_dchainhd, ip, i_lfs_dchain);
    538 			vp->v_flag |= VDIROP;
    539 		}
    540 		++fs->lfs_nadirop;
    541 		ip->i_flag |= IN_ADIROP;
    542 	} else
    543 		KASSERT(vp->v_flag & VDIROP);
    544 	simple_unlock(&fs->lfs_interlock);
    545 }
    546 
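/*
 * Clear the IN_ADIROP state once the active dirop is finished.  Note that
 * VDIROP stays set and lfs_dirvcount is not decremented here; as the
 * comment above explains, that happens when the segment is written.
 */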
    547 void
    548 lfs_unmark_vnode(struct vnode *vp)
    549 {
    550 	struct inode *ip = VTOI(vp);
    551 
    552 	if (ip->i_flag & IN_ADIROP) {
    553 		KASSERT(vp->v_flag & VDIROP);
    554 		simple_lock(&ip->i_lfs->lfs_interlock);
    555 		--ip->i_lfs->lfs_nadirop;
    556 		simple_unlock(&ip->i_lfs->lfs_interlock);
    557 		ip->i_flag &= ~IN_ADIROP;
    558 	}
    559 }
    560 
    561 int
    562 lfs_symlink(void *v)
    563 {
    564 	struct vop_symlink_args /* {
    565 		struct vnode *a_dvp;
    566 		struct vnode **a_vpp;
    567 		struct componentname *a_cnp;
    568 		struct vattr *a_vap;
    569 		char *a_target;
    570 	} */ *ap = v;
    571 	int error;
    572 
    573 	if ((error = SET_DIROP_CREATE(ap->a_dvp, ap->a_vpp)) != 0) {
    574 		vput(ap->a_dvp);
    575 		return error;
    576 	}
    577 	error = ufs_symlink(ap);
    578 	SET_ENDOP_CREATE_AP(ap, "symlink");
    579 	return (error);
    580 }
    581 
    582 int
    583 lfs_mknod(void *v)
    584 {
    585 	struct vop_mknod_args	/* {
    586 		struct vnode *a_dvp;
    587 		struct vnode **a_vpp;
    588 		struct componentname *a_cnp;
    589 		struct vattr *a_vap;
    590 		} */ *ap = v;
    591 	struct vattr *vap = ap->a_vap;
    592 	struct vnode **vpp = ap->a_vpp;
    593 	struct inode *ip;
    594 	int error;
    595 	struct mount	*mp;
    596 	ino_t		ino;
    597 
    598 	if ((error = SET_DIROP_CREATE(ap->a_dvp, ap->a_vpp)) != 0) {
    599 		vput(ap->a_dvp);
    600 		return error;
    601 	}
    602 	error = ufs_makeinode(MAKEIMODE(vap->va_type, vap->va_mode),
    603 	    ap->a_dvp, vpp, ap->a_cnp);
    604 
    605 	/* Either way we're done with the dirop at this point */
    606 	SET_ENDOP_CREATE_AP(ap, "mknod");
    607 
    608 	if (error)
    609 		return (error);
    610 
    611 	ip = VTOI(*vpp);
    612 	mp  = (*vpp)->v_mount;
    613 	ino = ip->i_number;
    614 	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
    615 	if (vap->va_rdev != VNOVAL) {
    616 		/*
    617 		 * Want to be able to use this to make badblock
    618 		 * inodes, so don't truncate the dev number.
    619 		 */
    620 #if 0
    621 		ip->i_ffs1_rdev = ufs_rw32(vap->va_rdev,
    622 		    UFS_MPNEEDSWAP((*vpp)->v_mount));
    623 #else
    624 		ip->i_ffs1_rdev = vap->va_rdev;
    625 #endif
    626 	}
    627 
    628 	/*
    629 	 * Call fsync to write the vnode so that we don't have to deal with
    630 	 * flushing it when it's marked VDIROP|VXLOCK.
    631 	 *
    632 	 * XXX KS - If we can't flush we also can't call vgone(), so must
    633 	 * return.  But, that leaves this vnode in limbo, also not good.
    634 	 * Can this ever happen (barring hardware failure)?
    635 	 */
    636 	if ((error = VOP_FSYNC(*vpp, NOCRED, FSYNC_WAIT, 0, 0,
    637 	    curproc)) != 0) {
    638 		panic("lfs_mknod: couldn't fsync (ino %d)", ino);
    639 		/* return (error); */
    640 	}
    641 	/*
    642 	 * Remove vnode so that it will be reloaded by VFS_VGET and
    643 	 * checked to see if it is an alias of an existing entry in
    644 	 * the inode cache.
    645 	 */
    646 	/* Used to be vput, but that causes us to call VOP_INACTIVE twice. */
    647 
    648 	VOP_UNLOCK(*vpp, 0);
    649 	lfs_vunref(*vpp);
    650 	(*vpp)->v_type = VNON;
    651 	vgone(*vpp);
    652 	error = VFS_VGET(mp, ino, vpp);
    653 
    654 	if (error != 0) {
    655 		*vpp = NULL;
    656 		return (error);
    657 	}
    658 	return (0);
    659 }
    660 
    661 int
    662 lfs_create(void *v)
    663 {
    664 	struct vop_create_args	/* {
    665 		struct vnode *a_dvp;
    666 		struct vnode **a_vpp;
    667 		struct componentname *a_cnp;
    668 		struct vattr *a_vap;
    669 	} */ *ap = v;
    670 	int error;
    671 
    672 	if ((error = SET_DIROP_CREATE(ap->a_dvp, ap->a_vpp)) != 0) {
    673 		vput(ap->a_dvp);
    674 		return error;
    675 	}
    676 	error = ufs_create(ap);
    677 	SET_ENDOP_CREATE_AP(ap, "create");
    678 	return (error);
    679 }
    680 
    681 int
    682 lfs_mkdir(void *v)
    683 {
    684 	struct vop_mkdir_args	/* {
    685 		struct vnode *a_dvp;
    686 		struct vnode **a_vpp;
    687 		struct componentname *a_cnp;
    688 		struct vattr *a_vap;
    689 	} */ *ap = v;
    690 	int error;
    691 
    692 	if ((error = SET_DIROP_CREATE(ap->a_dvp, ap->a_vpp)) != 0) {
    693 		vput(ap->a_dvp);
    694 		return error;
    695 	}
    696 	error = ufs_mkdir(ap);
    697 	SET_ENDOP_CREATE_AP(ap, "mkdir");
    698 	return (error);
    699 }
    700 
    701 int
    702 lfs_remove(void *v)
    703 {
    704 	struct vop_remove_args	/* {
    705 		struct vnode *a_dvp;
    706 		struct vnode *a_vp;
    707 		struct componentname *a_cnp;
    708 	} */ *ap = v;
    709 	struct vnode *dvp, *vp;
    710 	int error;
    711 
    712 	dvp = ap->a_dvp;
    713 	vp = ap->a_vp;
    714 	if ((error = SET_DIROP_REMOVE(dvp, vp)) != 0) {
    715 		if (dvp == vp)
    716 			vrele(vp);
    717 		else
    718 			vput(vp);
    719 		vput(dvp);
    720 		return error;
    721 	}
    722 	error = ufs_remove(ap);
    723 	SET_ENDOP_REMOVE(VTOI(dvp)->i_lfs, dvp, vp, "remove");
    724 	return (error);
    725 }
    726 
    727 int
    728 lfs_rmdir(void *v)
    729 {
    730 	struct vop_rmdir_args	/* {
    731 		struct vnodeop_desc *a_desc;
    732 		struct vnode *a_dvp;
    733 		struct vnode *a_vp;
    734 		struct componentname *a_cnp;
    735 	} */ *ap = v;
    736 	struct vnode *vp;
    737 	int error;
    738 
    739 	vp = ap->a_vp;
    740 	if ((error = SET_DIROP_REMOVE(ap->a_dvp, ap->a_vp)) != 0) {
    741 		vrele(ap->a_dvp);
    742 		if (ap->a_vp != ap->a_dvp)
    743 			VOP_UNLOCK(ap->a_dvp, 0);
    744 		vput(vp);
    745 		return error;
    746 	}
    747 	error = ufs_rmdir(ap);
    748 	SET_ENDOP_REMOVE(VTOI(ap->a_dvp)->i_lfs, ap->a_dvp, vp, "rmdir");
    749 	return (error);
    750 }
    751 
    752 int
    753 lfs_link(void *v)
    754 {
    755 	struct vop_link_args	/* {
    756 		struct vnode *a_dvp;
    757 		struct vnode *a_vp;
    758 		struct componentname *a_cnp;
    759 	} */ *ap = v;
    760 	int error;
    761 	struct vnode **vpp = NULL;
    762 
    763 	if ((error = SET_DIROP_CREATE(ap->a_dvp, vpp)) != 0) {
    764 		vput(ap->a_dvp);
    765 		return error;
    766 	}
    767 	error = ufs_link(ap);
    768 	SET_ENDOP_CREATE(VTOI(ap->a_dvp)->i_lfs, ap->a_dvp, vpp, "link");
    769 	return (error);
    770 }
    771 
    772 int
    773 lfs_rename(void *v)
    774 {
    775 	struct vop_rename_args	/* {
    776 		struct vnode *a_fdvp;
    777 		struct vnode *a_fvp;
    778 		struct componentname *a_fcnp;
    779 		struct vnode *a_tdvp;
    780 		struct vnode *a_tvp;
    781 		struct componentname *a_tcnp;
    782 	} */ *ap = v;
    783 	struct vnode *tvp, *fvp, *tdvp, *fdvp;
    784 	struct componentname *tcnp, *fcnp;
    785 	int error;
    786 	struct lfs *fs;
    787 
    788 	fs = VTOI(ap->a_fdvp)->i_lfs;
    789 	tvp = ap->a_tvp;
    790 	tdvp = ap->a_tdvp;
    791 	tcnp = ap->a_tcnp;
    792 	fvp = ap->a_fvp;
    793 	fdvp = ap->a_fdvp;
    794 	fcnp = ap->a_fcnp;
    795 
    796 	/*
    797 	 * Check for cross-device rename.
    798 	 * If it is, we don't want to set dirops, just error out.
    799 	 * (In particular note that MARK_VNODE(tdvp) will do the wrong
    800 	 * thing (DTWT) on a cross-device rename.)
    801 	 *
    802 	 * Copied from ufs_rename.
    803 	 */
    804 	if ((fvp->v_mount != tdvp->v_mount) ||
    805 	    (tvp && (fvp->v_mount != tvp->v_mount))) {
    806 		error = EXDEV;
    807 		goto errout;
    808 	}
    809 
    810 	/*
    811 	 * Check to make sure we're not renaming a vnode onto itself
    812 	 * (deleting a hard link by renaming one name onto another);
    813 	 * if we are we can't recursively call VOP_REMOVE since that
    814 	 * would leave us with an unaccounted-for number of live dirops.
    815 	 *
    816 	 * Inline the relevant section of ufs_rename here, *before*
    817 	 * calling SET_DIROP_REMOVE.
    818 	 */
    819 	if (tvp && ((VTOI(tvp)->i_flags & (IMMUTABLE | APPEND)) ||
    820 	    (VTOI(tdvp)->i_flags & APPEND))) {
    821 		error = EPERM;
    822 		goto errout;
    823 	}
    824 	if (fvp == tvp) {
    825 		if (fvp->v_type == VDIR) {
    826 			error = EINVAL;
    827 			goto errout;
    828 		}
    829 
    830 		/* Release destination completely. */
    831 		VOP_ABORTOP(tdvp, tcnp);
    832 		vput(tdvp);
    833 		vput(tvp);
    834 
    835 		/* Delete source. */
    836 		vrele(fvp);
    837 		fcnp->cn_flags &= ~(MODMASK | SAVESTART);
    838 		fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
    839 		fcnp->cn_nameiop = DELETE;
    840 		if ((error = relookup(fdvp, &fvp, fcnp))){
    841 			/* relookup blew away fdvp */
    842 			return (error);
    843 		}
    844 		return (VOP_REMOVE(fdvp, fvp, fcnp));
    845 	}
    846 
    847 	if ((error = SET_DIROP_REMOVE(tdvp, tvp)) != 0)
    848 		goto errout;
    849 	MARK_VNODE(fdvp);
    850 	MARK_VNODE(fvp);
    851 
    852 	error = ufs_rename(ap);
    853 	UNMARK_VNODE(fdvp);
    854 	UNMARK_VNODE(fvp);
    855 	SET_ENDOP_REMOVE(fs, tdvp, tvp, "rename");
    856 	return (error);
    857 
    858     errout:
    859 	VOP_ABORTOP(tdvp, ap->a_tcnp); /* XXX, why not in NFS? */
    860 	if (tdvp == tvp)
    861 		vrele(tdvp);
    862 	else
    863 		vput(tdvp);
    864 	if (tvp)
    865 		vput(tvp);
    866 	VOP_ABORTOP(fdvp, ap->a_fcnp); /* XXX, why not in NFS? */
    867 	vrele(fdvp);
    868 	vrele(fvp);
    869 	return (error);
    870 }
    871 
    872 /* XXX hack to avoid calling ITIMES in getattr */
    873 int
    874 lfs_getattr(void *v)
    875 {
    876 	struct vop_getattr_args /* {
    877 		struct vnode *a_vp;
    878 		struct vattr *a_vap;
    879 		struct ucred *a_cred;
    880 		struct proc *a_p;
    881 	} */ *ap = v;
    882 	struct vnode *vp = ap->a_vp;
    883 	struct inode *ip = VTOI(vp);
    884 	struct vattr *vap = ap->a_vap;
    885 	struct lfs *fs = ip->i_lfs;
    886 	/*
    887 	 * Copy from inode table
    888 	 */
    889 	vap->va_fsid = ip->i_dev;
    890 	vap->va_fileid = ip->i_number;
    891 	vap->va_mode = ip->i_mode & ~IFMT;
    892 	vap->va_nlink = ip->i_nlink;
    893 	vap->va_uid = ip->i_uid;
    894 	vap->va_gid = ip->i_gid;
    895 	vap->va_rdev = (dev_t)ip->i_ffs1_rdev;
    896 	vap->va_size = vp->v_size;
    897 	vap->va_atime.tv_sec = ip->i_ffs1_atime;
    898 	vap->va_atime.tv_nsec = ip->i_ffs1_atimensec;
    899 	vap->va_mtime.tv_sec = ip->i_ffs1_mtime;
    900 	vap->va_mtime.tv_nsec = ip->i_ffs1_mtimensec;
    901 	vap->va_ctime.tv_sec = ip->i_ffs1_ctime;
    902 	vap->va_ctime.tv_nsec = ip->i_ffs1_ctimensec;
    903 	vap->va_flags = ip->i_flags;
    904 	vap->va_gen = ip->i_gen;
    905 	/* this doesn't belong here */
    906 	if (vp->v_type == VBLK)
    907 		vap->va_blocksize = BLKDEV_IOSIZE;
    908 	else if (vp->v_type == VCHR)
    909 		vap->va_blocksize = MAXBSIZE;
    910 	else
    911 		vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize;
    912 	vap->va_bytes = fsbtob(fs, (u_quad_t)ip->i_lfs_effnblks);
    913 	vap->va_type = vp->v_type;
    914 	vap->va_filerev = ip->i_modrev;
    915 	return (0);
    916 }
    917 
    918 /*
    919  * Check to make sure the inode blocks won't choke the buffer
    920  * cache, then call ufs_setattr as usual.
    921  */
    922 int
    923 lfs_setattr(void *v)
    924 {
    925 	struct vop_setattr_args /* {
    926 		struct vnode *a_vp;
    927 		struct vattr *a_vap;
    928 		struct ucred *a_cred;
    929 		struct proc *a_p;
    930 	} */ *ap = v;
    931 	struct vnode *vp = ap->a_vp;
    932 
    933 	lfs_check(vp, LFS_UNUSED_LBN, 0);
    934 	return ufs_setattr(v);
    935 }
    936 
    937 /*
    938  * Close called
    939  *
    940  * XXX -- we were using ufs_close, but since it updates the
    941  * times on the inode, we might need to bump the uinodes
    942  * count.
    943  */
    944 /* ARGSUSED */
    945 int
    946 lfs_close(void *v)
    947 {
    948 	struct vop_close_args /* {
    949 		struct vnode *a_vp;
    950 		int  a_fflag;
    951 		struct ucred *a_cred;
    952 		struct proc *a_p;
    953 	} */ *ap = v;
    954 	struct vnode *vp = ap->a_vp;
    955 	struct inode *ip = VTOI(vp);
    956 	struct timespec ts;
    957 
    958 	if (vp == ip->i_lfs->lfs_ivnode &&
    959 	    vp->v_mount->mnt_iflag & IMNT_UNMOUNT)
    960 		return 0;
    961 
    962 	if (vp->v_usecount > 1 && vp != ip->i_lfs->lfs_ivnode) {
    963 		TIMEVAL_TO_TIMESPEC(&time, &ts);
    964 		LFS_ITIMES(ip, &ts, &ts, &ts);
    965 	}
    966 	return (0);
    967 }
    968 
    969 /*
    970  * Close wrapper for special devices.
    971  *
    972  * Update the times on the inode then do device close.
    973  */
    974 int
    975 lfsspec_close(void *v)
    976 {
    977 	struct vop_close_args /* {
    978 		struct vnode	*a_vp;
    979 		int		a_fflag;
    980 		struct ucred	*a_cred;
    981 		struct proc	*a_p;
    982 	} */ *ap = v;
    983 	struct vnode	*vp;
    984 	struct inode	*ip;
    985 	struct timespec	ts;
    986 
    987 	vp = ap->a_vp;
    988 	ip = VTOI(vp);
    989 	if (vp->v_usecount > 1) {
    990 		TIMEVAL_TO_TIMESPEC(&time, &ts);
    991 		LFS_ITIMES(ip, &ts, &ts, &ts);
    992 	}
    993 	return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap));
    994 }
    995 
    996 /*
    997  * Close wrapper for fifos.
    998  *
    999  * Update the times on the inode then do device close.
   1000  */
   1001 int
   1002 lfsfifo_close(void *v)
   1003 {
   1004 	struct vop_close_args /* {
   1005 		struct vnode	*a_vp;
   1006 		int		a_fflag;
   1007 		struct ucred	*a_cred;
   1008 		struct proc	*a_p;
   1009 	} */ *ap = v;
   1010 	struct vnode	*vp;
   1011 	struct inode	*ip;
   1012 	struct timespec	ts;
   1013 
   1014 	vp = ap->a_vp;
   1015 	ip = VTOI(vp);
   1016 	if (ap->a_vp->v_usecount > 1) {
   1017 		TIMEVAL_TO_TIMESPEC(&time, &ts);
   1018 		LFS_ITIMES(ip, &ts, &ts, &ts);
   1019 	}
   1020 	return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap));
   1021 }
   1022 
   1023 /*
   1024  * Reclaim an inode so that it can be used for other purposes.
   1025  */
   1026 
   1027 int
   1028 lfs_reclaim(void *v)
   1029 {
   1030 	struct vop_reclaim_args /* {
   1031 		struct vnode *a_vp;
   1032 		struct proc *a_p;
   1033 	} */ *ap = v;
   1034 	struct vnode *vp = ap->a_vp;
   1035 	struct inode *ip = VTOI(vp);
   1036 	int error;
   1037 
   1038 	KASSERT(ip->i_nlink == ip->i_ffs_effnlink);
   1039 
   1040 	LFS_CLR_UINO(ip, IN_ALLMOD);
   1041 	if ((error = ufs_reclaim(vp, ap->a_p)))
   1042 		return (error);
   1043 	pool_put(&lfs_dinode_pool, ip->i_din.ffs1_din);
   1044 	pool_put(&lfs_inoext_pool, ip->inode_ext.lfs);
   1045 	ip->inode_ext.lfs = NULL;
   1046 	pool_put(&lfs_inode_pool, vp->v_data);
   1047 	vp->v_data = NULL;
   1048 	return (0);
   1049 }
   1050 
   1051 /*
   1052  * Read a block from a storage device.
   1053  * In order to avoid reading blocks that are in the process of being
   1054  * written by the cleaner---and hence are not mutexed by the normal
   1055  * buffer cache / page cache mechanisms---check for collisions before
   1056  * reading.
   1057  *
   1058  * We inline ufs_strategy to make sure that the VOP_BMAP occurs *before*
   1059  * the active cleaner test.
   1060  *
   1061  * XXX This code assumes that lfs_markv makes synchronous checkpoints.
   1062  */
   1063 int
   1064 lfs_strategy(void *v)
   1065 {
   1066 	struct vop_strategy_args /* {
   1067 		struct vnode *a_vp;
   1068 		struct buf *a_bp;
   1069 	} */ *ap = v;
   1070 	struct buf	*bp;
   1071 	struct lfs	*fs;
   1072 	struct vnode	*vp;
   1073 	struct inode	*ip;
   1074 	daddr_t		tbn;
   1075 	int		i, sn, error, slept;
   1076 
   1077 	bp = ap->a_bp;
   1078 	vp = ap->a_vp;
   1079 	ip = VTOI(vp);
   1080 	fs = ip->i_lfs;
   1081 
   1082 	/* lfs uses its strategy routine only for read */
   1083 	KASSERT(bp->b_flags & B_READ);
   1084 
   1085 	if (vp->v_type == VBLK || vp->v_type == VCHR)
   1086 		panic("lfs_strategy: spec");
   1087 	KASSERT(bp->b_bcount != 0);
   1088 	if (bp->b_blkno == bp->b_lblkno) {
   1089 		error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
   1090 				 NULL);
   1091 		if (error) {
   1092 			bp->b_error = error;
   1093 			bp->b_flags |= B_ERROR;
   1094 			biodone(bp);
   1095 			return (error);
   1096 		}
   1097 		if ((long)bp->b_blkno == -1) /* no valid data */
   1098 			clrbuf(bp);
   1099 	}
   1100 	if ((long)bp->b_blkno < 0) { /* block is not on disk */
   1101 		biodone(bp);
   1102 		return (0);
   1103 	}
   1104 
   1105 	slept = 1;
   1106 	simple_lock(&fs->lfs_interlock);
   1107 	while (slept && fs->lfs_seglock) {
   1108 		simple_unlock(&fs->lfs_interlock);
   1109 		/*
   1110 		 * Look through list of intervals.
   1111 		 * There will only be intervals to look through
   1112 		 * if the cleaner holds the seglock.
   1113 		 * Since the cleaner is synchronous, we can trust
   1114 		 * the list of intervals to be current.
   1115 		 */
   1116 		tbn = dbtofsb(fs, bp->b_blkno);
   1117 		sn = dtosn(fs, tbn);
   1118 		slept = 0;
   1119 		for (i = 0; i < fs->lfs_cleanind; i++) {
   1120 			if (sn == dtosn(fs, fs->lfs_cleanint[i]) &&
   1121 			    tbn >= fs->lfs_cleanint[i]) {
   1122 				DLOG((DLOG_CLEAN,
   1123 				      "lfs_strategy: ino %d lbn %" PRId64
   1124 				       " ind %d sn %d fsb %" PRIx32
   1125 				       " given sn %d fsb %" PRIx64 "\n",
   1126 					ip->i_number, bp->b_lblkno, i,
   1127 					dtosn(fs, fs->lfs_cleanint[i]),
   1128 					fs->lfs_cleanint[i], sn, tbn));
   1129 				DLOG((DLOG_CLEAN,
   1130 				      "lfs_strategy: sleeping on ino %d lbn %"
   1131 				      PRId64 "\n", ip->i_number, bp->b_lblkno));
   1132 				simple_lock(&fs->lfs_interlock);
   1133 				if (fs->lfs_seglock)
   1134 					ltsleep(&fs->lfs_seglock,
   1135 						(PRIBIO + 1) | PNORELOCK,
   1136 						"lfs_strategy", 0,
   1137 						&fs->lfs_interlock);
   1138 				/* Things may be different now; start over. */
   1139 				slept = 1;
   1140 				break;
   1141 			}
   1142 		}
   1143 		simple_lock(&fs->lfs_interlock);
   1144 	}
   1145 	simple_unlock(&fs->lfs_interlock);
   1146 
   1147 	vp = ip->i_devvp;
   1148 	VOP_STRATEGY(vp, bp);
   1149 	return (0);
   1150 }
   1151 
   1152 static void
   1153 lfs_flush_dirops(struct lfs *fs)
   1154 {
   1155 	struct inode *ip, *nip;
   1156 	struct vnode *vp;
   1157 	extern int lfs_dostats;
   1158 	struct segment *sp;
   1159 	int needunlock;
   1160 
   1161 	ASSERT_NO_SEGLOCK(fs);
   1162 
   1163 	if (fs->lfs_ronly)
   1164 		return;
   1165 
   1166 	simple_lock(&fs->lfs_interlock);
   1167 	if (TAILQ_FIRST(&fs->lfs_dchainhd) == NULL) {
   1168 		simple_unlock(&fs->lfs_interlock);
   1169 		return;
   1170 	} else
   1171 		simple_unlock(&fs->lfs_interlock);
   1172 
   1173 	if (lfs_dostats)
   1174 		++lfs_stats.flush_invoked;
   1175 
   1176 	/*
   1177 	 * Inline lfs_segwrite/lfs_writevnodes, but just for dirops.
   1178 	 * Technically this is a checkpoint (the on-disk state is valid)
   1179 	 * even though we are leaving out all the file data.
   1180 	 */
   1181 	lfs_imtime(fs);
   1182 	lfs_seglock(fs, SEGM_CKP);
   1183 	sp = fs->lfs_sp;
   1184 
   1185 	/*
   1186 	 * lfs_writevnodes, optimized to get dirops out of the way.
   1187 	 * Only write dirops, and don't flush files' pages, only
   1188 	 * blocks from the directories.
   1189 	 *
   1190 	 * We don't need to vref these files because they are
   1191 	 * dirops and so hold an extra reference until the
   1192 	 * segunlock clears them of that status.
   1193 	 *
   1194 	 * We don't need to check for IN_ADIROP because we know that
   1195 	 * no dirops are active.
   1196 	 *
   1197 	 */
   1198 	simple_lock(&fs->lfs_interlock);
   1199 	for (ip = TAILQ_FIRST(&fs->lfs_dchainhd); ip != NULL; ip = nip) {
   1200 		nip = TAILQ_NEXT(ip, i_lfs_dchain);
   1201 		simple_unlock(&fs->lfs_interlock);
   1202 		vp = ITOV(ip);
   1203 
   1204 		/*
   1205 		 * All writes to directories come from dirops; all
   1206 		 * writes to files' direct blocks go through the page
   1207 		 * cache, which we're not touching.  Reads to files
   1208 		 * and/or directories will not be affected by writing
   1209 		 * directory blocks, inodes, and file inodes.  So we don't
   1210 		 * really need to lock.  If we don't lock, though,
   1211 		 * make sure that we don't clear IN_MODIFIED
   1212 		 * unnecessarily.
   1213 		 */
   1214 		if (vp->v_flag & VXLOCK)
   1215 			continue;
   1216 		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
   1217 			needunlock = 1;
   1218 		} else {
   1219 			DLOG((DLOG_VNODE, "lfs_flush_dirops: flushing locked ino %d\n",
   1220 			       VTOI(vp)->i_number));
   1221 			needunlock = 0;
   1222 		}
   1223 		if (vp->v_type != VREG &&
   1224 		    ((ip->i_flag & IN_ALLMOD) || !VPISEMPTY(vp))) {
   1225 			lfs_writefile(fs, sp, vp);
   1226 			if (!VPISEMPTY(vp) && !WRITEINPROG(vp) &&
   1227 			    !(ip->i_flag & IN_ALLMOD)) {
   1228 				LFS_SET_UINO(ip, IN_MODIFIED);
   1229 			}
   1230 		}
   1231 		(void) lfs_writeinode(fs, sp, ip);
   1232 		if (needunlock)
   1233 			VOP_UNLOCK(vp, 0);
   1234 		else
   1235 			LFS_SET_UINO(ip, IN_MODIFIED);
   1236 		simple_lock(&fs->lfs_interlock);
   1237 	}
   1238 	simple_unlock(&fs->lfs_interlock);
   1239 	/* We've written all the dirops there are */
   1240 	((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
   1241 	(void) lfs_writeseg(fs, sp);
   1242 	lfs_segunlock(fs);
   1243 }
   1244 
   1245 /*
   1246  * Provide a fcntl interface to sys_lfs_{segwait,bmapv,markv}.
   1247  */
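/*
 * Usage sketch (illustrative only, assuming the LFCN* command values from
 * <ufs/lfs/lfs.h> and a descriptor open on the filesystem root or the
 * Ifile): a userland caller such as the cleaner would reach this code
 * roughly as follows:
 *
 *	struct timeval tv = { 5, 0 };
 *	int fd = open("/mnt/lfs", O_RDONLY);
 *	if (fd != -1 && fcntl(fd, LFCNSEGWAIT, &tv) == -1)
 *		warn("LFCNSEGWAIT");
 *
 * The command is dispatched through VOP_FCNTL() to this function, which
 * only honors LFS-specific commands on the root inode or the Ifile.
 */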
   1248 int
   1249 lfs_fcntl(void *v)
   1250 {
   1251 	struct vop_fcntl_args /* {
   1252 		struct vnode *a_vp;
   1253 		u_long a_command;
   1254 		caddr_t  a_data;
   1255 		int  a_fflag;
   1256 		struct ucred *a_cred;
   1257 		struct proc *a_p;
   1258 	} */ *ap = v;
   1259 	struct timeval *tvp;
   1260 	BLOCK_INFO *blkiov;
   1261 	CLEANERINFO *cip;
   1262 	int blkcnt, error, oclean;
   1263 	struct lfs_fcntl_markv blkvp;
   1264 	fsid_t *fsidp;
   1265 	struct lfs *fs;
   1266 	struct buf *bp;
   1267 	fhandle_t *fhp;
   1268 	daddr_t off;
   1269 
   1270 	/* Only respect LFS fcntls on fs root or Ifile */
   1271 	if (VTOI(ap->a_vp)->i_number != ROOTINO &&
   1272 	    VTOI(ap->a_vp)->i_number != LFS_IFILE_INUM) {
   1273 		return ufs_fcntl(v);
   1274 	}
   1275 
   1276 	/* Avoid locking a draining lock */
   1277 	if (ap->a_vp->v_mount->mnt_iflag & IMNT_UNMOUNT) {
   1278 		return ESHUTDOWN;
   1279 	}
   1280 
   1281 	fs = VTOI(ap->a_vp)->i_lfs;
   1282 	fsidp = &ap->a_vp->v_mount->mnt_stat.f_fsidx;
   1283 
   1284 	switch (ap->a_command) {
   1285 	    case LFCNSEGWAITALL:
   1286 	    case LFCNSEGWAITALL_COMPAT:
   1287 		fsidp = NULL;
   1288 		/* FALLTHROUGH */
   1289 	    case LFCNSEGWAIT:
   1290 	    case LFCNSEGWAIT_COMPAT:
   1291 		tvp = (struct timeval *)ap->a_data;
   1292 		simple_lock(&fs->lfs_interlock);
   1293 		++fs->lfs_sleepers;
   1294 		simple_unlock(&fs->lfs_interlock);
   1295 		VOP_UNLOCK(ap->a_vp, 0);
   1296 
   1297 		error = lfs_segwait(fsidp, tvp);
   1298 
   1299 		VOP_LOCK(ap->a_vp, LK_EXCLUSIVE);
   1300 		simple_lock(&fs->lfs_interlock);
   1301 		if (--fs->lfs_sleepers == 0)
   1302 			wakeup(&fs->lfs_sleepers);
   1303 		simple_unlock(&fs->lfs_interlock);
   1304 		return error;
   1305 
   1306 	    case LFCNBMAPV:
   1307 	    case LFCNMARKV:
   1308 		if ((error = suser(ap->a_p->p_ucred, &ap->a_p->p_acflag)) != 0)
   1309 			return (error);
   1310 		blkvp = *(struct lfs_fcntl_markv *)ap->a_data;
   1311 
   1312 		blkcnt = blkvp.blkcnt;
   1313 		if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
   1314 			return (EINVAL);
   1315 		blkiov = lfs_malloc(fs, blkcnt * sizeof(BLOCK_INFO), LFS_NB_BLKIOV);
   1316 		if ((error = copyin(blkvp.blkiov, blkiov,
   1317 		     blkcnt * sizeof(BLOCK_INFO))) != 0) {
   1318 			lfs_free(fs, blkiov, LFS_NB_BLKIOV);
   1319 			return error;
   1320 		}
   1321 
   1322 		simple_lock(&fs->lfs_interlock);
   1323 		++fs->lfs_sleepers;
   1324 		simple_unlock(&fs->lfs_interlock);
   1325 		VOP_UNLOCK(ap->a_vp, 0);
   1326 		if (ap->a_command == LFCNBMAPV)
   1327 			error = lfs_bmapv(ap->a_p, fsidp, blkiov, blkcnt);
   1328 		else /* LFCNMARKV */
   1329 			error = lfs_markv(ap->a_p, fsidp, blkiov, blkcnt);
   1330 		if (error == 0)
   1331 			error = copyout(blkiov, blkvp.blkiov,
   1332 					blkcnt * sizeof(BLOCK_INFO));
   1333 		VOP_LOCK(ap->a_vp, LK_EXCLUSIVE);
   1334 		simple_lock(&fs->lfs_interlock);
   1335 		if (--fs->lfs_sleepers == 0)
   1336 			wakeup(&fs->lfs_sleepers);
   1337 		simple_unlock(&fs->lfs_interlock);
   1338 		lfs_free(fs, blkiov, LFS_NB_BLKIOV);
   1339 		return error;
   1340 
   1341 	    case LFCNRECLAIM:
   1342 		/*
   1343 		 * Flush dirops and write Ifile, allowing empty segments
   1344 		 * to be immediately reclaimed.
   1345 		 */
   1346 		VOP_UNLOCK(ap->a_vp, 0);
   1347 		lfs_writer_enter(fs, "pndirop");
   1348 		off = fs->lfs_offset;
   1349 		lfs_seglock(fs, SEGM_FORCE_CKP | SEGM_CKP);
   1350 		lfs_flush_dirops(fs);
   1351 		LFS_CLEANERINFO(cip, fs, bp);
   1352 		oclean = cip->clean;
   1353 		LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);
   1354 		lfs_segwrite(ap->a_vp->v_mount, SEGM_FORCE_CKP);
   1355 		lfs_segunlock(fs);
   1356 		lfs_writer_leave(fs);
   1357 
   1358 #ifdef DEBUG
   1359 		LFS_CLEANERINFO(cip, fs, bp);
   1360 		DLOG((DLOG_CLEAN, "lfs_fcntl: reclaim wrote %" PRId64
   1361 		      " blocks, cleaned %" PRId32 " segments (activesb %d)\n",
   1362 		      fs->lfs_offset - off, cip->clean - oclean,
   1363 		      fs->lfs_activesb));
   1364 		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
   1365 #endif
   1366 
   1367 		VOP_LOCK(ap->a_vp, LK_EXCLUSIVE);
   1368 		return 0;
   1369 
   1370 	    case LFCNIFILEFH:
   1371 		/* Return the filehandle of the Ifile */
   1372 		if ((error = suser(ap->a_p->p_ucred, &ap->a_p->p_acflag)) != 0)
   1373 			return (error);
   1374 		fhp = (struct fhandle *)ap->a_data;
   1375 		fhp->fh_fsid = *fsidp;
   1376 		return lfs_vptofh(fs->lfs_ivnode, &(fhp->fh_fid));
   1377 
   1378 	    default:
   1379 		return ufs_fcntl(v);
   1380 	}
   1381 	return 0;
   1382 }
   1383 
   1384 int
   1385 lfs_getpages(void *v)
   1386 {
   1387 	struct vop_getpages_args /* {
   1388 		struct vnode *a_vp;
   1389 		voff_t a_offset;
   1390 		struct vm_page **a_m;
   1391 		int *a_count;
   1392 		int a_centeridx;
   1393 		vm_prot_t a_access_type;
   1394 		int a_advice;
   1395 		int a_flags;
   1396 	} */ *ap = v;
   1397 
   1398 	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM &&
   1399 	    (ap->a_access_type & VM_PROT_WRITE) != 0) {
   1400 		return EPERM;
   1401 	}
   1402 	if ((ap->a_access_type & VM_PROT_WRITE) != 0) {
   1403 		LFS_SET_UINO(VTOI(ap->a_vp), IN_MODIFIED);
   1404 	}
   1405 
   1406 	/*
   1407 	 * we're relying on the fact that genfs_getpages() always read in
   1408 	 * we're relying on the fact that genfs_getpages() always reads in
   1409 	 */
   1410 	return genfs_getpages(v);
   1411 }
   1412 
   1413 /*
   1414  * Make sure that for all pages in every block in the given range,
   1415  * either all are dirty or all are clean.  If any of the pages
   1416  * we've seen so far are dirty, put the vnode on the paging chain,
   1417  * and mark it IN_PAGING.
   1418  *
   1419  * If checkfirst != 0, don't check all the pages but return at the
   1420  * first dirty page.
   1421  */
   1422 static int
   1423 check_dirty(struct lfs *fs, struct vnode *vp,
   1424 	    off_t startoffset, off_t endoffset, off_t blkeof,
   1425 	    int flags, int checkfirst)
   1426 {
   1427 	int by_list;
   1428 	struct vm_page *curpg = NULL; /* XXX: gcc */
   1429 	struct vm_page *pgs[MAXBSIZE / PAGE_SIZE], *pg;
   1430 	off_t soff = 0; /* XXX: gcc */
   1431 	voff_t off;
   1432 	int i;
   1433 	int nonexistent;
   1434 	int any_dirty;	/* number of dirty pages */
   1435 	int dirty;	/* number of dirty pages in a block */
   1436 	int tdirty;
   1437 	int pages_per_block = fs->lfs_bsize >> PAGE_SHIFT;
   1438 
   1439 	ASSERT_MAYBE_SEGLOCK(fs);
   1440   top:
   1441 	by_list = (vp->v_uobj.uo_npages <=
   1442 		   ((endoffset - startoffset) >> PAGE_SHIFT) *
   1443 		   UVM_PAGE_HASH_PENALTY);
   1444 	any_dirty = 0;
   1445 
   1446 	if (by_list) {
   1447 		curpg = TAILQ_FIRST(&vp->v_uobj.memq);
   1448 	} else {
   1449 		soff = startoffset;
   1450 	}
   1451 	while (by_list || soff < MIN(blkeof, endoffset)) {
   1452 		if (by_list) {
   1453 			/*
   1454 			 * Find the first page in a block.  Skip
   1455 			 * blocks outside our area of interest or beyond
   1456 			 * the end of file.
   1457 			 */
   1458 			if (pages_per_block > 1) {
   1459 				while (curpg &&
   1460 				       ((curpg->offset & fs->lfs_bmask) ||
   1461 					curpg->offset >= vp->v_size ||
   1462 					curpg->offset >= endoffset))
   1463 					curpg = TAILQ_NEXT(curpg, listq);
   1464 			}
   1465 			if (curpg == NULL)
   1466 				break;
   1467 			soff = curpg->offset;
   1468 		}
   1469 
   1470 		/*
   1471 		 * Mark all pages in extended range busy; find out if any
   1472 		 * of them are dirty.
   1473 		 */
   1474 		nonexistent = dirty = 0;
   1475 		for (i = 0; i == 0 || i < pages_per_block; i++) {
   1476 			if (by_list && pages_per_block <= 1) {
   1477 				pgs[i] = pg = curpg;
   1478 			} else {
   1479 				off = soff + (i << PAGE_SHIFT);
   1480 				pgs[i] = pg = uvm_pagelookup(&vp->v_uobj, off);
   1481 				if (pg == NULL) {
   1482 					++nonexistent;
   1483 					continue;
   1484 				}
   1485 			}
   1486 			KASSERT(pg != NULL);
   1487 			while (pg->flags & PG_BUSY) {
   1488 				pg->flags |= PG_WANTED;
   1489 				UVM_UNLOCK_AND_WAIT(pg, &vp->v_interlock, 0,
   1490 						    "lfsput", 0);
   1491 				simple_lock(&vp->v_interlock);
   1492 				if (by_list) {
   1493 					if (i > 0)
   1494 						uvm_page_unbusy(pgs, i);
   1495 					goto top;
   1496 				}
   1497 			}
   1498 			pg->flags |= PG_BUSY;
   1499 			UVM_PAGE_OWN(pg, "lfs_putpages");
   1500 
   1501 			pmap_page_protect(pg, VM_PROT_NONE);
   1502 			tdirty = (pmap_clear_modify(pg) ||
   1503 				  (pg->flags & PG_CLEAN) == 0);
   1504 			dirty += tdirty;
   1505 		}
   1506 		if (pages_per_block > 0 && nonexistent >= pages_per_block) {
   1507 			if (by_list) {
   1508 				curpg = TAILQ_NEXT(curpg, listq);
   1509 			} else {
   1510 				soff += fs->lfs_bsize;
   1511 			}
   1512 			continue;
   1513 		}
   1514 
   1515 		any_dirty += dirty;
   1516 		KASSERT(nonexistent == 0);
   1517 
   1518 		/*
   1519 		 * If any are dirty make all dirty; unbusy them,
   1520 		 * but if we were asked to clean, wire them so that
   1521 		 * the pagedaemon doesn't bother us about them while
   1522 		 * they're on their way to disk.
   1523 		 */
   1524 		for (i = 0; i == 0 || i < pages_per_block; i++) {
   1525 			pg = pgs[i];
   1526 			KASSERT(!((pg->flags & PG_CLEAN) && (pg->flags & PG_DELWRI)));
   1527 			if (dirty) {
   1528 				pg->flags &= ~PG_CLEAN;
   1529 				if (flags & PGO_FREE) {
   1530 					/* XXXUBC need better way to update */
   1531 					simple_lock(&lfs_subsys_lock);
   1532 					lfs_subsys_pages += MIN(1, pages_per_block);
   1533 					simple_unlock(&lfs_subsys_lock);
   1534 					/*
   1535 					 * Wire the page so that
   1536 					 * pdaemon doesn't see it again.
   1537 					 */
   1538 					uvm_lock_pageq();
   1539 					uvm_pagewire(pg);
   1540 					uvm_unlock_pageq();
   1541 
   1542 					/* Suspended write flag */
   1543 					pg->flags |= PG_DELWRI;
   1544 				}
   1545 			}
   1546 			if (pg->flags & PG_WANTED)
   1547 				wakeup(pg);
   1548 			pg->flags &= ~(PG_WANTED|PG_BUSY);
   1549 			UVM_PAGE_OWN(pg, NULL);
   1550 		}
   1551 
   1552 		if (checkfirst && any_dirty)
   1553 			break;
   1554 
   1555 		if (by_list) {
   1556 			curpg = TAILQ_NEXT(curpg, listq);
   1557 		} else {
   1558 			soff += MAX(PAGE_SIZE, fs->lfs_bsize);
   1559 		}
   1560 	}
   1561 
   1562 	/*
   1563 	 * If any pages were dirty, mark this inode as "pageout requested",
   1564 	 * and put it on the paging queue.
   1565 	 * XXXUBC locking (check locking on dchainhd too)
   1566 	 */
   1567 #ifdef notyet
   1568 	if (any_dirty) {
   1569 		if (!(ip->i_flags & IN_PAGING)) {
   1570 			ip->i_flags |= IN_PAGING;
   1571 			simple_lock(&fs->lfs_interlock);
   1572 			TAILQ_INSERT_TAIL(&fs->lfs_pchainhd, ip, i_lfs_pchain);
   1573 			simple_unlock(&fs->lfs_interlock);
   1574 		}
   1575 	}
   1576 #endif
   1577 	return any_dirty;
   1578 }
   1579 
/*
 * lfs_putpages functions like genfs_putpages except that
 *
 * (1) It needs to bounds-check the incoming requests to ensure that
 *     they are block-aligned; if they are not, it expands the range
 *     and does the right thing when, e.g., the requested range is
 *     clean but the expanded range is dirty.
 * (2) It needs to explicitly send blocks to be written when it is done.
 *     VOP_PUTPAGES is never called with the seglock held, so
 *     we simply take the seglock and let lfs_segunlock wait for us.
 *     XXX Actually we can be called with the seglock held, if we have
 *     XXX to flush a vnode while lfs_markv is in operation.  As of this
 *     XXX writing we panic in this case.
 *
 * Assumptions:
 *
 * (1) The caller does not hold any pages in this vnode busy.  If it does,
 *     there is a danger that when we expand the page range and busy the
 *     pages we will deadlock.
 * (2) We are called with vp->v_interlock held; we must return with it
 *     released.
 * (3) We don't absolutely have to free pages right away, provided that
 *     the request does not have PGO_SYNCIO.  When the pagedaemon gives
 *     us a request with PGO_FREE, we take the pages out of the paging
 *     queue and wake up the writer, which will handle freeing them for us.
 *
 *     We ensure that for any filesystem block, all pages for that
 *     block are either resident or not, even if those pages are higher
 *     than EOF; that means that we will be getting requests to free
 *     "unused" pages above EOF all the time, and should ignore them.
 *
 * XXX note that we're (ab)using PGO_LOCKED as "seglock held".
 */
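/*
 * An illustrative example of point (1), not taken from the original
 * sources: with an 8KB block size, a request to put pages [4096, 12288)
 * is expanded to the block-aligned range [0, 16384), so a clean page at
 * offset 0 may be written out because its blockmate at 4096 is dirty.
 */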

int
lfs_putpages(void *v)
{
	int error;
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp;
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	off_t origoffset, startoffset, endoffset, origendoffset, blkeof;
	off_t off, max_endoffset;
	int s;
	boolean_t seglocked, sync, pagedaemon;
	struct vm_page *pg;
	UVMHIST_FUNC("lfs_putpages"); UVMHIST_CALLED(ubchist);

	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_lfs;
	sync = (ap->a_flags & PGO_SYNCIO) != 0;
	pagedaemon = (curproc == uvm.pagedaemon_proc);

	/* Putpages does nothing for metadata. */
	if (vp == fs->lfs_ivnode || vp->v_type != VREG) {
		simple_unlock(&vp->v_interlock);
		return 0;
	}

	/*
	 * If there are no pages, don't do anything.
	 */
	if (vp->v_uobj.uo_npages == 0) {
		s = splbio();
		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
		    (vp->v_flag & VONWORKLST)) {
			vp->v_flag &= ~VONWORKLST;
			LIST_REMOVE(vp, v_synclist);
		}
		splx(s);
		simple_unlock(&vp->v_interlock);
		return 0;
	}

	blkeof = blkroundup(fs, ip->i_size);

	/*
	 * Ignore requests to free pages past EOF but in the same block
	 * as EOF, unless the request is synchronous. (XXX why sync?)
	 * XXXUBC Make these pages look "active" so the pagedaemon won't
	 * XXXUBC bother us with them again.
	 */
	if (!sync && ap->a_offlo >= ip->i_size && ap->a_offlo < blkeof) {
		origoffset = ap->a_offlo;
		for (off = origoffset; off < blkeof; off += fs->lfs_bsize) {
			pg = uvm_pagelookup(&vp->v_uobj, off);
			KASSERT(pg != NULL);
			while (pg->flags & PG_BUSY) {
				pg->flags |= PG_WANTED;
				UVM_UNLOCK_AND_WAIT(pg, &vp->v_interlock, 0,
						    "lfsput2", 0);
				simple_lock(&vp->v_interlock);
			}
			uvm_lock_pageq();
			uvm_pageactivate(pg);
			uvm_unlock_pageq();
		}
		ap->a_offlo = blkeof;
		if (ap->a_offhi > 0 && ap->a_offhi <= ap->a_offlo) {
			simple_unlock(&vp->v_interlock);
			return 0;
		}
	}

	/*
	 * Extend page range to start and end at block boundaries.
	 * (For the purposes of VOP_PUTPAGES, fragments don't exist.)
	 */
	origoffset = ap->a_offlo;
	origendoffset = ap->a_offhi;
	startoffset = origoffset & ~(fs->lfs_bmask);
	max_endoffset = (trunc_page(LLONG_MAX) >> fs->lfs_bshift)
					       << fs->lfs_bshift;

	if (origendoffset == 0 || ap->a_flags & PGO_ALLPAGES) {
		endoffset = max_endoffset;
		origendoffset = endoffset;
	} else {
		origendoffset = round_page(ap->a_offhi);
		endoffset = round_page(blkroundup(fs, origendoffset));
	}

	KASSERT(startoffset > 0 || endoffset >= startoffset);
	if (startoffset == endoffset) {
		/* Nothing to do, why were we called? */
		simple_unlock(&vp->v_interlock);
		DLOG((DLOG_PAGE, "lfs_putpages: startoffset = endoffset = %"
		      PRId64 "\n", startoffset));
		return 0;
	}

	ap->a_offlo = startoffset;
	ap->a_offhi = endoffset;

	if (!(ap->a_flags & PGO_CLEANIT))
		return genfs_putpages(v);

	/*
	 * If there is more than one page per block, we don't want
	 * to get caught locking them backwards; so set PGO_BUSYFAIL
	 * to avoid deadlocks.
	 */
	ap->a_flags |= PGO_BUSYFAIL;

	do {
		int r;

		/* If no pages are dirty, we can just use genfs_putpages. */
		if (check_dirty(fs, vp, startoffset, endoffset, blkeof,
				ap->a_flags, 1) != 0)
			break;

		/*
		 * Sometimes pages are dirtied between the time that
		 * we check and the time we try to clean them.
		 * Instruct lfs_gop_write to return EDEADLK in this case
		 * so we can write them properly.
		 */
		ip->i_lfs_iflags |= LFSI_NO_GOP_WRITE;
		r = genfs_putpages(v);
		ip->i_lfs_iflags &= ~LFSI_NO_GOP_WRITE;
		if (r != EDEADLK)
			return r;

		/* Start over. */
		preempt(1);
		simple_lock(&vp->v_interlock);
	} while (1);

	/*
	 * Dirty and asked to clean.
	 *
	 * Pagedaemon can't actually write LFS pages; wake up
	 * the writer to take care of that.  The writer will
	 * notice the pager inode queue and act on that.
	 */
	if (pagedaemon) {
		simple_lock(&fs->lfs_interlock);
		++fs->lfs_pdflush;
		simple_unlock(&fs->lfs_interlock);
		wakeup(&lfs_writer_daemon);
		simple_unlock(&vp->v_interlock);
		return EWOULDBLOCK;
	}

	/*
	 * If this is a file created in a recent dirop, we can't flush its
	 * inode until the dirop is complete.  Drain dirops, then flush the
	 * filesystem (taking care of any other pending dirops while we're
	 * at it).
	 */
	if ((ap->a_flags & (PGO_CLEANIT|PGO_LOCKED)) == PGO_CLEANIT &&
	    (vp->v_flag & VDIROP)) {
		int locked;

		DLOG((DLOG_PAGE, "lfs_putpages: flushing VDIROP\n"));
		locked = VOP_ISLOCKED(vp) && /* XXX */
			vp->v_lock.lk_lockholder == curproc->p_pid;
		simple_unlock(&vp->v_interlock);
		lfs_writer_enter(fs, "ppdirop");
		if (locked)
			VOP_UNLOCK(vp, 0);

		simple_lock(&fs->lfs_interlock);
		lfs_flush_fs(fs, sync ? SEGM_SYNC : 0);
		simple_unlock(&fs->lfs_interlock);

		simple_lock(&vp->v_interlock);
		if (locked)
			VOP_LOCK(vp, LK_EXCLUSIVE);
		lfs_writer_leave(fs);

		/* XXX the flush should have taken care of this one too! */
	}

	/*
	 * This is it.  We are going to write some pages.  From here on
	 * down it's all just mechanics.
	 *
	 * Don't let genfs_putpages wait; lfs_segunlock will wait for us.
	 */
	ap->a_flags &= ~PGO_SYNCIO;

	/*
	 * If we've already got the seglock, flush the node and return.
	 * The FIP has already been set up for us by lfs_writefile,
	 * and FIP cleanup and lfs_updatemeta will also be done there,
	 * unless genfs_putpages returns EDEADLK; then we must flush
	 * what we have, and correct FIP and segment header accounting.
	 */

	seglocked = (ap->a_flags & PGO_LOCKED) != 0;
	if (!seglocked) {
		simple_unlock(&vp->v_interlock);
		/*
		 * Take the seglock, because we are going to be writing pages.
		 */
		error = lfs_seglock(fs, SEGM_PROT | (sync ? SEGM_SYNC : 0));
		if (error != 0)
			return error;
		simple_lock(&vp->v_interlock);
	}

	/*
	 * VOP_PUTPAGES should not be called while holding the seglock.
	 * XXXUBC fix lfs_markv, or do this properly.
	 */
#ifdef notyet
	KASSERT(fs->lfs_seglock == 1);
#endif /* notyet */

	/*
	 * We assume we're being called with sp->fip pointing at blank space.
	 * Account for a new FIP in the segment header, and set sp->vp.
	 * (This should duplicate the setup at the top of lfs_writefile().)
	 */
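	/*
	 * (sum_bytes_left counts the bytes still free in the segment
	 * summary; each file charged to the segment costs FINFOSIZE for
	 * its FINFO header plus sizeof(int32_t) per gathered block.)
	 */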
	sp = fs->lfs_sp;
	if (!seglocked) {
		if (sp->seg_bytes_left < fs->lfs_bsize ||
		    sp->sum_bytes_left < sizeof(struct finfo))
			(void) lfs_writeseg(fs, fs->lfs_sp);

		sp->sum_bytes_left -= FINFOSIZE;
		++((SEGSUM *)(sp->segsum))->ss_nfinfo;
	}
	KASSERT(sp->vp == NULL);
	sp->vp = vp;

	if (!seglocked) {
		if (vp->v_flag & VDIROP)
			((SEGSUM *)(sp->segsum))->ss_flags |= (SS_DIROP|SS_CONT);
	}

	sp->fip->fi_nblocks = 0;
	sp->fip->fi_ino = ip->i_number;
	sp->fip->fi_version = ip->i_gen;

	/*
	 * Loop through genfs_putpages until all pages are gathered.
	 * genfs_putpages() drops the interlock, so reacquire it if
	 * necessary.  Whenever we lose the interlock we also have to
	 * rerun check_dirty.
	 */
again:
	check_dirty(fs, vp, startoffset, endoffset, blkeof, ap->a_flags, 0);

	if ((error = genfs_putpages(v)) == EDEADLK) {
		DLOG((DLOG_PAGE, "lfs_putpages: genfs_putpages returned"
		      " EDEADLK [2] ino %d off %x (seg %d)\n",
		      ip->i_number, fs->lfs_offset,
		      dtosn(fs, fs->lfs_offset)));
		/* If there is nothing to write, short-circuit */
		if (sp->cbpp - sp->bpp > 1) {
			/* Write gathered pages */
			lfs_updatemeta(sp);
			(void) lfs_writeseg(fs, sp);

			/*
			 * Reinitialize brand new FIP and add us to it.
			 * (This should duplicate the fixup in
			 * lfs_gatherpages().)
			 */
			KASSERT(sp->vp == vp);
			sp->fip->fi_version = ip->i_gen;
			sp->fip->fi_ino = ip->i_number;
			/* Add us to the new segment summary. */
			++((SEGSUM *)(sp->segsum))->ss_nfinfo;
			sp->sum_bytes_left -= FINFOSIZE;
		}

		/* Give the write a chance to complete */
		preempt(1);

		/* We've lost the interlock.  Start over. */
		simple_lock(&vp->v_interlock);
		goto again;
	}

	KASSERT(sp->vp == vp);
	if (!seglocked) {
		sp->vp = NULL; /* XXX lfs_gather below will set this */

		/* Write indirect blocks as well */
		lfs_gather(fs, fs->lfs_sp, vp, lfs_match_indir);
		lfs_gather(fs, fs->lfs_sp, vp, lfs_match_dindir);
		lfs_gather(fs, fs->lfs_sp, vp, lfs_match_tindir);

		KASSERT(sp->vp == NULL);
		sp->vp = vp;
	}

	/*
	 * Blocks are now gathered into a segment waiting to be written.
	 * All that's left to do is update metadata, and write them.
	 */
	lfs_updatemeta(sp);
	KASSERT(sp->vp == vp);
	sp->vp = NULL;

	if (seglocked) {
		/* We're called by lfs_writefile. */
		return error;
	}

	/*
	 * Clean up FIP, since we're done writing this file.
	 * This should duplicate cleanup at the end of lfs_writefile().
	 */
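	/*
	 * (A FINFO is a fixed-size header (FINFOSIZE) followed by
	 * fi_nblocks 32-bit block numbers, so the next FINFO begins
	 * that many bytes past the current one; an empty FINFO is
	 * instead backed out of the segment summary accounting.)
	 */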
	if (sp->fip->fi_nblocks != 0) {
		sp->fip = (FINFO*)((caddr_t)sp->fip + FINFOSIZE +
			sizeof(int32_t) * sp->fip->fi_nblocks);
		sp->start_lbp = &sp->fip->fi_blocks[0];
	} else {
		sp->sum_bytes_left += FINFOSIZE;
		--((SEGSUM *)(sp->segsum))->ss_nfinfo;
	}
	lfs_writeseg(fs, fs->lfs_sp);

	/*
	 * XXX - with the malloc/copy writeseg, the pages are freed by now
	 * even if we don't wait (e.g. if we hold a nested lock).  This
	 * will not be true if we stop using malloc/copy.
	 */
	KASSERT(fs->lfs_sp->seg_flags & SEGM_PROT);
	lfs_segunlock(fs);

	/*
	 * Wait for v_numoutput to drop to zero.  The seglock should
	 * take care of this, but there is a slight possibility that
	 * aiodoned might not have got around to our buffers yet.
	 */
	if (sync) {
		int s;

		s = splbio();
		simple_lock(&global_v_numoutput_slock);
		while (vp->v_numoutput > 0) {
			DLOG((DLOG_PAGE, "lfs_putpages: ino %d sleeping on"
			      " num %d\n", ip->i_number, vp->v_numoutput));
			vp->v_flag |= VBWAIT;
			ltsleep(&vp->v_numoutput, PRIBIO + 1, "lfs_vn", 0,
			    &global_v_numoutput_slock);
		}
		simple_unlock(&global_v_numoutput_slock);
		splx(s);
	}
	return error;
}

/*
 * Return the last logical file offset that should be written for this file
 * if we're doing a write that ends at "size".  If writing, we need to know
 * about sizes on disk, i.e. fragments if there are any; if reading, we need
 * to know about entire blocks.
 */
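/*
 * An illustrative example, not from the original sources, assuming 8KB
 * blocks and 1KB fragments: when GOP_SIZE_MEM is clear and size 5000
 * lands in a direct block at or past the block holding the current EOF,
 * *eobp is the fragment boundary 5120; otherwise it is the full block
 * boundary 8192.
 */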
void
lfs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
{
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;
	daddr_t olbn, nlbn;

	KASSERT(flags & (GOP_SIZE_READ | GOP_SIZE_WRITE));
	KASSERT((flags & (GOP_SIZE_READ | GOP_SIZE_WRITE))
		!= (GOP_SIZE_READ | GOP_SIZE_WRITE));

	olbn = lblkno(fs, ip->i_size);
	nlbn = lblkno(fs, size);
	if (!(flags & GOP_SIZE_MEM) && nlbn < NDADDR && olbn <= nlbn) {
		*eobp = fragroundup(fs, size);
	} else {
		*eobp = blkroundup(fs, size);
	}
}

#ifdef DEBUG
void lfs_dump_vop(void *);

void
lfs_dump_vop(void *v)
{
	struct vop_putpages_args /* {
		struct vnode *a_vp;
		voff_t a_offlo;
		voff_t a_offhi;
		int a_flags;
	} */ *ap = v;

#ifdef DDB
	vfs_vnode_print(ap->a_vp, 0, printf);
#endif
	lfs_dump_dinode(VTOI(ap->a_vp)->i_din.ffs1_din);
}
#endif

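/*
 * lfs_mmap: refuse to map the ifile, whose contents are maintained by
 * the filesystem code itself; defer everything else to ufs_mmap().
 */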
int
lfs_mmap(void *v)
{
	struct vop_mmap_args /* {
		const struct vnodeop_desc *a_desc;
		struct vnode *a_vp;
		int a_fflags;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;

	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM)
		return EOPNOTSUPP;
	return ufs_mmap(v);
}