/*	$NetBSD: lfs_vnops.c,v 1.143 2005/04/14 00:58:26 perseant Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant (at) hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1986, 1989, 1991, 1993, 1995
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_vnops.c	8.13 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.143 2005/04/14 00:58:26 perseant Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/resourcevar.h>
#include <sys/kernel.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/mount.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/signalvar.h>

#include <miscfs/fifofs/fifo.h>
#include <miscfs/genfs/genfs.h>
#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pmap.h>
#include <uvm/uvm_stat.h>
#include <uvm/uvm_pager.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

extern pid_t lfs_writer_daemon;

/* Global vfs data structures for lfs. */
int (**lfs_vnodeop_p)(void *);
const struct vnodeopv_entry_desc lfs_vnodeop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, ufs_lookup },		/* lookup */
	{ &vop_create_desc, lfs_create },		/* create */
	{ &vop_whiteout_desc, ufs_whiteout },		/* whiteout */
	{ &vop_mknod_desc, lfs_mknod },			/* mknod */
	{ &vop_open_desc, ufs_open },			/* open */
	{ &vop_close_desc, lfs_close },			/* close */
	{ &vop_access_desc, ufs_access },		/* access */
	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
	{ &vop_read_desc, lfs_read },			/* read */
	{ &vop_write_desc, lfs_write },			/* write */
	{ &vop_lease_desc, ufs_lease_check },		/* lease */
	{ &vop_ioctl_desc, ufs_ioctl },			/* ioctl */
	{ &vop_fcntl_desc, lfs_fcntl },			/* fcntl */
	{ &vop_poll_desc, ufs_poll },			/* poll */
	{ &vop_kqfilter_desc, genfs_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, ufs_revoke },		/* revoke */
	{ &vop_mmap_desc, lfs_mmap },			/* mmap */
	{ &vop_fsync_desc, lfs_fsync },			/* fsync */
	{ &vop_seek_desc, ufs_seek },			/* seek */
	{ &vop_remove_desc, lfs_remove },		/* remove */
	{ &vop_link_desc, lfs_link },			/* link */
	{ &vop_rename_desc, lfs_rename },		/* rename */
	{ &vop_mkdir_desc, lfs_mkdir },			/* mkdir */
	{ &vop_rmdir_desc, lfs_rmdir },			/* rmdir */
	{ &vop_symlink_desc, lfs_symlink },		/* symlink */
	{ &vop_readdir_desc, ufs_readdir },		/* readdir */
	{ &vop_readlink_desc, ufs_readlink },		/* readlink */
	{ &vop_abortop_desc, ufs_abortop },		/* abortop */
	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, ufs_lock },			/* lock */
	{ &vop_unlock_desc, ufs_unlock },		/* unlock */
	{ &vop_bmap_desc, ufs_bmap },			/* bmap */
	{ &vop_strategy_desc, lfs_strategy },		/* strategy */
	{ &vop_print_desc, ufs_print },			/* print */
	{ &vop_islocked_desc, ufs_islocked },		/* islocked */
	{ &vop_pathconf_desc, ufs_pathconf },		/* pathconf */
	{ &vop_advlock_desc, ufs_advlock },		/* advlock */
	{ &vop_blkatoff_desc, lfs_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, lfs_valloc },		/* valloc */
	{ &vop_balloc_desc, lfs_balloc },		/* balloc */
	{ &vop_vfree_desc, lfs_vfree },			/* vfree */
	{ &vop_truncate_desc, lfs_truncate },		/* truncate */
	{ &vop_update_desc, lfs_update },		/* update */
	{ &vop_bwrite_desc, lfs_bwrite },		/* bwrite */
	{ &vop_getpages_desc, lfs_getpages },		/* getpages */
	{ &vop_putpages_desc, lfs_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc lfs_vnodeop_opv_desc =
	{ &lfs_vnodeop_p, lfs_vnodeop_entries };
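
/*
 * The descriptor above only pairs the (still unfilled) operations
 * vector lfs_vnodeop_p with its entry table; the VFS layer fills the
 * vector in when the descriptor is registered at boot.  A minimal
 * sketch of how a call lands in one of the lfs_* entries, assuming
 * the standard vnode interface macros from vnode_if.h:
 *
 *	VOP_CREATE(dvp, vpp, cnp, vap)
 *	    -> VCALL(dvp, VOFFSET(vop_create), &a)
 *	    -> (*dvp->v_op[vop_create_desc.vdesc_offset])(&a)
 *	    -> lfs_create(&a)
 */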

int (**lfs_specop_p)(void *);
const struct vnodeopv_entry_desc lfs_specop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, spec_lookup },		/* lookup */
	{ &vop_create_desc, spec_create },		/* create */
	{ &vop_mknod_desc, spec_mknod },		/* mknod */
	{ &vop_open_desc, spec_open },			/* open */
	{ &vop_close_desc, lfsspec_close },		/* close */
	{ &vop_access_desc, ufs_access },		/* access */
	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
	{ &vop_read_desc, ufsspec_read },		/* read */
	{ &vop_write_desc, ufsspec_write },		/* write */
	{ &vop_lease_desc, spec_lease_check },		/* lease */
	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
	{ &vop_fcntl_desc, ufs_fcntl },			/* fcntl */
	{ &vop_poll_desc, spec_poll },			/* poll */
	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, spec_revoke },		/* revoke */
	{ &vop_mmap_desc, spec_mmap },			/* mmap */
	{ &vop_fsync_desc, spec_fsync },		/* fsync */
	{ &vop_seek_desc, spec_seek },			/* seek */
	{ &vop_remove_desc, spec_remove },		/* remove */
	{ &vop_link_desc, spec_link },			/* link */
	{ &vop_rename_desc, spec_rename },		/* rename */
	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
	{ &vop_symlink_desc, spec_symlink },		/* symlink */
	{ &vop_readdir_desc, spec_readdir },		/* readdir */
	{ &vop_readlink_desc, spec_readlink },		/* readlink */
	{ &vop_abortop_desc, spec_abortop },		/* abortop */
	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, ufs_lock },			/* lock */
	{ &vop_unlock_desc, ufs_unlock },		/* unlock */
	{ &vop_bmap_desc, spec_bmap },			/* bmap */
	{ &vop_strategy_desc, spec_strategy },		/* strategy */
	{ &vop_print_desc, ufs_print },			/* print */
	{ &vop_islocked_desc, ufs_islocked },		/* islocked */
	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
	{ &vop_advlock_desc, spec_advlock },		/* advlock */
	{ &vop_blkatoff_desc, spec_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, spec_valloc },		/* valloc */
	{ &vop_vfree_desc, lfs_vfree },			/* vfree */
	{ &vop_truncate_desc, spec_truncate },		/* truncate */
	{ &vop_update_desc, lfs_update },		/* update */
	{ &vop_bwrite_desc, vn_bwrite },		/* bwrite */
	{ &vop_getpages_desc, spec_getpages },		/* getpages */
	{ &vop_putpages_desc, spec_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc lfs_specop_opv_desc =
	{ &lfs_specop_p, lfs_specop_entries };

int (**lfs_fifoop_p)(void *);
const struct vnodeopv_entry_desc lfs_fifoop_entries[] = {
	{ &vop_default_desc, vn_default_error },
	{ &vop_lookup_desc, fifo_lookup },		/* lookup */
	{ &vop_create_desc, fifo_create },		/* create */
	{ &vop_mknod_desc, fifo_mknod },		/* mknod */
	{ &vop_open_desc, fifo_open },			/* open */
	{ &vop_close_desc, lfsfifo_close },		/* close */
	{ &vop_access_desc, ufs_access },		/* access */
	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
	{ &vop_read_desc, ufsfifo_read },		/* read */
	{ &vop_write_desc, ufsfifo_write },		/* write */
	{ &vop_lease_desc, fifo_lease_check },		/* lease */
	{ &vop_ioctl_desc, fifo_ioctl },		/* ioctl */
	{ &vop_fcntl_desc, ufs_fcntl },			/* fcntl */
	{ &vop_poll_desc, fifo_poll },			/* poll */
	{ &vop_kqfilter_desc, fifo_kqfilter },		/* kqfilter */
	{ &vop_revoke_desc, fifo_revoke },		/* revoke */
	{ &vop_mmap_desc, fifo_mmap },			/* mmap */
	{ &vop_fsync_desc, fifo_fsync },		/* fsync */
	{ &vop_seek_desc, fifo_seek },			/* seek */
	{ &vop_remove_desc, fifo_remove },		/* remove */
	{ &vop_link_desc, fifo_link },			/* link */
	{ &vop_rename_desc, fifo_rename },		/* rename */
	{ &vop_mkdir_desc, fifo_mkdir },		/* mkdir */
	{ &vop_rmdir_desc, fifo_rmdir },		/* rmdir */
	{ &vop_symlink_desc, fifo_symlink },		/* symlink */
	{ &vop_readdir_desc, fifo_readdir },		/* readdir */
	{ &vop_readlink_desc, fifo_readlink },		/* readlink */
	{ &vop_abortop_desc, fifo_abortop },		/* abortop */
	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
	{ &vop_lock_desc, ufs_lock },			/* lock */
	{ &vop_unlock_desc, ufs_unlock },		/* unlock */
	{ &vop_bmap_desc, fifo_bmap },			/* bmap */
	{ &vop_strategy_desc, fifo_strategy },		/* strategy */
	{ &vop_print_desc, ufs_print },			/* print */
	{ &vop_islocked_desc, ufs_islocked },		/* islocked */
	{ &vop_pathconf_desc, fifo_pathconf },		/* pathconf */
	{ &vop_advlock_desc, fifo_advlock },		/* advlock */
	{ &vop_blkatoff_desc, fifo_blkatoff },		/* blkatoff */
	{ &vop_valloc_desc, fifo_valloc },		/* valloc */
	{ &vop_vfree_desc, lfs_vfree },			/* vfree */
	{ &vop_truncate_desc, fifo_truncate },		/* truncate */
	{ &vop_update_desc, lfs_update },		/* update */
	{ &vop_bwrite_desc, lfs_bwrite },		/* bwrite */
	{ &vop_putpages_desc, fifo_putpages },		/* putpages */
	{ NULL, NULL }
};
const struct vnodeopv_desc lfs_fifoop_opv_desc =
	{ &lfs_fifoop_p, lfs_fifoop_entries };

static int check_dirty(struct lfs *, struct vnode *, off_t, off_t, off_t, int, int);

/*
 * A function version of LFS_ITIMES, for the UFS functions which call ITIMES
 */
void
lfs_itimes(struct inode *ip, struct timespec *acc, struct timespec *mod, struct timespec *cre)
{
	LFS_ITIMES(ip, acc, mod, cre);
}

#define	LFS_READWRITE
#include <ufs/ufs/ufs_readwrite.c>
#undef	LFS_READWRITE
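
/*
 * The #include of ufs_readwrite.c above is a textual-inclusion trick:
 * with LFS_READWRITE defined, that file compiles its read/write
 * routines as lfs_read and lfs_write (entered in the table above),
 * substituting LFS block accounting for the FFS versions.
 */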

/*
 * Synch an open file.
 */
/* ARGSUSED */
int
lfs_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		struct ucred *a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	int error, wait;

	/*
	 * Trickle sync checks for need to do a checkpoint after possible
	 * activity from the pagedaemon.
	 */
	if (ap->a_flags & FSYNC_LAZY) {
		simple_lock(&lfs_subsys_lock);
		wakeup(&lfs_writer_daemon);
		simple_unlock(&lfs_subsys_lock);
		return 0;
	}

	wait = (ap->a_flags & FSYNC_WAIT);
	simple_lock(&vp->v_interlock);
	error = VOP_PUTPAGES(vp, trunc_page(ap->a_offlo),
			round_page(ap->a_offhi),
			PGO_CLEANIT | (wait ? PGO_SYNCIO : 0));
	if (error)
		return error;
	error = VOP_UPDATE(vp, NULL, NULL, wait ? UPDATE_WAIT : 0);
	if (error == 0 && ap->a_flags & FSYNC_CACHE) {
		int l = 0;
		error = VOP_IOCTL(VTOI(vp)->i_devvp, DIOCCACHESYNC, &l, FWRITE,
				  ap->a_p->p_ucred, ap->a_p);
	}
	if (wait && !VPISEMPTY(vp))
		LFS_SET_UINO(VTOI(vp), IN_MODIFIED);

	return error;
}

/*
 * Take IN_ADIROP off, then call ufs_inactive.
 */
int
lfs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;

	KASSERT(VTOI(ap->a_vp)->i_nlink == VTOI(ap->a_vp)->i_ffs_effnlink);

	lfs_unmark_vnode(ap->a_vp);

	/*
	 * The Ifile is only ever inactivated on unmount.
	 * Streamline this process by not giving it more dirty blocks.
	 */
	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM) {
		LFS_CLR_UINO(VTOI(ap->a_vp), IN_ALLMOD);
		VOP_UNLOCK(ap->a_vp, 0);
		return 0;
	}

	return ufs_inactive(v);
}

/*
 * These macros are used to bracket UFS directory ops, so that we can
 * identify all the pages touched during directory ops which need to
 * be ordered and flushed atomically, so that they may be recovered.
 *
 * Because we have to mark nodes VDIROP in order to prevent
 * the cache from reclaiming them while a dirop is in progress, we must
 * also manage the number of nodes so marked (otherwise we can run out).
 * We do this by setting lfs_dirvcount to the number of marked vnodes; it
 * is decremented during segment write, when VDIROP is taken off.
 */
#define	MARK_VNODE(vp)			lfs_mark_vnode(vp)
#define	UNMARK_VNODE(vp)		lfs_unmark_vnode(vp)
#define	SET_DIROP_CREATE(dvp, vpp)	lfs_set_dirop_create((dvp), (vpp))
#define	SET_DIROP_REMOVE(dvp, vp)	lfs_set_dirop((dvp), (vp))
static int lfs_set_dirop_create(struct vnode *, struct vnode **);
static int lfs_set_dirop(struct vnode *, struct vnode *);
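
/*
 * The canonical bracketing pattern used by the directory operations
 * below (a sketch only; "ufs_whatever" stands in for the
 * corresponding ufs_* operation):
 *
 *	if ((error = SET_DIROP_CREATE(dvp, vpp)) != 0) {
 *		vput(dvp);
 *		return error;
 *	}
 *	error = ufs_whatever(ap);
 *	SET_ENDOP_CREATE_AP(ap, "whatever");
 *	return error;
 */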

static int
lfs_set_dirop(struct vnode *dvp, struct vnode *vp)
{
	struct lfs *fs;
	int error;

	KASSERT(VOP_ISLOCKED(dvp));
	KASSERT(vp == NULL || VOP_ISLOCKED(vp));

	fs = VTOI(dvp)->i_lfs;

	ASSERT_NO_SEGLOCK(fs);
	/*
	 * LFS_NRESERVE calculates direct and indirect blocks as well
	 * as an inode block; an overestimate in most cases.
	 */
	if ((error = lfs_reserve(fs, dvp, vp, LFS_NRESERVE(fs))) != 0)
		return (error);

    restart:
	simple_lock(&fs->lfs_interlock);
	if (fs->lfs_dirops == 0) {
		simple_unlock(&fs->lfs_interlock);
		lfs_check(dvp, LFS_UNUSED_LBN, 0);
		simple_lock(&fs->lfs_interlock);
	}
	while (fs->lfs_writer)
		ltsleep(&fs->lfs_dirops, (PRIBIO + 1), "lfs_sdirop", 0,
			&fs->lfs_interlock);
	simple_lock(&lfs_subsys_lock);
	if (lfs_dirvcount > LFS_MAX_DIROP && fs->lfs_dirops == 0) {
		wakeup(&lfs_writer_daemon);
		simple_unlock(&lfs_subsys_lock);
		simple_unlock(&fs->lfs_interlock);
		preempt(1);
		goto restart;
	}

	if (lfs_dirvcount > LFS_MAX_DIROP) {
		simple_unlock(&fs->lfs_interlock);
		DLOG((DLOG_DIROP, "lfs_set_dirop: sleeping with dirops=%d, "
		      "dirvcount=%d\n", fs->lfs_dirops, lfs_dirvcount));
		if ((error = ltsleep(&lfs_dirvcount,
		    PCATCH | PUSER | PNORELOCK, "lfs_maxdirop", 0,
		    &lfs_subsys_lock)) != 0) {
			goto unreserve;
		}
		goto restart;
	}
	simple_unlock(&lfs_subsys_lock);

	++fs->lfs_dirops;
	fs->lfs_doifile = 1;
	simple_unlock(&fs->lfs_interlock);

	/* Hold a reference so SET_ENDOP will be happy */
	vref(dvp);
	if (vp) {
		vref(vp);
		MARK_VNODE(vp);
	}

	MARK_VNODE(dvp);
	return 0;

unreserve:
	lfs_reserve(fs, dvp, vp, -LFS_NRESERVE(fs));
	return error;
}

/*
 * Get a new vnode *before* adjusting the dirop count, to avoid a deadlock
 * in getnewvnode(), if we have a stacked filesystem mounted on top
 * of us.
 *
 * NB: this means we have to clear the new vnodes on error.  Fortunately
 * SET_ENDOP is there to do that for us.
 */
static int
lfs_set_dirop_create(struct vnode *dvp, struct vnode **vpp)
{
	int error;
	struct lfs *fs;

	fs = VFSTOUFS(dvp->v_mount)->um_lfs;
	ASSERT_NO_SEGLOCK(fs);
	if (fs->lfs_ronly)
		return EROFS;
	if (vpp && (error = getnewvnode(VT_LFS, dvp->v_mount, lfs_vnodeop_p, vpp))) {
		DLOG((DLOG_ALLOC, "lfs_set_dirop_create: dvp %p error %d\n",
		      dvp, error));
		return error;
	}
	if ((error = lfs_set_dirop(dvp, NULL)) != 0) {
		if (vpp) {
			ungetnewvnode(*vpp);
			*vpp = NULL;
		}
		return error;
	}
	return 0;
}

#define	SET_ENDOP_BASE(fs, dvp, str)					\
	do {								\
		simple_lock(&(fs)->lfs_interlock);			\
		--(fs)->lfs_dirops;					\
		if (!(fs)->lfs_dirops) {				\
			if ((fs)->lfs_nadirop) {			\
				panic("SET_ENDOP: %s: no dirops but "	\
					"nadirop=%d", (str),		\
					(fs)->lfs_nadirop);		\
			}						\
			wakeup(&(fs)->lfs_writer);			\
			simple_unlock(&(fs)->lfs_interlock);		\
			lfs_check((dvp), LFS_UNUSED_LBN, 0);		\
		} else							\
			simple_unlock(&(fs)->lfs_interlock);		\
	} while(0)
#define SET_ENDOP_CREATE(fs, dvp, nvpp, str)				\
	do {								\
		UNMARK_VNODE(dvp);					\
		if (nvpp && *nvpp)					\
			UNMARK_VNODE(*nvpp);				\
		/* Check for error return to stem vnode leakage */	\
		if (nvpp && *nvpp && !((*nvpp)->v_flag & VDIROP))	\
			ungetnewvnode(*(nvpp));				\
		SET_ENDOP_BASE((fs), (dvp), (str));			\
		lfs_reserve((fs), (dvp), NULL, -LFS_NRESERVE(fs));	\
		vrele(dvp);						\
	} while(0)
#define SET_ENDOP_CREATE_AP(ap, str)					\
	SET_ENDOP_CREATE(VTOI((ap)->a_dvp)->i_lfs, (ap)->a_dvp,		\
			 (ap)->a_vpp, (str))
#define SET_ENDOP_REMOVE(fs, dvp, ovp, str)				\
	do {								\
		UNMARK_VNODE(dvp);					\
		if (ovp)						\
			UNMARK_VNODE(ovp);				\
		SET_ENDOP_BASE((fs), (dvp), (str));			\
		lfs_reserve((fs), (dvp), (ovp), -LFS_NRESERVE(fs));	\
		vrele(dvp);						\
		if (ovp)						\
			vrele(ovp);					\
	} while(0)

void
lfs_mark_vnode(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);
	struct lfs *fs = ip->i_lfs;

	simple_lock(&fs->lfs_interlock);
	if (!(ip->i_flag & IN_ADIROP)) {
		if (!(vp->v_flag & VDIROP)) {
			(void)lfs_vref(vp);
			simple_lock(&lfs_subsys_lock);
			++lfs_dirvcount;
			simple_unlock(&lfs_subsys_lock);
			TAILQ_INSERT_TAIL(&fs->lfs_dchainhd, ip, i_lfs_dchain);
			vp->v_flag |= VDIROP;
		}
		++fs->lfs_nadirop;
		ip->i_flag |= IN_ADIROP;
	} else
		KASSERT(vp->v_flag & VDIROP);
	simple_unlock(&fs->lfs_interlock);
}

void
lfs_unmark_vnode(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);

	if (ip->i_flag & IN_ADIROP) {
		KASSERT(vp->v_flag & VDIROP);
		simple_lock(&ip->i_lfs->lfs_interlock);
		--ip->i_lfs->lfs_nadirop;
		simple_unlock(&ip->i_lfs->lfs_interlock);
		ip->i_flag &= ~IN_ADIROP;
	}
}
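
/*
 * Note the asymmetry: lfs_unmark_vnode() undoes only the IN_ADIROP
 * accounting.  VDIROP, the reference taken by lfs_vref(), and the
 * lfs_dirvcount bump from lfs_mark_vnode() persist until segment
 * write, which clears VDIROP once the directory data is on disk.
 */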

int
lfs_symlink(void *v)
{
	struct vop_symlink_args /* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		char *a_target;
	} */ *ap = v;
	int error;

	if ((error = SET_DIROP_CREATE(ap->a_dvp, ap->a_vpp)) != 0) {
		vput(ap->a_dvp);
		return error;
	}
	error = ufs_symlink(ap);
	SET_ENDOP_CREATE_AP(ap, "symlink");
	return (error);
}

int
lfs_mknod(void *v)
{
	struct vop_mknod_args	/* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
		} */ *ap = v;
	struct vattr *vap = ap->a_vap;
	struct vnode **vpp = ap->a_vpp;
	struct inode *ip;
	int error;
	struct mount	*mp;
	ino_t		ino;

	if ((error = SET_DIROP_CREATE(ap->a_dvp, ap->a_vpp)) != 0) {
		vput(ap->a_dvp);
		return error;
	}
	error = ufs_makeinode(MAKEIMODE(vap->va_type, vap->va_mode),
	    ap->a_dvp, vpp, ap->a_cnp);

	/* Either way we're done with the dirop at this point */
	SET_ENDOP_CREATE_AP(ap, "mknod");

	if (error)
		return (error);

	ip = VTOI(*vpp);
	mp  = (*vpp)->v_mount;
	ino = ip->i_number;
	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
	if (vap->va_rdev != VNOVAL) {
		/*
		 * Want to be able to use this to make badblock
		 * inodes, so don't truncate the dev number.
		 */
#if 0
		ip->i_ffs1_rdev = ufs_rw32(vap->va_rdev,
		    UFS_MPNEEDSWAP((*vpp)->v_mount));
#else
		ip->i_ffs1_rdev = vap->va_rdev;
#endif
	}

	/*
	 * Call fsync to write the vnode so that we don't have to deal with
	 * flushing it when it's marked VDIROP|VXLOCK.
	 *
	 * XXX KS - If we can't flush we also can't call vgone(), so must
	 * return.  But, that leaves this vnode in limbo, also not good.
	 * Can this ever happen (barring hardware failure)?
	 */
	if ((error = VOP_FSYNC(*vpp, NOCRED, FSYNC_WAIT, 0, 0,
	    curproc)) != 0) {
		panic("lfs_mknod: couldn't fsync (ino %d)", ino);
		/* return (error); */
	}
	/*
	 * Remove vnode so that it will be reloaded by VFS_VGET and
	 * checked to see if it is an alias of an existing entry in
	 * the inode cache.
	 */
	/* Used to be vput, but that causes us to call VOP_INACTIVE twice. */

	VOP_UNLOCK(*vpp, 0);
	lfs_vunref(*vpp);
	(*vpp)->v_type = VNON;
	vgone(*vpp);
	error = VFS_VGET(mp, ino, vpp);

	if (error != 0) {
		*vpp = NULL;
		return (error);
	}
	return (0);
}

int
lfs_create(void *v)
{
	struct vop_create_args	/* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	int error;

	if ((error = SET_DIROP_CREATE(ap->a_dvp, ap->a_vpp)) != 0) {
		vput(ap->a_dvp);
		return error;
	}
	error = ufs_create(ap);
	SET_ENDOP_CREATE_AP(ap, "create");
	return (error);
}

int
lfs_mkdir(void *v)
{
	struct vop_mkdir_args	/* {
		struct vnode *a_dvp;
		struct vnode **a_vpp;
		struct componentname *a_cnp;
		struct vattr *a_vap;
	} */ *ap = v;
	int error;

	if ((error = SET_DIROP_CREATE(ap->a_dvp, ap->a_vpp)) != 0) {
		vput(ap->a_dvp);
		return error;
	}
	error = ufs_mkdir(ap);
	SET_ENDOP_CREATE_AP(ap, "mkdir");
	return (error);
}

int
lfs_remove(void *v)
{
	struct vop_remove_args	/* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct vnode *dvp, *vp;
	int error;

	dvp = ap->a_dvp;
	vp = ap->a_vp;
	if ((error = SET_DIROP_REMOVE(dvp, vp)) != 0) {
		if (dvp == vp)
			vrele(vp);
		else
			vput(vp);
		vput(dvp);
		return error;
	}
	error = ufs_remove(ap);
	SET_ENDOP_REMOVE(VTOI(dvp)->i_lfs, dvp, vp, "remove");
	return (error);
}

int
lfs_rmdir(void *v)
{
	struct vop_rmdir_args	/* {
		struct vnodeop_desc *a_desc;
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct vnode *vp;
	int error;

	vp = ap->a_vp;
	if ((error = SET_DIROP_REMOVE(ap->a_dvp, ap->a_vp)) != 0) {
		vrele(ap->a_dvp);
		if (ap->a_vp != ap->a_dvp)
			VOP_UNLOCK(ap->a_dvp, 0);
		vput(vp);
		return error;
	}
	error = ufs_rmdir(ap);
	SET_ENDOP_REMOVE(VTOI(ap->a_dvp)->i_lfs, ap->a_dvp, vp, "rmdir");
	return (error);
}

int
lfs_link(void *v)
{
	struct vop_link_args	/* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	int error;
	struct vnode **vpp = NULL;

	if ((error = SET_DIROP_CREATE(ap->a_dvp, vpp)) != 0) {
		vput(ap->a_dvp);
		return error;
	}
	error = ufs_link(ap);
	SET_ENDOP_CREATE(VTOI(ap->a_dvp)->i_lfs, ap->a_dvp, vpp, "link");
	return (error);
}

int
lfs_rename(void *v)
{
	struct vop_rename_args	/* {
		struct vnode *a_fdvp;
		struct vnode *a_fvp;
		struct componentname *a_fcnp;
		struct vnode *a_tdvp;
		struct vnode *a_tvp;
		struct componentname *a_tcnp;
	} */ *ap = v;
	struct vnode *tvp, *fvp, *tdvp, *fdvp;
	struct componentname *tcnp, *fcnp;
	int error;
	struct lfs *fs;

	fs = VTOI(ap->a_fdvp)->i_lfs;
	tvp = ap->a_tvp;
	tdvp = ap->a_tdvp;
	tcnp = ap->a_tcnp;
	fvp = ap->a_fvp;
	fdvp = ap->a_fdvp;
	fcnp = ap->a_fcnp;

	/*
	 * Check for cross-device rename.
	 * If it is, we don't want to set dirops, just error out.
	 * (In particular note that MARK_VNODE(tdvp) will DTWT on
	 * a cross-device rename.)
	 *
	 * Copied from ufs_rename.
	 */
	if ((fvp->v_mount != tdvp->v_mount) ||
	    (tvp && (fvp->v_mount != tvp->v_mount))) {
		error = EXDEV;
		goto errout;
	}

	/*
	 * Check to make sure we're not renaming a vnode onto itself
	 * (deleting a hard link by renaming one name onto another);
	 * if we are we can't recursively call VOP_REMOVE since that
	 * would leave us with an unaccounted-for number of live dirops.
	 *
	 * Inline the relevant section of ufs_rename here, *before*
	 * calling SET_DIROP_REMOVE.
	 */
	if (tvp && ((VTOI(tvp)->i_flags & (IMMUTABLE | APPEND)) ||
	    (VTOI(tdvp)->i_flags & APPEND))) {
		error = EPERM;
		goto errout;
	}
	if (fvp == tvp) {
		if (fvp->v_type == VDIR) {
			error = EINVAL;
			goto errout;
		}

		/* Release destination completely. */
		VOP_ABORTOP(tdvp, tcnp);
		vput(tdvp);
		vput(tvp);

		/* Delete source. */
		vrele(fvp);
		fcnp->cn_flags &= ~(MODMASK | SAVESTART);
		fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
		fcnp->cn_nameiop = DELETE;
		if ((error = relookup(fdvp, &fvp, fcnp))){
			/* relookup blew away fdvp */
			return (error);
		}
		return (VOP_REMOVE(fdvp, fvp, fcnp));
	}

	if ((error = SET_DIROP_REMOVE(tdvp, tvp)) != 0)
		goto errout;
	MARK_VNODE(fdvp);
	MARK_VNODE(fvp);

	error = ufs_rename(ap);
	UNMARK_VNODE(fdvp);
	UNMARK_VNODE(fvp);
	SET_ENDOP_REMOVE(fs, tdvp, tvp, "rename");
	return (error);

    errout:
	VOP_ABORTOP(tdvp, ap->a_tcnp); /* XXX, why not in NFS? */
	if (tdvp == tvp)
		vrele(tdvp);
	else
		vput(tdvp);
	if (tvp)
		vput(tvp);
	VOP_ABORTOP(fdvp, ap->a_fcnp); /* XXX, why not in NFS? */
	vrele(fdvp);
	vrele(fvp);
	return (error);
}

/* XXX hack to avoid calling ITIMES in getattr */
int
lfs_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct vattr *vap = ap->a_vap;
	struct lfs *fs = ip->i_lfs;
	/*
	 * Copy from inode table
	 */
	vap->va_fsid = ip->i_dev;
	vap->va_fileid = ip->i_number;
	vap->va_mode = ip->i_mode & ~IFMT;
	vap->va_nlink = ip->i_nlink;
	vap->va_uid = ip->i_uid;
	vap->va_gid = ip->i_gid;
	vap->va_rdev = (dev_t)ip->i_ffs1_rdev;
	vap->va_size = vp->v_size;
	vap->va_atime.tv_sec = ip->i_ffs1_atime;
	vap->va_atime.tv_nsec = ip->i_ffs1_atimensec;
	vap->va_mtime.tv_sec = ip->i_ffs1_mtime;
	vap->va_mtime.tv_nsec = ip->i_ffs1_mtimensec;
	vap->va_ctime.tv_sec = ip->i_ffs1_ctime;
	vap->va_ctime.tv_nsec = ip->i_ffs1_ctimensec;
	vap->va_flags = ip->i_flags;
	vap->va_gen = ip->i_gen;
	/* this doesn't belong here */
	if (vp->v_type == VBLK)
		vap->va_blocksize = BLKDEV_IOSIZE;
	else if (vp->v_type == VCHR)
		vap->va_blocksize = MAXBSIZE;
	else
		vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize;
	vap->va_bytes = fsbtob(fs, (u_quad_t)ip->i_lfs_effnblks);
	vap->va_type = vp->v_type;
	vap->va_filerev = ip->i_modrev;
	return (0);
}

/*
 * Check to make sure the inode blocks won't choke the buffer
 * cache, then call ufs_setattr as usual.
 */
int
lfs_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;

	lfs_check(vp, LFS_UNUSED_LBN, 0);
	return ufs_setattr(v);
}

/*
 * Close called
 *
 * XXX -- we were using ufs_close, but since it updates the
 * times on the inode, we might need to bump the uinodes
 * count.
 */
/* ARGSUSED */
int
lfs_close(void *v)
{
	struct vop_close_args /* {
		struct vnode *a_vp;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct timespec ts;

	if (vp == ip->i_lfs->lfs_ivnode &&
	    vp->v_mount->mnt_iflag & IMNT_UNMOUNT)
		return 0;

	if (vp->v_usecount > 1 && vp != ip->i_lfs->lfs_ivnode) {
		TIMEVAL_TO_TIMESPEC(&time, &ts);
		LFS_ITIMES(ip, &ts, &ts, &ts);
	}
	return (0);
}

/*
 * Close wrapper for special devices.
 *
 * Update the times on the inode then do device close.
 */
int
lfsspec_close(void *v)
{
	struct vop_close_args /* {
		struct vnode	*a_vp;
		int		a_fflag;
		struct ucred	*a_cred;
		struct proc	*a_p;
	} */ *ap = v;
	struct vnode	*vp;
	struct inode	*ip;
	struct timespec	ts;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (vp->v_usecount > 1) {
		TIMEVAL_TO_TIMESPEC(&time, &ts);
		LFS_ITIMES(ip, &ts, &ts, &ts);
	}
	return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap));
}

/*
 * Close wrapper for fifos.
 *
 * Update the times on the inode then do device close.
 */
int
lfsfifo_close(void *v)
{
	struct vop_close_args /* {
		struct vnode	*a_vp;
		int		a_fflag;
		struct ucred	*a_cred;
		struct proc	*a_p;
	} */ *ap = v;
	struct vnode	*vp;
	struct inode	*ip;
	struct timespec	ts;

	vp = ap->a_vp;
	ip = VTOI(vp);
	if (vp->v_usecount > 1) {
		TIMEVAL_TO_TIMESPEC(&time, &ts);
		LFS_ITIMES(ip, &ts, &ts, &ts);
	}
	return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap));
}

/*
 * Reclaim an inode so that it can be used for other purposes.
 */

int
lfs_reclaim(void *v)
{
	struct vop_reclaim_args /* {
		struct vnode *a_vp;
		struct proc *a_p;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	int error;

	KASSERT(ip->i_nlink == ip->i_ffs_effnlink);

	LFS_CLR_UINO(ip, IN_ALLMOD);
	if ((error = ufs_reclaim(vp, ap->a_p)))
		return (error);
	pool_put(&lfs_dinode_pool, ip->i_din.ffs1_din);
	pool_put(&lfs_inoext_pool, ip->inode_ext.lfs);
	ip->inode_ext.lfs = NULL;
	pool_put(&lfs_inode_pool, vp->v_data);
	vp->v_data = NULL;
	return (0);
}

/*
 * Read a block from a storage device.
 * In order to avoid reading blocks that are in the process of being
 * written by the cleaner---and hence are not mutexed by the normal
 * buffer cache / page cache mechanisms---check for collisions before
 * reading.
 *
 * We inline ufs_strategy to make sure that the VOP_BMAP occurs *before*
 * the active cleaner test.
 *
 * XXX This code assumes that lfs_markv makes synchronous checkpoints.
 */
int
lfs_strategy(void *v)
{
	struct vop_strategy_args /* {
		struct vnode *a_vp;
		struct buf *a_bp;
	} */ *ap = v;
	struct buf	*bp;
	struct lfs	*fs;
	struct vnode	*vp;
	struct inode	*ip;
	daddr_t		tbn;
	int		i, sn, error, slept;

	bp = ap->a_bp;
	vp = ap->a_vp;
	ip = VTOI(vp);
	fs = ip->i_lfs;

	/* lfs uses its strategy routine only for read */
	KASSERT(bp->b_flags & B_READ);

	if (vp->v_type == VBLK || vp->v_type == VCHR)
		panic("lfs_strategy: spec");
	KASSERT(bp->b_bcount != 0);
	if (bp->b_blkno == bp->b_lblkno) {
		error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
				 NULL);
		if (error) {
			bp->b_error = error;
			bp->b_flags |= B_ERROR;
			biodone(bp);
			return (error);
		}
		if ((long)bp->b_blkno == -1) /* no valid data */
			clrbuf(bp);
	}
	if ((long)bp->b_blkno < 0) { /* block is not on disk */
		biodone(bp);
		return (0);
	}

	slept = 1;
	simple_lock(&fs->lfs_interlock);
	while (slept && fs->lfs_seglock) {
		simple_unlock(&fs->lfs_interlock);
		/*
		 * Look through list of intervals.
		 * There will only be intervals to look through
		 * if the cleaner holds the seglock.
		 * Since the cleaner is synchronous, we can trust
		 * the list of intervals to be current.
		 */
		tbn = dbtofsb(fs, bp->b_blkno);
		sn = dtosn(fs, tbn);
		slept = 0;
		for (i = 0; i < fs->lfs_cleanind; i++) {
			if (sn == dtosn(fs, fs->lfs_cleanint[i]) &&
			    tbn >= fs->lfs_cleanint[i]) {
				DLOG((DLOG_CLEAN,
				      "lfs_strategy: ino %d lbn %" PRId64
				       " ind %d sn %d fsb %" PRIx32
				       " given sn %d fsb %" PRIx64 "\n",
					ip->i_number, bp->b_lblkno, i,
					dtosn(fs, fs->lfs_cleanint[i]),
					fs->lfs_cleanint[i], sn, tbn));
				DLOG((DLOG_CLEAN,
				      "lfs_strategy: sleeping on ino %d lbn %"
				      PRId64 "\n", ip->i_number, bp->b_lblkno));
				simple_lock(&fs->lfs_interlock);
				if (fs->lfs_seglock)
					ltsleep(&fs->lfs_seglock,
						(PRIBIO + 1) | PNORELOCK,
						"lfs_strategy", 0,
						&fs->lfs_interlock);
				/* Things may be different now; start over. */
				slept = 1;
				break;
			}
		}
		simple_lock(&fs->lfs_interlock);
	}
	simple_unlock(&fs->lfs_interlock);

	vp = ip->i_devvp;
	VOP_STRATEGY(vp, bp);
	return (0);
}

static void
lfs_flush_dirops(struct lfs *fs)
{
	struct inode *ip, *nip;
	struct vnode *vp;
	extern int lfs_dostats;
	struct segment *sp;
	int needunlock;

	ASSERT_NO_SEGLOCK(fs);

	if (fs->lfs_ronly)
		return;

	simple_lock(&fs->lfs_interlock);
	if (TAILQ_FIRST(&fs->lfs_dchainhd) == NULL) {
		simple_unlock(&fs->lfs_interlock);
		return;
	} else
		simple_unlock(&fs->lfs_interlock);

	if (lfs_dostats)
		++lfs_stats.flush_invoked;

	/*
	 * Inline lfs_segwrite/lfs_writevnodes, but just for dirops.
	 * Technically this is a checkpoint (the on-disk state is valid)
	 * even though we are leaving out all the file data.
	 */
	lfs_imtime(fs);
	lfs_seglock(fs, SEGM_CKP);
	sp = fs->lfs_sp;

	/*
	 * lfs_writevnodes, optimized to get dirops out of the way.
	 * Only write dirops, and don't flush files' pages, only
	 * blocks from the directories.
	 *
	 * We don't need to vref these files because they are
	 * dirops and so hold an extra reference until the
	 * segunlock clears them of that status.
	 *
	 * We don't need to check for IN_ADIROP because we know that
	 * no dirops are active.
	 *
	 */
	simple_lock(&fs->lfs_interlock);
	for (ip = TAILQ_FIRST(&fs->lfs_dchainhd); ip != NULL; ip = nip) {
		nip = TAILQ_NEXT(ip, i_lfs_dchain);
		simple_unlock(&fs->lfs_interlock);
		vp = ITOV(ip);

		/*
		 * All writes to directories come from dirops; all
		 * writes to files' direct blocks go through the page
		 * cache, which we're not touching.  Reads to files
		 * and/or directories will not be affected by writing
		 * directory blocks, directory inodes and file inodes.
		 * So we don't really need to lock.  If we don't lock,
		 * though, make sure that we don't clear IN_MODIFIED
		 * unnecessarily.
		 */
		if (vp->v_flag & VXLOCK)
			continue;
		if (vn_lock(vp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
			needunlock = 1;
		} else {
			DLOG((DLOG_VNODE, "lfs_flush_dirops: flushing locked ino %d\n",
			       VTOI(vp)->i_number));
			needunlock = 0;
		}
		if (vp->v_type != VREG &&
		    ((ip->i_flag & IN_ALLMOD) || !VPISEMPTY(vp))) {
			lfs_writefile(fs, sp, vp);
			if (!VPISEMPTY(vp) && !WRITEINPROG(vp) &&
			    !(ip->i_flag & IN_ALLMOD)) {
				LFS_SET_UINO(ip, IN_MODIFIED);
			}
		}
		(void) lfs_writeinode(fs, sp, ip);
		if (needunlock)
			VOP_UNLOCK(vp, 0);
		else
			LFS_SET_UINO(ip, IN_MODIFIED);
		simple_lock(&fs->lfs_interlock);
	}
	simple_unlock(&fs->lfs_interlock);
	/* We've written all the dirops there are */
	((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
	(void) lfs_writeseg(fs, sp);
	lfs_segunlock(fs);
}
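
/*
 * lfs_flush_dirops() is reached through the LFCNRECLAIM case of
 * lfs_fcntl() below; ordinary checkpoints do the corresponding work
 * in lfs_segwrite().
 */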

/*
 * Provide a fcntl interface to sys_lfs_{segwait,bmapv,markv}.
 */
int
lfs_fcntl(void *v)
{
	struct vop_fcntl_args /* {
		struct vnode *a_vp;
		u_long a_command;
		caddr_t  a_data;
		int  a_fflag;
		struct ucred *a_cred;
		struct proc *a_p;
	} */ *ap = v;
	struct timeval *tvp;
	BLOCK_INFO *blkiov;
	CLEANERINFO *cip;
	int blkcnt, error, oclean;
	struct lfs_fcntl_markv blkvp;
	fsid_t *fsidp;
	struct lfs *fs;
	struct buf *bp;
	fhandle_t *fhp;
	daddr_t off;

	/* Only respect LFS fcntls on fs root or Ifile */
	if (VTOI(ap->a_vp)->i_number != ROOTINO &&
	    VTOI(ap->a_vp)->i_number != LFS_IFILE_INUM) {
		return ufs_fcntl(v);
	}

	/* Avoid locking a draining lock */
	if (ap->a_vp->v_mount->mnt_iflag & IMNT_UNMOUNT) {
		return ESHUTDOWN;
	}

	fs = VTOI(ap->a_vp)->i_lfs;
	fsidp = &ap->a_vp->v_mount->mnt_stat.f_fsidx;

	switch (ap->a_command) {
	    case LFCNSEGWAITALL:
	    case LFCNSEGWAITALL_COMPAT:
		fsidp = NULL;
		/* FALLTHROUGH */
	    case LFCNSEGWAIT:
	    case LFCNSEGWAIT_COMPAT:
		tvp = (struct timeval *)ap->a_data;
		simple_lock(&fs->lfs_interlock);
		++fs->lfs_sleepers;
		simple_unlock(&fs->lfs_interlock);
		VOP_UNLOCK(ap->a_vp, 0);

		error = lfs_segwait(fsidp, tvp);

		VOP_LOCK(ap->a_vp, LK_EXCLUSIVE);
		simple_lock(&fs->lfs_interlock);
		if (--fs->lfs_sleepers == 0)
			wakeup(&fs->lfs_sleepers);
		simple_unlock(&fs->lfs_interlock);
		return error;

	    case LFCNBMAPV:
	    case LFCNMARKV:
		if ((error = suser(ap->a_p->p_ucred, &ap->a_p->p_acflag)) != 0)
			return (error);
		blkvp = *(struct lfs_fcntl_markv *)ap->a_data;

		blkcnt = blkvp.blkcnt;
		if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
			return (EINVAL);
		blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
		if ((error = copyin(blkvp.blkiov, blkiov,
		     blkcnt * sizeof(BLOCK_INFO))) != 0) {
			free(blkiov, M_SEGMENT);
			return error;
		}

		simple_lock(&fs->lfs_interlock);
		++fs->lfs_sleepers;
		simple_unlock(&fs->lfs_interlock);
		VOP_UNLOCK(ap->a_vp, 0);
		if (ap->a_command == LFCNBMAPV)
			error = lfs_bmapv(ap->a_p, fsidp, blkiov, blkcnt);
		else /* LFCNMARKV */
			error = lfs_markv(ap->a_p, fsidp, blkiov, blkcnt);
		if (error == 0)
			error = copyout(blkiov, blkvp.blkiov,
					blkcnt * sizeof(BLOCK_INFO));
		VOP_LOCK(ap->a_vp, LK_EXCLUSIVE);
		simple_lock(&fs->lfs_interlock);
		if (--fs->lfs_sleepers == 0)
			wakeup(&fs->lfs_sleepers);
		simple_unlock(&fs->lfs_interlock);
		free(blkiov, M_SEGMENT);
		return error;

	    case LFCNRECLAIM:
		/*
		 * Flush dirops and write Ifile, allowing empty segments
		 * to be immediately reclaimed.
		 */
		VOP_UNLOCK(ap->a_vp, 0);
		lfs_writer_enter(fs, "pndirop");
		off = fs->lfs_offset;
		lfs_seglock(fs, SEGM_FORCE_CKP | SEGM_CKP);
		lfs_flush_dirops(fs);
		LFS_CLEANERINFO(cip, fs, bp);
		oclean = cip->clean;
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);
		lfs_segwrite(ap->a_vp->v_mount, SEGM_FORCE_CKP);
		lfs_segunlock(fs);
		lfs_writer_leave(fs);

#ifdef DEBUG
		LFS_CLEANERINFO(cip, fs, bp);
		DLOG((DLOG_CLEAN, "lfs_fcntl: reclaim wrote %" PRId64
		      " blocks, cleaned %" PRId32 " segments (activesb %d)\n",
		      fs->lfs_offset - off, cip->clean - oclean,
		      fs->lfs_activesb));
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
#endif

		VOP_LOCK(ap->a_vp, LK_EXCLUSIVE);
		return 0;

	    case LFCNIFILEFH:
		/* Return the filehandle of the Ifile */
		if ((error = suser(ap->a_p->p_ucred, &ap->a_p->p_acflag)) != 0)
			return (error);
		fhp = (struct fhandle *)ap->a_data;
		fhp->fh_fsid = *fsidp;
		return lfs_vptofh(fs->lfs_ivnode, &(fhp->fh_fid));

	    default:
		return ufs_fcntl(v);
	}
	return 0;
}
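
/*
 * A sketch of how a userland tool (e.g. a cleaner) might drive the
 * interface above; hypothetical code, assuming a descriptor open on
 * the filesystem root and the structures from <ufs/lfs/lfs.h>:
 *
 *	struct lfs_fcntl_markv blkvp;
 *
 *	blkvp.blkiov = blkiov;	   (an array of BLOCK_INFO)
 *	blkvp.blkcnt = nblocks;	   (at most LFS_MARKV_MAXBLKCNT)
 *	if (fcntl(rootfd, LFCNMARKV, &blkvp) == -1)
 *		err(1, "LFCNMARKV");
 */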

int
lfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;

	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM &&
	    (ap->a_access_type & VM_PROT_WRITE) != 0) {
		return EPERM;
	}
	if ((ap->a_access_type & VM_PROT_WRITE) != 0) {
		LFS_SET_UINO(VTOI(ap->a_vp), IN_MODIFIED);
	}

	/*
	 * we're relying on the fact that genfs_getpages() always reads in
	 * entire filesystem blocks.
	 */
	return genfs_getpages(v);
}
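
/*
 * The IN_MODIFIED above covers writes made through a writable mapping,
 * which dirty pages without ever passing through lfs_write(); the
 * write fault serviced here is the natural point to flag the inode.
 */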
   1413 
   1414 /*
   1415  * Make sure that for all pages in every block in the given range,
   1416  * either all are dirty or all are clean.  If any of the pages
   1417  * we've seen so far are dirty, put the vnode on the paging chain,
   1418  * and mark it IN_PAGING.
   1419  *
   1420  * If checkfirst != 0, don't check all the pages but return at the
   1421  * first dirty page.
   1422  */
   1423 static int
   1424 check_dirty(struct lfs *fs, struct vnode *vp,
   1425 	    off_t startoffset, off_t endoffset, off_t blkeof,
   1426 	    int flags, int checkfirst)
   1427 {
   1428 	int by_list;
   1429 	struct vm_page *curpg = NULL; /* XXX: gcc */
   1430 	struct vm_page *pgs[MAXBSIZE / PAGE_SIZE], *pg;
   1431 	off_t soff = 0; /* XXX: gcc */
   1432 	voff_t off;
   1433 	int i;
   1434 	int nonexistent;
   1435 	int any_dirty;	/* number of dirty pages */
   1436 	int dirty;	/* number of dirty pages in a block */
   1437 	int tdirty;
   1438 	int pages_per_block = fs->lfs_bsize >> PAGE_SHIFT;
   1439 
   1440 	ASSERT_MAYBE_SEGLOCK(fs);
   1441   top:
   1442 	by_list = (vp->v_uobj.uo_npages <=
   1443 		   ((endoffset - startoffset) >> PAGE_SHIFT) *
   1444 		   UVM_PAGE_HASH_PENALTY);
   1445 	any_dirty = 0;
   1446 
   1447 	if (by_list) {
   1448 		curpg = TAILQ_FIRST(&vp->v_uobj.memq);
   1449 	} else {
   1450 		soff = startoffset;
   1451 	}
   1452 	while (by_list || soff < MIN(blkeof, endoffset)) {
   1453 		if (by_list) {
   1454 			/*
   1455 			 * Find the first page in a block.  Skip
   1456 			 * blocks outside our area of interest or beyond
   1457 			 * the end of file.
   1458 			 */
   1459 			if (pages_per_block > 1) {
   1460 				while (curpg &&
   1461 				       ((curpg->offset & fs->lfs_bmask) ||
   1462 					curpg->offset >= vp->v_size ||
   1463 					curpg->offset >= endoffset))
   1464 					curpg = TAILQ_NEXT(curpg, listq);
   1465 			}
   1466 			if (curpg == NULL)
   1467 				break;
   1468 			soff = curpg->offset;
   1469 		}
   1470 
   1471 		/*
   1472 		 * Mark all pages in extended range busy; find out if any
   1473 		 * of them are dirty.
   1474 		 */
   1475 		nonexistent = dirty = 0;
   1476 		for (i = 0; i == 0 || i < pages_per_block; i++) {
   1477 			if (by_list && pages_per_block <= 1) {
   1478 				pgs[i] = pg = curpg;
   1479 			} else {
   1480 				off = soff + (i << PAGE_SHIFT);
   1481 				pgs[i] = pg = uvm_pagelookup(&vp->v_uobj, off);
   1482 				if (pg == NULL) {
   1483 					++nonexistent;
   1484 					continue;
   1485 				}
   1486 			}
   1487 			KASSERT(pg != NULL);
   1488 			while (pg->flags & PG_BUSY) {
   1489 				pg->flags |= PG_WANTED;
   1490 				UVM_UNLOCK_AND_WAIT(pg, &vp->v_interlock, 0,
   1491 						    "lfsput", 0);
   1492 				simple_lock(&vp->v_interlock);
   1493 				if (by_list) {
   1494 					if (i > 0)
   1495 						uvm_page_unbusy(pgs, i);
   1496 					goto top;
   1497 				}
   1498 			}
   1499 			pg->flags |= PG_BUSY;
   1500 			UVM_PAGE_OWN(pg, "lfs_putpages");
   1501 
   1502 			pmap_page_protect(pg, VM_PROT_NONE);
   1503 			tdirty = (pmap_clear_modify(pg) ||
   1504 				  (pg->flags & PG_CLEAN) == 0);
   1505 			dirty += tdirty;
   1506 		}
   1507 		if (pages_per_block > 0 && nonexistent >= pages_per_block) {
   1508 			if (by_list) {
   1509 				curpg = TAILQ_NEXT(curpg, listq);
   1510 			} else {
   1511 				soff += fs->lfs_bsize;
   1512 			}
   1513 			continue;
   1514 		}
   1515 
   1516 		any_dirty += dirty;
   1517 		KASSERT(nonexistent == 0);
   1518 
   1519 		/*
   1520 		 * If any are dirty make all dirty; unbusy them,
   1521 		 * but if we were asked to clean, wire them so that
   1522 		 * the pagedaemon doesn't bother us about them while
   1523 		 * they're on their way to disk.
   1524 		 */
   1525 		for (i = 0; i == 0 || i < pages_per_block; i++) {
   1526 			pg = pgs[i];
   1527 			KASSERT(!((pg->flags & PG_CLEAN) && (pg->flags & PG_DELWRI)));
   1528 			if (dirty) {
   1529 				pg->flags &= ~PG_CLEAN;
   1530 				if (flags & PGO_FREE) {
   1531 					/* XXXUBC need better way to update */
   1532 					simple_lock(&lfs_subsys_lock);
   1533 					lfs_subsys_pages += MIN(1, pages_per_block);
   1534 					simple_unlock(&lfs_subsys_lock);
   1535 					/*
   1536 					 * Wire the page so that
   1537 					 * pdaemon doesn't see it again.
   1538 					 */
   1539 					uvm_lock_pageq();
   1540 					uvm_pagewire(pg);
   1541 					uvm_unlock_pageq();
   1542 
   1543 					/* Suspended write flag */
   1544 					pg->flags |= PG_DELWRI;
   1545 				}
   1546 			}
   1547 			if (pg->flags & PG_WANTED)
   1548 				wakeup(pg);
   1549 			pg->flags &= ~(PG_WANTED|PG_BUSY);
   1550 			UVM_PAGE_OWN(pg, NULL);
   1551 		}
   1552 
   1553 		if (checkfirst && any_dirty)
   1554 			break;
   1555 
   1556 		if (by_list) {
   1557 			curpg = TAILQ_NEXT(curpg, listq);
   1558 		} else {
   1559 			soff += MAX(PAGE_SIZE, fs->lfs_bsize);
   1560 		}
   1561 	}
   1562 
   1563 	/*
   1564 	 * If any pages were dirty, mark this inode as "pageout requested",
   1565 	 * and put it on the paging queue.
   1566 	 * XXXUBC locking (check locking on dchainhd too)
   1567 	 */
   1568 #ifdef notyet
   1569 	if (any_dirty) {
   1570 		if (!(ip->i_flags & IN_PAGING)) {
   1571 			ip->i_flags |= IN_PAGING;
   1572 			simple_lock(&fs->lfs_interlock);
   1573 			TAILQ_INSERT_TAIL(&fs->lfs_pchainhd, ip, i_lfs_pchain);
   1574 			simple_unlock(&fs->lfs_interlock);
   1575 		}
   1576 	}
   1577 #endif
   1578 	return any_dirty;
   1579 }
   1580 
   1581 /*
   1582  * lfs_putpages functions like genfs_putpages except that
   1583  *
   1584  * (1) It needs to bounds-check the incoming requests to ensure that
   1585  *     they are block-aligned; if they are not, expand the range and
   1586  *     do the right thing in case, e.g., the requested range is clean
   1587  *     but the expanded range is dirty.
   1588  * (2) It needs to explicitly send blocks to be written when it is done.
    1589  *     VOP_PUTPAGES is never called with the seglock held, so
   1590  *     we simply take the seglock and let lfs_segunlock wait for us.
   1591  *     XXX Actually we can be called with the seglock held, if we have
   1592  *     XXX to flush a vnode while lfs_markv is in operation.  As of this
   1593  *     XXX writing we panic in this case.
   1594  *
   1595  * Assumptions:
   1596  *
   1597  * (1) The caller does not hold any pages in this vnode busy.  If it does,
   1598  *     there is a danger that when we expand the page range and busy the
   1599  *     pages we will deadlock.
   1600  * (2) We are called with vp->v_interlock held; we must return with it
   1601  *     released.
   1602  * (3) We don't absolutely have to free pages right away, provided that
   1603  *     the request does not have PGO_SYNCIO.  When the pagedaemon gives
   1604  *     us a request with PGO_FREE, we take the pages out of the paging
   1605  *     queue and wake up the writer, which will handle freeing them for us.
   1606  *
   1607  *     We ensure that for any filesystem block, all pages for that
   1608  *     block are either resident or not, even if those pages are higher
   1609  *     than EOF; that means that we will be getting requests to free
   1610  *     "unused" pages above EOF all the time, and should ignore them.
   1611  *
   1612  * XXX note that we're (ab)using PGO_LOCKED as "seglock held".
   1613  */
   1614 
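         /*
          * In outline (a summary of the code below):
          *	1. Ifile and non-regular vnodes, and vnodes with no pages
          *	   at all, are handled trivially.
          *	2. The request is expanded to block boundaries.
          *	3. Without PGO_CLEANIT, or when nothing is dirty,
          *	   genfs_putpages does all the work.
          *	4. The pagedaemon hands dirty vnodes off to the writer
          *	   daemon rather than writing pages itself.
          *	5. Otherwise we take the seglock, account for a FINFO,
          *	   gather the pages and indirect blocks, and write.
          */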
   1615 int
   1616 lfs_putpages(void *v)
   1617 {
   1618 	int error;
   1619 	struct vop_putpages_args /* {
   1620 		struct vnode *a_vp;
   1621 		voff_t a_offlo;
   1622 		voff_t a_offhi;
   1623 		int a_flags;
   1624 	} */ *ap = v;
   1625 	struct vnode *vp;
   1626 	struct inode *ip;
   1627 	struct lfs *fs;
   1628 	struct segment *sp;
   1629 	off_t origoffset, startoffset, endoffset, origendoffset, blkeof;
   1630 	off_t off, max_endoffset;
   1631 	int s;
   1632 	boolean_t seglocked, sync, pagedaemon;
   1633 	struct vm_page *pg;
   1634 	UVMHIST_FUNC("lfs_putpages"); UVMHIST_CALLED(ubchist);
   1635 
   1636 	vp = ap->a_vp;
   1637 	ip = VTOI(vp);
   1638 	fs = ip->i_lfs;
   1639 	sync = (ap->a_flags & PGO_SYNCIO) != 0;
   1640 	pagedaemon = (curproc == uvm.pagedaemon_proc);
   1641 
   1642 	/* Putpages does nothing for metadata. */
   1643 	if (vp == fs->lfs_ivnode || vp->v_type != VREG) {
   1644 		simple_unlock(&vp->v_interlock);
   1645 		return 0;
   1646 	}
   1647 
   1648 	/*
   1649 	 * If there are no pages, don't do anything.
   1650 	 */
   1651 	if (vp->v_uobj.uo_npages == 0) {
   1652 		s = splbio();
   1653 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
   1654 		    (vp->v_flag & VONWORKLST)) {
   1655 			vp->v_flag &= ~VONWORKLST;
   1656 			LIST_REMOVE(vp, v_synclist);
   1657 		}
   1658 		splx(s);
   1659 		simple_unlock(&vp->v_interlock);
   1660 		return 0;
   1661 	}
   1662 
   1663 	blkeof = blkroundup(fs, ip->i_size);
   1664 
   1665 	/*
   1666 	 * Ignore requests to free pages past EOF but in the same block
   1667 	 * as EOF, unless the request is synchronous. (XXX why sync?)
   1668 	 * XXXUBC Make these pages look "active" so the pagedaemon won't
   1669 	 * XXXUBC bother us with them again.
   1670 	 */
   1671 	if (!sync && ap->a_offlo >= ip->i_size && ap->a_offlo < blkeof) {
   1672 		origoffset = ap->a_offlo;
   1673 		for (off = origoffset; off < blkeof; off += fs->lfs_bsize) {
   1674 			pg = uvm_pagelookup(&vp->v_uobj, off);
   1675 			KASSERT(pg != NULL);
   1676 			while (pg->flags & PG_BUSY) {
   1677 				pg->flags |= PG_WANTED;
   1678 				UVM_UNLOCK_AND_WAIT(pg, &vp->v_interlock, 0,
   1679 						    "lfsput2", 0);
   1680 				simple_lock(&vp->v_interlock);
   1681 			}
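         			/*
         			 * Activation moves the page to UVM's active
         			 * queue, so the pagedaemon's inactive scan
         			 * stops handing it back to us.
         			 */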
   1682 			uvm_lock_pageq();
   1683 			uvm_pageactivate(pg);
   1684 			uvm_unlock_pageq();
   1685 		}
   1686 		ap->a_offlo = blkeof;
   1687 		if (ap->a_offhi > 0 && ap->a_offhi <= ap->a_offlo) {
   1688 			simple_unlock(&vp->v_interlock);
   1689 			return 0;
   1690 		}
   1691 	}
   1692 
   1693 	/*
   1694 	 * Extend page range to start and end at block boundaries.
   1695 	 * (For the purposes of VOP_PUTPAGES, fragments don't exist.)
   1696 	 */
   1697 	origoffset = ap->a_offlo;
   1698 	origendoffset = ap->a_offhi;
   1699 	startoffset = origoffset & ~(fs->lfs_bmask);
   1700 	max_endoffset = (trunc_page(LLONG_MAX) >> fs->lfs_bshift)
   1701 					       << fs->lfs_bshift;
   1702 
   1703 	if (origendoffset == 0 || ap->a_flags & PGO_ALLPAGES) {
   1704 		endoffset = max_endoffset;
   1705 		origendoffset = endoffset;
   1706 	} else {
   1707 		origendoffset = round_page(ap->a_offhi);
   1708 		endoffset = round_page(blkroundup(fs, origendoffset));
   1709 	}
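         	/*
         	 * For example, on a (hypothetical) fs with 8K blocks and
         	 * 4K pages, a request for [10000, 20000) becomes
         	 * [8192, 24576): 10000 & ~8191 == 8192, and
         	 * round_page(20000) == 20480, blkroundup(fs, 20480) == 24576.
         	 */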
   1710 
   1711 	KASSERT(startoffset > 0 || endoffset >= startoffset);
   1712 	if (startoffset == endoffset) {
   1713 		/* Nothing to do, why were we called? */
   1714 		simple_unlock(&vp->v_interlock);
   1715 		DLOG((DLOG_PAGE, "lfs_putpages: startoffset = endoffset = %"
   1716 		      PRId64 "\n", startoffset));
   1717 		return 0;
   1718 	}
   1719 
   1720 	ap->a_offlo = startoffset;
   1721 	ap->a_offhi = endoffset;
   1722 
   1723 	if (!(ap->a_flags & PGO_CLEANIT))
   1724 		return genfs_putpages(v);
   1725 
   1726 	/*
    1727 	 * If there is more than one page per block, we don't want
   1728 	 * to get caught locking them backwards; so set PGO_BUSYFAIL
   1729 	 * to avoid deadlocks.
   1730 	 */
   1731 	ap->a_flags |= PGO_BUSYFAIL;
   1732 
   1733 	do {
   1734 		int r;
   1735 
   1736 		/* If no pages are dirty, we can just use genfs_putpages. */
   1737 		if (check_dirty(fs, vp, startoffset, endoffset, blkeof,
   1738 				ap->a_flags, 1) != 0)
   1739 			break;
   1740 
   1741 		/*
   1742 		 * Sometimes pages are dirtied between the time that
   1743 		 * we check and the time we try to clean them.
   1744 		 * Instruct lfs_gop_write to return EDEADLK in this case
   1745 		 * so we can write them properly.
   1746 		 */
   1747 		ip->i_lfs_iflags |= LFSI_NO_GOP_WRITE;
   1748 		r = genfs_putpages(v);
   1749 		ip->i_lfs_iflags &= ~LFSI_NO_GOP_WRITE;
   1750 		if (r != EDEADLK)
   1751 			return r;
   1752 
   1753 		/* Start over. */
   1754 		preempt(1);
   1755 		simple_lock(&vp->v_interlock);
    1756 	} while (1);
   1757 
   1758 	/*
   1759 	 * Dirty and asked to clean.
   1760 	 *
    1761 	 * The pagedaemon can't actually write LFS pages; wake up
    1762 	 * the writer to take care of that.  The writer will
    1763 	 * notice the pager inode queue and act on it.
   1764 	 */
   1765 	if (pagedaemon) {
   1766 		simple_lock(&fs->lfs_interlock);
   1767 		++fs->lfs_pdflush;
   1768 		simple_unlock(&fs->lfs_interlock);
   1769 		wakeup(&lfs_writer_daemon);
   1770 		simple_unlock(&vp->v_interlock);
   1771 		return EWOULDBLOCK;
   1772 	}
   1773 
   1774 	/*
   1775 	 * If this is a file created in a recent dirop, we can't flush its
   1776 	 * inode until the dirop is complete.  Drain dirops, then flush the
   1777 	 * filesystem (taking care of any other pending dirops while we're
   1778 	 * at it).
   1779 	 */
   1780 	if ((ap->a_flags & (PGO_CLEANIT|PGO_LOCKED)) == PGO_CLEANIT &&
   1781 	    (vp->v_flag & VDIROP)) {
   1782 		int locked;
   1783 
   1784 		DLOG((DLOG_PAGE, "lfs_putpages: flushing VDIROP\n"));
   1785 		locked = VOP_ISLOCKED(vp) && /* XXX */
   1786 			vp->v_lock.lk_lockholder == curproc->p_pid;
   1787 		simple_unlock(&vp->v_interlock);
   1788 		lfs_writer_enter(fs, "ppdirop");
   1789 		if (locked)
   1790 			VOP_UNLOCK(vp, 0);
   1791 
   1792 		simple_lock(&fs->lfs_interlock);
   1793 		lfs_flush_fs(fs, sync ? SEGM_SYNC : 0);
   1794 		simple_unlock(&fs->lfs_interlock);
   1795 
   1796 		simple_lock(&vp->v_interlock);
   1797 		if (locked)
   1798 			VOP_LOCK(vp, LK_EXCLUSIVE);
   1799 		lfs_writer_leave(fs);
   1800 
   1801 		/* XXX the flush should have taken care of this one too! */
   1802 	}
   1803 
   1804 	/*
    1805 	 * This is it.  We are going to write some pages.  From here on
   1806 	 * down it's all just mechanics.
   1807 	 *
   1808 	 * Don't let genfs_putpages wait; lfs_segunlock will wait for us.
   1809 	 */
   1810 	ap->a_flags &= ~PGO_SYNCIO;
   1811 
   1812 	/*
   1813 	 * If we've already got the seglock, flush the node and return.
   1814 	 * The FIP has already been set up for us by lfs_writefile,
   1815 	 * and FIP cleanup and lfs_updatemeta will also be done there,
   1816 	 * unless genfs_putpages returns EDEADLK; then we must flush
   1817 	 * what we have, and correct FIP and segment header accounting.
   1818 	 */
   1819 
   1820 	seglocked = (ap->a_flags & PGO_LOCKED) != 0;
   1821 	if (!seglocked) {
   1822 		simple_unlock(&vp->v_interlock);
   1823 		/*
   1824 		 * Take the seglock, because we are going to be writing pages.
   1825 		 */
   1826 		error = lfs_seglock(fs, SEGM_PROT | (sync ? SEGM_SYNC : 0));
   1827 		if (error != 0)
   1828 			return error;
   1829 		simple_lock(&vp->v_interlock);
   1830 	}
   1831 
   1832 	/*
   1833 	 * VOP_PUTPAGES should not be called while holding the seglock.
   1834 	 * XXXUBC fix lfs_markv, or do this properly.
   1835 	 */
   1836 #ifdef notyet
   1837 	KASSERT(fs->lfs_seglock == 1);
   1838 #endif /* notyet */
   1839 
   1840 	/*
   1841 	 * We assume we're being called with sp->fip pointing at blank space.
   1842 	 * Account for a new FIP in the segment header, and set sp->vp.
   1843 	 * (This should duplicate the setup at the top of lfs_writefile().)
   1844 	 */
   1845 	sp = fs->lfs_sp;
   1846 	if (!seglocked) {
   1847 		if (sp->seg_bytes_left < fs->lfs_bsize ||
   1848 		    sp->sum_bytes_left < sizeof(struct finfo))
   1849 			(void) lfs_writeseg(fs, fs->lfs_sp);
   1850 
   1851 		sp->sum_bytes_left -= FINFOSIZE;
   1852 		++((SEGSUM *)(sp->segsum))->ss_nfinfo;
   1853 	}
   1854 	KASSERT(sp->vp == NULL);
   1855 	sp->vp = vp;
   1856 
   1857 	if (!seglocked) {
   1858 		if (vp->v_flag & VDIROP)
   1859 			((SEGSUM *)(sp->segsum))->ss_flags |= (SS_DIROP|SS_CONT);
   1860 	}
   1861 
   1862 	sp->fip->fi_nblocks = 0;
   1863 	sp->fip->fi_ino = ip->i_number;
   1864 	sp->fip->fi_version = ip->i_gen;
   1865 
   1866 	/*
   1867 	 * Loop through genfs_putpages until all pages are gathered.
   1868 	 * genfs_putpages() drops the interlock, so reacquire it if necessary.
    1869 	 * Whenever we lose the interlock we also have to rerun
    1870 	 * check_dirty.
   1871 	 */
   1872 again:
   1873 	check_dirty(fs, vp, startoffset, endoffset, blkeof, ap->a_flags, 0);
   1874 
   1875 	if ((error = genfs_putpages(v)) == EDEADLK) {
   1876 		DLOG((DLOG_PAGE, "lfs_putpages: genfs_putpages returned"
   1877 		      " EDEADLK [2] ino %d off %x (seg %d)\n",
   1878 		      ip->i_number, fs->lfs_offset,
   1879 		      dtosn(fs, fs->lfs_offset)));
   1880 		/* If nothing to write, short-circuit */
   1881 		if (sp->cbpp - sp->bpp > 1) {
   1882 			/* Write gathered pages */
   1883 			lfs_updatemeta(sp);
   1884 			(void) lfs_writeseg(fs, sp);
   1885 
   1886 			/*
   1887 			 * Reinitialize brand new FIP and add us to it.
   1888 			 * (This should duplicate the fixup in
   1889 			 * lfs_gatherpages().)
   1890 			 */
   1891 			KASSERT(sp->vp == vp);
   1892 			sp->fip->fi_version = ip->i_gen;
   1893 			sp->fip->fi_ino = ip->i_number;
   1894 			/* Add us to the new segment summary. */
   1895 			++((SEGSUM *)(sp->segsum))->ss_nfinfo;
   1896 			sp->sum_bytes_left -= FINFOSIZE;
   1897 		}
   1898 
   1899 		/* Give the write a chance to complete */
   1900 		preempt(1);
   1901 
   1902 		/* We've lost the interlock.  Start over. */
   1903 		simple_lock(&vp->v_interlock);
   1904 		goto again;
   1905 	}
   1906 
   1907 	KASSERT(sp->vp == vp);
   1908 	if (!seglocked) {
   1909 		sp->vp = NULL; /* XXX lfs_gather below will set this */
   1910 
   1911 		/* Write indirect blocks as well */
   1912 		lfs_gather(fs, fs->lfs_sp, vp, lfs_match_indir);
   1913 		lfs_gather(fs, fs->lfs_sp, vp, lfs_match_dindir);
   1914 		lfs_gather(fs, fs->lfs_sp, vp, lfs_match_tindir);
   1915 
   1916 		KASSERT(sp->vp == NULL);
   1917 		sp->vp = vp;
   1918 	}
   1919 
   1920 	/*
   1921 	 * Blocks are now gathered into a segment waiting to be written.
    1922 	 * All that's left to do is update the metadata and write them.
   1923 	 */
   1924 	lfs_updatemeta(sp);
   1925 	KASSERT(sp->vp == vp);
   1926 	sp->vp = NULL;
   1927 
   1928 	if (seglocked) {
   1929 		/* we're called by lfs_writefile. */
   1930 		return error;
   1931 	}
   1932 
   1933 	/*
   1934 	 * Clean up FIP, since we're done writing this file.
   1935 	 * This should duplicate cleanup at the end of lfs_writefile().
   1936 	 */
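         	/*
         	 * On disk a FINFO is followed by fi_nblocks 32-bit block
         	 * numbers; if we wrote anything, advance sp->fip past both
         	 * to the next blank FINFO slot, and otherwise give back the
         	 * summary accounting we took above.
         	 */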
   1937 	if (sp->fip->fi_nblocks != 0) {
   1938 		sp->fip = (FINFO*)((caddr_t)sp->fip + FINFOSIZE +
   1939 			sizeof(int32_t) * sp->fip->fi_nblocks);
   1940 		sp->start_lbp = &sp->fip->fi_blocks[0];
   1941 	} else {
   1942 		sp->sum_bytes_left += FINFOSIZE;
   1943 		--((SEGSUM *)(sp->segsum))->ss_nfinfo;
   1944 	}
   1945 	lfs_writeseg(fs, fs->lfs_sp);
   1946 
   1947 	/*
   1948 	 * XXX - with the malloc/copy writeseg, the pages are freed by now
   1949 	 * even if we don't wait (e.g. if we hold a nested lock).  This
   1950 	 * will not be true if we stop using malloc/copy.
   1951 	 */
   1952 	KASSERT(fs->lfs_sp->seg_flags & SEGM_PROT);
   1953 	lfs_segunlock(fs);
   1954 
   1955 	/*
   1956 	 * Wait for v_numoutput to drop to zero.  The seglock should
   1957 	 * take care of this, but there is a slight possibility that
   1958 	 * aiodoned might not have got around to our buffers yet.
   1959 	 */
   1960 	if (sync) {
   1961 		int s;
   1962 
   1963 		s = splbio();
   1964 		simple_lock(&global_v_numoutput_slock);
   1965 		while (vp->v_numoutput > 0) {
   1966 			DLOG((DLOG_PAGE, "lfs_putpages: ino %d sleeping on"
   1967 			      " num %d\n", ip->i_number, vp->v_numoutput));
   1968 			vp->v_flag |= VBWAIT;
   1969 			ltsleep(&vp->v_numoutput, PRIBIO + 1, "lfs_vn", 0,
   1970 			    &global_v_numoutput_slock);
   1971 		}
   1972 		simple_unlock(&global_v_numoutput_slock);
   1973 		splx(s);
   1974 	}
   1975 	return error;
   1976 }
   1977 
   1978 /*
   1979  * Return the last logical file offset that should be written for this file
    1980  * if we're doing a write that ends at "size".  If writing, we need to know
   1981  * about sizes on disk, i.e. fragments if there are any; if reading, we need
   1982  * to know about entire blocks.
   1983  */
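         /*
          * For example, on a (hypothetical) fs with 8K blocks and 1K
          * fragments, a size of 3000 within the direct-block range rounds
          * to fragroundup(fs, 3000) == 3072; with GOP_SIZE_MEM, or past
          * NDADDR, or when the file is shrinking, it rounds to
          * blkroundup(fs, 3000) == 8192.
          */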
   1984 void
   1985 lfs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
   1986 {
   1987 	struct inode *ip = VTOI(vp);
   1988 	struct lfs *fs = ip->i_lfs;
   1989 	daddr_t olbn, nlbn;
   1990 
   1991 	KASSERT(flags & (GOP_SIZE_READ | GOP_SIZE_WRITE));
   1992 	KASSERT((flags & (GOP_SIZE_READ | GOP_SIZE_WRITE))
   1993 		!= (GOP_SIZE_READ | GOP_SIZE_WRITE));
   1994 
   1995 	olbn = lblkno(fs, ip->i_size);
   1996 	nlbn = lblkno(fs, size);
   1997 	if (!(flags & GOP_SIZE_MEM) && nlbn < NDADDR && olbn <= nlbn) {
   1998 		*eobp = fragroundup(fs, size);
   1999 	} else {
   2000 		*eobp = blkroundup(fs, size);
   2001 	}
   2002 }
   2003 
   2004 #ifdef DEBUG
   2005 void lfs_dump_vop(void *);
   2006 
   2007 void
   2008 lfs_dump_vop(void *v)
   2009 {
   2010 	struct vop_putpages_args /* {
   2011 		struct vnode *a_vp;
   2012 		voff_t a_offlo;
   2013 		voff_t a_offhi;
   2014 		int a_flags;
   2015 	} */ *ap = v;
   2016 
   2017 #ifdef DDB
   2018 	vfs_vnode_print(ap->a_vp, 0, printf);
   2019 #endif
   2020 	lfs_dump_dinode(VTOI(ap->a_vp)->i_din.ffs1_din);
   2021 }
   2022 #endif
   2023 
   2024 int
   2025 lfs_mmap(void *v)
   2026 {
   2027 	struct vop_mmap_args /* {
   2028 		const struct vnodeop_desc *a_desc;
   2029 		struct vnode *a_vp;
   2030 		int a_fflags;
   2031 		struct ucred *a_cred;
   2032 		struct proc *a_p;
   2033 	} */ *ap = v;
   2034 
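         	/*
         	 * The ifile is filesystem metadata, not user data;
         	 * refuse to let it be mapped.
         	 */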
   2035 	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM)
   2036 		return EOPNOTSUPP;
   2037 	return ufs_mmap(v);
   2038 }
   2039