      1 /*	$NetBSD: lfs_vnops.c,v 1.109.2.9 2005/03/08 13:53:12 skrll Exp $	*/
      2 
      3 /*-
      4  * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
      5  * All rights reserved.
      6  *
      7  * This code is derived from software contributed to The NetBSD Foundation
      8  * by Konrad E. Schroder <perseant@hhhh.org>.
      9  *
     10  * Redistribution and use in source and binary forms, with or without
     11  * modification, are permitted provided that the following conditions
     12  * are met:
     13  * 1. Redistributions of source code must retain the above copyright
     14  *    notice, this list of conditions and the following disclaimer.
     15  * 2. Redistributions in binary form must reproduce the above copyright
     16  *    notice, this list of conditions and the following disclaimer in the
     17  *    documentation and/or other materials provided with the distribution.
     18  * 3. All advertising materials mentioning features or use of this software
     19  *    must display the following acknowledgement:
     20  *	This product includes software developed by the NetBSD
     21  *	Foundation, Inc. and its contributors.
     22  * 4. Neither the name of The NetBSD Foundation nor the names of its
     23  *    contributors may be used to endorse or promote products derived
     24  *    from this software without specific prior written permission.
     25  *
     26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     36  * POSSIBILITY OF SUCH DAMAGE.
     37  */
     38 /*
     39  * Copyright (c) 1986, 1989, 1991, 1993, 1995
     40  *	The Regents of the University of California.  All rights reserved.
     41  *
     42  * Redistribution and use in source and binary forms, with or without
     43  * modification, are permitted provided that the following conditions
     44  * are met:
     45  * 1. Redistributions of source code must retain the above copyright
     46  *    notice, this list of conditions and the following disclaimer.
     47  * 2. Redistributions in binary form must reproduce the above copyright
     48  *    notice, this list of conditions and the following disclaimer in the
     49  *    documentation and/or other materials provided with the distribution.
     50  * 3. Neither the name of the University nor the names of its contributors
     51  *    may be used to endorse or promote products derived from this software
     52  *    without specific prior written permission.
     53  *
     54  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     55  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     56  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     57  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     58  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     59  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     60  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     61  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     62  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     63  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     64  * SUCH DAMAGE.
     65  *
     66  *	@(#)lfs_vnops.c	8.13 (Berkeley) 6/10/95
     67  */
     68 
     69 #include <sys/cdefs.h>
     70 __KERNEL_RCSID(0, "$NetBSD: lfs_vnops.c,v 1.109.2.9 2005/03/08 13:53:12 skrll Exp $");
     71 
     72 #include <sys/param.h>
     73 #include <sys/systm.h>
     74 #include <sys/namei.h>
     75 #include <sys/resourcevar.h>
     76 #include <sys/kernel.h>
     77 #include <sys/file.h>
     78 #include <sys/stat.h>
     79 #include <sys/buf.h>
     80 #include <sys/proc.h>
     81 #include <sys/mount.h>
     82 #include <sys/vnode.h>
     83 #include <sys/malloc.h>
     84 #include <sys/pool.h>
     85 #include <sys/signalvar.h>
     86 
     87 #include <miscfs/fifofs/fifo.h>
     88 #include <miscfs/genfs/genfs.h>
     89 #include <miscfs/specfs/specdev.h>
     90 
     91 #include <ufs/ufs/inode.h>
     92 #include <ufs/ufs/dir.h>
     93 #include <ufs/ufs/ufsmount.h>
     94 #include <ufs/ufs/ufs_extern.h>
     95 
     96 #include <uvm/uvm.h>
     97 #include <uvm/uvm_pmap.h>
     98 #include <uvm/uvm_stat.h>
     99 #include <uvm/uvm_pager.h>
    100 
    101 #include <ufs/lfs/lfs.h>
    102 #include <ufs/lfs/lfs_extern.h>
    103 
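        /*
         * PID of the LFS writer daemon; a wakeup() on this variable (as in
         * lfs_fsync() and lfs_set_dirop() below) prods the daemon into
         * scheduling a write.
         */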
    104 extern pid_t lfs_writer_daemon;
    105 
    106 /* Global vfs data structures for lfs. */
    107 int (**lfs_vnodeop_p)(void *);
    108 const struct vnodeopv_entry_desc lfs_vnodeop_entries[] = {
    109 	{ &vop_default_desc, vn_default_error },
    110 	{ &vop_lookup_desc, ufs_lookup },		/* lookup */
    111 	{ &vop_create_desc, lfs_create },		/* create */
    112 	{ &vop_whiteout_desc, ufs_whiteout },		/* whiteout */
    113 	{ &vop_mknod_desc, lfs_mknod },			/* mknod */
    114 	{ &vop_open_desc, ufs_open },			/* open */
    115 	{ &vop_close_desc, lfs_close },			/* close */
    116 	{ &vop_access_desc, ufs_access },		/* access */
    117 	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
    118 	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
    119 	{ &vop_read_desc, lfs_read },			/* read */
    120 	{ &vop_write_desc, lfs_write },			/* write */
    121 	{ &vop_lease_desc, ufs_lease_check },		/* lease */
    122 	{ &vop_ioctl_desc, ufs_ioctl },			/* ioctl */
    123 	{ &vop_fcntl_desc, lfs_fcntl },			/* fcntl */
    124 	{ &vop_poll_desc, ufs_poll },			/* poll */
    125 	{ &vop_kqfilter_desc, genfs_kqfilter },		/* kqfilter */
    126 	{ &vop_revoke_desc, ufs_revoke },		/* revoke */
    127 	{ &vop_mmap_desc, lfs_mmap },			/* mmap */
    128 	{ &vop_fsync_desc, lfs_fsync },			/* fsync */
    129 	{ &vop_seek_desc, ufs_seek },			/* seek */
    130 	{ &vop_remove_desc, lfs_remove },		/* remove */
    131 	{ &vop_link_desc, lfs_link },			/* link */
    132 	{ &vop_rename_desc, lfs_rename },		/* rename */
    133 	{ &vop_mkdir_desc, lfs_mkdir },			/* mkdir */
    134 	{ &vop_rmdir_desc, lfs_rmdir },			/* rmdir */
    135 	{ &vop_symlink_desc, lfs_symlink },		/* symlink */
    136 	{ &vop_readdir_desc, ufs_readdir },		/* readdir */
    137 	{ &vop_readlink_desc, ufs_readlink },		/* readlink */
    138 	{ &vop_abortop_desc, ufs_abortop },		/* abortop */
    139 	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
    140 	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
    141 	{ &vop_lock_desc, ufs_lock },			/* lock */
    142 	{ &vop_unlock_desc, ufs_unlock },		/* unlock */
    143 	{ &vop_bmap_desc, ufs_bmap },			/* bmap */
    144 	{ &vop_strategy_desc, lfs_strategy },		/* strategy */
    145 	{ &vop_print_desc, ufs_print },			/* print */
    146 	{ &vop_islocked_desc, ufs_islocked },		/* islocked */
    147 	{ &vop_pathconf_desc, ufs_pathconf },		/* pathconf */
    148 	{ &vop_advlock_desc, ufs_advlock },		/* advlock */
    149 	{ &vop_blkatoff_desc, lfs_blkatoff },		/* blkatoff */
    150 	{ &vop_valloc_desc, lfs_valloc },		/* valloc */
    151 	{ &vop_balloc_desc, lfs_balloc },		/* balloc */
    152 	{ &vop_vfree_desc, lfs_vfree },			/* vfree */
    153 	{ &vop_truncate_desc, lfs_truncate },		/* truncate */
    154 	{ &vop_update_desc, lfs_update },		/* update */
    155 	{ &vop_bwrite_desc, lfs_bwrite },		/* bwrite */
    156 	{ &vop_getpages_desc, lfs_getpages },		/* getpages */
    157 	{ &vop_putpages_desc, lfs_putpages },		/* putpages */
    158 	{ NULL, NULL }
    159 };
    160 const struct vnodeopv_desc lfs_vnodeop_opv_desc =
    161 	{ &lfs_vnodeop_p, lfs_vnodeop_entries };
    162 
    163 int (**lfs_specop_p)(void *);
    164 const struct vnodeopv_entry_desc lfs_specop_entries[] = {
    165 	{ &vop_default_desc, vn_default_error },
    166 	{ &vop_lookup_desc, spec_lookup },		/* lookup */
    167 	{ &vop_create_desc, spec_create },		/* create */
    168 	{ &vop_mknod_desc, spec_mknod },		/* mknod */
    169 	{ &vop_open_desc, spec_open },			/* open */
    170 	{ &vop_close_desc, lfsspec_close },		/* close */
    171 	{ &vop_access_desc, ufs_access },		/* access */
    172 	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
    173 	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
    174 	{ &vop_read_desc, ufsspec_read },		/* read */
    175 	{ &vop_write_desc, ufsspec_write },		/* write */
    176 	{ &vop_lease_desc, spec_lease_check },		/* lease */
    177 	{ &vop_ioctl_desc, spec_ioctl },		/* ioctl */
    178 	{ &vop_fcntl_desc, ufs_fcntl },			/* fcntl */
    179 	{ &vop_poll_desc, spec_poll },			/* poll */
    180 	{ &vop_kqfilter_desc, spec_kqfilter },		/* kqfilter */
    181 	{ &vop_revoke_desc, spec_revoke },		/* revoke */
    182 	{ &vop_mmap_desc, spec_mmap },			/* mmap */
    183 	{ &vop_fsync_desc, spec_fsync },		/* fsync */
    184 	{ &vop_seek_desc, spec_seek },			/* seek */
    185 	{ &vop_remove_desc, spec_remove },		/* remove */
    186 	{ &vop_link_desc, spec_link },			/* link */
    187 	{ &vop_rename_desc, spec_rename },		/* rename */
    188 	{ &vop_mkdir_desc, spec_mkdir },		/* mkdir */
    189 	{ &vop_rmdir_desc, spec_rmdir },		/* rmdir */
    190 	{ &vop_symlink_desc, spec_symlink },		/* symlink */
    191 	{ &vop_readdir_desc, spec_readdir },		/* readdir */
    192 	{ &vop_readlink_desc, spec_readlink },		/* readlink */
    193 	{ &vop_abortop_desc, spec_abortop },		/* abortop */
    194 	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
    195 	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
    196 	{ &vop_lock_desc, ufs_lock },			/* lock */
    197 	{ &vop_unlock_desc, ufs_unlock },		/* unlock */
    198 	{ &vop_bmap_desc, spec_bmap },			/* bmap */
    199 	{ &vop_strategy_desc, spec_strategy },		/* strategy */
    200 	{ &vop_print_desc, ufs_print },			/* print */
    201 	{ &vop_islocked_desc, ufs_islocked },		/* islocked */
    202 	{ &vop_pathconf_desc, spec_pathconf },		/* pathconf */
    203 	{ &vop_advlock_desc, spec_advlock },		/* advlock */
    204 	{ &vop_blkatoff_desc, spec_blkatoff },		/* blkatoff */
    205 	{ &vop_valloc_desc, spec_valloc },		/* valloc */
    206 	{ &vop_vfree_desc, lfs_vfree },			/* vfree */
    207 	{ &vop_truncate_desc, spec_truncate },		/* truncate */
    208 	{ &vop_update_desc, lfs_update },		/* update */
    209 	{ &vop_bwrite_desc, vn_bwrite },		/* bwrite */
    210 	{ &vop_getpages_desc, spec_getpages },		/* getpages */
    211 	{ &vop_putpages_desc, spec_putpages },		/* putpages */
    212 	{ NULL, NULL }
    213 };
    214 const struct vnodeopv_desc lfs_specop_opv_desc =
    215 	{ &lfs_specop_p, lfs_specop_entries };
    216 
    217 int (**lfs_fifoop_p)(void *);
    218 const struct vnodeopv_entry_desc lfs_fifoop_entries[] = {
    219 	{ &vop_default_desc, vn_default_error },
    220 	{ &vop_lookup_desc, fifo_lookup },		/* lookup */
    221 	{ &vop_create_desc, fifo_create },		/* create */
    222 	{ &vop_mknod_desc, fifo_mknod },		/* mknod */
    223 	{ &vop_open_desc, fifo_open },			/* open */
    224 	{ &vop_close_desc, lfsfifo_close },		/* close */
    225 	{ &vop_access_desc, ufs_access },		/* access */
    226 	{ &vop_getattr_desc, lfs_getattr },		/* getattr */
    227 	{ &vop_setattr_desc, lfs_setattr },		/* setattr */
    228 	{ &vop_read_desc, ufsfifo_read },		/* read */
    229 	{ &vop_write_desc, ufsfifo_write },		/* write */
    230 	{ &vop_lease_desc, fifo_lease_check },		/* lease */
    231 	{ &vop_ioctl_desc, fifo_ioctl },		/* ioctl */
    232 	{ &vop_fcntl_desc, ufs_fcntl },			/* fcntl */
    233 	{ &vop_poll_desc, fifo_poll },			/* poll */
    234 	{ &vop_kqfilter_desc, fifo_kqfilter },		/* kqfilter */
    235 	{ &vop_revoke_desc, fifo_revoke },		/* revoke */
    236 	{ &vop_mmap_desc, fifo_mmap },			/* mmap */
    237 	{ &vop_fsync_desc, fifo_fsync },		/* fsync */
    238 	{ &vop_seek_desc, fifo_seek },			/* seek */
    239 	{ &vop_remove_desc, fifo_remove },		/* remove */
    240 	{ &vop_link_desc, fifo_link },			/* link */
    241 	{ &vop_rename_desc, fifo_rename },		/* rename */
    242 	{ &vop_mkdir_desc, fifo_mkdir },		/* mkdir */
    243 	{ &vop_rmdir_desc, fifo_rmdir },		/* rmdir */
    244 	{ &vop_symlink_desc, fifo_symlink },		/* symlink */
    245 	{ &vop_readdir_desc, fifo_readdir },		/* readdir */
    246 	{ &vop_readlink_desc, fifo_readlink },		/* readlink */
    247 	{ &vop_abortop_desc, fifo_abortop },		/* abortop */
    248 	{ &vop_inactive_desc, lfs_inactive },		/* inactive */
    249 	{ &vop_reclaim_desc, lfs_reclaim },		/* reclaim */
    250 	{ &vop_lock_desc, ufs_lock },			/* lock */
    251 	{ &vop_unlock_desc, ufs_unlock },		/* unlock */
    252 	{ &vop_bmap_desc, fifo_bmap },			/* bmap */
    253 	{ &vop_strategy_desc, fifo_strategy },		/* strategy */
    254 	{ &vop_print_desc, ufs_print },			/* print */
    255 	{ &vop_islocked_desc, ufs_islocked },		/* islocked */
    256 	{ &vop_pathconf_desc, fifo_pathconf },		/* pathconf */
    257 	{ &vop_advlock_desc, fifo_advlock },		/* advlock */
    258 	{ &vop_blkatoff_desc, fifo_blkatoff },		/* blkatoff */
    259 	{ &vop_valloc_desc, fifo_valloc },		/* valloc */
    260 	{ &vop_vfree_desc, lfs_vfree },			/* vfree */
    261 	{ &vop_truncate_desc, fifo_truncate },		/* truncate */
    262 	{ &vop_update_desc, lfs_update },		/* update */
    263 	{ &vop_bwrite_desc, lfs_bwrite },		/* bwrite */
    264 	{ &vop_putpages_desc, fifo_putpages },		/* putpages */
    265 	{ NULL, NULL }
    266 };
    267 const struct vnodeopv_desc lfs_fifoop_opv_desc =
    268 	{ &lfs_fifoop_p, lfs_fifoop_entries };
    269 
    270 static int check_dirty(struct lfs *, struct vnode *, off_t, off_t, off_t, int, int);
    271 
    272 /*
    273  * A function version of LFS_ITIMES, for the UFS functions which call ITIMES
    274  */
    275 void
    276 lfs_itimes(struct inode *ip, struct timespec *acc, struct timespec *mod, struct timespec *cre)
    277 {
    278 	LFS_ITIMES(ip, acc, mod, cre);
    279 }
    280 
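        /*
         * Pull in the shared UFS read/write code.  With LFS_READWRITE
         * defined, <ufs/ufs/ufs_readwrite.c> compiles its READ/WRITE
         * routines as lfs_read/lfs_write, using the LFS block-allocation
         * and accounting hooks rather than the FFS ones, so this file
         * picks up its read and write vops by inclusion.
         */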
    281 #define	LFS_READWRITE
    282 #include <ufs/ufs/ufs_readwrite.c>
    283 #undef	LFS_READWRITE
    284 
    285 /*
    286  * Synch an open file.
    287  */
    288 /* ARGSUSED */
    289 int
    290 lfs_fsync(void *v)
    291 {
    292 	struct vop_fsync_args /* {
    293 		struct vnode *a_vp;
    294 		struct ucred *a_cred;
    295 		int a_flags;
    296 		off_t a_offlo;
    297 		off_t a_offhi;
    298 		struct lwp *a_l;
    299 	} */ *ap = v;
    300 	struct vnode *vp = ap->a_vp;
    301 	int error, wait;
    302 
    303 	/*
    304 	 * Trickle sync: wake the writer daemon, which will check whether
    305 	 * a checkpoint is needed after possible pagedaemon activity.
    306 	 */
    307 	if (ap->a_flags & FSYNC_LAZY) {
    308 		simple_lock(&lfs_subsys_lock);
    309 		wakeup(&lfs_writer_daemon);
    310 		simple_unlock(&lfs_subsys_lock);
    311 		return 0;
    312 	}
    313 
    314 	wait = (ap->a_flags & FSYNC_WAIT);
    315 	simple_lock(&vp->v_interlock);
    316 	error = VOP_PUTPAGES(vp, trunc_page(ap->a_offlo),
    317 			round_page(ap->a_offhi),
    318 			PGO_CLEANIT | (wait ? PGO_SYNCIO : 0));
    319 	if (error)
    320 		return error;
    321 	error = VOP_UPDATE(vp, NULL, NULL, wait ? UPDATE_WAIT : 0);
    322 	if (error == 0 && ap->a_flags & FSYNC_CACHE) {
    323 		int l = 0;
    324 		error = VOP_IOCTL(VTOI(vp)->i_devvp, DIOCCACHESYNC, &l, FWRITE,
    325 				  ap->a_l->l_proc->p_ucred, ap->a_l);
    326 	}
    327 	if (wait && !VPISEMPTY(vp))
    328 		LFS_SET_UINO(VTOI(vp), IN_MODIFIED);
    329 
    330 	return error;
    331 }
    332 
    333 /*
    334  * Take IN_ADIROP off, then call ufs_inactive.
    335  */
    336 int
    337 lfs_inactive(void *v)
    338 {
    339 	struct vop_inactive_args /* {
    340 		struct vnode *a_vp;
    341 		struct lwp *a_l;
    342 	} */ *ap = v;
    343 
    344 	KASSERT(VTOI(ap->a_vp)->i_nlink == VTOI(ap->a_vp)->i_ffs_effnlink);
    345 
    346 	lfs_unmark_vnode(ap->a_vp);
    347 
    348 	/*
    349 	 * The Ifile is only ever inactivated on unmount.
    350 	 * Streamline this process by not giving it more dirty blocks.
    351 	 */
    352 	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM) {
    353 		LFS_CLR_UINO(VTOI(ap->a_vp), IN_ALLMOD);
    354 		VOP_UNLOCK(ap->a_vp, 0);
    355 		return 0;
    356 	}
    357 
    358 	return ufs_inactive(v);
    359 }
    360 
    361 /*
    362  * These macros are used to bracket UFS directory ops, so that we can
    363  * identify all the pages touched during directory ops which need to
    364  * be ordered and flushed atomically, so that they may be recovered.
    365  */
    366 /*
    367  * XXX KS - Because we have to mark nodes VDIROP in order to prevent
    368  * the cache from reclaiming them while a dirop is in progress, we must
    369  * also manage the number of nodes so marked (otherwise we can run out).
    370  * We do this by setting lfs_dirvcount to the number of marked vnodes; it
    371  * is decremented during segment write, when VDIROP is taken off.
    372  */
    373 #define	SET_DIROP(vp)		SET_DIROP2((vp), NULL)
    374 #define	SET_DIROP2(vp, vp2)	lfs_set_dirop((vp), (vp2))
    375 static int lfs_set_dirop(struct vnode *, struct vnode *);
    376 
    377 static int
    378 lfs_set_dirop(struct vnode *vp, struct vnode *vp2)
    379 {
    380 	struct lfs *fs;
    381 	int error;
    382 
    383 	KASSERT(VOP_ISLOCKED(vp));
    384 	KASSERT(vp2 == NULL || VOP_ISLOCKED(vp2));
    385 
    386 	fs = VTOI(vp)->i_lfs;
    387 	/*
    388 	 * LFS_NRESERVE calculates direct and indirect blocks as well
    389 	 * as an inode block; an overestimate in most cases.
    390 	 */
    391 	if ((error = lfs_reserve(fs, vp, vp2, LFS_NRESERVE(fs))) != 0)
    392 		return (error);
    393 
    394 	if (fs->lfs_dirops == 0)
    395 		lfs_check(vp, LFS_UNUSED_LBN, 0);
    396 restart:
    397 	simple_lock(&fs->lfs_interlock);
    398 	if (fs->lfs_writer) {
    399 		ltsleep(&fs->lfs_dirops, (PRIBIO + 1) | PNORELOCK,
    400 		    "lfs_sdirop", 0, &fs->lfs_interlock);
    401 		goto restart;
    402 	}
    403 	simple_lock(&lfs_subsys_lock);
    404 	if (lfs_dirvcount > LFS_MAX_DIROP && fs->lfs_dirops == 0) {
    405 		wakeup(&lfs_writer_daemon);
    406 		simple_unlock(&lfs_subsys_lock);
    407 		simple_unlock(&fs->lfs_interlock);
    408 		preempt(1);
    409 		goto restart;
    410 	}
    411 
    412 	if (lfs_dirvcount > LFS_MAX_DIROP) {
    413 		simple_unlock(&fs->lfs_interlock);
    414 		DLOG((DLOG_DIROP, "lfs_set_dirop: sleeping with dirops=%d, "
    415 		      "dirvcount=%d\n", fs->lfs_dirops, lfs_dirvcount));
    416 		if ((error = ltsleep(&lfs_dirvcount,
    417 		    PCATCH | PUSER | PNORELOCK, "lfs_maxdirop", 0,
    418 		    &lfs_subsys_lock)) != 0) {
    419 			goto unreserve;
    420 		}
    421 		goto restart;
    422 	}
    423 	simple_unlock(&lfs_subsys_lock);
    424 
    425 	++fs->lfs_dirops;
    426 	fs->lfs_doifile = 1;
    427 	simple_unlock(&fs->lfs_interlock);
    428 
    429 	/* Hold a reference so SET_ENDOP will be happy */
    430 	vref(vp);
    431 	if (vp2)
    432 		vref(vp2);
    433 
    434 	return 0;
    435 
    436 unreserve:
    437 	lfs_reserve(fs, vp, vp2, -LFS_NRESERVE(fs));
    438 	return error;
    439 }
    440 
    441 #define	SET_ENDOP(fs, vp, str)	SET_ENDOP2((fs), (vp), NULL, (str))
    442 #define	SET_ENDOP2(fs, vp, vp2, str) {					\
    443 	--(fs)->lfs_dirops;						\
    444 	if (!(fs)->lfs_dirops) {					\
    445 		if ((fs)->lfs_nadirop) {				\
    446 			panic("SET_ENDOP: %s: no dirops but nadirop=%d", \
    447 			      (str), (fs)->lfs_nadirop);		\
    448 		}							\
    449 		wakeup(&(fs)->lfs_writer);				\
    450 		lfs_check((vp),LFS_UNUSED_LBN,0);			\
    451 	}								\
    452 	lfs_reserve((fs), vp, vp2, -LFS_NRESERVE(fs)); /* XXX */	\
    453 	vrele(vp);							\
    454 	if (vp2)							\
    455 		vrele(vp2);						\
    456 }
    457 
    458 #define	MARK_VNODE(vp)		lfs_mark_vnode(vp)
    459 #define	UNMARK_VNODE(vp)	lfs_unmark_vnode(vp)
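
        /*
         * All of the directory-op vnode ops below use the same bracket,
         * sketched here (see lfs_symlink() et al. for the real thing):
         *
         *	if ((error = SET_DIROP(dvp)) != 0) {
         *		vput(dvp);
         *		return error;
         *	}
         *	MARK_VNODE(dvp);
         *	error = ufs_...(ap);
         *	UNMARK_VNODE(dvp);
         *	SET_ENDOP(VTOI(dvp)->i_lfs, dvp, "...");
         *
         * SET_DIROP reserves segment space and blocks while the segment
         * writer is active (or while too many dirop vnodes are already
         * outstanding); MARK_VNODE flags the vnodes VDIROP/IN_ADIROP so
         * they are retained until the dirop reaches disk; SET_ENDOP drops
         * the dirop count, releases the reservation and wakes the writer.
         */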
    460 
    461 void
    462 lfs_mark_vnode(struct vnode *vp)
    463 {
    464 	struct inode *ip = VTOI(vp);
    465 	struct lfs *fs = ip->i_lfs;
    466 
    467 	if (!(ip->i_flag & IN_ADIROP)) {
    468 		if (!(vp->v_flag & VDIROP)) {
    469 			(void)lfs_vref(vp);
    470 			++lfs_dirvcount;
    471 			TAILQ_INSERT_TAIL(&fs->lfs_dchainhd, ip, i_lfs_dchain);
    472 			vp->v_flag |= VDIROP;
    473 		}
    474 		++fs->lfs_nadirop;
    475 		ip->i_flag |= IN_ADIROP;
    476 	} else
    477 		KASSERT(vp->v_flag & VDIROP);
    478 }
    479 
    480 void
    481 lfs_unmark_vnode(struct vnode *vp)
    482 {
    483 	struct inode *ip = VTOI(vp);
    484 
    485 	if (ip->i_flag & IN_ADIROP) {
    486 		KASSERT(vp->v_flag & VDIROP);
    487 		--ip->i_lfs->lfs_nadirop;
    488 		ip->i_flag &= ~IN_ADIROP;
    489 	}
    490 }
    491 
    492 int
    493 lfs_symlink(void *v)
    494 {
    495 	struct vop_symlink_args /* {
    496 		struct vnode *a_dvp;
    497 		struct vnode **a_vpp;
    498 		struct componentname *a_cnp;
    499 		struct vattr *a_vap;
    500 		char *a_target;
    501 	} */ *ap = v;
    502 	int error;
    503 
    504 	if ((error = SET_DIROP(ap->a_dvp)) != 0) {
    505 		vput(ap->a_dvp);
    506 		return error;
    507 	}
    508 	MARK_VNODE(ap->a_dvp);
    509 	error = ufs_symlink(ap);
    510 	UNMARK_VNODE(ap->a_dvp);
    511 	if (*(ap->a_vpp))
    512 		UNMARK_VNODE(*(ap->a_vpp));
    513 	SET_ENDOP(VTOI(ap->a_dvp)->i_lfs,ap->a_dvp,"symlink");
    514 	return (error);
    515 }
    516 
    517 int
    518 lfs_mknod(void *v)
    519 {
    520 	struct vop_mknod_args	/* {
    521 		struct vnode *a_dvp;
    522 		struct vnode **a_vpp;
    523 		struct componentname *a_cnp;
    524 		struct vattr *a_vap;
    525 		} */ *ap = v;
    526 	struct vattr *vap = ap->a_vap;
    527 	struct vnode **vpp = ap->a_vpp;
    528 	struct componentname *cnp = ap->a_cnp;
    529 	struct inode *ip;
    530 	int error;
    531 	struct mount	*mp;
    532 	ino_t		ino;
    533 
    534 	if ((error = SET_DIROP(ap->a_dvp)) != 0) {
    535 		vput(ap->a_dvp);
    536 		return error;
    537 	}
    538 	MARK_VNODE(ap->a_dvp);
    539 	error = ufs_makeinode(MAKEIMODE(vap->va_type, vap->va_mode),
    540 	    ap->a_dvp, vpp, cnp);
    541 	UNMARK_VNODE(ap->a_dvp);
    542 	if (*(ap->a_vpp))
    543 		UNMARK_VNODE(*(ap->a_vpp));
    544 
    545 	/* Either way we're done with the dirop at this point */
    546 	SET_ENDOP(VTOI(ap->a_dvp)->i_lfs,ap->a_dvp,"mknod");
    547 
    548 	if (error)
    549 		return (error);
    550 
    551 	ip = VTOI(*vpp);
    552 	mp  = (*vpp)->v_mount;
    553 	ino = ip->i_number;
    554 	ip->i_flag |= IN_ACCESS | IN_CHANGE | IN_UPDATE;
    555 	if (vap->va_rdev != VNOVAL) {
    556 		/*
    557 		 * Want to be able to use this to make badblock
    558 		 * inodes, so don't truncate the dev number.
    559 		 */
    560 #if 0
    561 		ip->i_ffs1_rdev = ufs_rw32(vap->va_rdev,
    562 		    UFS_MPNEEDSWAP((*vpp)->v_mount));
    563 #else
    564 		ip->i_ffs1_rdev = vap->va_rdev;
    565 #endif
    566 	}
    567 
    568 	/*
    569 	 * Call fsync to write the vnode so that we don't have to deal with
    570 	 * flushing it when it's marked VDIROP|VXLOCK.
    571 	 *
    572 	 * XXX KS - If we can't flush we also can't call vgone(), so must
    573 	 * return.  But, that leaves this vnode in limbo, also not good.
    574 	 * Can this ever happen (barring hardware failure)?
    575 	 */
    576 	if ((error = VOP_FSYNC(*vpp, NOCRED, FSYNC_WAIT, 0, 0,
    577 	    curlwp)) != 0) {
    578 		panic("lfs_mknod: couldn't fsync (ino %d)", ino);
    579 		/* return (error); */
    580 	}
    581 	/*
    582 	 * Remove vnode so that it will be reloaded by VFS_VGET and
    583 	 * checked to see if it is an alias of an existing entry in
    584 	 * the inode cache.
    585 	 */
    586 	/* Used to be vput, but that causes us to call VOP_INACTIVE twice. */
    587 
    588 	VOP_UNLOCK(*vpp, 0);
    589 	lfs_vunref(*vpp);
    590 	(*vpp)->v_type = VNON;
    591 	vgone(*vpp);
    592 	error = VFS_VGET(mp, ino, vpp);
    593 
    594 	if (error != 0) {
    595 		*vpp = NULL;
    596 		return (error);
    597 	}
    598 	return (0);
    599 }
    600 
    601 int
    602 lfs_create(void *v)
    603 {
    604 	struct vop_create_args	/* {
    605 		struct vnode *a_dvp;
    606 		struct vnode **a_vpp;
    607 		struct componentname *a_cnp;
    608 		struct vattr *a_vap;
    609 	} */ *ap = v;
    610 	int error;
    611 
    612 	if ((error = SET_DIROP(ap->a_dvp)) != 0) {
    613 		vput(ap->a_dvp);
    614 		return error;
    615 	}
    616 	MARK_VNODE(ap->a_dvp);
    617 	error = ufs_create(ap);
    618 	UNMARK_VNODE(ap->a_dvp);
    619 	if (*(ap->a_vpp))
    620 		UNMARK_VNODE(*(ap->a_vpp));
    621 	SET_ENDOP(VTOI(ap->a_dvp)->i_lfs,ap->a_dvp,"create");
    622 	return (error);
    623 }
    624 
    625 int
    626 lfs_mkdir(void *v)
    627 {
    628 	struct vop_mkdir_args	/* {
    629 		struct vnode *a_dvp;
    630 		struct vnode **a_vpp;
    631 		struct componentname *a_cnp;
    632 		struct vattr *a_vap;
    633 	} */ *ap = v;
    634 	int error;
    635 
    636 	if ((error = SET_DIROP(ap->a_dvp)) != 0) {
    637 		vput(ap->a_dvp);
    638 		return error;
    639 	}
    640 	MARK_VNODE(ap->a_dvp);
    641 	error = ufs_mkdir(ap);
    642 	UNMARK_VNODE(ap->a_dvp);
    643 	if (*(ap->a_vpp))
    644 		UNMARK_VNODE(*(ap->a_vpp));
    645 	SET_ENDOP(VTOI(ap->a_dvp)->i_lfs,ap->a_dvp,"mkdir");
    646 	return (error);
    647 }
    648 
    649 int
    650 lfs_remove(void *v)
    651 {
    652 	struct vop_remove_args	/* {
    653 		struct vnode *a_dvp;
    654 		struct vnode *a_vp;
    655 		struct componentname *a_cnp;
    656 	} */ *ap = v;
    657 	struct vnode *dvp, *vp;
    658 	int error;
    659 
    660 	dvp = ap->a_dvp;
    661 	vp = ap->a_vp;
    662 	if ((error = SET_DIROP2(dvp, vp)) != 0) {
    663 		if (dvp == vp)
    664 			vrele(vp);
    665 		else
    666 			vput(vp);
    667 		vput(dvp);
    668 		return error;
    669 	}
    670 	MARK_VNODE(dvp);
    671 	MARK_VNODE(vp);
    672 	error = ufs_remove(ap);
    673 	UNMARK_VNODE(dvp);
    674 	UNMARK_VNODE(vp);
    675 
    676 	SET_ENDOP2(VTOI(dvp)->i_lfs, dvp, vp, "remove");
    677 	return (error);
    678 }
    679 
    680 int
    681 lfs_rmdir(void *v)
    682 {
    683 	struct vop_rmdir_args	/* {
    684 		struct vnodeop_desc *a_desc;
    685 		struct vnode *a_dvp;
    686 		struct vnode *a_vp;
    687 		struct componentname *a_cnp;
    688 	} */ *ap = v;
    689 	struct vnode *vp;
    690 	int error;
    691 
    692 	vp = ap->a_vp;
    693 	if ((error = SET_DIROP2(ap->a_dvp, ap->a_vp)) != 0) {
    694 		vrele(ap->a_dvp);
    695 		if (ap->a_vp != ap->a_dvp)
    696 			VOP_UNLOCK(ap->a_dvp, 0);
    697 		vput(vp);
    698 		return error;
    699 	}
    700 	MARK_VNODE(ap->a_dvp);
    701 	MARK_VNODE(vp);
    702 	error = ufs_rmdir(ap);
    703 	UNMARK_VNODE(ap->a_dvp);
    704 	UNMARK_VNODE(vp);
    705 
    706 	SET_ENDOP2(VTOI(ap->a_dvp)->i_lfs, ap->a_dvp, vp, "rmdir");
    707 	return (error);
    708 }
    709 
    710 int
    711 lfs_link(void *v)
    712 {
    713 	struct vop_link_args	/* {
    714 		struct vnode *a_dvp;
    715 		struct vnode *a_vp;
    716 		struct componentname *a_cnp;
    717 	} */ *ap = v;
    718 	int error;
    719 
    720 	if ((error = SET_DIROP(ap->a_dvp)) != 0) {
    721 		vput(ap->a_dvp);
    722 		return error;
    723 	}
    724 	MARK_VNODE(ap->a_dvp);
    725 	error = ufs_link(ap);
    726 	UNMARK_VNODE(ap->a_dvp);
    727 	SET_ENDOP(VTOI(ap->a_dvp)->i_lfs,ap->a_dvp,"link");
    728 	return (error);
    729 }
    730 
    731 int
    732 lfs_rename(void *v)
    733 {
    734 	struct vop_rename_args	/* {
    735 		struct vnode *a_fdvp;
    736 		struct vnode *a_fvp;
    737 		struct componentname *a_fcnp;
    738 		struct vnode *a_tdvp;
    739 		struct vnode *a_tvp;
    740 		struct componentname *a_tcnp;
    741 	} */ *ap = v;
    742 	struct vnode *tvp, *fvp, *tdvp, *fdvp;
    743 	struct componentname *tcnp, *fcnp;
    744 	int error;
    745 	struct lfs *fs;
    746 
    747 	fs = VTOI(ap->a_fdvp)->i_lfs;
    748 	tvp = ap->a_tvp;
    749 	tdvp = ap->a_tdvp;
    750 	tcnp = ap->a_tcnp;
    751 	fvp = ap->a_fvp;
    752 	fdvp = ap->a_fdvp;
    753 	fcnp = ap->a_fcnp;
    754 
    755 	/*
    756 	 * Check for cross-device rename.
    757 	 * If it is, we don't want to set dirops, just error out.
    758 	 * (In particular note that MARK_VNODE(tdvp) will DTWT on
    759 	 * a cross-device rename.)
    760 	 *
    761 	 * Copied from ufs_rename.
    762 	 */
    763 	if ((fvp->v_mount != tdvp->v_mount) ||
    764 	    (tvp && (fvp->v_mount != tvp->v_mount))) {
    765 		error = EXDEV;
    766 		goto errout;
    767 	}
    768 
    769 	/*
    770 	 * Check to make sure we're not renaming a vnode onto itself
    771 	 * (deleting a hard link by renaming one name onto another);
    772 	 * if we are we can't recursively call VOP_REMOVE since that
    773 	 * would leave us with an unaccounted-for number of live dirops.
    774 	 *
    775 	 * Inline the relevant section of ufs_rename here, *before*
    776 	 * calling SET_DIROP2.
    777 	 */
    778 	if (tvp && ((VTOI(tvp)->i_flags & (IMMUTABLE | APPEND)) ||
    779 	    (VTOI(tdvp)->i_flags & APPEND))) {
    780 		error = EPERM;
    781 		goto errout;
    782 	}
    783 	if (fvp == tvp) {
    784 		if (fvp->v_type == VDIR) {
    785 			error = EINVAL;
    786 			goto errout;
    787 		}
    788 
    789 		/* Release destination completely. */
    790 		VOP_ABORTOP(tdvp, tcnp);
    791 		vput(tdvp);
    792 		vput(tvp);
    793 
    794 		/* Delete source. */
    795 		vrele(fvp);
    796 		fcnp->cn_flags &= ~(MODMASK | SAVESTART);
    797 		fcnp->cn_flags |= LOCKPARENT | LOCKLEAF;
    798 		fcnp->cn_nameiop = DELETE;
    799 		if ((error = relookup(fdvp, &fvp, fcnp))){
    800 			/* relookup blew away fdvp */
    801 			return (error);
    802 		}
    803 		return (VOP_REMOVE(fdvp, fvp, fcnp));
    804 	}
    805 
    806 	if ((error = SET_DIROP2(tdvp, tvp)) != 0)
    807 		goto errout;
    808 	MARK_VNODE(fdvp);
    809 	MARK_VNODE(tdvp);
    810 	MARK_VNODE(fvp);
    811 	if (tvp) {
    812 		MARK_VNODE(tvp);
    813 	}
    814 
    815 	error = ufs_rename(ap);
    816 	UNMARK_VNODE(fdvp);
    817 	UNMARK_VNODE(tdvp);
    818 	UNMARK_VNODE(fvp);
    819 	if (tvp) {
    820 		UNMARK_VNODE(tvp);
    821 	}
    822 	SET_ENDOP2(fs, tdvp, tvp, "rename");
    823 	return (error);
    824 
    825     errout:
    826 	VOP_ABORTOP(tdvp, ap->a_tcnp); /* XXX, why not in NFS? */
    827 	if (tdvp == tvp)
    828 		vrele(tdvp);
    829 	else
    830 		vput(tdvp);
    831 	if (tvp)
    832 		vput(tvp);
    833 	VOP_ABORTOP(fdvp, ap->a_fcnp); /* XXX, why not in NFS? */
    834 	vrele(fdvp);
    835 	vrele(fvp);
    836 	return (error);
    837 }
    838 
    839 /* XXX hack to avoid calling ITIMES in getattr */
    840 int
    841 lfs_getattr(void *v)
    842 {
    843 	struct vop_getattr_args /* {
    844 		struct vnode *a_vp;
    845 		struct vattr *a_vap;
    846 		struct ucred *a_cred;
    847 		struct lwp *a_l;
    848 	} */ *ap = v;
    849 	struct vnode *vp = ap->a_vp;
    850 	struct inode *ip = VTOI(vp);
    851 	struct vattr *vap = ap->a_vap;
    852 	struct lfs *fs = ip->i_lfs;
    853 	/*
    854 	 * Copy from inode table
    855 	 */
    856 	vap->va_fsid = ip->i_dev;
    857 	vap->va_fileid = ip->i_number;
    858 	vap->va_mode = ip->i_mode & ~IFMT;
    859 	vap->va_nlink = ip->i_nlink;
    860 	vap->va_uid = ip->i_uid;
    861 	vap->va_gid = ip->i_gid;
    862 	vap->va_rdev = (dev_t)ip->i_ffs1_rdev;
    863 	vap->va_size = vp->v_size;
    864 	vap->va_atime.tv_sec = ip->i_ffs1_atime;
    865 	vap->va_atime.tv_nsec = ip->i_ffs1_atimensec;
    866 	vap->va_mtime.tv_sec = ip->i_ffs1_mtime;
    867 	vap->va_mtime.tv_nsec = ip->i_ffs1_mtimensec;
    868 	vap->va_ctime.tv_sec = ip->i_ffs1_ctime;
    869 	vap->va_ctime.tv_nsec = ip->i_ffs1_ctimensec;
    870 	vap->va_flags = ip->i_flags;
    871 	vap->va_gen = ip->i_gen;
    872 	/* this doesn't belong here */
    873 	if (vp->v_type == VBLK)
    874 		vap->va_blocksize = BLKDEV_IOSIZE;
    875 	else if (vp->v_type == VCHR)
    876 		vap->va_blocksize = MAXBSIZE;
    877 	else
    878 		vap->va_blocksize = vp->v_mount->mnt_stat.f_iosize;
    879 	vap->va_bytes = fsbtob(fs, (u_quad_t)ip->i_lfs_effnblks);
    880 	vap->va_type = vp->v_type;
    881 	vap->va_filerev = ip->i_modrev;
    882 	return (0);
    883 }
    884 
    885 /*
    886  * Check to make sure the inode blocks won't choke the buffer
    887  * cache, then call ufs_setattr as usual.
    888  */
    889 int
    890 lfs_setattr(void *v)
    891 {
    892 	struct vop_setattr_args /* {
    893 		struct vnode *a_vp;
    894 		struct vattr *a_vap;
    895 		struct ucred *a_cred;
    896 		struct lwp *a_l;
    897 	} */ *ap = v;
    898 	struct vnode *vp = ap->a_vp;
    899 
    900 	lfs_check(vp, LFS_UNUSED_LBN, 0);
    901 	return ufs_setattr(v);
    902 }
    903 
    904 /*
    905  * Close called
    906  *
    907  * XXX -- we were using ufs_close, but since it updates the
    908  * times on the inode, we might need to bump the uinodes
    909  * count.
    910  */
    911 /* ARGSUSED */
    912 int
    913 lfs_close(void *v)
    914 {
    915 	struct vop_close_args /* {
    916 		struct vnode *a_vp;
    917 		int  a_fflag;
    918 		struct ucred *a_cred;
    919 		struct lwp *a_l;
    920 	} */ *ap = v;
    921 	struct vnode *vp = ap->a_vp;
    922 	struct inode *ip = VTOI(vp);
    923 	struct timespec ts;
    924 
    925 	if (vp == ip->i_lfs->lfs_ivnode &&
    926 	    vp->v_mount->mnt_iflag & IMNT_UNMOUNT)
    927 		return 0;
    928 
    929 	if (vp->v_usecount > 1 && vp != ip->i_lfs->lfs_ivnode) {
    930 		TIMEVAL_TO_TIMESPEC(&time, &ts);
    931 		LFS_ITIMES(ip, &ts, &ts, &ts);
    932 	}
    933 	return (0);
    934 }
    935 
    936 /*
    937  * Close wrapper for special devices.
    938  *
    939  * Update the times on the inode then do device close.
    940  */
    941 int
    942 lfsspec_close(void *v)
    943 {
    944 	struct vop_close_args /* {
    945 		struct vnode	*a_vp;
    946 		int		a_fflag;
    947 		struct ucred	*a_cred;
    948 		struct lwp	*a_l;
    949 	} */ *ap = v;
    950 	struct vnode	*vp;
    951 	struct inode	*ip;
    952 	struct timespec	ts;
    953 
    954 	vp = ap->a_vp;
    955 	ip = VTOI(vp);
    956 	if (vp->v_usecount > 1) {
    957 		TIMEVAL_TO_TIMESPEC(&time, &ts);
    958 		LFS_ITIMES(ip, &ts, &ts, &ts);
    959 	}
    960 	return (VOCALL (spec_vnodeop_p, VOFFSET(vop_close), ap));
    961 }
    962 
    963 /*
    964  * Close wrapper for fifos.
    965  *
    966  * Update the times on the inode, then do the fifo close.
    967  */
    968 int
    969 lfsfifo_close(void *v)
    970 {
    971 	struct vop_close_args /* {
    972 		struct vnode	*a_vp;
    973 		int		a_fflag;
    974 		struct ucred	*a_cred;
    975 		struct lwp	*a_l;
    976 	} */ *ap = v;
    977 	struct vnode	*vp;
    978 	struct inode	*ip;
    979 	struct timespec	ts;
    980 
    981 	vp = ap->a_vp;
    982 	ip = VTOI(vp);
    983 	if (ap->a_vp->v_usecount > 1) {
    984 		TIMEVAL_TO_TIMESPEC(&time, &ts);
    985 		LFS_ITIMES(ip, &ts, &ts, &ts);
    986 	}
    987 	return (VOCALL (fifo_vnodeop_p, VOFFSET(vop_close), ap));
    988 }
    989 
    990 /*
    991  * Reclaim an inode so that it can be used for other purposes.
    992  */
    993 
    994 int
    995 lfs_reclaim(void *v)
    996 {
    997 	struct vop_reclaim_args /* {
    998 		struct vnode *a_vp;
    999 		struct lwp *a_l;
   1000 	} */ *ap = v;
   1001 	struct vnode *vp = ap->a_vp;
   1002 	struct inode *ip = VTOI(vp);
   1003 	int error;
   1004 
   1005 	KASSERT(ip->i_nlink == ip->i_ffs_effnlink);
   1006 
   1007 	LFS_CLR_UINO(ip, IN_ALLMOD);
   1008 	if ((error = ufs_reclaim(vp, ap->a_l)))
   1009 		return (error);
   1010 	lfs_deregister_all(vp);
   1011 	pool_put(&lfs_dinode_pool, VTOI(vp)->i_din.ffs1_din);
   1012 	pool_put(&lfs_inoext_pool, ip->inode_ext.lfs);
   1013 	ip->inode_ext.lfs = NULL;
   1014 	pool_put(&lfs_inode_pool, vp->v_data);
   1015 	vp->v_data = NULL;
   1016 	return (0);
   1017 }
   1018 
   1019 /*
   1020  * Read a block from a storage device.
   1021  * In order to avoid reading blocks that are in the process of being
   1022  * written by the cleaner---and hence are not mutexed by the normal
   1023  * buffer cache / page cache mechanisms---check for collisions before
   1024  * reading.
   1025  *
   1026  * We inline ufs_strategy to make sure that the VOP_BMAP occurs *before*
   1027  * the active cleaner test.
   1028  *
   1029  * XXX This code assumes that lfs_markv makes synchronous checkpoints.
   1030  */
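        /*
         * Concretely, the collision check below compares the block's
         * on-disk address against the cleaner's interval list
         * (fs->lfs_cleanint[], which is only non-empty while the cleaner
         * holds the seglock); if the block lies in a segment being
         * rewritten, we sleep on the seglock and re-check before passing
         * the read down to the device.
         */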
   1031 int
   1032 lfs_strategy(void *v)
   1033 {
   1034 	struct vop_strategy_args /* {
   1035 		struct vnode *a_vp;
   1036 		struct buf *a_bp;
   1037 	} */ *ap = v;
   1038 	struct buf	*bp;
   1039 	struct lfs	*fs;
   1040 	struct vnode	*vp;
   1041 	struct inode	*ip;
   1042 	daddr_t		tbn;
   1043 	int		i, sn, error, slept;
   1044 
   1045 	bp = ap->a_bp;
   1046 	vp = ap->a_vp;
   1047 	ip = VTOI(vp);
   1048 	fs = ip->i_lfs;
   1049 
   1050 	/* lfs uses its strategy routine only for read */
   1051 	KASSERT(bp->b_flags & B_READ);
   1052 
   1053 	if (vp->v_type == VBLK || vp->v_type == VCHR)
   1054 		panic("lfs_strategy: spec");
   1055 	KASSERT(bp->b_bcount != 0);
   1056 	if (bp->b_blkno == bp->b_lblkno) {
   1057 		error = VOP_BMAP(vp, bp->b_lblkno, NULL, &bp->b_blkno,
   1058 				 NULL);
   1059 		if (error) {
   1060 			bp->b_error = error;
   1061 			bp->b_flags |= B_ERROR;
   1062 			biodone(bp);
   1063 			return (error);
   1064 		}
   1065 		if ((long)bp->b_blkno == -1) /* no valid data */
   1066 			clrbuf(bp);
   1067 	}
   1068 	if ((long)bp->b_blkno < 0) { /* block is not on disk */
   1069 		biodone(bp);
   1070 		return (0);
   1071 	}
   1072 
   1073 	slept = 1;
   1074 	simple_lock(&fs->lfs_interlock);
   1075 	while (slept && fs->lfs_seglock) {
   1076 		simple_unlock(&fs->lfs_interlock);
   1077 		/*
   1078 		 * Look through list of intervals.
   1079 		 * There will only be intervals to look through
   1080 		 * if the cleaner holds the seglock.
   1081 		 * Since the cleaner is synchronous, we can trust
   1082 		 * the list of intervals to be current.
   1083 		 */
   1084 		tbn = dbtofsb(fs, bp->b_blkno);
   1085 		sn = dtosn(fs, tbn);
   1086 		slept = 0;
   1087 		for (i = 0; i < fs->lfs_cleanind; i++) {
   1088 			if (sn == dtosn(fs, fs->lfs_cleanint[i]) &&
   1089 			    tbn >= fs->lfs_cleanint[i]) {
   1090 				DLOG((DLOG_CLEAN,
   1091 				      "lfs_strategy: ino %d lbn %" PRId64
   1092 				       " ind %d sn %d fsb %" PRIx32
   1093 				       " given sn %d fsb %" PRIx64 "\n",
   1094 					ip->i_number, bp->b_lblkno, i,
   1095 					dtosn(fs, fs->lfs_cleanint[i]),
   1096 					fs->lfs_cleanint[i], sn, tbn));
   1097 				DLOG((DLOG_CLEAN,
   1098 				      "lfs_strategy: sleeping on ino %d lbn %"
   1099 				      PRId64 "\n", ip->i_number, bp->b_lblkno));
   1100 				tsleep(&fs->lfs_seglock, PRIBIO+1,
   1101 					"lfs_strategy", 0);
   1102 				/* Things may be different now; start over. */
   1103 				slept = 1;
   1104 				break;
   1105 			}
   1106 		}
   1107 		simple_lock(&fs->lfs_interlock);
   1108 	}
   1109 	simple_unlock(&fs->lfs_interlock);
   1110 
   1111 	vp = ip->i_devvp;
   1112 	VOP_STRATEGY(vp, bp);
   1113 	return (0);
   1114 }
   1115 
   1116 static void
   1117 lfs_flush_dirops(struct lfs *fs)
   1118 {
   1119 	struct inode *ip, *nip;
   1120 	struct vnode *vp;
   1121 	extern int lfs_dostats;
   1122 	struct segment *sp;
   1123 	int needunlock;
   1124 
   1125 	if (fs->lfs_ronly)
   1126 		return;
   1127 
   1128 	if (TAILQ_FIRST(&fs->lfs_dchainhd) == NULL)
   1129 		return;
   1130 
   1131 	if (lfs_dostats)
   1132 		++lfs_stats.flush_invoked;
   1133 
   1134 	/*
   1135 	 * Inline lfs_segwrite/lfs_writevnodes, but just for dirops.
   1136 	 * Technically this is a checkpoint (the on-disk state is valid)
   1137 	 * even though we are leaving out all the file data.
   1138 	 */
   1139 	lfs_imtime(fs);
   1140 	lfs_seglock(fs, SEGM_CKP);
   1141 	sp = fs->lfs_sp;
   1142 
   1143 	/*
   1144 	 * lfs_writevnodes, optimized to get dirops out of the way.
   1145 	 * Only write dirops, and don't flush files' pages, only
   1146 	 * blocks from the directories.
   1147 	 *
   1148 	 * We don't need to vref these files because they are
   1149 	 * dirops and so hold an extra reference until the
   1150 	 * segunlock clears them of that status.
   1151 	 *
   1152 	 * We don't need to check for IN_ADIROP because we know that
   1153 	 * no dirops are active.
   1154 	 *
   1155 	 */
   1156 	for (ip = TAILQ_FIRST(&fs->lfs_dchainhd); ip != NULL; ip = nip) {
   1157 		nip = TAILQ_NEXT(ip, i_lfs_dchain);
   1158 		vp = ITOV(ip);
   1159 
   1160 		/*
   1161 		 * All writes to directories come from dirops; all
   1162 		 * writes to files' direct blocks go through the page
   1163 		 * cache, which we're not touching.  Reads to files
   1164 		 * and/or directories will not be affected by writing
   1165 		 * directory blocks, inodes and file inodes.  So we don't
   1166 		 * really need to lock.  If we don't lock, though,
   1167 		 * make sure that we don't clear IN_MODIFIED
   1168 		 * unnecessarily.
   1169 		 */
   1170 		if (vp->v_flag & VXLOCK)
   1171 			continue;
   1172 		if (vn_lock(vp, LK_EXCLUSIVE | LK_CANRECURSE |
   1173 			    LK_NOWAIT) == 0) {
   1174 			needunlock = 1;
   1175 		} else {
   1176 			DLOG((DLOG_VNODE, "lfs_flush_dirops: flushing locked ino %d\n",
   1177 			       VTOI(vp)->i_number));
   1178 			needunlock = 0;
   1179 		}
   1180 		if (vp->v_type != VREG &&
   1181 		    ((ip->i_flag & IN_ALLMOD) || !VPISEMPTY(vp))) {
   1182 			lfs_writefile(fs, sp, vp);
   1183 			if (!VPISEMPTY(vp) && !WRITEINPROG(vp) &&
   1184 			    !(ip->i_flag & IN_ALLMOD)) {
   1185 				LFS_SET_UINO(ip, IN_MODIFIED);
   1186 			}
   1187 		}
   1188 		(void) lfs_writeinode(fs, sp, ip);
   1189 		if (needunlock)
   1190 			VOP_UNLOCK(vp, 0);
   1191 		else
   1192 			LFS_SET_UINO(ip, IN_MODIFIED);
   1193 	}
   1194 	/* We've written all the dirops there are */
   1195 	((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
   1196 	(void) lfs_writeseg(fs, sp);
   1197 	lfs_segunlock(fs);
   1198 }
   1199 
   1200 /*
   1201  * Provide a fcntl interface to sys_lfs_{segwait,bmapv,markv}.
   1202  */
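        /*
         * Commands handled here: LFCNSEGWAIT/LFCNSEGWAITALL (and their
         * _COMPAT versions), LFCNBMAPV, LFCNMARKV, LFCNRECLAIM and
         * LFCNIFILEFH.  They are honored only on the filesystem root or
         * the Ifile vnode; any other vnode, and any unknown command,
         * is passed on to ufs_fcntl().
         */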
   1203 int
   1204 lfs_fcntl(void *v)
   1205 {
   1206 	struct vop_fcntl_args /* {
   1207 		struct vnode *a_vp;
   1208 		u_long a_command;
   1209 		caddr_t  a_data;
   1210 		int  a_fflag;
   1211 		struct ucred *a_cred;
   1212 		struct lwp *a_l;
   1213 	} */ *ap = v;
   1214 	struct timeval *tvp;
   1215 	BLOCK_INFO *blkiov;
   1216 	CLEANERINFO *cip;
   1217 	int blkcnt, error, oclean;
   1218 	struct lfs_fcntl_markv blkvp;
   1219 	struct proc *p;
   1220 	fsid_t *fsidp;
   1221 	struct lfs *fs;
   1222 	struct buf *bp;
   1223 	fhandle_t *fhp;
   1224 	daddr_t off;
   1225 
   1226 	/* Only respect LFS fcntls on fs root or Ifile */
   1227 	if (VTOI(ap->a_vp)->i_number != ROOTINO &&
   1228 	    VTOI(ap->a_vp)->i_number != LFS_IFILE_INUM) {
   1229 		return ufs_fcntl(v);
   1230 	}
   1231 
   1232 	/* Avoid locking a draining lock */
   1233 	if (ap->a_vp->v_mount->mnt_iflag & IMNT_UNMOUNT) {
   1234 		return ESHUTDOWN;
   1235 	}
   1236 
   1237 	p = ap->a_l->l_proc;
   1238 	fs = VTOI(ap->a_vp)->i_lfs;
   1239 	fsidp = &ap->a_vp->v_mount->mnt_stat.f_fsidx;
   1240 
   1241 	switch (ap->a_command) {
   1242 	    case LFCNSEGWAITALL:
   1243 	    case LFCNSEGWAITALL_COMPAT:
   1244 		fsidp = NULL;
   1245 		/* FALLTHROUGH */
   1246 	    case LFCNSEGWAIT:
   1247 	    case LFCNSEGWAIT_COMPAT:
   1248 		tvp = (struct timeval *)ap->a_data;
   1249 		simple_lock(&fs->lfs_interlock);
   1250 		++fs->lfs_sleepers;
   1251 		simple_unlock(&fs->lfs_interlock);
   1252 		VOP_UNLOCK(ap->a_vp, 0);
   1253 
   1254 		error = lfs_segwait(fsidp, tvp);
   1255 
   1256 		VOP_LOCK(ap->a_vp, LK_EXCLUSIVE);
   1257 		simple_lock(&fs->lfs_interlock);
   1258 		if (--fs->lfs_sleepers == 0)
   1259 			wakeup(&fs->lfs_sleepers);
   1260 		simple_unlock(&fs->lfs_interlock);
   1261 		return error;
   1262 
   1263 	    case LFCNBMAPV:
   1264 	    case LFCNMARKV:
   1265 		if ((error = suser(p->p_ucred, &p->p_acflag)) != 0)
   1266 			return (error);
   1267 		blkvp = *(struct lfs_fcntl_markv *)ap->a_data;
   1268 
   1269 		blkcnt = blkvp.blkcnt;
   1270 		if ((u_int) blkcnt > LFS_MARKV_MAXBLKCNT)
   1271 			return (EINVAL);
   1272 		blkiov = malloc(blkcnt * sizeof(BLOCK_INFO), M_SEGMENT, M_WAITOK);
   1273 		if ((error = copyin(blkvp.blkiov, blkiov,
   1274 		     blkcnt * sizeof(BLOCK_INFO))) != 0) {
   1275 			free(blkiov, M_SEGMENT);
   1276 			return error;
   1277 		}
   1278 
   1279 		simple_lock(&fs->lfs_interlock);
   1280 		++fs->lfs_sleepers;
   1281 		simple_unlock(&fs->lfs_interlock);
   1282 		VOP_UNLOCK(ap->a_vp, 0);
   1283 		if (ap->a_command == LFCNBMAPV)
   1284 			error = lfs_bmapv(p, fsidp, blkiov, blkcnt);
   1285 		else /* LFCNMARKV */
   1286 			error = lfs_markv(p, fsidp, blkiov, blkcnt);
   1287 		if (error == 0)
   1288 			error = copyout(blkiov, blkvp.blkiov,
   1289 					blkcnt * sizeof(BLOCK_INFO));
   1290 		VOP_LOCK(ap->a_vp, LK_EXCLUSIVE);
   1291 		simple_lock(&fs->lfs_interlock);
   1292 		if (--fs->lfs_sleepers == 0)
   1293 			wakeup(&fs->lfs_sleepers);
   1294 		simple_unlock(&fs->lfs_interlock);
   1295 		free(blkiov, M_SEGMENT);
   1296 		return error;
   1297 
   1298 	    case LFCNRECLAIM:
   1299 		/*
   1300 		 * Flush dirops and write Ifile, allowing empty segments
   1301 		 * to be immediately reclaimed.
   1302 		 */
   1303 		lfs_writer_enter(fs, "pndirop");
   1304 		off = fs->lfs_offset;
   1305 		lfs_seglock(fs, SEGM_FORCE_CKP | SEGM_CKP);
   1306 		lfs_flush_dirops(fs);
   1307 		LFS_CLEANERINFO(cip, fs, bp);
   1308 		oclean = cip->clean;
   1309 		LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);
   1310 		lfs_segwrite(ap->a_vp->v_mount, SEGM_FORCE_CKP);
   1311 		lfs_segunlock(fs);
   1312 		lfs_writer_leave(fs);
   1313 
   1314 #ifdef DEBUG
   1315 		LFS_CLEANERINFO(cip, fs, bp);
   1316 		DLOG((DLOG_CLEAN, "lfs_fcntl: reclaim wrote %" PRId64
   1317 		      " blocks, cleaned %" PRId32 " segments (activesb %d)\n",
   1318 		      fs->lfs_offset - off, cip->clean - oclean,
   1319 		      fs->lfs_activesb));
   1320 		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
   1321 #endif
   1322 
   1323 		return 0;
   1324 
   1325 	    case LFCNIFILEFH:
   1326 		/* Return the filehandle of the Ifile */
   1327 		if ((error = suser(ap->a_l->l_proc->p_ucred, &ap->a_l->l_proc->p_acflag)) != 0)
   1328 			return (error);
   1329 		fhp = (struct fhandle *)ap->a_data;
   1330 		fhp->fh_fsid = *fsidp;
   1331 		return lfs_vptofh(fs->lfs_ivnode, &(fhp->fh_fid));
   1332 
   1333 	    default:
   1334 		return ufs_fcntl(v);
   1335 	}
   1336 	return 0;
   1337 }
   1338 
   1339 int
   1340 lfs_getpages(void *v)
   1341 {
   1342 	struct vop_getpages_args /* {
   1343 		struct vnode *a_vp;
   1344 		voff_t a_offset;
   1345 		struct vm_page **a_m;
   1346 		int *a_count;
   1347 		int a_centeridx;
   1348 		vm_prot_t a_access_type;
   1349 		int a_advice;
   1350 		int a_flags;
   1351 	} */ *ap = v;
   1352 
   1353 	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM &&
   1354 	    (ap->a_access_type & VM_PROT_WRITE) != 0) {
   1355 		return EPERM;
   1356 	}
   1357 	if ((ap->a_access_type & VM_PROT_WRITE) != 0) {
   1358 		LFS_SET_UINO(VTOI(ap->a_vp), IN_MODIFIED);
   1359 	}
   1360 
   1361 	/*
   1362 	 * We're relying on the fact that genfs_getpages() always reads in
   1363 	 * entire filesystem blocks.
   1364 	 */
   1365 	return genfs_getpages(v);
   1366 }
   1367 
   1368 /*
   1369  * Make sure that for all pages in every block in the given range,
   1370  * either all are dirty or all are clean.  If any of the pages
   1371  * we've seen so far are dirty, put the vnode on the paging chain,
   1372  * and mark it IN_PAGING.
   1373  *
   1374  * If checkfirst != 0, don't check all the pages but return at the
   1375  * first dirty page.
   1376  */
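        /*
         * Returns the number of dirty pages seen, so a nonzero return
         * means at least one block in the range needs to be written.
         * Expects vp->v_interlock to be held on entry; it may be dropped
         * and retaken while waiting for busy pages.
         */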
   1377 static int
   1378 check_dirty(struct lfs *fs, struct vnode *vp,
   1379 	    off_t startoffset, off_t endoffset, off_t blkeof,
   1380 	    int flags, int checkfirst)
   1381 {
   1382 	int by_list;
   1383 	struct vm_page *curpg = NULL; /* XXX: gcc */
   1384 	struct vm_page *pgs[MAXBSIZE / PAGE_SIZE], *pg;
   1385 	off_t soff = 0; /* XXX: gcc */
   1386 	voff_t off;
   1387 	int i;
   1388 	int nonexistent;
   1389 	int any_dirty;	/* number of dirty pages */
   1390 	int dirty;	/* number of dirty pages in a block */
   1391 	int tdirty;
   1392 	int pages_per_block = fs->lfs_bsize >> PAGE_SHIFT;
   1393 
   1394   top:
   1395 	by_list = (vp->v_uobj.uo_npages <=
   1396 		   ((endoffset - startoffset) >> PAGE_SHIFT) *
   1397 		   UVM_PAGE_HASH_PENALTY);
   1398 	any_dirty = 0;
   1399 
   1400 	if (by_list) {
   1401 		curpg = TAILQ_FIRST(&vp->v_uobj.memq);
   1402 	} else {
   1403 		soff = startoffset;
   1404 	}
   1405 	while (by_list || soff < MIN(blkeof, endoffset)) {
   1406 		if (by_list) {
   1407 			/*
   1408 			 * find the first page in a block.
   1409 			 */
   1410 			if (pages_per_block > 1) {
   1411 				while (curpg && (curpg->offset & fs->lfs_bmask))
   1412 					curpg = TAILQ_NEXT(curpg, listq);
   1413 			}
   1414 			if (curpg == NULL)
   1415 				break;
   1416 			soff = curpg->offset;
   1417 		}
   1418 
   1419 		/*
   1420 		 * Mark all pages in extended range busy; find out if any
   1421 		 * of them are dirty.
   1422 		 */
   1423 		nonexistent = dirty = 0;
   1424 		for (i = 0; i == 0 || i < pages_per_block; i++) {
   1425 			if (by_list && pages_per_block <= 1) {
   1426 				pgs[i] = pg = curpg;
   1427 			} else {
   1428 				off = soff + (i << PAGE_SHIFT);
   1429 				pgs[i] = pg = uvm_pagelookup(&vp->v_uobj, off);
   1430 				if (pg == NULL) {
   1431 					++nonexistent;
   1432 					continue;
   1433 				}
   1434 			}
   1435 			KASSERT(pg != NULL);
   1436 			while (pg->flags & PG_BUSY) {
   1437 				pg->flags |= PG_WANTED;
   1438 				UVM_UNLOCK_AND_WAIT(pg, &vp->v_interlock, 0,
   1439 						    "lfsput", 0);
   1440 				simple_lock(&vp->v_interlock);
   1441 				if (by_list) {
   1442 					if (i > 0)
   1443 						uvm_page_unbusy(pgs, i);
   1444 					goto top;
   1445 				}
   1446 			}
   1447 			pg->flags |= PG_BUSY;
   1448 			UVM_PAGE_OWN(pg, "lfs_putpages");
   1449 
   1450 			pmap_page_protect(pg, VM_PROT_NONE);
   1451 			tdirty = (pmap_clear_modify(pg) ||
   1452 				  (pg->flags & PG_CLEAN) == 0);
   1453 			dirty += tdirty;
   1454 		}
   1455 		if (pages_per_block > 0 && nonexistent >= pages_per_block) {
   1456 			if (by_list) {
   1457 				curpg = TAILQ_NEXT(curpg, listq);
   1458 			} else {
   1459 				soff += fs->lfs_bsize;
   1460 			}
   1461 			continue;
   1462 		}
   1463 
   1464 		any_dirty += dirty;
   1465 		KASSERT(nonexistent == 0);
   1466 
   1467 		/*
   1468 		 * If any are dirty make all dirty; unbusy them,
   1469 		 * but if we were asked to clean, wire them so that
   1470 		 * the pagedaemon doesn't bother us about them while
   1471 		 * they're on their way to disk.
   1472 		 */
   1473 		for (i = 0; i == 0 || i < pages_per_block; i++) {
   1474 			pg = pgs[i];
   1475 			KASSERT(!((pg->flags & PG_CLEAN) && (pg->flags & PG_DELWRI)));
   1476 			if (dirty) {
   1477 				pg->flags &= ~PG_CLEAN;
   1478 				if (flags & PGO_FREE) {
   1479 					/* XXXUBC need better way to update */
   1480 					simple_lock(&lfs_subsys_lock);
   1481 					lfs_subsys_pages += MIN(1, pages_per_block);
   1482 					simple_unlock(&lfs_subsys_lock);
   1483 					/*
   1484 					 * Wire the page so that
   1485 					 * pdaemon doesn't see it again.
   1486 					 */
   1487 					uvm_lock_pageq();
   1488 					uvm_pagewire(pg);
   1489 					uvm_unlock_pageq();
   1490 
   1491 					/* Suspended write flag */
   1492 					pg->flags |= PG_DELWRI;
   1493 				}
   1494 			}
   1495 			if (pg->flags & PG_WANTED)
   1496 				wakeup(pg);
   1497 			pg->flags &= ~(PG_WANTED|PG_BUSY);
   1498 			UVM_PAGE_OWN(pg, NULL);
   1499 		}
   1500 
   1501 		if (checkfirst && any_dirty)
   1502 			break;
   1503 
   1504 		if (by_list) {
   1505 			curpg = TAILQ_NEXT(curpg, listq);
   1506 		} else {
   1507 			soff += MAX(PAGE_SIZE, fs->lfs_bsize);
   1508 		}
   1509 	}
   1510 
   1511 	/*
   1512 	 * If any pages were dirty, mark this inode as "pageout requested",
   1513 	 * and put it on the paging queue.
   1514 	 * XXXUBC locking (check locking on dchainhd too)
   1515 	 */
   1516 #ifdef notyet
   1517 	if (any_dirty) {
   1518 		if (!(ip->i_flag & IN_PAGING)) {
   1519 			ip->i_flag |= IN_PAGING;
   1520 			TAILQ_INSERT_TAIL(&fs->lfs_pchainhd, ip, i_lfs_pchain);
   1521 		}
   1522 	}
   1523 #endif
   1524 	return any_dirty;
   1525 }
   1526 
   1527 /*
   1528  * lfs_putpages functions like genfs_putpages except that
   1529  *
   1530  * (1) It needs to bounds-check the incoming requests to ensure that
   1531  *     they are block-aligned; if they are not, expand the range and
   1532  *     do the right thing in case, e.g., the requested range is clean
   1533  *     but the expanded range is dirty.
   1534  * (2) It needs to explicitly send blocks to be written when it is done.
   1535  *     VOP_PUTPAGES is not ever called with the seglock held, so
   1536  *     we simply take the seglock and let lfs_segunlock wait for us.
   1537  *     XXX Actually we can be called with the seglock held, if we have
   1538  *     XXX to flush a vnode while lfs_markv is in operation.  As of this
   1539  *     XXX writing we panic in this case.
   1540  *
   1541  * Assumptions:
   1542  *
   1543  * (1) The caller does not hold any pages in this vnode busy.  If it does,
   1544  *     there is a danger that when we expand the page range and busy the
   1545  *     pages we will deadlock.
   1546  * (2) We are called with vp->v_interlock held; we must return with it
   1547  *     released.
   1548  * (3) We don't absolutely have to free pages right away, provided that
   1549  *     the request does not have PGO_SYNCIO.  When the pagedaemon gives
   1550  *     us a request with PGO_FREE, we take the pages out of the paging
   1551  *     queue and wake up the writer, which will handle freeing them for us.
   1552  *
   1553  *     We ensure that for any filesystem block, all pages for that
   1554  *     block are either resident or not, even if those pages are higher
   1555  *     than EOF; that means that we will be getting requests to free
   1556  *     "unused" pages above EOF all the time, and should ignore them.
   1557  *
   1558  * XXX note that we're (ab)using PGO_LOCKED as "seglock held".
   1559  */
   1560 
   1561 int
   1562 lfs_putpages(void *v)
   1563 {
   1564 	int error;
   1565 	struct vop_putpages_args /* {
   1566 		struct vnode *a_vp;
   1567 		voff_t a_offlo;
   1568 		voff_t a_offhi;
   1569 		int a_flags;
   1570 	} */ *ap = v;
   1571 	struct vnode *vp;
   1572 	struct inode *ip;
   1573 	struct lfs *fs;
   1574 	struct segment *sp;
   1575 	off_t origoffset, startoffset, endoffset, origendoffset, blkeof;
   1576 	off_t off, max_endoffset;
   1577 	int s;
   1578 	boolean_t seglocked, sync, pagedaemon;
   1579 	struct vm_page *pg;
   1580 	UVMHIST_FUNC("lfs_putpages"); UVMHIST_CALLED(ubchist);
   1581 
   1582 	vp = ap->a_vp;
   1583 	ip = VTOI(vp);
   1584 	fs = ip->i_lfs;
   1585 	sync = (ap->a_flags & PGO_SYNCIO) != 0;
   1586 	pagedaemon = (curproc == uvm.pagedaemon_proc);
   1587 
   1588 	/* Putpages does nothing for metadata. */
   1589 	if (vp == fs->lfs_ivnode || vp->v_type != VREG) {
   1590 		simple_unlock(&vp->v_interlock);
   1591 		return 0;
   1592 	}
   1593 
   1594 	/*
   1595 	 * If there are no pages, don't do anything.
   1596 	 */
   1597 	if (vp->v_uobj.uo_npages == 0) {
   1598 		s = splbio();
   1599 		if (LIST_FIRST(&vp->v_dirtyblkhd) == NULL &&
   1600 		    (vp->v_flag & VONWORKLST)) {
   1601 			vp->v_flag &= ~VONWORKLST;
   1602 			LIST_REMOVE(vp, v_synclist);
   1603 		}
   1604 		splx(s);
   1605 		simple_unlock(&vp->v_interlock);
   1606 		return 0;
   1607 	}
   1608 
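         	/*
         	 * blkeof is the file size rounded up to a block boundary; pages
         	 * between EOF and blkeof belong to the last block of the file.
         	 */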
   1609 	blkeof = blkroundup(fs, ip->i_size);
   1610 
   1611 	/*
   1612 	 * Ignore requests to free pages past EOF but in the same block
   1613 	 * as EOF, unless the request is synchronous. (XXX why sync?)
   1614 	 * XXXUBC Make these pages look "active" so the pagedaemon won't
   1615 	 * XXXUBC bother us with them again.
   1616 	 */
   1617 	if (!sync && ap->a_offlo >= ip->i_size && ap->a_offlo < blkeof) {
   1618 		origoffset = ap->a_offlo;
   1619 		for (off = origoffset; off < blkeof; off += fs->lfs_bsize) {
   1620 			pg = uvm_pagelookup(&vp->v_uobj, off);
   1621 			KASSERT(pg != NULL);
   1622 			while (pg->flags & PG_BUSY) {
   1623 				pg->flags |= PG_WANTED;
   1624 				UVM_UNLOCK_AND_WAIT(pg, &vp->v_interlock, 0,
   1625 						    "lfsput2", 0);
   1626 				simple_lock(&vp->v_interlock);
   1627 			}
   1628 			uvm_lock_pageq();
   1629 			uvm_pageactivate(pg);
   1630 			uvm_unlock_pageq();
   1631 		}
   1632 		ap->a_offlo = blkeof;
   1633 		if (ap->a_offhi > 0 && ap->a_offhi <= ap->a_offlo) {
   1634 			simple_unlock(&vp->v_interlock);
   1635 			return 0;
   1636 		}
   1637 	}
   1638 
   1639 	/*
   1640 	 * Extend page range to start and end at block boundaries.
   1641 	 * (For the purposes of VOP_PUTPAGES, fragments don't exist.)
   1642 	 */
   1643 	origoffset = ap->a_offlo;
   1644 	origendoffset = ap->a_offhi;
   1645 	startoffset = origoffset & ~(fs->lfs_bmask);
   1646 	max_endoffset = (trunc_page(LLONG_MAX) >> fs->lfs_bshift)
   1647 					       << fs->lfs_bshift;
   1648 
   1649 	if (origendoffset == 0 || ap->a_flags & PGO_ALLPAGES) {
   1650 		endoffset = max_endoffset;
   1651 		origendoffset = endoffset;
   1652 	} else {
   1653 		origendoffset = round_page(ap->a_offhi);
   1654 		endoffset = round_page(blkroundup(fs, origendoffset));
   1655 	}
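         	/*
         	 * For example, assuming 8KB blocks and 4KB pages, a request for
         	 * [5000, 20000) becomes [0, 24576): 5000 rounds down to block
         	 * boundary 0, and 20000 rounds up through round_page() to 20480
         	 * and then through blkroundup() to 24576.
         	 */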
   1656 
   1657 	KASSERT(startoffset > 0 || endoffset >= startoffset);
   1658 	if (startoffset == endoffset) {
   1659 		/* Nothing to do, why were we called? */
   1660 		simple_unlock(&vp->v_interlock);
   1661 		DLOG((DLOG_PAGE, "lfs_putpages: startoffset = endoffset = %"
   1662 		      PRId64 "\n", startoffset));
   1663 		return 0;
   1664 	}
   1665 
   1666 	ap->a_offlo = startoffset;
   1667 	ap->a_offhi = endoffset;
   1668 
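         	/* Without PGO_CLEANIT there is nothing LFS-specific to do. */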
   1669 	if (!(ap->a_flags & PGO_CLEANIT))
   1670 		return genfs_putpages(v);
   1671 
   1672 	/*
    1673 	 * If there is more than one page per block, we don't want
    1674 	 * to get caught locking them backwards, so set PGO_BUSYFAIL
   1675 	 * to avoid deadlocks.
   1676 	 */
   1677 	ap->a_flags |= PGO_BUSYFAIL;
   1678 
   1679 	do {
   1680 		int r;
   1681 
   1682 		/* If no pages are dirty, we can just use genfs_putpages. */
   1683 		if (check_dirty(fs, vp, startoffset, endoffset, blkeof,
   1684 				ap->a_flags, 1) != 0)
   1685 			break;
   1686 
   1687 		/*
   1688 		 * Sometimes pages are dirtied between the time that
   1689 		 * we check and the time we try to clean them.
   1690 		 * Instruct lfs_gop_write to return EDEADLK in this case
   1691 		 * so we can write them properly.
   1692 		 */
   1693 		ip->i_lfs_iflags |= LFSI_NO_GOP_WRITE;
   1694 		r = genfs_putpages(v);
   1695 		ip->i_lfs_iflags &= ~LFSI_NO_GOP_WRITE;
   1696 		if (r != EDEADLK)
   1697 			return r;
   1698 
   1699 		/* Start over. */
   1700 		preempt(1);
   1701 		simple_lock(&vp->v_interlock);
    1702 	} while (1);
   1703 
   1704 	/*
   1705 	 * Dirty and asked to clean.
   1706 	 *
    1707 	 * The pagedaemon can't actually write LFS pages; wake up
    1708 	 * the writer to take care of that.  The writer will notice
    1709 	 * the pager inode queue and act on it.
   1710 	 */
   1711 	if (pagedaemon) {
   1712 		++fs->lfs_pdflush;
   1713 		wakeup(&lfs_writer_daemon);
   1714 		simple_unlock(&vp->v_interlock);
   1715 		return EWOULDBLOCK;
   1716 	}
   1717 
   1718 	/*
   1719 	 * If this is a file created in a recent dirop, we can't flush its
   1720 	 * inode until the dirop is complete.  Drain dirops, then flush the
   1721 	 * filesystem (taking care of any other pending dirops while we're
   1722 	 * at it).
   1723 	 */
   1724 	if ((ap->a_flags & (PGO_CLEANIT|PGO_LOCKED)) == PGO_CLEANIT &&
   1725 	    (vp->v_flag & VDIROP)) {
   1726 		int locked;
   1727 
   1728 		DLOG((DLOG_PAGE, "lfs_putpages: flushing VDIROP\n"));
   1729 		lfs_writer_enter(fs, "ppdirop");
   1730 		locked = VOP_ISLOCKED(vp) && /* XXX */
   1731 			vp->v_lock.lk_lockholder == curproc->p_pid;
   1732 		if (locked)
   1733 			VOP_UNLOCK(vp, 0);
   1734 		simple_unlock(&vp->v_interlock);
   1735 
   1736 		lfs_flush_fs(fs, sync ? SEGM_SYNC : 0);
   1737 
   1738 		simple_lock(&vp->v_interlock);
   1739 		if (locked)
   1740 			VOP_LOCK(vp, LK_EXCLUSIVE);
   1741 		lfs_writer_leave(fs);
   1742 
   1743 		/* XXX the flush should have taken care of this one too! */
   1744 	}
   1745 
   1746 	/*
   1747 	 * This is it.	We are going to write some pages.  From here on
   1748 	 * down it's all just mechanics.
   1749 	 *
   1750 	 * Don't let genfs_putpages wait; lfs_segunlock will wait for us.
   1751 	 */
   1752 	ap->a_flags &= ~PGO_SYNCIO;
   1753 
   1754 	/*
   1755 	 * If we've already got the seglock, flush the node and return.
   1756 	 * The FIP has already been set up for us by lfs_writefile,
   1757 	 * and FIP cleanup and lfs_updatemeta will also be done there,
   1758 	 * unless genfs_putpages returns EDEADLK; then we must flush
   1759 	 * what we have, and correct FIP and segment header accounting.
   1760 	 */
   1761 
   1762 	seglocked = (ap->a_flags & PGO_LOCKED) != 0;
   1763 	if (!seglocked) {
   1764 		simple_unlock(&vp->v_interlock);
   1765 		/*
   1766 		 * Take the seglock, because we are going to be writing pages.
   1767 		 */
   1768 		error = lfs_seglock(fs, SEGM_PROT | (sync ? SEGM_SYNC : 0));
   1769 		if (error != 0)
   1770 			return error;
   1771 		simple_lock(&vp->v_interlock);
   1772 	}
   1773 
   1774 	/*
   1775 	 * VOP_PUTPAGES should not be called while holding the seglock.
   1776 	 * XXXUBC fix lfs_markv, or do this properly.
   1777 	 */
   1778 	/* KASSERT(fs->lfs_seglock == 1); */
   1779 
   1780 	/*
   1781 	 * We assume we're being called with sp->fip pointing at blank space.
   1782 	 * Account for a new FIP in the segment header, and set sp->vp.
   1783 	 * (This should duplicate the setup at the top of lfs_writefile().)
   1784 	 */
   1785 	sp = fs->lfs_sp;
   1786 	if (!seglocked) {
   1787 		if (sp->seg_bytes_left < fs->lfs_bsize ||
   1788 		    sp->sum_bytes_left < sizeof(struct finfo))
   1789 			(void) lfs_writeseg(fs, fs->lfs_sp);
   1790 
   1791 		sp->sum_bytes_left -= FINFOSIZE;
   1792 		++((SEGSUM *)(sp->segsum))->ss_nfinfo;
   1793 	}
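         	/*
         	 * Record the vnode being written in the segment structure;
         	 * the gather and updatemeta code below expects sp->vp to be set.
         	 */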
   1794 	KASSERT(sp->vp == NULL);
   1795 	sp->vp = vp;
   1796 
   1797 	if (!seglocked) {
   1798 		if (vp->v_flag & VDIROP)
   1799 			((SEGSUM *)(sp->segsum))->ss_flags |= (SS_DIROP|SS_CONT);
   1800 	}
   1801 
   1802 	sp->fip->fi_nblocks = 0;
   1803 	sp->fip->fi_ino = ip->i_number;
   1804 	sp->fip->fi_version = ip->i_gen;
   1805 
   1806 	/*
   1807 	 * Loop through genfs_putpages until all pages are gathered.
   1808 	 * genfs_putpages() drops the interlock, so reacquire it if necessary.
    1809 	 * Whenever we lose the interlock we also have to rerun
    1810 	 * check_dirty.
   1811 	 */
   1812 again:
   1813 	check_dirty(fs, vp, startoffset, endoffset, blkeof, ap->a_flags, 0);
   1814 
   1815 	if ((error = genfs_putpages(v)) == EDEADLK) {
   1816 		DLOG((DLOG_PAGE, "lfs_putpages: genfs_putpages returned"
   1817 		      " EDEADLK [2] ino %d off %x (seg %d)\n",
   1818 		      ip->i_number, fs->lfs_offset,
   1819 		      dtosn(fs, fs->lfs_offset)));
    1820 		/* If we gathered anything, write it out; otherwise short-circuit */
   1821 		if (sp->cbpp - sp->bpp > 1) {
   1822 			/* Write gathered pages */
   1823 			lfs_updatemeta(sp);
   1824 			(void) lfs_writeseg(fs, sp);
   1825 
   1826 			/*
    1827 			 * Reinitialize a brand-new FIP and add ourselves to it.
   1828 			 * (This should duplicate the fixup in
   1829 			 * lfs_gatherpages().)
   1830 			 */
   1831 			KASSERT(sp->vp == vp);
   1832 			sp->fip->fi_version = ip->i_gen;
   1833 			sp->fip->fi_ino = ip->i_number;
   1834 			/* Add us to the new segment summary. */
   1835 			++((SEGSUM *)(sp->segsum))->ss_nfinfo;
   1836 			sp->sum_bytes_left -= FINFOSIZE;
   1837 		}
   1838 
   1839 		/* Give the write a chance to complete */
   1840 		preempt(1);
   1841 
   1842 		/* We've lost the interlock.  Start over. */
   1843 		simple_lock(&vp->v_interlock);
   1844 		goto again;
   1845 	}
   1846 
   1847 	KASSERT(sp->vp == vp);
   1848 	if (!seglocked) {
   1849 		sp->vp = NULL; /* XXX lfs_gather below will set this */
   1850 
   1851 		/* Write indirect blocks as well */
   1852 		lfs_gather(fs, fs->lfs_sp, vp, lfs_match_indir);
   1853 		lfs_gather(fs, fs->lfs_sp, vp, lfs_match_dindir);
   1854 		lfs_gather(fs, fs->lfs_sp, vp, lfs_match_tindir);
   1855 
   1856 		KASSERT(sp->vp == NULL);
   1857 		sp->vp = vp;
   1858 	}
   1859 
   1860 	/*
   1861 	 * Blocks are now gathered into a segment waiting to be written.
   1862 	 * All that's left to do is update metadata, and write them.
   1863 	 */
   1864 	lfs_updatemeta(sp);
   1865 	KASSERT(sp->vp == vp);
   1866 	sp->vp = NULL;
   1867 
   1868 	if (seglocked) {
   1869 		/* we're called by lfs_writefile. */
   1870 		return error;
   1871 	}
   1872 
   1873 	/*
   1874 	 * Clean up FIP, since we're done writing this file.
   1875 	 * This should duplicate cleanup at the end of lfs_writefile().
   1876 	 */
   1877 	if (sp->fip->fi_nblocks != 0) {
   1878 		sp->fip = (FINFO*)((caddr_t)sp->fip + FINFOSIZE +
   1879 			sizeof(int32_t) * sp->fip->fi_nblocks);
   1880 		sp->start_lbp = &sp->fip->fi_blocks[0];
   1881 	} else {
   1882 		sp->sum_bytes_left += FINFOSIZE;
   1883 		--((SEGSUM *)(sp->segsum))->ss_nfinfo;
   1884 	}
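         	/* Write the partial segment containing the blocks gathered above. */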
   1885 	lfs_writeseg(fs, fs->lfs_sp);
   1886 
   1887 	/*
   1888 	 * XXX - with the malloc/copy writeseg, the pages are freed by now
   1889 	 * even if we don't wait (e.g. if we hold a nested lock).  This
   1890 	 * will not be true if we stop using malloc/copy.
   1891 	 */
   1892 	KASSERT(fs->lfs_sp->seg_flags & SEGM_PROT);
   1893 	lfs_segunlock(fs);
   1894 
   1895 	/*
   1896 	 * Wait for v_numoutput to drop to zero.  The seglock should
   1897 	 * take care of this, but there is a slight possibility that
   1898 	 * aiodoned might not have got around to our buffers yet.
   1899 	 */
   1900 	if (sync) {
   1901 		int s;
   1902 
   1903 		s = splbio();
   1904 		simple_lock(&global_v_numoutput_slock);
   1905 		while (vp->v_numoutput > 0) {
   1906 			DLOG((DLOG_PAGE, "lfs_putpages: ino %d sleeping on"
   1907 			      " num %d\n", ip->i_number, vp->v_numoutput));
   1908 			vp->v_flag |= VBWAIT;
   1909 			ltsleep(&vp->v_numoutput, PRIBIO + 1, "lfs_vn", 0,
   1910 			    &global_v_numoutput_slock);
   1911 		}
   1912 		simple_unlock(&global_v_numoutput_slock);
   1913 		splx(s);
   1914 	}
   1915 	return error;
   1916 }
   1917 
   1918 /*
   1919  * Return the last logical file offset that should be written for this file
   1920  * if we're doing a write that ends at "size".	If writing, we need to know
   1921  * about sizes on disk, i.e. fragments if there are any; if reading, we need
   1922  * to know about entire blocks.
   1923  */
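         /*
          * For example, assuming 8KB blocks and 1KB fragments: if "size" is
          * 5000 and falls within the direct-block area at or beyond the block
          * holding the current EOF (and GOP_SIZE_MEM is not set), *eobp becomes
          * fragroundup(fs, 5000) = 5120; otherwise it becomes
          * blkroundup(fs, 5000) = 8192.
          */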
   1924 void
   1925 lfs_gop_size(struct vnode *vp, off_t size, off_t *eobp, int flags)
   1926 {
   1927 	struct inode *ip = VTOI(vp);
   1928 	struct lfs *fs = ip->i_lfs;
   1929 	daddr_t olbn, nlbn;
   1930 
   1931 	KASSERT(flags & (GOP_SIZE_READ | GOP_SIZE_WRITE));
   1932 	KASSERT((flags & (GOP_SIZE_READ | GOP_SIZE_WRITE))
   1933 		!= (GOP_SIZE_READ | GOP_SIZE_WRITE));
   1934 
   1935 	olbn = lblkno(fs, ip->i_size);
   1936 	nlbn = lblkno(fs, size);
   1937 	if (!(flags & GOP_SIZE_MEM) && nlbn < NDADDR && olbn <= nlbn) {
   1938 		*eobp = fragroundup(fs, size);
   1939 	} else {
   1940 		*eobp = blkroundup(fs, size);
   1941 	}
   1942 }
   1943 
   1944 #ifdef DEBUG
   1945 void lfs_dump_vop(void *);
   1946 
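         /*
          * Debugging aid: print the vnode and its on-disk inode, given a
          * vop_putpages argument structure.
          */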
   1947 void
   1948 lfs_dump_vop(void *v)
   1949 {
   1950 	struct vop_putpages_args /* {
   1951 		struct vnode *a_vp;
   1952 		voff_t a_offlo;
   1953 		voff_t a_offhi;
   1954 		int a_flags;
   1955 	} */ *ap = v;
   1956 
   1957 #ifdef DDB
   1958 	vfs_vnode_print(ap->a_vp, 0, printf);
   1959 #endif
   1960 	lfs_dump_dinode(VTOI(ap->a_vp)->i_din.ffs1_din);
   1961 }
   1962 #endif
   1963 
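         /*
          * lfs_mmap: refuse to memory-map the ifile; all other vnodes are
          * handled by ufs_mmap().
          */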
   1964 int
   1965 lfs_mmap(void *v)
   1966 {
   1967 	struct vop_mmap_args /* {
   1968 		const struct vnodeop_desc *a_desc;
   1969 		struct vnode *a_vp;
   1970 		int a_fflags;
   1971 		struct ucred *a_cred;
   1972 		struct lwp *a_l;
   1973 	} */ *ap = v;
   1974 
   1975 	if (VTOI(ap->a_vp)->i_number == LFS_IFILE_INUM)
   1976 		return EOPNOTSUPP;
   1977 	return ufs_mmap(v);
   1978 }
   1979