ffs_vfsops.c revision 1.221
      1 /*	$NetBSD: ffs_vfsops.c,v 1.221 2008/01/28 14:31:20 dholland Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1989, 1991, 1993, 1994
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. Neither the name of the University nor the names of its contributors
     16  *    may be used to endorse or promote products derived from this software
     17  *    without specific prior written permission.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  *
     31  *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
     32  */
     33 
     34 #include <sys/cdefs.h>
     35 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.221 2008/01/28 14:31:20 dholland Exp $");
     36 
     37 #if defined(_KERNEL_OPT)
     38 #include "opt_ffs.h"
     39 #include "opt_quota.h"
     40 #include "opt_softdep.h"
     41 #endif
     42 
     43 #include <sys/param.h>
     44 #include <sys/systm.h>
     45 #include <sys/namei.h>
     46 #include <sys/proc.h>
     47 #include <sys/kernel.h>
     48 #include <sys/vnode.h>
     49 #include <sys/socket.h>
     50 #include <sys/mount.h>
     51 #include <sys/buf.h>
     52 #include <sys/device.h>
     53 #include <sys/mbuf.h>
     54 #include <sys/file.h>
     55 #include <sys/disklabel.h>
     56 #include <sys/ioctl.h>
     57 #include <sys/errno.h>
     58 #include <sys/malloc.h>
     59 #include <sys/pool.h>
     60 #include <sys/lock.h>
     61 #include <sys/sysctl.h>
     62 #include <sys/conf.h>
     63 #include <sys/kauth.h>
     64 #include <sys/fstrans.h>
     65 
     66 #include <miscfs/genfs/genfs.h>
     67 #include <miscfs/specfs/specdev.h>
     68 
     69 #include <ufs/ufs/quota.h>
     70 #include <ufs/ufs/ufsmount.h>
     71 #include <ufs/ufs/inode.h>
     72 #include <ufs/ufs/dir.h>
     73 #include <ufs/ufs/ufs_extern.h>
     74 #include <ufs/ufs/ufs_bswap.h>
     75 
     76 #include <ufs/ffs/fs.h>
     77 #include <ufs/ffs/ffs_extern.h>
     78 
     79 /* how many times ffs_init() was called */
     80 int ffs_initcount = 0;
     81 
     82 extern kmutex_t ufs_hashlock;
     83 
     84 extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
     85 extern const struct vnodeopv_desc ffs_specop_opv_desc;
     86 extern const struct vnodeopv_desc ffs_fifoop_opv_desc;
     87 
     88 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
     89 	&ffs_vnodeop_opv_desc,
     90 	&ffs_specop_opv_desc,
     91 	&ffs_fifoop_opv_desc,
     92 	NULL,
     93 };
     94 
     95 struct vfsops ffs_vfsops = {
     96 	MOUNT_FFS,
     97 	sizeof (struct ufs_args),
     98 	ffs_mount,
     99 	ufs_start,
    100 	ffs_unmount,
    101 	ufs_root,
    102 	ufs_quotactl,
    103 	ffs_statvfs,
    104 	ffs_sync,
    105 	ffs_vget,
    106 	ffs_fhtovp,
    107 	ffs_vptofh,
    108 	ffs_init,
    109 	ffs_reinit,
    110 	ffs_done,
    111 	ffs_mountroot,
    112 	ffs_snapshot,
    113 	ffs_extattrctl,
    114 	ffs_suspendctl,
    115 	genfs_renamelock_enter,
    116 	genfs_renamelock_exit,
    117 	ffs_vnodeopv_descs,
    118 	0,
    119 	{ NULL, NULL },
    120 };
    121 VFS_ATTACH(ffs_vfsops);
    122 
    123 static const struct genfs_ops ffs_genfsops = {
    124 	.gop_size = ffs_gop_size,
    125 	.gop_alloc = ufs_gop_alloc,
    126 	.gop_write = genfs_gop_write,
    127 	.gop_markupdate = ufs_gop_markupdate,
    128 };
    129 
    130 static const struct ufs_ops ffs_ufsops = {
    131 	.uo_itimes = ffs_itimes,
    132 	.uo_update = ffs_update,
    133 	.uo_truncate = ffs_truncate,
    134 	.uo_valloc = ffs_valloc,
    135 	.uo_vfree = ffs_vfree,
    136 	.uo_balloc = ffs_balloc,
    137 };
    138 
    139 pool_cache_t ffs_inode_cache;
    140 pool_cache_t ffs_dinode1_cache;
    141 pool_cache_t ffs_dinode2_cache;
    142 
    143 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, daddr_t);
    144 static void ffs_oldfscompat_write(struct fs *, struct ufsmount *);
    145 
    146 /*
    147  * Called by main() when ffs is going to be mounted as root.
    148  */
    149 
    150 int
    151 ffs_mountroot(void)
    152 {
    153 	struct fs *fs;
    154 	struct mount *mp;
    155 	struct lwp *l = curlwp;			/* XXX */
    156 	struct ufsmount *ump;
    157 	int error;
    158 
    159 	if (device_class(root_device) != DV_DISK)
    160 		return (ENODEV);
    161 
    162 	if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
    163 		vrele(rootvp);
    164 		return (error);
    165 	}
    166 	if ((error = ffs_mountfs(rootvp, mp, l)) != 0) {
    167 		mp->mnt_op->vfs_refcount--;
    168 		vfs_unbusy(mp);
    169 		vfs_destroy(mp);
    170 		return (error);
    171 	}
    172 	mutex_enter(&mountlist_lock);
    173 	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
    174 	mutex_exit(&mountlist_lock);
    175 	ump = VFSTOUFS(mp);
    176 	fs = ump->um_fs;
    177 	memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
    178 	(void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
    179 	(void)ffs_statvfs(mp, &mp->mnt_stat);
    180 	vfs_unbusy(mp);
    181 	setrootfstime((time_t)fs->fs_time);
    182 	return (0);
    183 }
    184 
    185 /*
    186  * VFS Operations.
    187  *
    188  * mount system call
    189  */
    190 int
    191 ffs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
    192 {
    193 	struct lwp *l = curlwp;
    194 	struct nameidata nd;
    195 	struct vnode *vp, *devvp = NULL;
    196 	struct ufs_args *args = data;
    197 	struct ufsmount *ump = NULL;
    198 	struct fs *fs;
    199 	int error = 0, flags, update;
    200 	mode_t accessmode;
    201 
    202 	if (*data_len < sizeof *args)
    203 		return EINVAL;
    204 
    205 	if (mp->mnt_flag & MNT_GETARGS) {
    206 		ump = VFSTOUFS(mp);
    207 		if (ump == NULL)
    208 			return EIO;
    209 		args->fspec = NULL;
    210 		*data_len = sizeof *args;
    211 		return 0;
    212 	}
    213 
    214 #if !defined(SOFTDEP)
    215 	mp->mnt_flag &= ~MNT_SOFTDEP;
    216 #endif
    217 
    218 	update = mp->mnt_flag & MNT_UPDATE;
    219 
    220 	/* Check arguments */
    221 	if (args->fspec != NULL) {
    222 		/*
    223 		 * Look up the name and verify that it's sane.
    224 		 */
    225 		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, args->fspec);
    226 		if ((error = namei(&nd)) != 0)
    227 			return (error);
    228 		devvp = nd.ni_vp;
    229 
    230 		if (!update) {
    231 			/*
    232 			 * Be sure this is a valid block device
    233 			 */
    234 			if (devvp->v_type != VBLK)
    235 				error = ENOTBLK;
    236 			else if (bdevsw_lookup(devvp->v_rdev) == NULL)
    237 				error = ENXIO;
    238 		} else {
    239 			/*
    240 			 * Be sure we're still naming the same device
    241 			 * used for our initial mount
    242 			 */
    243 			ump = VFSTOUFS(mp);
    244 			if (devvp != ump->um_devvp) {
    245 				if (devvp->v_rdev != ump->um_devvp->v_rdev)
    246 					error = EINVAL;
    247 				else {
    248 					vrele(devvp);
    249 					devvp = ump->um_devvp;
    250 					vref(devvp);
    251 				}
    252 			}
    253 		}
    254 	} else {
    255 		if (!update) {
    256 			/* New mounts must have a filename for the device */
    257 			return (EINVAL);
    258 		} else {
    259 			/* Use the extant mount */
    260 			ump = VFSTOUFS(mp);
    261 			devvp = ump->um_devvp;
    262 			vref(devvp);
    263 		}
    264 	}
    265 
    266 	/*
    267 	 * Mark the device and any existing vnodes as involved in
    268 	 * softdep processing.
    269 	 */
    270 	if ((mp->mnt_flag & MNT_SOFTDEP) != 0) {
    271 		devvp->v_uflag |= VU_SOFTDEP;
    272 		mutex_enter(&mntvnode_lock);
    273 		TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
    274 			if (vp->v_mount != mp || vismarker(vp))
    275 				continue;
    276 			vp->v_uflag |= VU_SOFTDEP;
    277 		}
    278 		mutex_exit(&mntvnode_lock);
    279 	}
    280 
    281 	/*
    282 	 * If mount by non-root, then verify that user has necessary
    283 	 * permissions on the device.
    284 	 */
    285 	if (error == 0 && kauth_authorize_generic(l->l_cred,
    286 	    KAUTH_GENERIC_ISSUSER, NULL) != 0) {
    287 		accessmode = VREAD;
    288 		if (update ?
    289 		    (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
    290 		    (mp->mnt_flag & MNT_RDONLY) == 0)
    291 			accessmode |= VWRITE;
    292 		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    293 		error = VOP_ACCESS(devvp, accessmode, l->l_cred);
    294 		VOP_UNLOCK(devvp, 0);
    295 	}
    296 
    297 	if (error) {
    298 		vrele(devvp);
    299 		return (error);
    300 	}
    301 
    302 	if (!update) {
    303 		int xflags;
    304 
    305 		if (mp->mnt_flag & MNT_RDONLY)
    306 			xflags = FREAD;
    307 		else
    308 			xflags = FREAD|FWRITE;
    309 		error = VOP_OPEN(devvp, xflags, FSCRED);
    310 		if (error)
    311 			goto fail;
    312 		error = ffs_mountfs(devvp, mp, l);
    313 		if (error) {
    314 			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    315 			(void)VOP_CLOSE(devvp, xflags, NOCRED);
    316 			VOP_UNLOCK(devvp, 0);
    317 			goto fail;
    318 		}
    319 
    320 		ump = VFSTOUFS(mp);
    321 		fs = ump->um_fs;
    322 		if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
    323 		    (MNT_SOFTDEP | MNT_ASYNC)) {
    324 			printf("%s fs uses soft updates, "
    325 			    "ignoring async mode\n",
    326 			    fs->fs_fsmnt);
    327 			mp->mnt_flag &= ~MNT_ASYNC;
    328 		}
    329 	} else {
    330 		/*
    331 		 * Update the mount.
    332 		 */
    333 
    334 		/*
    335 		 * The initial mount got a reference on this
    336 		 * device, so drop the one obtained via
    337 		 * namei(), above.
    338 		 */
    339 		vrele(devvp);
    340 
    341 		ump = VFSTOUFS(mp);
    342 		fs = ump->um_fs;
    343 		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
    344 			/*
    345 			 * Changing from r/w to r/o
    346 			 */
    347 			flags = WRITECLOSE;
    348 			if (mp->mnt_flag & MNT_FORCE)
    349 				flags |= FORCECLOSE;
    350 			if (mp->mnt_flag & MNT_SOFTDEP)
    351 				error = softdep_flushfiles(mp, flags, l);
    352 			else
    353 				error = ffs_flushfiles(mp, flags, l);
    354 			if (fs->fs_pendingblocks != 0 ||
    355 			    fs->fs_pendinginodes != 0) {
    356 				printf("%s: update error: blocks %" PRId64
    357 				       " files %d\n",
    358 				    fs->fs_fsmnt, fs->fs_pendingblocks,
    359 				    fs->fs_pendinginodes);
    360 				fs->fs_pendingblocks = 0;
    361 				fs->fs_pendinginodes = 0;
    362 			}
    363 			if (error == 0 &&
    364 			    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
    365 			    fs->fs_clean & FS_WASCLEAN) {
    366 				if (mp->mnt_flag & MNT_SOFTDEP)
    367 					fs->fs_flags &= ~FS_DOSOFTDEP;
    368 				fs->fs_clean = FS_ISCLEAN;
    369 				(void) ffs_sbupdate(ump, MNT_WAIT);
    370 			}
    371 			if (error)
    372 				return (error);
    373 			fs->fs_ronly = 1;
    374 			fs->fs_fmod = 0;
    375 		}
    376 
    377 		/*
     378 	 * Flush soft dependencies if disabling them via an update
    379 		 * mount. This may leave some items to be processed,
    380 		 * so don't do this yet XXX.
    381 		 */
    382 		if ((fs->fs_flags & FS_DOSOFTDEP) &&
    383 		    !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
    384 #ifdef notyet
    385 			flags = WRITECLOSE;
    386 			if (mp->mnt_flag & MNT_FORCE)
    387 				flags |= FORCECLOSE;
    388 			error = softdep_flushfiles(mp, flags, l);
    389 			if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
    390 				fs->fs_flags &= ~FS_DOSOFTDEP;
    391 				(void) ffs_sbupdate(ump, MNT_WAIT);
    392 #elif defined(SOFTDEP)
    393 			mp->mnt_flag |= MNT_SOFTDEP;
    394 #endif
    395 		}
    396 
    397 		/*
    398 		 * When upgrading to a softdep mount, we must first flush
    399 		 * all vnodes. (not done yet -- see above)
    400 		 */
    401 		if (!(fs->fs_flags & FS_DOSOFTDEP) &&
    402 		    (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
    403 #ifdef notyet
    404 			flags = WRITECLOSE;
    405 			if (mp->mnt_flag & MNT_FORCE)
    406 				flags |= FORCECLOSE;
    407 			error = ffs_flushfiles(mp, flags, l);
    408 #else
    409 			mp->mnt_flag &= ~MNT_SOFTDEP;
    410 #endif
    411 		}
    412 
    413 		if (mp->mnt_flag & MNT_RELOAD) {
    414 			error = ffs_reload(mp, l->l_cred, l);
    415 			if (error)
    416 				return (error);
    417 		}
    418 
    419 		if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
    420 			/*
    421 			 * Changing from read-only to read/write
    422 			 */
    423 			fs->fs_ronly = 0;
    424 			fs->fs_clean <<= 1;
    425 			fs->fs_fmod = 1;
    426 			if ((fs->fs_flags & FS_DOSOFTDEP)) {
    427 				error = softdep_mount(devvp, mp, fs,
    428 				    l->l_cred);
    429 				if (error)
    430 					return (error);
    431 			}
    432 			if (fs->fs_snapinum[0] != 0)
    433 				ffs_snapshot_mount(mp);
    434 		}
    435 		if (args->fspec == NULL)
    436 			return EINVAL;
    437 		if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
    438 		    (MNT_SOFTDEP | MNT_ASYNC)) {
    439 			printf("%s fs uses soft updates, ignoring async mode\n",
    440 			    fs->fs_fsmnt);
    441 			mp->mnt_flag &= ~MNT_ASYNC;
    442 		}
    443 	}
    444 
    445 	error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
    446 	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
    447 	if (error == 0)
    448 		(void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
    449 		    sizeof(fs->fs_fsmnt));
    450 	if (mp->mnt_flag & MNT_SOFTDEP)
    451 		fs->fs_flags |= FS_DOSOFTDEP;
    452 	else
    453 		fs->fs_flags &= ~FS_DOSOFTDEP;
    454 	if (fs->fs_fmod != 0) {	/* XXX */
    455 		fs->fs_fmod = 0;
    456 		if (fs->fs_clean & FS_WASCLEAN)
    457 			fs->fs_time = time_second;
    458 		else {
    459 			printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
    460 			    mp->mnt_stat.f_mntfromname, fs->fs_clean);
    461 			printf("%s: lost blocks %" PRId64 " files %d\n",
    462 			    mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
    463 			    fs->fs_pendinginodes);
    464 		}
    465 		(void) ffs_cgupdate(ump, MNT_WAIT);
    466 	}
    467 	return (error);
    468 
    469 fail:
    470 	vrele(devvp);
    471 	return (error);
    472 }
    473 
    474 /*
    475  * Reload all incore data for a filesystem (used after running fsck on
    476  * the root filesystem and finding things to fix). The filesystem must
    477  * be mounted read-only.
    478  *
    479  * Things to do to update the mount:
    480  *	1) invalidate all cached meta-data.
    481  *	2) re-read superblock from disk.
    482  *	3) re-read summary information from disk.
    483  *	4) invalidate all inactive vnodes.
    484  *	5) invalidate all cached file data.
    485  *	6) re-read inode data for all active vnodes.
    486  */
    487 int
    488 ffs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
    489 {
    490 	struct vnode *vp, *mvp, *devvp;
    491 	struct inode *ip;
    492 	void *space;
    493 	struct buf *bp;
    494 	struct fs *fs, *newfs;
    495 	struct partinfo dpart;
    496 	int i, blks, size, error;
    497 	int32_t *lp;
    498 	struct ufsmount *ump;
    499 	daddr_t sblockloc;
    500 
    501 	if ((mp->mnt_flag & MNT_RDONLY) == 0)
    502 		return (EINVAL);
    503 
    504 	ump = VFSTOUFS(mp);
    505 	/*
    506 	 * Step 1: invalidate all cached meta-data.
    507 	 */
    508 	devvp = ump->um_devvp;
    509 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    510 	error = vinvalbuf(devvp, 0, cred, l, 0, 0);
    511 	VOP_UNLOCK(devvp, 0);
    512 	if (error)
    513 		panic("ffs_reload: dirty1");
    514 	/*
    515 	 * Step 2: re-read superblock from disk.
    516 	 */
    517 	fs = ump->um_fs;
    518 	if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, NOCRED) != 0)
    519 		size = DEV_BSIZE;
    520 	else
    521 		size = dpart.disklab->d_secsize;
     522 	/* XXX we don't handle the possibility that the superblock moved. */
    523 	error = bread(devvp, fs->fs_sblockloc / size, fs->fs_sbsize,
    524 		      NOCRED, &bp);
    525 	if (error) {
    526 		brelse(bp, 0);
    527 		return (error);
    528 	}
    529 	newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
    530 	memcpy(newfs, bp->b_data, fs->fs_sbsize);
    531 #ifdef FFS_EI
    532 	if (ump->um_flags & UFS_NEEDSWAP) {
    533 		ffs_sb_swap((struct fs*)bp->b_data, newfs);
    534 		fs->fs_flags |= FS_SWAPPED;
    535 	} else
    536 #endif
    537 		fs->fs_flags &= ~FS_SWAPPED;
    538 	if ((newfs->fs_magic != FS_UFS1_MAGIC &&
    539 	     newfs->fs_magic != FS_UFS2_MAGIC)||
    540 	     newfs->fs_bsize > MAXBSIZE ||
    541 	     newfs->fs_bsize < sizeof(struct fs)) {
    542 		brelse(bp, 0);
    543 		free(newfs, M_UFSMNT);
    544 		return (EIO);		/* XXX needs translation */
    545 	}
     546 	/* Store off old fs_sblockloc for ffs_oldfscompat_read. */
    547 	sblockloc = fs->fs_sblockloc;
    548 	/*
    549 	 * Copy pointer fields back into superblock before copying in	XXX
    550 	 * new superblock. These should really be in the ufsmount.	XXX
     551 	 * Note that important parameters (e.g. fs_ncg) are unchanged.
    552 	 */
    553 	newfs->fs_csp = fs->fs_csp;
    554 	newfs->fs_maxcluster = fs->fs_maxcluster;
    555 	newfs->fs_contigdirs = fs->fs_contigdirs;
    556 	newfs->fs_ronly = fs->fs_ronly;
    557 	newfs->fs_active = fs->fs_active;
    558 	memcpy(fs, newfs, (u_int)fs->fs_sbsize);
    559 	brelse(bp, 0);
    560 	free(newfs, M_UFSMNT);
    561 
     562 	/* Recheck for Apple UFS filesystem */
    563 	ump->um_flags &= ~UFS_ISAPPLEUFS;
    564 	/* First check to see if this is tagged as an Apple UFS filesystem
    565 	 * in the disklabel
    566 	 */
    567 	if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) == 0) &&
    568 		(dpart.part->p_fstype == FS_APPLEUFS)) {
    569 		ump->um_flags |= UFS_ISAPPLEUFS;
    570 	}
    571 #ifdef APPLE_UFS
    572 	else {
     573 		/* Manually look for an Apple UFS label, and if a valid one
    574 		 * is found, then treat it like an Apple UFS filesystem anyway
    575 		 */
    576 		error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
    577 			APPLEUFS_LABEL_SIZE, cred, &bp);
    578 		if (error) {
    579 			brelse(bp, 0);
    580 			return (error);
    581 		}
    582 		error = ffs_appleufs_validate(fs->fs_fsmnt,
    583 			(struct appleufslabel *)bp->b_data,NULL);
    584 		if (error == 0)
    585 			ump->um_flags |= UFS_ISAPPLEUFS;
    586 		brelse(bp, 0);
    587 		bp = NULL;
    588 	}
    589 #else
    590 	if (ump->um_flags & UFS_ISAPPLEUFS)
    591 		return (EIO);
    592 #endif
    593 
    594 	if (UFS_MPISAPPLEUFS(ump)) {
    595 		/* see comment about NeXT below */
    596 		ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
    597 		ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
    598 		mp->mnt_iflag |= IMNT_DTYPE;
    599 	} else {
    600 		ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
    601 		ump->um_dirblksiz = DIRBLKSIZ;
    602 		if (ump->um_maxsymlinklen > 0)
    603 			mp->mnt_iflag |= IMNT_DTYPE;
    604 		else
    605 			mp->mnt_iflag &= ~IMNT_DTYPE;
    606 	}
    607 	ffs_oldfscompat_read(fs, ump, sblockloc);
    608 	mutex_enter(&ump->um_lock);
    609 	ump->um_maxfilesize = fs->fs_maxfilesize;
    610 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
    611 		fs->fs_pendingblocks = 0;
    612 		fs->fs_pendinginodes = 0;
    613 	}
    614 	mutex_exit(&ump->um_lock);
    615 
    616 	ffs_statvfs(mp, &mp->mnt_stat);
    617 	/*
    618 	 * Step 3: re-read summary information from disk.
    619 	 */
    620 	blks = howmany(fs->fs_cssize, fs->fs_fsize);
    621 	space = fs->fs_csp;
    622 	for (i = 0; i < blks; i += fs->fs_frag) {
    623 		size = fs->fs_bsize;
    624 		if (i + fs->fs_frag > blks)
    625 			size = (blks - i) * fs->fs_fsize;
    626 		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
    627 			      NOCRED, &bp);
    628 		if (error) {
    629 			brelse(bp, 0);
    630 			return (error);
    631 		}
    632 #ifdef FFS_EI
    633 		if (UFS_FSNEEDSWAP(fs))
    634 			ffs_csum_swap((struct csum *)bp->b_data,
    635 			    (struct csum *)space, size);
    636 		else
    637 #endif
    638 			memcpy(space, bp->b_data, (size_t)size);
    639 		space = (char *)space + size;
    640 		brelse(bp, 0);
    641 	}
    642 	if ((fs->fs_flags & FS_DOSOFTDEP))
    643 		softdep_mount(devvp, mp, fs, cred);
    644 	if (fs->fs_snapinum[0] != 0)
    645 		ffs_snapshot_mount(mp);
    646 	/*
    647 	 * We no longer know anything about clusters per cylinder group.
    648 	 */
    649 	if (fs->fs_contigsumsize > 0) {
    650 		lp = fs->fs_maxcluster;
    651 		for (i = 0; i < fs->fs_ncg; i++)
    652 			*lp++ = fs->fs_contigsumsize;
    653 	}
    654 
    655 	/* Allocate a marker vnode. */
    656 	if ((mvp = vnalloc(mp)) == NULL)
    657 		return ENOMEM;
    658 	/*
    659 	 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
    660 	 * and vclean() can be called indirectly
    661 	 */
    662 	mutex_enter(&mntvnode_lock);
    663  loop:
    664 	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
    665 		vmark(mvp, vp);
    666 		if (vp->v_mount != mp || vismarker(vp))
    667 			continue;
    668 		/*
    669 		 * Step 4: invalidate all inactive vnodes.
    670 		 */
    671 		if (vrecycle(vp, &mntvnode_lock, l)) {
    672 			mutex_enter(&mntvnode_lock);
    673 			(void)vunmark(mvp);
    674 			goto loop;
    675 		}
    676 		/*
    677 		 * Step 5: invalidate all cached file data.
    678 		 */
    679 		mutex_enter(&vp->v_interlock);
    680 		mutex_exit(&mntvnode_lock);
    681 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
    682 			(void)vunmark(mvp);
    683 			goto loop;
    684 		}
    685 		if (vinvalbuf(vp, 0, cred, l, 0, 0))
    686 			panic("ffs_reload: dirty2");
    687 		/*
    688 		 * Step 6: re-read inode data for all active vnodes.
    689 		 */
    690 		ip = VTOI(vp);
    691 		error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
    692 			      (int)fs->fs_bsize, NOCRED, &bp);
    693 		if (error) {
    694 			brelse(bp, 0);
    695 			vput(vp);
    696 			(void)vunmark(mvp);
    697 			break;
    698 		}
    699 		ffs_load_inode(bp, ip, fs, ip->i_number);
    700 		ip->i_ffs_effnlink = ip->i_nlink;
    701 		brelse(bp, 0);
    702 		vput(vp);
    703 		mutex_enter(&mntvnode_lock);
    704 	}
    705 	mutex_exit(&mntvnode_lock);
    706 	vnfree(mvp);
    707 	return (error);
    708 }
    709 
    710 /*
    711  * Possible superblock locations ordered from most to least likely.
    712  */
    713 static const int sblock_try[] = SBLOCKSEARCH;
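
/*
 * For reference, a sketch of what the search list conventionally expands
 * to; see <ufs/ffs/fs.h> for the authoritative SBLOCKSEARCH definition.
 * The byte offsets noted in the comments are the conventional values and
 * are given here only as an illustration.
 */
#if 0
static const int sblock_try_example[] = {
	SBLOCK_UFS2,	/* 65536: standard location for UFS2 */
	SBLOCK_UFS1,	/* 8192: standard location for UFS1 */
	SBLOCK_FLOPPY,	/* 0: tiny (floppy) filesystems */
	SBLOCK_PIGGY,	/* 262144: filesystems with 64k blocks */
	-1		/* terminator tested by ffs_mountfs() */
};
#endif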
    714 
    715 /*
    716  * Common code for mount and mountroot
    717  */
    718 int
    719 ffs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
    720 {
    721 	struct ufsmount *ump;
    722 	struct buf *bp;
    723 	struct fs *fs;
    724 	dev_t dev;
    725 	struct partinfo dpart;
    726 	void *space;
    727 	daddr_t sblockloc, fsblockloc;
    728 	int blks, fstype;
    729 	int error, i, size, ronly, bset = 0;
    730 #ifdef FFS_EI
    731 	int needswap = 0;		/* keep gcc happy */
    732 #endif
    733 	int32_t *lp;
    734 	kauth_cred_t cred;
     735 	u_int32_t sbsize = 8192;	/* keep gcc happy */
    736 
    737 	dev = devvp->v_rdev;
    738 	cred = l ? l->l_cred : NOCRED;
    739 
    740 	/* Flush out any old buffers remaining from a previous use. */
    741 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    742 	error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
    743 	VOP_UNLOCK(devvp, 0);
    744 	if (error)
    745 		return (error);
    746 
    747 	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
    748 	if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) != 0)
    749 		size = DEV_BSIZE;
    750 	else
    751 		size = dpart.disklab->d_secsize;
    752 
    753 	bp = NULL;
    754 	ump = NULL;
    755 	fs = NULL;
    756 	sblockloc = 0;
    757 	fstype = 0;
    758 
    759 	error = fstrans_mount(mp);
    760 	if (error)
    761 		return error;
    762 
    763 	/*
    764 	 * Try reading the superblock in each of its possible locations.
    765 	 */
    766 	for (i = 0; ; i++) {
    767 		if (bp != NULL) {
    768 			brelse(bp, BC_NOCACHE);
    769 			bp = NULL;
    770 		}
    771 		if (sblock_try[i] == -1) {
    772 			error = EINVAL;
    773 			fs = NULL;
    774 			goto out;
    775 		}
    776 		error = bread(devvp, sblock_try[i] / size, SBLOCKSIZE, cred,
    777 			      &bp);
    778 		if (error) {
    779 			fs = NULL;
    780 			goto out;
    781 		}
    782 		fs = (struct fs*)bp->b_data;
    783 		fsblockloc = sblockloc = sblock_try[i];
    784 		if (fs->fs_magic == FS_UFS1_MAGIC) {
    785 			sbsize = fs->fs_sbsize;
    786 			fstype = UFS1;
    787 #ifdef FFS_EI
    788 			needswap = 0;
    789 		} else if (fs->fs_magic == bswap32(FS_UFS1_MAGIC)) {
    790 			sbsize = bswap32(fs->fs_sbsize);
    791 			fstype = UFS1;
    792 			needswap = 1;
    793 #endif
    794 		} else if (fs->fs_magic == FS_UFS2_MAGIC) {
    795 			sbsize = fs->fs_sbsize;
    796 			fstype = UFS2;
    797 #ifdef FFS_EI
    798 			needswap = 0;
    799 		} else if (fs->fs_magic == bswap32(FS_UFS2_MAGIC)) {
    800 			sbsize = bswap32(fs->fs_sbsize);
    801 			fstype = UFS2;
    802 			needswap = 1;
    803 #endif
    804 		} else
    805 			continue;
    806 
    807 
    808 		/* fs->fs_sblockloc isn't defined for old filesystems */
    809 		if (fstype == UFS1 && !(fs->fs_old_flags & FS_FLAGS_UPDATED)) {
    810 			if (sblockloc == SBLOCK_UFS2)
    811 				/*
    812 				 * This is likely to be the first alternate
    813 				 * in a filesystem with 64k blocks.
    814 				 * Don't use it.
    815 				 */
    816 				continue;
    817 			fsblockloc = sblockloc;
    818 		} else {
    819 			fsblockloc = fs->fs_sblockloc;
    820 #ifdef FFS_EI
    821 			if (needswap)
    822 				fsblockloc = bswap64(fsblockloc);
    823 #endif
    824 		}
    825 
    826 		/* Check we haven't found an alternate superblock */
    827 		if (fsblockloc != sblockloc)
    828 			continue;
    829 
    830 		/* Validate size of superblock */
    831 		if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs))
    832 			continue;
    833 
     834 		/* OK, this seems to be a good superblock */
    835 		break;
    836 	}
    837 
    838 	fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
    839 	memcpy(fs, bp->b_data, sbsize);
    840 
    841 	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
    842 	memset(ump, 0, sizeof *ump);
    843 	mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);
    844 	ump->um_fs = fs;
    845 	ump->um_ops = &ffs_ufsops;
    846 
    847 #ifdef FFS_EI
    848 	if (needswap) {
    849 		ffs_sb_swap((struct fs*)bp->b_data, fs);
    850 		fs->fs_flags |= FS_SWAPPED;
    851 	} else
    852 #endif
    853 		fs->fs_flags &= ~FS_SWAPPED;
    854 
    855 	ffs_oldfscompat_read(fs, ump, sblockloc);
    856 	ump->um_maxfilesize = fs->fs_maxfilesize;
    857 
    858 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
    859 		fs->fs_pendingblocks = 0;
    860 		fs->fs_pendinginodes = 0;
    861 	}
    862 
    863 	ump->um_fstype = fstype;
    864 	if (fs->fs_sbsize < SBLOCKSIZE)
    865 		brelse(bp, BC_INVAL);
    866 	else
    867 		brelse(bp, 0);
    868 	bp = NULL;
    869 
    870 	/* First check to see if this is tagged as an Apple UFS filesystem
    871 	 * in the disklabel
    872 	 */
    873 	if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) == 0) &&
    874 		(dpart.part->p_fstype == FS_APPLEUFS)) {
    875 		ump->um_flags |= UFS_ISAPPLEUFS;
    876 	}
    877 #ifdef APPLE_UFS
    878 	else {
     879 		/* Manually look for an Apple UFS label, and if a valid one
    880 		 * is found, then treat it like an Apple UFS filesystem anyway
    881 		 */
    882 		error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
    883 			APPLEUFS_LABEL_SIZE, cred, &bp);
    884 		if (error)
    885 			goto out;
    886 		error = ffs_appleufs_validate(fs->fs_fsmnt,
    887 			(struct appleufslabel *)bp->b_data,NULL);
    888 		if (error == 0) {
    889 			ump->um_flags |= UFS_ISAPPLEUFS;
    890 		}
    891 		brelse(bp, 0);
    892 		bp = NULL;
    893 	}
    894 #else
    895 	if (ump->um_flags & UFS_ISAPPLEUFS) {
    896 		error = EINVAL;
    897 		goto out;
    898 	}
    899 #endif
    900 
    901 	/*
    902 	 * verify that we can access the last block in the fs
    903 	 * if we're mounting read/write.
    904 	 */
    905 
    906 	if (!ronly) {
    907 		error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize,
    908 		    cred, &bp);
    909 		if (bp->b_bcount != fs->fs_fsize)
    910 			error = EINVAL;
    911 		if (error) {
    912 			bset = BC_INVAL;
    913 			goto out;
    914 		}
    915 		brelse(bp, BC_INVAL);
    916 		bp = NULL;
    917 	}
    918 
    919 	fs->fs_ronly = ronly;
    920 	if (ronly == 0) {
    921 		fs->fs_clean <<= 1;
    922 		fs->fs_fmod = 1;
    923 	}
    924 	size = fs->fs_cssize;
    925 	blks = howmany(size, fs->fs_fsize);
    926 	if (fs->fs_contigsumsize > 0)
    927 		size += fs->fs_ncg * sizeof(int32_t);
    928 	size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
    929 	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
    930 	fs->fs_csp = space;
    931 	for (i = 0; i < blks; i += fs->fs_frag) {
    932 		size = fs->fs_bsize;
    933 		if (i + fs->fs_frag > blks)
    934 			size = (blks - i) * fs->fs_fsize;
    935 		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
    936 			      cred, &bp);
    937 		if (error) {
    938 			free(fs->fs_csp, M_UFSMNT);
    939 			goto out;
    940 		}
    941 #ifdef FFS_EI
    942 		if (needswap)
    943 			ffs_csum_swap((struct csum *)bp->b_data,
    944 				(struct csum *)space, size);
    945 		else
    946 #endif
    947 			memcpy(space, bp->b_data, (u_int)size);
    948 
    949 		space = (char *)space + size;
    950 		brelse(bp, 0);
    951 		bp = NULL;
    952 	}
    953 	if (fs->fs_contigsumsize > 0) {
    954 		fs->fs_maxcluster = lp = space;
    955 		for (i = 0; i < fs->fs_ncg; i++)
    956 			*lp++ = fs->fs_contigsumsize;
    957 		space = lp;
    958 	}
    959 	size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
    960 	fs->fs_contigdirs = space;
    961 	space = (char *)space + size;
    962 	memset(fs->fs_contigdirs, 0, size);
    963 		/* Compatibility for old filesystems - XXX */
    964 	if (fs->fs_avgfilesize <= 0)
    965 		fs->fs_avgfilesize = AVFILESIZ;
    966 	if (fs->fs_avgfpdir <= 0)
    967 		fs->fs_avgfpdir = AFPDIR;
    968 	fs->fs_active = NULL;
    969 	mp->mnt_data = ump;
    970 	mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
    971 	mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_FFS);
    972 	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
    973 	mp->mnt_stat.f_namemax = FFS_MAXNAMLEN;
    974 	if (UFS_MPISAPPLEUFS(ump)) {
    975 		/* NeXT used to keep short symlinks in the inode even
    976 		 * when using FS_42INODEFMT.  In that case fs->fs_maxsymlinklen
    977 		 * is probably -1, but we still need to be able to identify
    978 		 * short symlinks.
    979 		 */
    980 		ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
    981 		ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
    982 		mp->mnt_iflag |= IMNT_DTYPE;
    983 	} else {
    984 		ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
    985 		ump->um_dirblksiz = DIRBLKSIZ;
    986 		if (ump->um_maxsymlinklen > 0)
    987 			mp->mnt_iflag |= IMNT_DTYPE;
    988 		else
    989 			mp->mnt_iflag &= ~IMNT_DTYPE;
    990 	}
    991 	mp->mnt_fs_bshift = fs->fs_bshift;
    992 	mp->mnt_dev_bshift = DEV_BSHIFT;	/* XXX */
    993 	mp->mnt_flag |= MNT_LOCAL;
    994 	mp->mnt_iflag |= IMNT_MPSAFE;
    995 #ifdef FFS_EI
    996 	if (needswap)
    997 		ump->um_flags |= UFS_NEEDSWAP;
    998 #endif
    999 	ump->um_mountp = mp;
   1000 	ump->um_dev = dev;
   1001 	ump->um_devvp = devvp;
   1002 	ump->um_nindir = fs->fs_nindir;
   1003 	ump->um_lognindir = ffs(fs->fs_nindir) - 1;
   1004 	ump->um_bptrtodb = fs->fs_fsbtodb;
   1005 	ump->um_seqinc = fs->fs_frag;
   1006 	for (i = 0; i < MAXQUOTAS; i++)
   1007 		ump->um_quotas[i] = NULLVP;
   1008 	devvp->v_specmountpoint = mp;
   1009 	if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
   1010 		error = softdep_mount(devvp, mp, fs, cred);
   1011 		if (error) {
   1012 			free(fs->fs_csp, M_UFSMNT);
   1013 			goto out;
   1014 		}
   1015 	}
   1016 	if (ronly == 0 && fs->fs_snapinum[0] != 0)
   1017 		ffs_snapshot_mount(mp);
   1018 #ifdef UFS_EXTATTR
   1019 	/*
   1020 	 * Initialize file-backed extended attributes on UFS1 file
   1021 	 * systems.
   1022 	 */
   1023 	if (ump->um_fstype == UFS1) {
   1024 		ufs_extattr_uepm_init(&ump->um_extattr);
   1025 #ifdef UFS_EXTATTR_AUTOSTART
   1026 		/*
   1027 		 * XXX Just ignore errors.  Not clear that we should
   1028 		 * XXX fail the mount in this case.
   1029 		 */
   1030 		(void) ufs_extattr_autostart(mp, l);
   1031 #endif
   1032 	}
   1033 #endif /* UFS_EXTATTR */
   1034 	return (0);
   1035 out:
   1036 	fstrans_unmount(mp);
   1037 	if (fs)
   1038 		free(fs, M_UFSMNT);
   1039 	devvp->v_specmountpoint = NULL;
   1040 	if (bp)
   1041 		brelse(bp, bset);
   1042 	if (ump) {
   1043 		if (ump->um_oldfscompat)
   1044 			free(ump->um_oldfscompat, M_UFSMNT);
   1045 		mutex_destroy(&ump->um_lock);
   1046 		free(ump, M_UFSMNT);
   1047 		mp->mnt_data = NULL;
   1048 	}
   1049 	return (error);
   1050 }
   1051 
   1052 /*
   1053  * Sanity checks for loading old filesystem superblocks.
   1054  * See ffs_oldfscompat_write below for unwound actions.
   1055  *
   1056  * XXX - Parts get retired eventually.
   1057  * Unfortunately new bits get added.
   1058  */
   1059 static void
   1060 ffs_oldfscompat_read(struct fs *fs, struct ufsmount *ump, daddr_t sblockloc)
   1061 {
   1062 	off_t maxfilesize;
   1063 	int32_t *extrasave;
   1064 
   1065 	if ((fs->fs_magic != FS_UFS1_MAGIC) ||
   1066 	    (fs->fs_old_flags & FS_FLAGS_UPDATED))
   1067 		return;
   1068 
   1069 	if (!ump->um_oldfscompat)
   1070 		ump->um_oldfscompat = malloc(512 + 3*sizeof(int32_t),
   1071 		    M_UFSMNT, M_WAITOK);
   1072 
   1073 	memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
   1074 	extrasave = ump->um_oldfscompat;
   1075 	extrasave += 512/sizeof(int32_t);
   1076 	extrasave[0] = fs->fs_old_npsect;
   1077 	extrasave[1] = fs->fs_old_interleave;
   1078 	extrasave[2] = fs->fs_old_trackskew;
   1079 
   1080 	/* These fields will be overwritten by their
    1081 	 * original values in ffs_oldfscompat_write, so it is harmless
   1082 	 * to modify them here.
   1083 	 */
   1084 	fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
   1085 	fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
   1086 	fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
   1087 	fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
   1088 
   1089 	fs->fs_maxbsize = fs->fs_bsize;
   1090 	fs->fs_time = fs->fs_old_time;
   1091 	fs->fs_size = fs->fs_old_size;
   1092 	fs->fs_dsize = fs->fs_old_dsize;
   1093 	fs->fs_csaddr = fs->fs_old_csaddr;
   1094 	fs->fs_sblockloc = sblockloc;
   1095 
    1096 	fs->fs_flags = fs->fs_old_flags | (fs->fs_flags & FS_INTERNAL);
   1097 
   1098 	if (fs->fs_old_postblformat == FS_42POSTBLFMT) {
   1099 		fs->fs_old_nrpos = 8;
   1100 		fs->fs_old_npsect = fs->fs_old_nsect;
   1101 		fs->fs_old_interleave = 1;
   1102 		fs->fs_old_trackskew = 0;
   1103 	}
   1104 
   1105 	if (fs->fs_old_inodefmt < FS_44INODEFMT) {
   1106 		fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
   1107 		fs->fs_qbmask = ~fs->fs_bmask;
   1108 		fs->fs_qfmask = ~fs->fs_fmask;
   1109 	}
   1110 
   1111 	maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;
   1112 	if (fs->fs_maxfilesize > maxfilesize)
   1113 		fs->fs_maxfilesize = maxfilesize;
   1114 
   1115 	/* Compatibility for old filesystems */
   1116 	if (fs->fs_avgfilesize <= 0)
   1117 		fs->fs_avgfilesize = AVFILESIZ;
   1118 	if (fs->fs_avgfpdir <= 0)
   1119 		fs->fs_avgfpdir = AFPDIR;
   1120 
   1121 #if 0
   1122 	if (bigcgs) {
   1123 		fs->fs_save_cgsize = fs->fs_cgsize;
   1124 		fs->fs_cgsize = fs->fs_bsize;
   1125 	}
   1126 #endif
   1127 }
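
/*
 * Worked example of the fs_maxfilesize clamp above (hypothetical block
 * size, for illustration only): with fs_bsize = 8192 the limit computed
 * here is 0x80000000 * 8192 - 1 = 2^44 - 1 bytes, just under 16 TiB,
 * while pre-FS_44INODEFMT filesystems have already been capped at
 * 1LL << 39 = 512 GiB.
 */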
   1128 
   1129 /*
   1130  * Unwinding superblock updates for old filesystems.
   1131  * See ffs_oldfscompat_read above for details.
   1132  *
   1133  * XXX - Parts get retired eventually.
   1134  * Unfortunately new bits get added.
   1135  */
   1136 static void
   1137 ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
   1138 {
   1139 	int32_t *extrasave;
   1140 
   1141 	if ((fs->fs_magic != FS_UFS1_MAGIC) ||
   1142 	    (fs->fs_old_flags & FS_FLAGS_UPDATED))
   1143 		return;
   1144 
   1145 	fs->fs_old_time = fs->fs_time;
   1146 	fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
   1147 	fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
   1148 	fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
   1149 	fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
   1150 	fs->fs_old_flags = fs->fs_flags;
   1151 
   1152 #if 0
   1153 	if (bigcgs) {
   1154 		fs->fs_cgsize = fs->fs_save_cgsize;
   1155 	}
   1156 #endif
   1157 
   1158 	memcpy(&fs->fs_old_postbl_start, ump->um_oldfscompat, 512);
   1159 	extrasave = ump->um_oldfscompat;
   1160 	extrasave += 512/sizeof(int32_t);
   1161 	fs->fs_old_npsect = extrasave[0];
   1162 	fs->fs_old_interleave = extrasave[1];
   1163 	fs->fs_old_trackskew = extrasave[2];
   1164 
   1165 }
   1166 
   1167 /*
   1168  * unmount system call
   1169  */
   1170 int
   1171 ffs_unmount(struct mount *mp, int mntflags)
   1172 {
   1173 	struct lwp *l = curlwp;
   1174 	struct ufsmount *ump = VFSTOUFS(mp);
   1175 	struct fs *fs = ump->um_fs;
   1176 	int error, flags, penderr;
   1177 
   1178 	penderr = 0;
   1179 	flags = 0;
   1180 	if (mntflags & MNT_FORCE)
   1181 		flags |= FORCECLOSE;
   1182 #ifdef UFS_EXTATTR
   1183 	if (ump->um_fstype == UFS1) {
   1184 		ufs_extattr_stop(mp, l);
   1185 		ufs_extattr_uepm_destroy(&ump->um_extattr);
   1186 	}
   1187 #endif /* UFS_EXTATTR */
   1188 	if (mp->mnt_flag & MNT_SOFTDEP) {
   1189 		if ((error = softdep_flushfiles(mp, flags, l)) != 0)
   1190 			return (error);
   1191 	} else {
   1192 		if ((error = ffs_flushfiles(mp, flags, l)) != 0)
   1193 			return (error);
   1194 	}
   1195 	mutex_enter(&ump->um_lock);
   1196 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
   1197 		printf("%s: unmount pending error: blocks %" PRId64
   1198 		       " files %d\n",
   1199 		    fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
   1200 		fs->fs_pendingblocks = 0;
   1201 		fs->fs_pendinginodes = 0;
   1202 		penderr = 1;
   1203 	}
   1204 	mutex_exit(&ump->um_lock);
   1205 	if (fs->fs_ronly == 0 &&
   1206 	    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
   1207 	    fs->fs_clean & FS_WASCLEAN) {
   1208 		/*
   1209 		 * XXXX don't mark fs clean in the case of softdep
   1210 		 * pending block errors, until they are fixed.
   1211 		 */
   1212 		if (penderr == 0) {
   1213 			if (mp->mnt_flag & MNT_SOFTDEP)
   1214 				fs->fs_flags &= ~FS_DOSOFTDEP;
   1215 			fs->fs_clean = FS_ISCLEAN;
   1216 		}
   1217 		fs->fs_fmod = 0;
   1218 		(void) ffs_sbupdate(ump, MNT_WAIT);
   1219 	}
   1220 	if (ump->um_devvp->v_type != VBAD)
   1221 		ump->um_devvp->v_specmountpoint = NULL;
   1222 	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
   1223 	(void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
   1224 		NOCRED);
   1225 	vput(ump->um_devvp);
   1226 	free(fs->fs_csp, M_UFSMNT);
   1227 	free(fs, M_UFSMNT);
   1228 	if (ump->um_oldfscompat != NULL)
   1229 		free(ump->um_oldfscompat, M_UFSMNT);
   1230 	softdep_unmount(mp);
   1231 	mutex_destroy(&ump->um_lock);
   1232 	free(ump, M_UFSMNT);
   1233 	mp->mnt_data = NULL;
   1234 	mp->mnt_flag &= ~MNT_LOCAL;
   1235 	fstrans_unmount(mp);
   1236 	return (0);
   1237 }
   1238 
   1239 /*
   1240  * Flush out all the files in a filesystem.
   1241  */
   1242 int
   1243 ffs_flushfiles(struct mount *mp, int flags, struct lwp *l)
   1244 {
   1245 	extern int doforce;
   1246 	struct ufsmount *ump;
   1247 	int error;
   1248 
   1249 	if (!doforce)
   1250 		flags &= ~FORCECLOSE;
   1251 	ump = VFSTOUFS(mp);
   1252 #ifdef QUOTA
   1253 	if (mp->mnt_flag & MNT_QUOTA) {
   1254 		int i;
   1255 		if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
   1256 			return (error);
   1257 		for (i = 0; i < MAXQUOTAS; i++) {
   1258 			if (ump->um_quotas[i] == NULLVP)
   1259 				continue;
   1260 			quotaoff(l, mp, i);
   1261 		}
   1262 		/*
   1263 		 * Here we fall through to vflush again to ensure
   1264 		 * that we have gotten rid of all the system vnodes.
   1265 		 */
   1266 	}
   1267 #endif
   1268 	if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
   1269 		return (error);
   1270 	ffs_snapshot_unmount(mp);
   1271 	/*
   1272 	 * Flush all the files.
   1273 	 */
   1274 	error = vflush(mp, NULLVP, flags);
   1275 	if (error)
   1276 		return (error);
   1277 	/*
   1278 	 * Flush filesystem metadata.
   1279 	 */
   1280 	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
   1281 	error = VOP_FSYNC(ump->um_devvp, l->l_cred, FSYNC_WAIT, 0, 0);
   1282 	VOP_UNLOCK(ump->um_devvp, 0);
   1283 	return (error);
   1284 }
   1285 
   1286 /*
   1287  * Get file system statistics.
   1288  */
   1289 int
   1290 ffs_statvfs(struct mount *mp, struct statvfs *sbp)
   1291 {
   1292 	struct ufsmount *ump;
   1293 	struct fs *fs;
   1294 
   1295 	ump = VFSTOUFS(mp);
   1296 	fs = ump->um_fs;
   1297 	mutex_enter(&ump->um_lock);
   1298 	sbp->f_bsize = fs->fs_bsize;
   1299 	sbp->f_frsize = fs->fs_fsize;
   1300 	sbp->f_iosize = fs->fs_bsize;
   1301 	sbp->f_blocks = fs->fs_dsize;
   1302 	sbp->f_bfree = blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
   1303 		fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
   1304 	sbp->f_bresvd = ((u_int64_t) fs->fs_dsize * (u_int64_t)
   1305 	    fs->fs_minfree) / (u_int64_t) 100;
   1306 	if (sbp->f_bfree > sbp->f_bresvd)
   1307 		sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
   1308 	else
   1309 		sbp->f_bavail = 0;
   1310 	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
   1311 	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
   1312 	sbp->f_favail = sbp->f_ffree;
   1313 	sbp->f_fresvd = 0;
   1314 	mutex_exit(&ump->um_lock);
   1315 	copy_statvfs_info(sbp, mp);
   1316 
   1317 	return (0);
   1318 }
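
/*
 * Illustrative arithmetic for the reserved-space figures reported above.
 * The numbers are hypothetical and only show how f_bresvd and f_bavail
 * relate to fs_minfree; they are not taken from a real filesystem.
 */
#if 0
	/* fs_dsize = 1000000 frags, fs_minfree = 5%: */
	u_int64_t bresvd = (u_int64_t)1000000 * 5 / 100;	/* 50000 frags held back */
	u_int64_t bfree = 60000;		/* frags counted as free above */
	u_int64_t bavail = bfree > bresvd ? bfree - bresvd : 0;	/* 10000 frags */
#endif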
   1319 
   1320 /*
   1321  * Go through the disk queues to initiate sandbagged IO;
   1322  * go through the inodes to write those that have been modified;
   1323  * initiate the writing of the super block if it has been modified.
   1324  *
   1325  * Note: we are always called with the filesystem marked `MPBUSY'.
   1326  */
   1327 int
   1328 ffs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
   1329 {
   1330 	struct lwp *l = curlwp;
   1331 	struct vnode *vp, *mvp;
   1332 	struct inode *ip;
   1333 	struct ufsmount *ump = VFSTOUFS(mp);
   1334 	struct fs *fs;
   1335 	int error, count, allerror = 0;
   1336 
   1337 	fs = ump->um_fs;
   1338 	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
   1339 		printf("fs = %s\n", fs->fs_fsmnt);
   1340 		panic("update: rofs mod");
   1341 	}
   1342 
   1343 	/* Allocate a marker vnode. */
   1344 	if ((mvp = vnalloc(mp)) == NULL)
   1345 		return (ENOMEM);
   1346 
   1347 	fstrans_start(mp, FSTRANS_SHARED);
   1348 	/*
   1349 	 * Write back each (modified) inode.
   1350 	 */
   1351 	mutex_enter(&mntvnode_lock);
   1352 loop:
   1353 	/*
   1354 	 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
   1355 	 * and vclean() can be called indirectly
   1356 	 */
   1357 	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
   1358 		vmark(mvp, vp);
   1359 		/*
   1360 		 * If the vnode that we are about to sync is no longer
   1361 		 * associated with this mount point, start over.
   1362 		 */
   1363 		if (vp->v_mount != mp || vismarker(vp))
   1364 			continue;
   1365 		mutex_enter(&vp->v_interlock);
   1366 		ip = VTOI(vp);
   1367 		if (ip == NULL || (vp->v_iflag & (VI_XLOCK|VI_CLEAN)) != 0 ||
   1368 		    vp->v_type == VNON || ((ip->i_flag &
   1369 		    (IN_CHANGE | IN_UPDATE | IN_MODIFIED)) == 0 &&
   1370 		    LIST_EMPTY(&vp->v_dirtyblkhd) &&
   1371 		    UVM_OBJ_IS_CLEAN(&vp->v_uobj)))
   1372 		{
   1373 			mutex_exit(&vp->v_interlock);
   1374 			continue;
   1375 		}
   1376 		if (vp->v_type == VBLK &&
   1377 		    fstrans_getstate(mp) == FSTRANS_SUSPENDING) {
   1378 			mutex_exit(&vp->v_interlock);
   1379 			continue;
   1380 		}
   1381 		mutex_exit(&mntvnode_lock);
   1382 		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
   1383 		if (error) {
   1384 			mutex_enter(&mntvnode_lock);
   1385 			if (error == ENOENT) {
   1386 				(void)vunmark(mvp);
   1387 				goto loop;
   1388 			}
   1389 			continue;
   1390 		}
   1391 		if (vp->v_type == VREG && waitfor == MNT_LAZY)
   1392 			error = ffs_update(vp, NULL, NULL, 0);
   1393 		else
   1394 			error = VOP_FSYNC(vp, cred,
   1395 			    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0);
   1396 		if (error)
   1397 			allerror = error;
   1398 		vput(vp);
   1399 		mutex_enter(&mntvnode_lock);
   1400 	}
   1401 	mutex_exit(&mntvnode_lock);
   1402 	/*
   1403 	 * Force stale file system control information to be flushed.
   1404 	 */
   1405 	if (waitfor == MNT_WAIT && (ump->um_mountp->mnt_flag & MNT_SOFTDEP)) {
   1406 		if ((error = softdep_flushworklist(ump->um_mountp, &count, l)))
   1407 			allerror = error;
   1408 		/* Flushed work items may create new vnodes to clean */
   1409 		if (allerror == 0 && count) {
   1410 			mutex_enter(&mntvnode_lock);
   1411 			goto loop;
   1412 		}
   1413 	}
   1414 	if (waitfor != MNT_LAZY && (ump->um_devvp->v_numoutput > 0 ||
   1415 	    !LIST_EMPTY(&ump->um_devvp->v_dirtyblkhd))) {
   1416 		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
   1417 		if ((error = VOP_FSYNC(ump->um_devvp, cred,
   1418 		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0)) != 0)
   1419 			allerror = error;
   1420 		VOP_UNLOCK(ump->um_devvp, 0);
   1421 		if (allerror == 0 && waitfor == MNT_WAIT) {
   1422 			mutex_enter(&mntvnode_lock);
   1423 			goto loop;
   1424 		}
   1425 	}
   1426 #ifdef QUOTA
   1427 	qsync(mp);
   1428 #endif
   1429 	/*
   1430 	 * Write back modified superblock.
   1431 	 */
   1432 	if (fs->fs_fmod != 0) {
   1433 		fs->fs_fmod = 0;
   1434 		fs->fs_time = time_second;
   1435 		if ((error = ffs_cgupdate(ump, waitfor)))
   1436 			allerror = error;
   1437 	}
   1438 	fstrans_done(mp);
   1439 	vnfree(mvp);
   1440 	return (allerror);
   1441 }
   1442 
   1443 /*
    1444  * Look up an FFS dinode number to find its incore vnode, otherwise read it
   1445  * in from disk.  If it is in core, wait for the lock bit to clear, then
   1446  * return the inode locked.  Detection and handling of mount points must be
   1447  * done by the calling routine.
   1448  */
   1449 int
   1450 ffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
   1451 {
   1452 	struct fs *fs;
   1453 	struct inode *ip;
   1454 	struct ufsmount *ump;
   1455 	struct buf *bp;
   1456 	struct vnode *vp;
   1457 	dev_t dev;
   1458 	int error;
   1459 
   1460 	ump = VFSTOUFS(mp);
   1461 	dev = ump->um_dev;
   1462 
   1463  retry:
   1464 	if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
   1465 		return (0);
   1466 
   1467 	/* Allocate a new vnode/inode. */
   1468 	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
   1469 		*vpp = NULL;
   1470 		return (error);
   1471 	}
   1472 	ip = pool_cache_get(ffs_inode_cache, PR_WAITOK);
   1473 
   1474 	/*
   1475 	 * If someone beat us to it, put back the freshly allocated
   1476 	 * vnode/inode pair and retry.
   1477 	 */
   1478 	mutex_enter(&ufs_hashlock);
   1479 	if (ufs_ihashget(dev, ino, 0) != NULL) {
   1480 		mutex_exit(&ufs_hashlock);
   1481 		ungetnewvnode(vp);
   1482 		pool_cache_put(ffs_inode_cache, ip);
   1483 		goto retry;
   1484 	}
   1485 
   1486 	vp->v_vflag |= VV_LOCKSWORK;
   1487 	if ((mp->mnt_flag & MNT_SOFTDEP) != 0)
   1488 		vp->v_uflag |= VU_SOFTDEP;
   1489 
   1490 	/*
   1491 	 * XXX MFS ends up here, too, to allocate an inode.  Should we
   1492 	 * XXX create another pool for MFS inodes?
   1493 	 */
   1494 
   1495 	memset(ip, 0, sizeof(struct inode));
   1496 	vp->v_data = ip;
   1497 	ip->i_vnode = vp;
   1498 	ip->i_ump = ump;
   1499 	ip->i_fs = fs = ump->um_fs;
   1500 	ip->i_dev = dev;
   1501 	ip->i_number = ino;
   1502 	LIST_INIT(&ip->i_pcbufhd);
   1503 #ifdef QUOTA
   1504 	ufsquota_init(ip);
   1505 #endif
   1506 
   1507 	/*
    1508 	 * Initialize the genfs node; we might proceed to destroy it in
   1509 	 * error branches.
   1510 	 */
   1511 	genfs_node_init(vp, &ffs_genfsops);
   1512 
   1513 	/*
   1514 	 * Put it onto its hash chain and lock it so that other requests for
   1515 	 * this inode will block if they arrive while we are sleeping waiting
   1516 	 * for old data structures to be purged or for the contents of the
   1517 	 * disk portion of this inode to be read.
   1518 	 */
   1519 
   1520 	ufs_ihashins(ip);
   1521 	mutex_exit(&ufs_hashlock);
   1522 
   1523 	/* Read in the disk contents for the inode, copy into the inode. */
   1524 	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
   1525 		      (int)fs->fs_bsize, NOCRED, &bp);
   1526 	if (error) {
   1527 
   1528 		/*
   1529 		 * The inode does not contain anything useful, so it would
   1530 		 * be misleading to leave it on its hash chain. With mode
   1531 		 * still zero, it will be unlinked and returned to the free
   1532 		 * list by vput().
   1533 		 */
   1534 
   1535 		vput(vp);
   1536 		brelse(bp, 0);
   1537 		*vpp = NULL;
   1538 		return (error);
   1539 	}
   1540 	if (ip->i_ump->um_fstype == UFS1)
   1541 		ip->i_din.ffs1_din = pool_cache_get(ffs_dinode1_cache,
   1542 		    PR_WAITOK);
   1543 	else
   1544 		ip->i_din.ffs2_din = pool_cache_get(ffs_dinode2_cache,
   1545 		    PR_WAITOK);
   1546 	ffs_load_inode(bp, ip, fs, ino);
   1547 	if (DOINGSOFTDEP(vp))
   1548 		softdep_load_inodeblock(ip);
   1549 	else
   1550 		ip->i_ffs_effnlink = ip->i_nlink;
   1551 	brelse(bp, 0);
   1552 
   1553 	/*
   1554 	 * Initialize the vnode from the inode, check for aliases.
   1555 	 * Note that the underlying vnode may have changed.
   1556 	 */
   1557 
   1558 	ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
   1559 
   1560 	/*
   1561 	 * Finish inode initialization now that aliasing has been resolved.
   1562 	 */
   1563 
   1564 	ip->i_devvp = ump->um_devvp;
   1565 	VREF(ip->i_devvp);
   1566 
   1567 	/*
   1568 	 * Ensure that uid and gid are correct. This is a temporary
   1569 	 * fix until fsck has been changed to do the update.
   1570 	 */
   1571 
   1572 	if (fs->fs_old_inodefmt < FS_44INODEFMT) {		/* XXX */
   1573 		ip->i_uid = ip->i_ffs1_ouid;			/* XXX */
   1574 		ip->i_gid = ip->i_ffs1_ogid;			/* XXX */
   1575 	}							/* XXX */
   1576 	uvm_vnp_setsize(vp, ip->i_size);
   1577 	*vpp = vp;
   1578 	return (0);
   1579 }
   1580 
   1581 /*
   1582  * File handle to vnode
   1583  *
   1584  * Have to be really careful about stale file handles:
   1585  * - check that the inode number is valid
   1586  * - call ffs_vget() to get the locked inode
   1587  * - check for an unallocated inode (i_mode == 0)
   1588  * - check that the given client host has export rights and return
    1589  *   those rights via exflagsp and credanonp
   1590  */
   1591 int
   1592 ffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
   1593 {
   1594 	struct ufid ufh;
   1595 	struct fs *fs;
   1596 
   1597 	if (fhp->fid_len != sizeof(struct ufid))
   1598 		return EINVAL;
   1599 
   1600 	memcpy(&ufh, fhp, sizeof(ufh));
   1601 	fs = VFSTOUFS(mp)->um_fs;
   1602 	if (ufh.ufid_ino < ROOTINO ||
   1603 	    ufh.ufid_ino >= fs->fs_ncg * fs->fs_ipg)
   1604 		return (ESTALE);
   1605 	return (ufs_fhtovp(mp, &ufh, vpp));
   1606 }
   1607 
   1608 /*
   1609  * Vnode pointer to File handle
   1610  */
   1611 /* ARGSUSED */
   1612 int
   1613 ffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
   1614 {
   1615 	struct inode *ip;
   1616 	struct ufid ufh;
   1617 
   1618 	if (*fh_size < sizeof(struct ufid)) {
   1619 		*fh_size = sizeof(struct ufid);
   1620 		return E2BIG;
   1621 	}
   1622 	ip = VTOI(vp);
   1623 	*fh_size = sizeof(struct ufid);
   1624 	memset(&ufh, 0, sizeof(ufh));
   1625 	ufh.ufid_len = sizeof(struct ufid);
   1626 	ufh.ufid_ino = ip->i_number;
   1627 	ufh.ufid_gen = ip->i_gen;
   1628 	memcpy(fhp, &ufh, sizeof(ufh));
   1629 	return (0);
   1630 }
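
/*
 * Illustrative sketch (not called from this file) of how the two routines
 * above round-trip a file handle, e.g. for NFS export; "vp" stands for
 * some locked, exportable FFS vnode.
 */
#if 0
	struct ufid ufh;
	size_t fh_size = sizeof(ufh);
	struct vnode *nvp;
	int error;

	error = ffs_vptofh(vp, (struct fid *)&ufh, &fh_size);
	if (error == 0)
		error = ffs_fhtovp(vp->v_mount, (struct fid *)&ufh, &nvp);
	/*
	 * ffs_fhtovp() returns ESTALE for an out-of-range inode number;
	 * ufs_fhtovp() then rejects handles whose generation number no
	 * longer matches the on-disk inode.
	 */
#endif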
   1631 
   1632 void
   1633 ffs_init(void)
   1634 {
   1635 	if (ffs_initcount++ > 0)
   1636 		return;
   1637 
   1638 	ffs_inode_cache = pool_cache_init(sizeof(struct inode), 0, 0, 0,
   1639 	    "ffsino", NULL, IPL_NONE, NULL, NULL, NULL);
   1640 	ffs_dinode1_cache = pool_cache_init(sizeof(struct ufs1_dinode), 0, 0, 0,
   1641 	    "ffsdino1", NULL, IPL_NONE, NULL, NULL, NULL);
   1642 	ffs_dinode2_cache = pool_cache_init(sizeof(struct ufs2_dinode), 0, 0, 0,
   1643 	    "ffsdino2", NULL, IPL_NONE, NULL, NULL, NULL);
   1644 	softdep_initialize();
   1645 	ffs_snapshot_init();
   1646 	ufs_init();
   1647 }
   1648 
   1649 void
   1650 ffs_reinit(void)
   1651 {
   1652 	softdep_reinitialize();
   1653 	ufs_reinit();
   1654 }
   1655 
   1656 void
   1657 ffs_done(void)
   1658 {
   1659 	if (--ffs_initcount > 0)
   1660 		return;
   1661 
   1662 	/* XXX softdep cleanup ? */
   1663 	ffs_snapshot_fini();
   1664 	ufs_done();
   1665 	pool_cache_destroy(ffs_dinode2_cache);
   1666 	pool_cache_destroy(ffs_dinode1_cache);
   1667 	pool_cache_destroy(ffs_inode_cache);
   1668 }
   1669 
   1670 SYSCTL_SETUP(sysctl_vfs_ffs_setup, "sysctl vfs.ffs subtree setup")
   1671 {
   1672 #if 0
   1673 	extern int doasyncfree;
   1674 #endif
   1675 	extern int ffs_log_changeopt;
   1676 
   1677 	sysctl_createv(clog, 0, NULL, NULL,
   1678 		       CTLFLAG_PERMANENT,
   1679 		       CTLTYPE_NODE, "vfs", NULL,
   1680 		       NULL, 0, NULL, 0,
   1681 		       CTL_VFS, CTL_EOL);
   1682 	sysctl_createv(clog, 0, NULL, NULL,
   1683 		       CTLFLAG_PERMANENT,
   1684 		       CTLTYPE_NODE, "ffs",
   1685 		       SYSCTL_DESCR("Berkeley Fast File System"),
   1686 		       NULL, 0, NULL, 0,
   1687 		       CTL_VFS, 1, CTL_EOL);
   1688 
   1689 	/*
   1690 	 * @@@ should we even bother with these first three?
   1691 	 */
   1692 	sysctl_createv(clog, 0, NULL, NULL,
   1693 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1694 		       CTLTYPE_INT, "doclusterread", NULL,
   1695 		       sysctl_notavail, 0, NULL, 0,
   1696 		       CTL_VFS, 1, FFS_CLUSTERREAD, CTL_EOL);
   1697 	sysctl_createv(clog, 0, NULL, NULL,
   1698 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1699 		       CTLTYPE_INT, "doclusterwrite", NULL,
   1700 		       sysctl_notavail, 0, NULL, 0,
   1701 		       CTL_VFS, 1, FFS_CLUSTERWRITE, CTL_EOL);
   1702 	sysctl_createv(clog, 0, NULL, NULL,
   1703 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1704 		       CTLTYPE_INT, "doreallocblks", NULL,
   1705 		       sysctl_notavail, 0, NULL, 0,
   1706 		       CTL_VFS, 1, FFS_REALLOCBLKS, CTL_EOL);
   1707 #if 0
   1708 	sysctl_createv(clog, 0, NULL, NULL,
   1709 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1710 		       CTLTYPE_INT, "doasyncfree",
   1711 		       SYSCTL_DESCR("Release dirty blocks asynchronously"),
   1712 		       NULL, 0, &doasyncfree, 0,
   1713 		       CTL_VFS, 1, FFS_ASYNCFREE, CTL_EOL);
   1714 #endif
   1715 	sysctl_createv(clog, 0, NULL, NULL,
   1716 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1717 		       CTLTYPE_INT, "log_changeopt",
   1718 		       SYSCTL_DESCR("Log changes in optimization strategy"),
   1719 		       NULL, 0, &ffs_log_changeopt, 0,
   1720 		       CTL_VFS, 1, FFS_LOG_CHANGEOPT, CTL_EOL);
   1721 }
   1722 
   1723 /*
   1724  * Write a superblock and associated information back to disk.
   1725  */
   1726 int
   1727 ffs_sbupdate(struct ufsmount *mp, int waitfor)
   1728 {
   1729 	struct fs *fs = mp->um_fs;
   1730 	struct buf *bp;
   1731 	int error = 0;
   1732 	u_int32_t saveflag;
   1733 
   1734 	bp = getblk(mp->um_devvp,
   1735 	    fs->fs_sblockloc >> (fs->fs_fshift - fs->fs_fsbtodb),
   1736 	    (int)fs->fs_sbsize, 0, 0);
   1737 	saveflag = fs->fs_flags & FS_INTERNAL;
   1738 	fs->fs_flags &= ~FS_INTERNAL;
   1739 
   1740 	memcpy(bp->b_data, fs, fs->fs_sbsize);
   1741 
   1742 	ffs_oldfscompat_write((struct fs *)bp->b_data, mp);
   1743 #ifdef FFS_EI
   1744 	if (mp->um_flags & UFS_NEEDSWAP)
   1745 		ffs_sb_swap((struct fs *)bp->b_data, (struct fs *)bp->b_data);
   1746 #endif
   1747 	fs->fs_flags |= saveflag;
   1748 
   1749 	if (waitfor == MNT_WAIT)
   1750 		error = bwrite(bp);
   1751 	else
   1752 		bawrite(bp);
   1753 	return (error);
   1754 }
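
/*
 * Note on the getblk() block number used above: fs_fshift is log2 of the
 * fragment size and fs_fsbtodb is log2 of (fragment size / DEV_BSIZE), so
 * (fs_fshift - fs_fsbtodb) is log2(DEV_BSIZE) and the shift converts the
 * superblock's byte offset into a DEV_BSIZE block number.  Illustrative
 * arithmetic with hypothetical values:
 */
#if 0
	/* 8 KB fragments on 512-byte sectors: fs_fshift = 13, fs_fsbtodb = 4 */
	daddr_t sbblk = 8192 >> (13 - 4);	/* UFS1 superblock at byte 8192 -> block 16 */
#endif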
   1755 
   1756 int
   1757 ffs_cgupdate(struct ufsmount *mp, int waitfor)
   1758 {
   1759 	struct fs *fs = mp->um_fs;
   1760 	struct buf *bp;
   1761 	int blks;
   1762 	void *space;
   1763 	int i, size, error = 0, allerror = 0;
   1764 
   1765 	allerror = ffs_sbupdate(mp, waitfor);
   1766 	blks = howmany(fs->fs_cssize, fs->fs_fsize);
   1767 	space = fs->fs_csp;
   1768 	for (i = 0; i < blks; i += fs->fs_frag) {
   1769 		size = fs->fs_bsize;
   1770 		if (i + fs->fs_frag > blks)
   1771 			size = (blks - i) * fs->fs_fsize;
   1772 		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
   1773 		    size, 0, 0);
   1774 #ifdef FFS_EI
   1775 		if (mp->um_flags & UFS_NEEDSWAP)
   1776 			ffs_csum_swap((struct csum*)space,
   1777 			    (struct csum*)bp->b_data, size);
   1778 		else
   1779 #endif
   1780 			memcpy(bp->b_data, space, (u_int)size);
   1781 		space = (char *)space + size;
   1782 		if (waitfor == MNT_WAIT)
   1783 			error = bwrite(bp);
   1784 		else
   1785 			bawrite(bp);
   1786 	}
   1787 	if (!allerror && error)
   1788 		allerror = error;
   1789 	return (allerror);
   1790 }
   1791 
   1792 int
   1793 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
   1794     int attrnamespace, const char *attrname)
   1795 {
   1796 #ifdef UFS_EXTATTR
   1797 	/*
   1798 	 * File-backed extended attributes are only supported on UFS1.
   1799 	 * UFS2 has native extended attributes.
   1800 	 */
   1801 	if (VFSTOUFS(mp)->um_fstype == UFS1)
   1802 		return (ufs_extattrctl(mp, cmd, vp, attrnamespace, attrname));
   1803 #endif
   1804 	return (vfs_stdextattrctl(mp, cmd, vp, attrnamespace, attrname));
   1805 }
   1806 
   1807 int
   1808 ffs_suspendctl(struct mount *mp, int cmd)
   1809 {
   1810 	int error;
   1811 	struct lwp *l = curlwp;
   1812 
   1813 	switch (cmd) {
   1814 	case SUSPEND_SUSPEND:
   1815 		if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
   1816 			return error;
   1817 		error = ffs_sync(mp, MNT_WAIT, l->l_proc->p_cred);
   1818 		if (error == 0)
   1819 			error = fstrans_setstate(mp, FSTRANS_SUSPENDED);
   1820 		if (error != 0) {
   1821 			(void) fstrans_setstate(mp, FSTRANS_NORMAL);
   1822 			return error;
   1823 		}
   1824 		return 0;
   1825 
   1826 	case SUSPEND_RESUME:
   1827 		return fstrans_setstate(mp, FSTRANS_NORMAL);
   1828 
   1829 	default:
   1830 		return EINVAL;
   1831 	}
   1832 }
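
/*
 * Illustrative caller-side sketch (not part of this file): how a snapshot
 * or dump path might drive the suspend/resume cycle implemented above.
 * Whether the caller goes through the VFS_SUSPENDCTL() wrapper or calls
 * ffs_suspendctl() directly is immaterial to the example.
 */
#if 0
	error = VFS_SUSPENDCTL(mp, SUSPEND_SUSPEND);
	if (error == 0) {
		/* The filesystem is now quiescent; do the atomic work here. */
		(void)VFS_SUSPENDCTL(mp, SUSPEND_RESUME);
	}
#endif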
   1833