      1 /*	$NetBSD: ffs_vfsops.c,v 1.222 2008/01/30 11:47:04 ad Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1989, 1991, 1993, 1994
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. Neither the name of the University nor the names of its contributors
     16  *    may be used to endorse or promote products derived from this software
     17  *    without specific prior written permission.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  *
     31  *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
     32  */
     33 
     34 #include <sys/cdefs.h>
     35 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.222 2008/01/30 11:47:04 ad Exp $");
     36 
     37 #if defined(_KERNEL_OPT)
     38 #include "opt_ffs.h"
     39 #include "opt_quota.h"
     40 #include "opt_softdep.h"
     41 #endif
     42 
     43 #include <sys/param.h>
     44 #include <sys/systm.h>
     45 #include <sys/namei.h>
     46 #include <sys/proc.h>
     47 #include <sys/kernel.h>
     48 #include <sys/vnode.h>
     49 #include <sys/socket.h>
     50 #include <sys/mount.h>
     51 #include <sys/buf.h>
     52 #include <sys/device.h>
     53 #include <sys/mbuf.h>
     54 #include <sys/file.h>
     55 #include <sys/disklabel.h>
     56 #include <sys/ioctl.h>
     57 #include <sys/errno.h>
     58 #include <sys/malloc.h>
     59 #include <sys/pool.h>
     60 #include <sys/lock.h>
     61 #include <sys/sysctl.h>
     62 #include <sys/conf.h>
     63 #include <sys/kauth.h>
     64 #include <sys/fstrans.h>
     65 
     66 #include <miscfs/genfs/genfs.h>
     67 #include <miscfs/specfs/specdev.h>
     68 
     69 #include <ufs/ufs/quota.h>
     70 #include <ufs/ufs/ufsmount.h>
     71 #include <ufs/ufs/inode.h>
     72 #include <ufs/ufs/dir.h>
     73 #include <ufs/ufs/ufs_extern.h>
     74 #include <ufs/ufs/ufs_bswap.h>
     75 
     76 #include <ufs/ffs/fs.h>
     77 #include <ufs/ffs/ffs_extern.h>
     78 
     79 /* how many times ffs_init() was called */
     80 int ffs_initcount = 0;
     81 
     82 extern kmutex_t ufs_hashlock;
     83 
     84 extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
     85 extern const struct vnodeopv_desc ffs_specop_opv_desc;
     86 extern const struct vnodeopv_desc ffs_fifoop_opv_desc;
     87 
     88 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
     89 	&ffs_vnodeop_opv_desc,
     90 	&ffs_specop_opv_desc,
     91 	&ffs_fifoop_opv_desc,
     92 	NULL,
     93 };
     94 
     95 struct vfsops ffs_vfsops = {
     96 	MOUNT_FFS,
     97 	sizeof (struct ufs_args),
     98 	ffs_mount,
     99 	ufs_start,
    100 	ffs_unmount,
    101 	ufs_root,
    102 	ufs_quotactl,
    103 	ffs_statvfs,
    104 	ffs_sync,
    105 	ffs_vget,
    106 	ffs_fhtovp,
    107 	ffs_vptofh,
    108 	ffs_init,
    109 	ffs_reinit,
    110 	ffs_done,
    111 	ffs_mountroot,
    112 	ffs_snapshot,
    113 	ffs_extattrctl,
    114 	ffs_suspendctl,
    115 	genfs_renamelock_enter,
    116 	genfs_renamelock_exit,
    117 	ffs_vnodeopv_descs,
    118 	0,
    119 	{ NULL, NULL },
    120 };
    121 VFS_ATTACH(ffs_vfsops);
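
/*
 * The table above is the glue between the generic VFS layer and this
 * filesystem: once VFS_ATTACH() has registered it, a mount request of
 * type MOUNT_FFS is dispatched to ffs_mount(), unmounts to ffs_unmount(),
 * statvfs(2) to ffs_statvfs(), and so on.  As a rough userland sketch
 * only (hypothetical device and mount point, and assuming the
 * five-argument mount(2) that matches the data/data_len interface used
 * by ffs_mount() below, plus <sys/param.h>, <sys/mount.h>,
 * <ufs/ufs/ufsmount.h>, <err.h> and <string.h>):
 *
 *	struct ufs_args args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.fspec = __UNCONST("/dev/wd0e");
 *	if (mount(MOUNT_FFS, "/mnt", 0, &args, sizeof(args)) == -1)
 *		err(1, "mount");
 *
 * The kernel copies the argument block in and hands it to ffs_mount()
 * as "data" with *data_len == sizeof(struct ufs_args).
 */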
    122 
    123 static const struct genfs_ops ffs_genfsops = {
    124 	.gop_size = ffs_gop_size,
    125 	.gop_alloc = ufs_gop_alloc,
    126 	.gop_write = genfs_gop_write,
    127 	.gop_markupdate = ufs_gop_markupdate,
    128 };
    129 
    130 static const struct ufs_ops ffs_ufsops = {
    131 	.uo_itimes = ffs_itimes,
    132 	.uo_update = ffs_update,
    133 	.uo_truncate = ffs_truncate,
    134 	.uo_valloc = ffs_valloc,
    135 	.uo_vfree = ffs_vfree,
    136 	.uo_balloc = ffs_balloc,
    137 };
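
/*
 * The entries above are the FFS-specific halves of operations that the
 * format-independent code under ufs/ufs invokes indirectly through
 * ip->i_ump->um_ops (via the UFS_UPDATE(), UFS_TRUNCATE(), UFS_VALLOC()
 * and related wrappers in ufsmount.h), so the shared directory, lookup
 * and quota code never needs to call ffs_* routines by name.
 */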
    138 
    139 pool_cache_t ffs_inode_cache;
    140 pool_cache_t ffs_dinode1_cache;
    141 pool_cache_t ffs_dinode2_cache;
    142 
    143 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, daddr_t);
    144 static void ffs_oldfscompat_write(struct fs *, struct ufsmount *);
    145 
    146 /*
    147  * Called by main() when ffs is going to be mounted as root.
    148  */
    149 
    150 int
    151 ffs_mountroot(void)
    152 {
    153 	struct fs *fs;
    154 	struct mount *mp;
    155 	struct lwp *l = curlwp;			/* XXX */
    156 	struct ufsmount *ump;
    157 	int error;
    158 
    159 	if (device_class(root_device) != DV_DISK)
    160 		return (ENODEV);
    161 
    162 	if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
    163 		vrele(rootvp);
    164 		return (error);
    165 	}
    166 	if ((error = ffs_mountfs(rootvp, mp, l)) != 0) {
    167 		vfs_unbusy(mp, false);
    168 		vfs_destroy(mp);
    169 		return (error);
    170 	}
    171 	mutex_enter(&mountlist_lock);
    172 	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
    173 	mutex_exit(&mountlist_lock);
    174 	ump = VFSTOUFS(mp);
    175 	fs = ump->um_fs;
    176 	memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
    177 	(void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
    178 	(void)ffs_statvfs(mp, &mp->mnt_stat);
    179 	vfs_unbusy(mp, false);
    180 	setrootfstime((time_t)fs->fs_time);
    181 	return (0);
    182 }
    183 
    184 /*
    185  * VFS Operations.
    186  *
    187  * mount system call
    188  */
    189 int
    190 ffs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
    191 {
    192 	struct lwp *l = curlwp;
    193 	struct nameidata nd;
    194 	struct vnode *vp, *devvp = NULL;
    195 	struct ufs_args *args = data;
    196 	struct ufsmount *ump = NULL;
    197 	struct fs *fs;
    198 	int error = 0, flags, update;
    199 	mode_t accessmode;
    200 
    201 	if (*data_len < sizeof *args)
    202 		return EINVAL;
    203 
    204 	if (mp->mnt_flag & MNT_GETARGS) {
    205 		ump = VFSTOUFS(mp);
    206 		if (ump == NULL)
    207 			return EIO;
    208 		args->fspec = NULL;
    209 		*data_len = sizeof *args;
    210 		return 0;
    211 	}
    212 
    213 #if !defined(SOFTDEP)
    214 	mp->mnt_flag &= ~MNT_SOFTDEP;
    215 #endif
    216 
    217 	update = mp->mnt_flag & MNT_UPDATE;
    218 
    219 	/* Check arguments */
    220 	if (args->fspec != NULL) {
    221 		/*
    222 		 * Look up the name and verify that it's sane.
    223 		 */
    224 		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, args->fspec);
    225 		if ((error = namei(&nd)) != 0)
    226 			return (error);
    227 		devvp = nd.ni_vp;
    228 
    229 		if (!update) {
    230 			/*
    231 			 * Be sure this is a valid block device
    232 			 */
    233 			if (devvp->v_type != VBLK)
    234 				error = ENOTBLK;
    235 			else if (bdevsw_lookup(devvp->v_rdev) == NULL)
    236 				error = ENXIO;
    237 		} else {
    238 			/*
    239 			 * Be sure we're still naming the same device
    240 			 * used for our initial mount
    241 			 */
    242 			ump = VFSTOUFS(mp);
    243 			if (devvp != ump->um_devvp) {
    244 				if (devvp->v_rdev != ump->um_devvp->v_rdev)
    245 					error = EINVAL;
    246 				else {
    247 					vrele(devvp);
    248 					devvp = ump->um_devvp;
    249 					vref(devvp);
    250 				}
    251 			}
    252 		}
    253 	} else {
    254 		if (!update) {
    255 			/* New mounts must have a filename for the device */
    256 			return (EINVAL);
    257 		} else {
    258 			/* Use the extant mount */
    259 			ump = VFSTOUFS(mp);
    260 			devvp = ump->um_devvp;
    261 			vref(devvp);
    262 		}
    263 	}
    264 
    265 	/*
    266 	 * Mark the device and any existing vnodes as involved in
    267 	 * softdep processing.
    268 	 */
    269 	if ((mp->mnt_flag & MNT_SOFTDEP) != 0) {
    270 		devvp->v_uflag |= VU_SOFTDEP;
    271 		mutex_enter(&mntvnode_lock);
    272 		TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
    273 			if (vp->v_mount != mp || vismarker(vp))
    274 				continue;
    275 			vp->v_uflag |= VU_SOFTDEP;
    276 		}
    277 		mutex_exit(&mntvnode_lock);
    278 	}
    279 
    280 	/*
    281 	 * If mount by non-root, then verify that user has necessary
    282 	 * permissions on the device.
    283 	 */
    284 	if (error == 0 && kauth_authorize_generic(l->l_cred,
    285 	    KAUTH_GENERIC_ISSUSER, NULL) != 0) {
    286 		accessmode = VREAD;
    287 		if (update ?
    288 		    (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
    289 		    (mp->mnt_flag & MNT_RDONLY) == 0)
    290 			accessmode |= VWRITE;
    291 		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    292 		error = VOP_ACCESS(devvp, accessmode, l->l_cred);
    293 		VOP_UNLOCK(devvp, 0);
    294 	}
    295 
    296 	if (error) {
    297 		vrele(devvp);
    298 		return (error);
    299 	}
    300 
    301 	if (!update) {
    302 		int xflags;
    303 
    304 		if (mp->mnt_flag & MNT_RDONLY)
    305 			xflags = FREAD;
    306 		else
    307 			xflags = FREAD|FWRITE;
    308 		error = VOP_OPEN(devvp, xflags, FSCRED);
    309 		if (error)
    310 			goto fail;
    311 		error = ffs_mountfs(devvp, mp, l);
    312 		if (error) {
    313 			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    314 			(void)VOP_CLOSE(devvp, xflags, NOCRED);
    315 			VOP_UNLOCK(devvp, 0);
    316 			goto fail;
    317 		}
    318 
    319 		ump = VFSTOUFS(mp);
    320 		fs = ump->um_fs;
    321 		if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
    322 		    (MNT_SOFTDEP | MNT_ASYNC)) {
    323 			printf("%s fs uses soft updates, "
    324 			    "ignoring async mode\n",
    325 			    fs->fs_fsmnt);
    326 			mp->mnt_flag &= ~MNT_ASYNC;
    327 		}
    328 	} else {
    329 		/*
    330 		 * Update the mount.
    331 		 */
    332 
    333 		/*
    334 		 * The initial mount got a reference on this
    335 		 * device, so drop the one obtained via
    336 		 * namei(), above.
    337 		 */
    338 		vrele(devvp);
    339 
    340 		ump = VFSTOUFS(mp);
    341 		fs = ump->um_fs;
    342 		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
    343 			/*
    344 			 * Changing from r/w to r/o
    345 			 */
    346 			flags = WRITECLOSE;
    347 			if (mp->mnt_flag & MNT_FORCE)
    348 				flags |= FORCECLOSE;
    349 			if (mp->mnt_flag & MNT_SOFTDEP)
    350 				error = softdep_flushfiles(mp, flags, l);
    351 			else
    352 				error = ffs_flushfiles(mp, flags, l);
    353 			if (fs->fs_pendingblocks != 0 ||
    354 			    fs->fs_pendinginodes != 0) {
    355 				printf("%s: update error: blocks %" PRId64
    356 				       " files %d\n",
    357 				    fs->fs_fsmnt, fs->fs_pendingblocks,
    358 				    fs->fs_pendinginodes);
    359 				fs->fs_pendingblocks = 0;
    360 				fs->fs_pendinginodes = 0;
    361 			}
    362 			if (error == 0 &&
    363 			    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
    364 			    fs->fs_clean & FS_WASCLEAN) {
    365 				if (mp->mnt_flag & MNT_SOFTDEP)
    366 					fs->fs_flags &= ~FS_DOSOFTDEP;
    367 				fs->fs_clean = FS_ISCLEAN;
    368 				(void) ffs_sbupdate(ump, MNT_WAIT);
    369 			}
    370 			if (error)
    371 				return (error);
    372 			fs->fs_ronly = 1;
    373 			fs->fs_fmod = 0;
    374 		}
    375 
    376 		/*
    377 		 * Flush soft dependencies if disabling it via an update
    378 		 * mount. This may leave some items to be processed,
    379 		 * so don't do this yet XXX.
    380 		 */
    381 		if ((fs->fs_flags & FS_DOSOFTDEP) &&
    382 		    !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
    383 #ifdef notyet
    384 			flags = WRITECLOSE;
    385 			if (mp->mnt_flag & MNT_FORCE)
    386 				flags |= FORCECLOSE;
    387 			error = softdep_flushfiles(mp, flags, l);
     388 			if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0) {
     389 				fs->fs_flags &= ~FS_DOSOFTDEP;
     390 				(void) ffs_sbupdate(ump, MNT_WAIT);
         			}
    391 #elif defined(SOFTDEP)
    392 			mp->mnt_flag |= MNT_SOFTDEP;
    393 #endif
    394 		}
    395 
    396 		/*
    397 		 * When upgrading to a softdep mount, we must first flush
    398 		 * all vnodes. (not done yet -- see above)
    399 		 */
    400 		if (!(fs->fs_flags & FS_DOSOFTDEP) &&
    401 		    (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
    402 #ifdef notyet
    403 			flags = WRITECLOSE;
    404 			if (mp->mnt_flag & MNT_FORCE)
    405 				flags |= FORCECLOSE;
    406 			error = ffs_flushfiles(mp, flags, l);
    407 #else
    408 			mp->mnt_flag &= ~MNT_SOFTDEP;
    409 #endif
    410 		}
    411 
    412 		if (mp->mnt_flag & MNT_RELOAD) {
    413 			error = ffs_reload(mp, l->l_cred, l);
    414 			if (error)
    415 				return (error);
    416 		}
    417 
    418 		if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
    419 			/*
    420 			 * Changing from read-only to read/write
    421 			 */
    422 			fs->fs_ronly = 0;
    423 			fs->fs_clean <<= 1;
    424 			fs->fs_fmod = 1;
    425 			if ((fs->fs_flags & FS_DOSOFTDEP)) {
    426 				error = softdep_mount(devvp, mp, fs,
    427 				    l->l_cred);
    428 				if (error)
    429 					return (error);
    430 			}
    431 			if (fs->fs_snapinum[0] != 0)
    432 				ffs_snapshot_mount(mp);
    433 		}
    434 		if (args->fspec == NULL)
    435 			return EINVAL;
    436 		if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
    437 		    (MNT_SOFTDEP | MNT_ASYNC)) {
    438 			printf("%s fs uses soft updates, ignoring async mode\n",
    439 			    fs->fs_fsmnt);
    440 			mp->mnt_flag &= ~MNT_ASYNC;
    441 		}
    442 	}
    443 
    444 	error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
    445 	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
    446 	if (error == 0)
    447 		(void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
    448 		    sizeof(fs->fs_fsmnt));
    449 	if (mp->mnt_flag & MNT_SOFTDEP)
    450 		fs->fs_flags |= FS_DOSOFTDEP;
    451 	else
    452 		fs->fs_flags &= ~FS_DOSOFTDEP;
    453 	if (fs->fs_fmod != 0) {	/* XXX */
    454 		fs->fs_fmod = 0;
    455 		if (fs->fs_clean & FS_WASCLEAN)
    456 			fs->fs_time = time_second;
    457 		else {
    458 			printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
    459 			    mp->mnt_stat.f_mntfromname, fs->fs_clean);
    460 			printf("%s: lost blocks %" PRId64 " files %d\n",
    461 			    mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
    462 			    fs->fs_pendinginodes);
    463 		}
    464 		(void) ffs_cgupdate(ump, MNT_WAIT);
    465 	}
    466 	return (error);
    467 
    468 fail:
    469 	vrele(devvp);
    470 	return (error);
    471 }
    472 
    473 /*
    474  * Reload all incore data for a filesystem (used after running fsck on
    475  * the root filesystem and finding things to fix). The filesystem must
    476  * be mounted read-only.
    477  *
    478  * Things to do to update the mount:
    479  *	1) invalidate all cached meta-data.
    480  *	2) re-read superblock from disk.
    481  *	3) re-read summary information from disk.
    482  *	4) invalidate all inactive vnodes.
    483  *	5) invalidate all cached file data.
    484  *	6) re-read inode data for all active vnodes.
    485  */
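/*
 * In practice a reload is requested with the "reload" option on an
 * update mount of an already mounted, read-only filesystem, typically
 * after fsck(8) has repaired the root filesystem:
 *
 *	# mount -u -o reload /
 *
 * mount(8) turns that into MNT_UPDATE|MNT_RELOAD, and ffs_mount() above
 * then calls into this routine.
 */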
    486 int
    487 ffs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
    488 {
    489 	struct vnode *vp, *mvp, *devvp;
    490 	struct inode *ip;
    491 	void *space;
    492 	struct buf *bp;
    493 	struct fs *fs, *newfs;
    494 	struct partinfo dpart;
    495 	int i, blks, size, error;
    496 	int32_t *lp;
    497 	struct ufsmount *ump;
    498 	daddr_t sblockloc;
    499 
    500 	if ((mp->mnt_flag & MNT_RDONLY) == 0)
    501 		return (EINVAL);
    502 
    503 	ump = VFSTOUFS(mp);
    504 	/*
    505 	 * Step 1: invalidate all cached meta-data.
    506 	 */
    507 	devvp = ump->um_devvp;
    508 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    509 	error = vinvalbuf(devvp, 0, cred, l, 0, 0);
    510 	VOP_UNLOCK(devvp, 0);
    511 	if (error)
    512 		panic("ffs_reload: dirty1");
    513 	/*
    514 	 * Step 2: re-read superblock from disk.
    515 	 */
    516 	fs = ump->um_fs;
    517 	if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, NOCRED) != 0)
    518 		size = DEV_BSIZE;
    519 	else
    520 		size = dpart.disklab->d_secsize;
    521 	/* XXX we don't handle possibility that superblock moved. */
    522 	error = bread(devvp, fs->fs_sblockloc / size, fs->fs_sbsize,
    523 		      NOCRED, &bp);
    524 	if (error) {
    525 		brelse(bp, 0);
    526 		return (error);
    527 	}
    528 	newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
    529 	memcpy(newfs, bp->b_data, fs->fs_sbsize);
    530 #ifdef FFS_EI
    531 	if (ump->um_flags & UFS_NEEDSWAP) {
    532 		ffs_sb_swap((struct fs*)bp->b_data, newfs);
    533 		fs->fs_flags |= FS_SWAPPED;
    534 	} else
    535 #endif
    536 		fs->fs_flags &= ~FS_SWAPPED;
    537 	if ((newfs->fs_magic != FS_UFS1_MAGIC &&
    538 	     newfs->fs_magic != FS_UFS2_MAGIC)||
    539 	     newfs->fs_bsize > MAXBSIZE ||
    540 	     newfs->fs_bsize < sizeof(struct fs)) {
    541 		brelse(bp, 0);
    542 		free(newfs, M_UFSMNT);
    543 		return (EIO);		/* XXX needs translation */
    544 	}
    545 	/* Store off old fs_sblockloc for fs_oldfscompat_read. */
    546 	sblockloc = fs->fs_sblockloc;
    547 	/*
    548 	 * Copy pointer fields back into superblock before copying in	XXX
    549 	 * new superblock. These should really be in the ufsmount.	XXX
    550 	 * Note that important parameters (eg fs_ncg) are unchanged.
    551 	 */
    552 	newfs->fs_csp = fs->fs_csp;
    553 	newfs->fs_maxcluster = fs->fs_maxcluster;
    554 	newfs->fs_contigdirs = fs->fs_contigdirs;
    555 	newfs->fs_ronly = fs->fs_ronly;
    556 	newfs->fs_active = fs->fs_active;
    557 	memcpy(fs, newfs, (u_int)fs->fs_sbsize);
    558 	brelse(bp, 0);
    559 	free(newfs, M_UFSMNT);
    560 
    561 	/* Recheck for apple UFS filesystem */
    562 	ump->um_flags &= ~UFS_ISAPPLEUFS;
    563 	/* First check to see if this is tagged as an Apple UFS filesystem
    564 	 * in the disklabel
    565 	 */
    566 	if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) == 0) &&
    567 		(dpart.part->p_fstype == FS_APPLEUFS)) {
    568 		ump->um_flags |= UFS_ISAPPLEUFS;
    569 	}
    570 #ifdef APPLE_UFS
    571 	else {
    572 		/* Manually look for an apple ufs label, and if a valid one
    573 		 * is found, then treat it like an Apple UFS filesystem anyway
    574 		 */
    575 		error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
    576 			APPLEUFS_LABEL_SIZE, cred, &bp);
    577 		if (error) {
    578 			brelse(bp, 0);
    579 			return (error);
    580 		}
    581 		error = ffs_appleufs_validate(fs->fs_fsmnt,
    582 			(struct appleufslabel *)bp->b_data,NULL);
    583 		if (error == 0)
    584 			ump->um_flags |= UFS_ISAPPLEUFS;
    585 		brelse(bp, 0);
    586 		bp = NULL;
    587 	}
    588 #else
    589 	if (ump->um_flags & UFS_ISAPPLEUFS)
    590 		return (EIO);
    591 #endif
    592 
    593 	if (UFS_MPISAPPLEUFS(ump)) {
    594 		/* see comment about NeXT below */
    595 		ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
    596 		ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
    597 		mp->mnt_iflag |= IMNT_DTYPE;
    598 	} else {
    599 		ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
    600 		ump->um_dirblksiz = DIRBLKSIZ;
    601 		if (ump->um_maxsymlinklen > 0)
    602 			mp->mnt_iflag |= IMNT_DTYPE;
    603 		else
    604 			mp->mnt_iflag &= ~IMNT_DTYPE;
    605 	}
    606 	ffs_oldfscompat_read(fs, ump, sblockloc);
    607 	mutex_enter(&ump->um_lock);
    608 	ump->um_maxfilesize = fs->fs_maxfilesize;
    609 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
    610 		fs->fs_pendingblocks = 0;
    611 		fs->fs_pendinginodes = 0;
    612 	}
    613 	mutex_exit(&ump->um_lock);
    614 
    615 	ffs_statvfs(mp, &mp->mnt_stat);
    616 	/*
    617 	 * Step 3: re-read summary information from disk.
    618 	 */
    619 	blks = howmany(fs->fs_cssize, fs->fs_fsize);
    620 	space = fs->fs_csp;
    621 	for (i = 0; i < blks; i += fs->fs_frag) {
    622 		size = fs->fs_bsize;
    623 		if (i + fs->fs_frag > blks)
    624 			size = (blks - i) * fs->fs_fsize;
    625 		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
    626 			      NOCRED, &bp);
    627 		if (error) {
    628 			brelse(bp, 0);
    629 			return (error);
    630 		}
    631 #ifdef FFS_EI
    632 		if (UFS_FSNEEDSWAP(fs))
    633 			ffs_csum_swap((struct csum *)bp->b_data,
    634 			    (struct csum *)space, size);
    635 		else
    636 #endif
    637 			memcpy(space, bp->b_data, (size_t)size);
    638 		space = (char *)space + size;
    639 		brelse(bp, 0);
    640 	}
    641 	if ((fs->fs_flags & FS_DOSOFTDEP))
    642 		softdep_mount(devvp, mp, fs, cred);
    643 	if (fs->fs_snapinum[0] != 0)
    644 		ffs_snapshot_mount(mp);
    645 	/*
    646 	 * We no longer know anything about clusters per cylinder group.
    647 	 */
    648 	if (fs->fs_contigsumsize > 0) {
    649 		lp = fs->fs_maxcluster;
    650 		for (i = 0; i < fs->fs_ncg; i++)
    651 			*lp++ = fs->fs_contigsumsize;
    652 	}
    653 
    654 	/* Allocate a marker vnode. */
    655 	if ((mvp = vnalloc(mp)) == NULL)
    656 		return ENOMEM;
    657 	/*
    658 	 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
    659 	 * and vclean() can be called indirectly
    660 	 */
    661 	mutex_enter(&mntvnode_lock);
    662  loop:
    663 	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
    664 		vmark(mvp, vp);
    665 		if (vp->v_mount != mp || vismarker(vp))
    666 			continue;
    667 		/*
    668 		 * Step 4: invalidate all inactive vnodes.
    669 		 */
    670 		if (vrecycle(vp, &mntvnode_lock, l)) {
    671 			mutex_enter(&mntvnode_lock);
    672 			(void)vunmark(mvp);
    673 			goto loop;
    674 		}
    675 		/*
    676 		 * Step 5: invalidate all cached file data.
    677 		 */
    678 		mutex_enter(&vp->v_interlock);
    679 		mutex_exit(&mntvnode_lock);
    680 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
    681 			(void)vunmark(mvp);
    682 			goto loop;
    683 		}
    684 		if (vinvalbuf(vp, 0, cred, l, 0, 0))
    685 			panic("ffs_reload: dirty2");
    686 		/*
    687 		 * Step 6: re-read inode data for all active vnodes.
    688 		 */
    689 		ip = VTOI(vp);
    690 		error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
    691 			      (int)fs->fs_bsize, NOCRED, &bp);
    692 		if (error) {
    693 			brelse(bp, 0);
    694 			vput(vp);
    695 			(void)vunmark(mvp);
    696 			break;
    697 		}
    698 		ffs_load_inode(bp, ip, fs, ip->i_number);
    699 		ip->i_ffs_effnlink = ip->i_nlink;
    700 		brelse(bp, 0);
    701 		vput(vp);
    702 		mutex_enter(&mntvnode_lock);
    703 	}
    704 	mutex_exit(&mntvnode_lock);
    705 	vnfree(mvp);
    706 	return (error);
    707 }
    708 
    709 /*
    710  * Possible superblock locations ordered from most to least likely.
    711  */
    712 static const int sblock_try[] = SBLOCKSEARCH;
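
/*
 * SBLOCKSEARCH comes from <ufs/ffs/fs.h>; in this era it is, give or
 * take, { SBLOCK_UFS2, SBLOCK_UFS1, SBLOCK_FLOPPY, SBLOCK_PIGGY, -1 },
 * i.e. byte offsets 65536, 8192, 0 and 262144, with -1 terminating the
 * search.  ffs_mountfs() below walks these offsets until it finds a
 * superblock whose magic number, size and recorded location all agree.
 */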
    713 
    714 /*
    715  * Common code for mount and mountroot
    716  */
    717 int
    718 ffs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
    719 {
    720 	struct ufsmount *ump;
    721 	struct buf *bp;
    722 	struct fs *fs;
    723 	dev_t dev;
    724 	struct partinfo dpart;
    725 	void *space;
    726 	daddr_t sblockloc, fsblockloc;
    727 	int blks, fstype;
    728 	int error, i, size, ronly, bset = 0;
    729 #ifdef FFS_EI
    730 	int needswap = 0;		/* keep gcc happy */
    731 #endif
    732 	int32_t *lp;
    733 	kauth_cred_t cred;
    734 	u_int32_t sbsize = 8192;	/* keep gcc happy*/
    735 
    736 	dev = devvp->v_rdev;
    737 	cred = l ? l->l_cred : NOCRED;
    738 
    739 	/* Flush out any old buffers remaining from a previous use. */
    740 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    741 	error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
    742 	VOP_UNLOCK(devvp, 0);
    743 	if (error)
    744 		return (error);
    745 
    746 	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
    747 	if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) != 0)
    748 		size = DEV_BSIZE;
    749 	else
    750 		size = dpart.disklab->d_secsize;
    751 
    752 	bp = NULL;
    753 	ump = NULL;
    754 	fs = NULL;
    755 	sblockloc = 0;
    756 	fstype = 0;
    757 
    758 	error = fstrans_mount(mp);
    759 	if (error)
    760 		return error;
    761 
    762 	/*
    763 	 * Try reading the superblock in each of its possible locations.
    764 	 */
    765 	for (i = 0; ; i++) {
    766 		if (bp != NULL) {
    767 			brelse(bp, BC_NOCACHE);
    768 			bp = NULL;
    769 		}
    770 		if (sblock_try[i] == -1) {
    771 			error = EINVAL;
    772 			fs = NULL;
    773 			goto out;
    774 		}
    775 		error = bread(devvp, sblock_try[i] / size, SBLOCKSIZE, cred,
    776 			      &bp);
    777 		if (error) {
    778 			fs = NULL;
    779 			goto out;
    780 		}
    781 		fs = (struct fs*)bp->b_data;
    782 		fsblockloc = sblockloc = sblock_try[i];
    783 		if (fs->fs_magic == FS_UFS1_MAGIC) {
    784 			sbsize = fs->fs_sbsize;
    785 			fstype = UFS1;
    786 #ifdef FFS_EI
    787 			needswap = 0;
    788 		} else if (fs->fs_magic == bswap32(FS_UFS1_MAGIC)) {
    789 			sbsize = bswap32(fs->fs_sbsize);
    790 			fstype = UFS1;
    791 			needswap = 1;
    792 #endif
    793 		} else if (fs->fs_magic == FS_UFS2_MAGIC) {
    794 			sbsize = fs->fs_sbsize;
    795 			fstype = UFS2;
    796 #ifdef FFS_EI
    797 			needswap = 0;
    798 		} else if (fs->fs_magic == bswap32(FS_UFS2_MAGIC)) {
    799 			sbsize = bswap32(fs->fs_sbsize);
    800 			fstype = UFS2;
    801 			needswap = 1;
    802 #endif
    803 		} else
    804 			continue;
    805 
    806 
    807 		/* fs->fs_sblockloc isn't defined for old filesystems */
    808 		if (fstype == UFS1 && !(fs->fs_old_flags & FS_FLAGS_UPDATED)) {
    809 			if (sblockloc == SBLOCK_UFS2)
    810 				/*
    811 				 * This is likely to be the first alternate
    812 				 * in a filesystem with 64k blocks.
    813 				 * Don't use it.
    814 				 */
    815 				continue;
    816 			fsblockloc = sblockloc;
    817 		} else {
    818 			fsblockloc = fs->fs_sblockloc;
    819 #ifdef FFS_EI
    820 			if (needswap)
    821 				fsblockloc = bswap64(fsblockloc);
    822 #endif
    823 		}
    824 
    825 		/* Check we haven't found an alternate superblock */
    826 		if (fsblockloc != sblockloc)
    827 			continue;
    828 
    829 		/* Validate size of superblock */
    830 		if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs))
    831 			continue;
    832 
    833 		/* Ok seems to be a good superblock */
    834 		break;
    835 	}
    836 
    837 	fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
    838 	memcpy(fs, bp->b_data, sbsize);
    839 
    840 	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
    841 	memset(ump, 0, sizeof *ump);
    842 	mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);
    843 	ump->um_fs = fs;
    844 	ump->um_ops = &ffs_ufsops;
    845 
    846 #ifdef FFS_EI
    847 	if (needswap) {
    848 		ffs_sb_swap((struct fs*)bp->b_data, fs);
    849 		fs->fs_flags |= FS_SWAPPED;
    850 	} else
    851 #endif
    852 		fs->fs_flags &= ~FS_SWAPPED;
    853 
    854 	ffs_oldfscompat_read(fs, ump, sblockloc);
    855 	ump->um_maxfilesize = fs->fs_maxfilesize;
    856 
    857 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
    858 		fs->fs_pendingblocks = 0;
    859 		fs->fs_pendinginodes = 0;
    860 	}
    861 
    862 	ump->um_fstype = fstype;
    863 	if (fs->fs_sbsize < SBLOCKSIZE)
    864 		brelse(bp, BC_INVAL);
    865 	else
    866 		brelse(bp, 0);
    867 	bp = NULL;
    868 
    869 	/* First check to see if this is tagged as an Apple UFS filesystem
    870 	 * in the disklabel
    871 	 */
    872 	if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) == 0) &&
    873 		(dpart.part->p_fstype == FS_APPLEUFS)) {
    874 		ump->um_flags |= UFS_ISAPPLEUFS;
    875 	}
    876 #ifdef APPLE_UFS
    877 	else {
    878 		/* Manually look for an apple ufs label, and if a valid one
    879 		 * is found, then treat it like an Apple UFS filesystem anyway
    880 		 */
    881 		error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
    882 			APPLEUFS_LABEL_SIZE, cred, &bp);
    883 		if (error)
    884 			goto out;
    885 		error = ffs_appleufs_validate(fs->fs_fsmnt,
    886 			(struct appleufslabel *)bp->b_data,NULL);
    887 		if (error == 0) {
    888 			ump->um_flags |= UFS_ISAPPLEUFS;
    889 		}
    890 		brelse(bp, 0);
    891 		bp = NULL;
    892 	}
    893 #else
    894 	if (ump->um_flags & UFS_ISAPPLEUFS) {
    895 		error = EINVAL;
    896 		goto out;
    897 	}
    898 #endif
    899 
    900 	/*
    901 	 * verify that we can access the last block in the fs
    902 	 * if we're mounting read/write.
    903 	 */
    904 
    905 	if (!ronly) {
    906 		error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize,
    907 		    cred, &bp);
    908 		if (bp->b_bcount != fs->fs_fsize)
    909 			error = EINVAL;
    910 		if (error) {
    911 			bset = BC_INVAL;
    912 			goto out;
    913 		}
    914 		brelse(bp, BC_INVAL);
    915 		bp = NULL;
    916 	}
    917 
    918 	fs->fs_ronly = ronly;
    919 	if (ronly == 0) {
    920 		fs->fs_clean <<= 1;
    921 		fs->fs_fmod = 1;
    922 	}
    923 	size = fs->fs_cssize;
    924 	blks = howmany(size, fs->fs_fsize);
    925 	if (fs->fs_contigsumsize > 0)
    926 		size += fs->fs_ncg * sizeof(int32_t);
    927 	size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
    928 	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
    929 	fs->fs_csp = space;
    930 	for (i = 0; i < blks; i += fs->fs_frag) {
    931 		size = fs->fs_bsize;
    932 		if (i + fs->fs_frag > blks)
    933 			size = (blks - i) * fs->fs_fsize;
    934 		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
    935 			      cred, &bp);
    936 		if (error) {
    937 			free(fs->fs_csp, M_UFSMNT);
    938 			goto out;
    939 		}
    940 #ifdef FFS_EI
    941 		if (needswap)
    942 			ffs_csum_swap((struct csum *)bp->b_data,
    943 				(struct csum *)space, size);
    944 		else
    945 #endif
    946 			memcpy(space, bp->b_data, (u_int)size);
    947 
    948 		space = (char *)space + size;
    949 		brelse(bp, 0);
    950 		bp = NULL;
    951 	}
    952 	if (fs->fs_contigsumsize > 0) {
    953 		fs->fs_maxcluster = lp = space;
    954 		for (i = 0; i < fs->fs_ncg; i++)
    955 			*lp++ = fs->fs_contigsumsize;
    956 		space = lp;
    957 	}
    958 	size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
    959 	fs->fs_contigdirs = space;
    960 	space = (char *)space + size;
    961 	memset(fs->fs_contigdirs, 0, size);
    962 		/* Compatibility for old filesystems - XXX */
    963 	if (fs->fs_avgfilesize <= 0)
    964 		fs->fs_avgfilesize = AVFILESIZ;
    965 	if (fs->fs_avgfpdir <= 0)
    966 		fs->fs_avgfpdir = AFPDIR;
    967 	fs->fs_active = NULL;
    968 	mp->mnt_data = ump;
    969 	mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
    970 	mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_FFS);
    971 	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
    972 	mp->mnt_stat.f_namemax = FFS_MAXNAMLEN;
    973 	if (UFS_MPISAPPLEUFS(ump)) {
    974 		/* NeXT used to keep short symlinks in the inode even
    975 		 * when using FS_42INODEFMT.  In that case fs->fs_maxsymlinklen
    976 		 * is probably -1, but we still need to be able to identify
    977 		 * short symlinks.
    978 		 */
    979 		ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
    980 		ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
    981 		mp->mnt_iflag |= IMNT_DTYPE;
    982 	} else {
    983 		ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
    984 		ump->um_dirblksiz = DIRBLKSIZ;
    985 		if (ump->um_maxsymlinklen > 0)
    986 			mp->mnt_iflag |= IMNT_DTYPE;
    987 		else
    988 			mp->mnt_iflag &= ~IMNT_DTYPE;
    989 	}
    990 	mp->mnt_fs_bshift = fs->fs_bshift;
    991 	mp->mnt_dev_bshift = DEV_BSHIFT;	/* XXX */
    992 	mp->mnt_flag |= MNT_LOCAL;
    993 	mp->mnt_iflag |= IMNT_MPSAFE;
    994 #ifdef FFS_EI
    995 	if (needswap)
    996 		ump->um_flags |= UFS_NEEDSWAP;
    997 #endif
    998 	ump->um_mountp = mp;
    999 	ump->um_dev = dev;
   1000 	ump->um_devvp = devvp;
   1001 	ump->um_nindir = fs->fs_nindir;
   1002 	ump->um_lognindir = ffs(fs->fs_nindir) - 1;
   1003 	ump->um_bptrtodb = fs->fs_fsbtodb;
   1004 	ump->um_seqinc = fs->fs_frag;
   1005 	for (i = 0; i < MAXQUOTAS; i++)
   1006 		ump->um_quotas[i] = NULLVP;
   1007 	devvp->v_specmountpoint = mp;
   1008 	if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
   1009 		error = softdep_mount(devvp, mp, fs, cred);
   1010 		if (error) {
   1011 			free(fs->fs_csp, M_UFSMNT);
   1012 			goto out;
   1013 		}
   1014 	}
   1015 	if (ronly == 0 && fs->fs_snapinum[0] != 0)
   1016 		ffs_snapshot_mount(mp);
   1017 #ifdef UFS_EXTATTR
   1018 	/*
   1019 	 * Initialize file-backed extended attributes on UFS1 file
   1020 	 * systems.
   1021 	 */
   1022 	if (ump->um_fstype == UFS1) {
   1023 		ufs_extattr_uepm_init(&ump->um_extattr);
   1024 #ifdef UFS_EXTATTR_AUTOSTART
   1025 		/*
   1026 		 * XXX Just ignore errors.  Not clear that we should
   1027 		 * XXX fail the mount in this case.
   1028 		 */
   1029 		(void) ufs_extattr_autostart(mp, l);
   1030 #endif
   1031 	}
   1032 #endif /* UFS_EXTATTR */
   1033 	return (0);
   1034 out:
   1035 	fstrans_unmount(mp);
   1036 	if (fs)
   1037 		free(fs, M_UFSMNT);
   1038 	devvp->v_specmountpoint = NULL;
   1039 	if (bp)
   1040 		brelse(bp, bset);
   1041 	if (ump) {
   1042 		if (ump->um_oldfscompat)
   1043 			free(ump->um_oldfscompat, M_UFSMNT);
   1044 		mutex_destroy(&ump->um_lock);
   1045 		free(ump, M_UFSMNT);
   1046 		mp->mnt_data = NULL;
   1047 	}
   1048 	return (error);
   1049 }
   1050 
   1051 /*
   1052  * Sanity checks for loading old filesystem superblocks.
   1053  * See ffs_oldfscompat_write below for unwound actions.
   1054  *
   1055  * XXX - Parts get retired eventually.
   1056  * Unfortunately new bits get added.
   1057  */
   1058 static void
   1059 ffs_oldfscompat_read(struct fs *fs, struct ufsmount *ump, daddr_t sblockloc)
   1060 {
   1061 	off_t maxfilesize;
   1062 	int32_t *extrasave;
   1063 
   1064 	if ((fs->fs_magic != FS_UFS1_MAGIC) ||
   1065 	    (fs->fs_old_flags & FS_FLAGS_UPDATED))
   1066 		return;
   1067 
   1068 	if (!ump->um_oldfscompat)
   1069 		ump->um_oldfscompat = malloc(512 + 3*sizeof(int32_t),
   1070 		    M_UFSMNT, M_WAITOK);
   1071 
   1072 	memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
   1073 	extrasave = ump->um_oldfscompat;
   1074 	extrasave += 512/sizeof(int32_t);
   1075 	extrasave[0] = fs->fs_old_npsect;
   1076 	extrasave[1] = fs->fs_old_interleave;
   1077 	extrasave[2] = fs->fs_old_trackskew;
   1078 
   1079 	/* These fields will be overwritten by their
   1080 	 * original values in fs_oldfscompat_write, so it is harmless
   1081 	 * to modify them here.
   1082 	 */
   1083 	fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
   1084 	fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
   1085 	fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
   1086 	fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
   1087 
   1088 	fs->fs_maxbsize = fs->fs_bsize;
   1089 	fs->fs_time = fs->fs_old_time;
   1090 	fs->fs_size = fs->fs_old_size;
   1091 	fs->fs_dsize = fs->fs_old_dsize;
   1092 	fs->fs_csaddr = fs->fs_old_csaddr;
   1093 	fs->fs_sblockloc = sblockloc;
   1094 
    1095 	fs->fs_flags = fs->fs_old_flags | (fs->fs_flags & FS_INTERNAL);
   1096 
   1097 	if (fs->fs_old_postblformat == FS_42POSTBLFMT) {
   1098 		fs->fs_old_nrpos = 8;
   1099 		fs->fs_old_npsect = fs->fs_old_nsect;
   1100 		fs->fs_old_interleave = 1;
   1101 		fs->fs_old_trackskew = 0;
   1102 	}
   1103 
   1104 	if (fs->fs_old_inodefmt < FS_44INODEFMT) {
   1105 		fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
   1106 		fs->fs_qbmask = ~fs->fs_bmask;
   1107 		fs->fs_qfmask = ~fs->fs_fmask;
   1108 	}
   1109 
   1110 	maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;
   1111 	if (fs->fs_maxfilesize > maxfilesize)
   1112 		fs->fs_maxfilesize = maxfilesize;
   1113 
   1114 	/* Compatibility for old filesystems */
   1115 	if (fs->fs_avgfilesize <= 0)
   1116 		fs->fs_avgfilesize = AVFILESIZ;
   1117 	if (fs->fs_avgfpdir <= 0)
   1118 		fs->fs_avgfpdir = AFPDIR;
   1119 
   1120 #if 0
   1121 	if (bigcgs) {
   1122 		fs->fs_save_cgsize = fs->fs_cgsize;
   1123 		fs->fs_cgsize = fs->fs_bsize;
   1124 	}
   1125 #endif
   1126 }
   1127 
   1128 /*
   1129  * Unwinding superblock updates for old filesystems.
   1130  * See ffs_oldfscompat_read above for details.
   1131  *
   1132  * XXX - Parts get retired eventually.
   1133  * Unfortunately new bits get added.
   1134  */
   1135 static void
   1136 ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
   1137 {
   1138 	int32_t *extrasave;
   1139 
   1140 	if ((fs->fs_magic != FS_UFS1_MAGIC) ||
   1141 	    (fs->fs_old_flags & FS_FLAGS_UPDATED))
   1142 		return;
   1143 
   1144 	fs->fs_old_time = fs->fs_time;
   1145 	fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
   1146 	fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
   1147 	fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
   1148 	fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
   1149 	fs->fs_old_flags = fs->fs_flags;
   1150 
   1151 #if 0
   1152 	if (bigcgs) {
   1153 		fs->fs_cgsize = fs->fs_save_cgsize;
   1154 	}
   1155 #endif
   1156 
   1157 	memcpy(&fs->fs_old_postbl_start, ump->um_oldfscompat, 512);
   1158 	extrasave = ump->um_oldfscompat;
   1159 	extrasave += 512/sizeof(int32_t);
   1160 	fs->fs_old_npsect = extrasave[0];
   1161 	fs->fs_old_interleave = extrasave[1];
   1162 	fs->fs_old_trackskew = extrasave[2];
   1163 
   1164 }
   1165 
   1166 /*
   1167  * unmount system call
   1168  */
   1169 int
   1170 ffs_unmount(struct mount *mp, int mntflags)
   1171 {
   1172 	struct lwp *l = curlwp;
   1173 	struct ufsmount *ump = VFSTOUFS(mp);
   1174 	struct fs *fs = ump->um_fs;
   1175 	int error, flags, penderr;
   1176 
   1177 	penderr = 0;
   1178 	flags = 0;
   1179 	if (mntflags & MNT_FORCE)
   1180 		flags |= FORCECLOSE;
   1181 #ifdef UFS_EXTATTR
   1182 	if (ump->um_fstype == UFS1) {
   1183 		ufs_extattr_stop(mp, l);
   1184 		ufs_extattr_uepm_destroy(&ump->um_extattr);
   1185 	}
   1186 #endif /* UFS_EXTATTR */
   1187 	if (mp->mnt_flag & MNT_SOFTDEP) {
   1188 		if ((error = softdep_flushfiles(mp, flags, l)) != 0)
   1189 			return (error);
   1190 	} else {
   1191 		if ((error = ffs_flushfiles(mp, flags, l)) != 0)
   1192 			return (error);
   1193 	}
   1194 	mutex_enter(&ump->um_lock);
   1195 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
   1196 		printf("%s: unmount pending error: blocks %" PRId64
   1197 		       " files %d\n",
   1198 		    fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
   1199 		fs->fs_pendingblocks = 0;
   1200 		fs->fs_pendinginodes = 0;
   1201 		penderr = 1;
   1202 	}
   1203 	mutex_exit(&ump->um_lock);
   1204 	if (fs->fs_ronly == 0 &&
   1205 	    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
   1206 	    fs->fs_clean & FS_WASCLEAN) {
   1207 		/*
   1208 		 * XXXX don't mark fs clean in the case of softdep
   1209 		 * pending block errors, until they are fixed.
   1210 		 */
   1211 		if (penderr == 0) {
   1212 			if (mp->mnt_flag & MNT_SOFTDEP)
   1213 				fs->fs_flags &= ~FS_DOSOFTDEP;
   1214 			fs->fs_clean = FS_ISCLEAN;
   1215 		}
   1216 		fs->fs_fmod = 0;
   1217 		(void) ffs_sbupdate(ump, MNT_WAIT);
   1218 	}
   1219 	if (ump->um_devvp->v_type != VBAD)
   1220 		ump->um_devvp->v_specmountpoint = NULL;
   1221 	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
   1222 	(void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
   1223 		NOCRED);
   1224 	vput(ump->um_devvp);
   1225 	free(fs->fs_csp, M_UFSMNT);
   1226 	free(fs, M_UFSMNT);
   1227 	if (ump->um_oldfscompat != NULL)
   1228 		free(ump->um_oldfscompat, M_UFSMNT);
   1229 	softdep_unmount(mp);
   1230 	mutex_destroy(&ump->um_lock);
   1231 	free(ump, M_UFSMNT);
   1232 	mp->mnt_data = NULL;
   1233 	mp->mnt_flag &= ~MNT_LOCAL;
   1234 	fstrans_unmount(mp);
   1235 	return (0);
   1236 }
   1237 
   1238 /*
   1239  * Flush out all the files in a filesystem.
   1240  */
   1241 int
   1242 ffs_flushfiles(struct mount *mp, int flags, struct lwp *l)
   1243 {
   1244 	extern int doforce;
   1245 	struct ufsmount *ump;
   1246 	int error;
   1247 
   1248 	if (!doforce)
   1249 		flags &= ~FORCECLOSE;
   1250 	ump = VFSTOUFS(mp);
   1251 #ifdef QUOTA
   1252 	if (mp->mnt_flag & MNT_QUOTA) {
   1253 		int i;
   1254 		if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
   1255 			return (error);
   1256 		for (i = 0; i < MAXQUOTAS; i++) {
   1257 			if (ump->um_quotas[i] == NULLVP)
   1258 				continue;
   1259 			quotaoff(l, mp, i);
   1260 		}
   1261 		/*
   1262 		 * Here we fall through to vflush again to ensure
   1263 		 * that we have gotten rid of all the system vnodes.
   1264 		 */
   1265 	}
   1266 #endif
   1267 	if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
   1268 		return (error);
   1269 	ffs_snapshot_unmount(mp);
   1270 	/*
   1271 	 * Flush all the files.
   1272 	 */
   1273 	error = vflush(mp, NULLVP, flags);
   1274 	if (error)
   1275 		return (error);
   1276 	/*
   1277 	 * Flush filesystem metadata.
   1278 	 */
   1279 	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
   1280 	error = VOP_FSYNC(ump->um_devvp, l->l_cred, FSYNC_WAIT, 0, 0);
   1281 	VOP_UNLOCK(ump->um_devvp, 0);
   1282 	return (error);
   1283 }
   1284 
   1285 /*
   1286  * Get file system statistics.
   1287  */
   1288 int
   1289 ffs_statvfs(struct mount *mp, struct statvfs *sbp)
   1290 {
   1291 	struct ufsmount *ump;
   1292 	struct fs *fs;
   1293 
   1294 	ump = VFSTOUFS(mp);
   1295 	fs = ump->um_fs;
   1296 	mutex_enter(&ump->um_lock);
   1297 	sbp->f_bsize = fs->fs_bsize;
   1298 	sbp->f_frsize = fs->fs_fsize;
   1299 	sbp->f_iosize = fs->fs_bsize;
   1300 	sbp->f_blocks = fs->fs_dsize;
   1301 	sbp->f_bfree = blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
   1302 		fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
   1303 	sbp->f_bresvd = ((u_int64_t) fs->fs_dsize * (u_int64_t)
   1304 	    fs->fs_minfree) / (u_int64_t) 100;
   1305 	if (sbp->f_bfree > sbp->f_bresvd)
   1306 		sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
   1307 	else
   1308 		sbp->f_bavail = 0;
   1309 	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
   1310 	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
   1311 	sbp->f_favail = sbp->f_ffree;
   1312 	sbp->f_fresvd = 0;
   1313 	mutex_exit(&ump->um_lock);
   1314 	copy_statvfs_info(sbp, mp);
   1315 
   1316 	return (0);
   1317 }
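
/*
 * Worked example for the reservation above: with fs_dsize = 1000000
 * fragments and the default fs_minfree of 5%, f_bresvd works out to
 * 1000000 * 5 / 100 = 50000 fragments, so unprivileged allocations see
 * f_bavail = f_bfree - 50000 while root may still dip into the reserve.
 */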
   1318 
   1319 /*
   1320  * Go through the disk queues to initiate sandbagged IO;
   1321  * go through the inodes to write those that have been modified;
   1322  * initiate the writing of the super block if it has been modified.
   1323  *
   1324  * Note: we are always called with the filesystem marked `MPBUSY'.
   1325  */
   1326 int
   1327 ffs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
   1328 {
   1329 	struct lwp *l = curlwp;
   1330 	struct vnode *vp, *mvp;
   1331 	struct inode *ip;
   1332 	struct ufsmount *ump = VFSTOUFS(mp);
   1333 	struct fs *fs;
   1334 	int error, count, allerror = 0;
   1335 
   1336 	fs = ump->um_fs;
   1337 	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
   1338 		printf("fs = %s\n", fs->fs_fsmnt);
   1339 		panic("update: rofs mod");
   1340 	}
   1341 
   1342 	/* Allocate a marker vnode. */
   1343 	if ((mvp = vnalloc(mp)) == NULL)
   1344 		return (ENOMEM);
   1345 
   1346 	fstrans_start(mp, FSTRANS_SHARED);
   1347 	/*
   1348 	 * Write back each (modified) inode.
   1349 	 */
   1350 	mutex_enter(&mntvnode_lock);
   1351 loop:
   1352 	/*
   1353 	 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
   1354 	 * and vclean() can be called indirectly
   1355 	 */
   1356 	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
   1357 		vmark(mvp, vp);
   1358 		/*
   1359 		 * If the vnode that we are about to sync is no longer
   1360 		 * associated with this mount point, start over.
   1361 		 */
   1362 		if (vp->v_mount != mp || vismarker(vp))
   1363 			continue;
   1364 		mutex_enter(&vp->v_interlock);
   1365 		ip = VTOI(vp);
   1366 		if (ip == NULL || (vp->v_iflag & (VI_XLOCK|VI_CLEAN)) != 0 ||
   1367 		    vp->v_type == VNON || ((ip->i_flag &
   1368 		    (IN_CHANGE | IN_UPDATE | IN_MODIFIED)) == 0 &&
   1369 		    LIST_EMPTY(&vp->v_dirtyblkhd) &&
   1370 		    UVM_OBJ_IS_CLEAN(&vp->v_uobj)))
   1371 		{
   1372 			mutex_exit(&vp->v_interlock);
   1373 			continue;
   1374 		}
   1375 		if (vp->v_type == VBLK &&
   1376 		    fstrans_getstate(mp) == FSTRANS_SUSPENDING) {
   1377 			mutex_exit(&vp->v_interlock);
   1378 			continue;
   1379 		}
   1380 		mutex_exit(&mntvnode_lock);
   1381 		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
   1382 		if (error) {
   1383 			mutex_enter(&mntvnode_lock);
   1384 			if (error == ENOENT) {
   1385 				(void)vunmark(mvp);
   1386 				goto loop;
   1387 			}
   1388 			continue;
   1389 		}
   1390 		if (vp->v_type == VREG && waitfor == MNT_LAZY)
   1391 			error = ffs_update(vp, NULL, NULL, 0);
   1392 		else
   1393 			error = VOP_FSYNC(vp, cred,
   1394 			    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0);
   1395 		if (error)
   1396 			allerror = error;
   1397 		vput(vp);
   1398 		mutex_enter(&mntvnode_lock);
   1399 	}
   1400 	mutex_exit(&mntvnode_lock);
   1401 	/*
   1402 	 * Force stale file system control information to be flushed.
   1403 	 */
   1404 	if (waitfor == MNT_WAIT && (ump->um_mountp->mnt_flag & MNT_SOFTDEP)) {
   1405 		if ((error = softdep_flushworklist(ump->um_mountp, &count, l)))
   1406 			allerror = error;
   1407 		/* Flushed work items may create new vnodes to clean */
   1408 		if (allerror == 0 && count) {
   1409 			mutex_enter(&mntvnode_lock);
   1410 			goto loop;
   1411 		}
   1412 	}
   1413 	if (waitfor != MNT_LAZY && (ump->um_devvp->v_numoutput > 0 ||
   1414 	    !LIST_EMPTY(&ump->um_devvp->v_dirtyblkhd))) {
   1415 		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
   1416 		if ((error = VOP_FSYNC(ump->um_devvp, cred,
   1417 		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0)) != 0)
   1418 			allerror = error;
   1419 		VOP_UNLOCK(ump->um_devvp, 0);
   1420 		if (allerror == 0 && waitfor == MNT_WAIT) {
   1421 			mutex_enter(&mntvnode_lock);
   1422 			goto loop;
   1423 		}
   1424 	}
   1425 #ifdef QUOTA
   1426 	qsync(mp);
   1427 #endif
   1428 	/*
   1429 	 * Write back modified superblock.
   1430 	 */
   1431 	if (fs->fs_fmod != 0) {
   1432 		fs->fs_fmod = 0;
   1433 		fs->fs_time = time_second;
   1434 		if ((error = ffs_cgupdate(ump, waitfor)))
   1435 			allerror = error;
   1436 	}
   1437 	fstrans_done(mp);
   1438 	vnfree(mvp);
   1439 	return (allerror);
   1440 }
   1441 
   1442 /*
   1443  * Look up a FFS dinode number to find its incore vnode, otherwise read it
   1444  * in from disk.  If it is in core, wait for the lock bit to clear, then
   1445  * return the inode locked.  Detection and handling of mount points must be
   1446  * done by the calling routine.
   1447  */
   1448 int
   1449 ffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
   1450 {
   1451 	struct fs *fs;
   1452 	struct inode *ip;
   1453 	struct ufsmount *ump;
   1454 	struct buf *bp;
   1455 	struct vnode *vp;
   1456 	dev_t dev;
   1457 	int error;
   1458 
   1459 	ump = VFSTOUFS(mp);
   1460 	dev = ump->um_dev;
   1461 
   1462  retry:
   1463 	if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
   1464 		return (0);
   1465 
   1466 	/* Allocate a new vnode/inode. */
   1467 	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
   1468 		*vpp = NULL;
   1469 		return (error);
   1470 	}
   1471 	ip = pool_cache_get(ffs_inode_cache, PR_WAITOK);
   1472 
   1473 	/*
   1474 	 * If someone beat us to it, put back the freshly allocated
   1475 	 * vnode/inode pair and retry.
   1476 	 */
   1477 	mutex_enter(&ufs_hashlock);
   1478 	if (ufs_ihashget(dev, ino, 0) != NULL) {
   1479 		mutex_exit(&ufs_hashlock);
   1480 		ungetnewvnode(vp);
   1481 		pool_cache_put(ffs_inode_cache, ip);
   1482 		goto retry;
   1483 	}
   1484 
   1485 	vp->v_vflag |= VV_LOCKSWORK;
   1486 	if ((mp->mnt_flag & MNT_SOFTDEP) != 0)
   1487 		vp->v_uflag |= VU_SOFTDEP;
   1488 
   1489 	/*
   1490 	 * XXX MFS ends up here, too, to allocate an inode.  Should we
   1491 	 * XXX create another pool for MFS inodes?
   1492 	 */
   1493 
   1494 	memset(ip, 0, sizeof(struct inode));
   1495 	vp->v_data = ip;
   1496 	ip->i_vnode = vp;
   1497 	ip->i_ump = ump;
   1498 	ip->i_fs = fs = ump->um_fs;
   1499 	ip->i_dev = dev;
   1500 	ip->i_number = ino;
   1501 	LIST_INIT(&ip->i_pcbufhd);
   1502 #ifdef QUOTA
   1503 	ufsquota_init(ip);
   1504 #endif
   1505 
   1506 	/*
   1507 	 * Initialize genfs node, we might proceed to destroy it in
   1508 	 * error branches.
   1509 	 */
   1510 	genfs_node_init(vp, &ffs_genfsops);
   1511 
   1512 	/*
   1513 	 * Put it onto its hash chain and lock it so that other requests for
   1514 	 * this inode will block if they arrive while we are sleeping waiting
   1515 	 * for old data structures to be purged or for the contents of the
   1516 	 * disk portion of this inode to be read.
   1517 	 */
   1518 
   1519 	ufs_ihashins(ip);
   1520 	mutex_exit(&ufs_hashlock);
   1521 
   1522 	/* Read in the disk contents for the inode, copy into the inode. */
   1523 	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
   1524 		      (int)fs->fs_bsize, NOCRED, &bp);
   1525 	if (error) {
   1526 
   1527 		/*
   1528 		 * The inode does not contain anything useful, so it would
   1529 		 * be misleading to leave it on its hash chain. With mode
   1530 		 * still zero, it will be unlinked and returned to the free
   1531 		 * list by vput().
   1532 		 */
   1533 
   1534 		vput(vp);
   1535 		brelse(bp, 0);
   1536 		*vpp = NULL;
   1537 		return (error);
   1538 	}
   1539 	if (ip->i_ump->um_fstype == UFS1)
   1540 		ip->i_din.ffs1_din = pool_cache_get(ffs_dinode1_cache,
   1541 		    PR_WAITOK);
   1542 	else
   1543 		ip->i_din.ffs2_din = pool_cache_get(ffs_dinode2_cache,
   1544 		    PR_WAITOK);
   1545 	ffs_load_inode(bp, ip, fs, ino);
   1546 	if (DOINGSOFTDEP(vp))
   1547 		softdep_load_inodeblock(ip);
   1548 	else
   1549 		ip->i_ffs_effnlink = ip->i_nlink;
   1550 	brelse(bp, 0);
   1551 
   1552 	/*
   1553 	 * Initialize the vnode from the inode, check for aliases.
   1554 	 * Note that the underlying vnode may have changed.
   1555 	 */
   1556 
   1557 	ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
   1558 
   1559 	/*
   1560 	 * Finish inode initialization now that aliasing has been resolved.
   1561 	 */
   1562 
   1563 	ip->i_devvp = ump->um_devvp;
   1564 	VREF(ip->i_devvp);
   1565 
   1566 	/*
   1567 	 * Ensure that uid and gid are correct. This is a temporary
   1568 	 * fix until fsck has been changed to do the update.
   1569 	 */
   1570 
   1571 	if (fs->fs_old_inodefmt < FS_44INODEFMT) {		/* XXX */
   1572 		ip->i_uid = ip->i_ffs1_ouid;			/* XXX */
   1573 		ip->i_gid = ip->i_ffs1_ogid;			/* XXX */
   1574 	}							/* XXX */
   1575 	uvm_vnp_setsize(vp, ip->i_size);
   1576 	*vpp = vp;
   1577 	return (0);
   1578 }
   1579 
   1580 /*
   1581  * File handle to vnode
   1582  *
   1583  * Have to be really careful about stale file handles:
   1584  * - check that the inode number is valid
   1585  * - call ffs_vget() to get the locked inode
   1586  * - check for an unallocated inode (i_mode == 0)
   1587  * - check that the given client host has export rights and return
    1588  *   those rights via exflagsp and credanonp
   1589  */
   1590 int
   1591 ffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
   1592 {
   1593 	struct ufid ufh;
   1594 	struct fs *fs;
   1595 
   1596 	if (fhp->fid_len != sizeof(struct ufid))
   1597 		return EINVAL;
   1598 
   1599 	memcpy(&ufh, fhp, sizeof(ufh));
   1600 	fs = VFSTOUFS(mp)->um_fs;
   1601 	if (ufh.ufid_ino < ROOTINO ||
   1602 	    ufh.ufid_ino >= fs->fs_ncg * fs->fs_ipg)
   1603 		return (ESTALE);
   1604 	return (ufs_fhtovp(mp, &ufh, vpp));
   1605 }
   1606 
   1607 /*
   1608  * Vnode pointer to File handle
   1609  */
   1610 /* ARGSUSED */
   1611 int
   1612 ffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
   1613 {
   1614 	struct inode *ip;
   1615 	struct ufid ufh;
   1616 
   1617 	if (*fh_size < sizeof(struct ufid)) {
   1618 		*fh_size = sizeof(struct ufid);
   1619 		return E2BIG;
   1620 	}
   1621 	ip = VTOI(vp);
   1622 	*fh_size = sizeof(struct ufid);
   1623 	memset(&ufh, 0, sizeof(ufh));
   1624 	ufh.ufid_len = sizeof(struct ufid);
   1625 	ufh.ufid_ino = ip->i_number;
   1626 	ufh.ufid_gen = ip->i_gen;
   1627 	memcpy(fhp, &ufh, sizeof(ufh));
   1628 	return (0);
   1629 }
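
/*
 * ffs_fhtovp() and ffs_vptofh() above are the two halves of the file
 * handle round trip used by the NFS server and by getfh(2)/fhopen(2):
 * the handle is just { ufid_len, ufid_ino, ufid_gen }, so it survives a
 * reboot but goes stale (ESTALE) once the inode is freed and reused with
 * a new generation number.  A rough userland sketch, assuming the
 * variable-size getfh(2)/fhopen(2) interfaces of this source's vintage
 * (buffer size picked arbitrarily):
 *
 *	char fh[128];
 *	size_t fhsize = sizeof(fh);
 *	int fd;
 *
 *	if (getfh("/mnt/somefile", fh, &fhsize) == -1)
 *		err(1, "getfh");
 *	if ((fd = fhopen(fh, fhsize, O_RDONLY)) == -1)
 *		err(1, "fhopen");
 */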
   1630 
   1631 void
   1632 ffs_init(void)
   1633 {
   1634 	if (ffs_initcount++ > 0)
   1635 		return;
   1636 
   1637 	ffs_inode_cache = pool_cache_init(sizeof(struct inode), 0, 0, 0,
   1638 	    "ffsino", NULL, IPL_NONE, NULL, NULL, NULL);
   1639 	ffs_dinode1_cache = pool_cache_init(sizeof(struct ufs1_dinode), 0, 0, 0,
   1640 	    "ffsdino1", NULL, IPL_NONE, NULL, NULL, NULL);
   1641 	ffs_dinode2_cache = pool_cache_init(sizeof(struct ufs2_dinode), 0, 0, 0,
   1642 	    "ffsdino2", NULL, IPL_NONE, NULL, NULL, NULL);
   1643 	softdep_initialize();
   1644 	ffs_snapshot_init();
   1645 	ufs_init();
   1646 }
   1647 
   1648 void
   1649 ffs_reinit(void)
   1650 {
   1651 	softdep_reinitialize();
   1652 	ufs_reinit();
   1653 }
   1654 
   1655 void
   1656 ffs_done(void)
   1657 {
   1658 	if (--ffs_initcount > 0)
   1659 		return;
   1660 
   1661 	/* XXX softdep cleanup ? */
   1662 	ffs_snapshot_fini();
   1663 	ufs_done();
   1664 	pool_cache_destroy(ffs_dinode2_cache);
   1665 	pool_cache_destroy(ffs_dinode1_cache);
   1666 	pool_cache_destroy(ffs_inode_cache);
   1667 }
   1668 
   1669 SYSCTL_SETUP(sysctl_vfs_ffs_setup, "sysctl vfs.ffs subtree setup")
   1670 {
   1671 #if 0
   1672 	extern int doasyncfree;
   1673 #endif
   1674 	extern int ffs_log_changeopt;
   1675 
   1676 	sysctl_createv(clog, 0, NULL, NULL,
   1677 		       CTLFLAG_PERMANENT,
   1678 		       CTLTYPE_NODE, "vfs", NULL,
   1679 		       NULL, 0, NULL, 0,
   1680 		       CTL_VFS, CTL_EOL);
   1681 	sysctl_createv(clog, 0, NULL, NULL,
   1682 		       CTLFLAG_PERMANENT,
   1683 		       CTLTYPE_NODE, "ffs",
   1684 		       SYSCTL_DESCR("Berkeley Fast File System"),
   1685 		       NULL, 0, NULL, 0,
   1686 		       CTL_VFS, 1, CTL_EOL);
   1687 
   1688 	/*
   1689 	 * @@@ should we even bother with these first three?
   1690 	 */
   1691 	sysctl_createv(clog, 0, NULL, NULL,
   1692 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1693 		       CTLTYPE_INT, "doclusterread", NULL,
   1694 		       sysctl_notavail, 0, NULL, 0,
   1695 		       CTL_VFS, 1, FFS_CLUSTERREAD, CTL_EOL);
   1696 	sysctl_createv(clog, 0, NULL, NULL,
   1697 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1698 		       CTLTYPE_INT, "doclusterwrite", NULL,
   1699 		       sysctl_notavail, 0, NULL, 0,
   1700 		       CTL_VFS, 1, FFS_CLUSTERWRITE, CTL_EOL);
   1701 	sysctl_createv(clog, 0, NULL, NULL,
   1702 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1703 		       CTLTYPE_INT, "doreallocblks", NULL,
   1704 		       sysctl_notavail, 0, NULL, 0,
   1705 		       CTL_VFS, 1, FFS_REALLOCBLKS, CTL_EOL);
   1706 #if 0
   1707 	sysctl_createv(clog, 0, NULL, NULL,
   1708 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1709 		       CTLTYPE_INT, "doasyncfree",
   1710 		       SYSCTL_DESCR("Release dirty blocks asynchronously"),
   1711 		       NULL, 0, &doasyncfree, 0,
   1712 		       CTL_VFS, 1, FFS_ASYNCFREE, CTL_EOL);
   1713 #endif
   1714 	sysctl_createv(clog, 0, NULL, NULL,
   1715 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1716 		       CTLTYPE_INT, "log_changeopt",
   1717 		       SYSCTL_DESCR("Log changes in optimization strategy"),
   1718 		       NULL, 0, &ffs_log_changeopt, 0,
   1719 		       CTL_VFS, 1, FFS_LOG_CHANGEOPT, CTL_EOL);
   1720 }
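
/*
 * The writable leaf above appears as vfs.ffs.log_changeopt, so e.g.
 *
 *	# sysctl -w vfs.ffs.log_changeopt=1
 *
 * asks the allocator to log a message whenever a filesystem switches
 * between space and time optimization.
 */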
   1721 
   1722 /*
   1723  * Write a superblock and associated information back to disk.
   1724  */
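/*
 * The superblock lives at byte offset fs_sblockloc, while getblk() wants
 * a DEV_BSIZE block number; fs_fshift - fs_fsbtodb is exactly the shift
 * from bytes to DEV_BSIZE units.  For example, with 2 KB fragments
 * (fs_fshift = 11, fs_fsbtodb = 2) and fs_sblockloc = 65536, the block
 * number below is 65536 >> 9 = 128, i.e. 64 KB / 512.
 */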
   1725 int
   1726 ffs_sbupdate(struct ufsmount *mp, int waitfor)
   1727 {
   1728 	struct fs *fs = mp->um_fs;
   1729 	struct buf *bp;
   1730 	int error = 0;
   1731 	u_int32_t saveflag;
   1732 
   1733 	bp = getblk(mp->um_devvp,
   1734 	    fs->fs_sblockloc >> (fs->fs_fshift - fs->fs_fsbtodb),
   1735 	    (int)fs->fs_sbsize, 0, 0);
   1736 	saveflag = fs->fs_flags & FS_INTERNAL;
   1737 	fs->fs_flags &= ~FS_INTERNAL;
   1738 
   1739 	memcpy(bp->b_data, fs, fs->fs_sbsize);
   1740 
   1741 	ffs_oldfscompat_write((struct fs *)bp->b_data, mp);
   1742 #ifdef FFS_EI
   1743 	if (mp->um_flags & UFS_NEEDSWAP)
   1744 		ffs_sb_swap((struct fs *)bp->b_data, (struct fs *)bp->b_data);
   1745 #endif
   1746 	fs->fs_flags |= saveflag;
   1747 
   1748 	if (waitfor == MNT_WAIT)
   1749 		error = bwrite(bp);
   1750 	else
   1751 		bawrite(bp);
   1752 	return (error);
   1753 }
   1754 
   1755 int
   1756 ffs_cgupdate(struct ufsmount *mp, int waitfor)
   1757 {
   1758 	struct fs *fs = mp->um_fs;
   1759 	struct buf *bp;
   1760 	int blks;
   1761 	void *space;
   1762 	int i, size, error = 0, allerror = 0;
   1763 
   1764 	allerror = ffs_sbupdate(mp, waitfor);
   1765 	blks = howmany(fs->fs_cssize, fs->fs_fsize);
   1766 	space = fs->fs_csp;
   1767 	for (i = 0; i < blks; i += fs->fs_frag) {
   1768 		size = fs->fs_bsize;
   1769 		if (i + fs->fs_frag > blks)
   1770 			size = (blks - i) * fs->fs_fsize;
   1771 		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
   1772 		    size, 0, 0);
   1773 #ifdef FFS_EI
   1774 		if (mp->um_flags & UFS_NEEDSWAP)
   1775 			ffs_csum_swap((struct csum*)space,
   1776 			    (struct csum*)bp->b_data, size);
   1777 		else
   1778 #endif
   1779 			memcpy(bp->b_data, space, (u_int)size);
   1780 		space = (char *)space + size;
   1781 		if (waitfor == MNT_WAIT)
   1782 			error = bwrite(bp);
   1783 		else
   1784 			bawrite(bp);
   1785 	}
   1786 	if (!allerror && error)
   1787 		allerror = error;
   1788 	return (allerror);
   1789 }
   1790 
   1791 int
   1792 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
   1793     int attrnamespace, const char *attrname)
   1794 {
   1795 #ifdef UFS_EXTATTR
   1796 	/*
   1797 	 * File-backed extended attributes are only supported on UFS1.
   1798 	 * UFS2 has native extended attributes.
   1799 	 */
   1800 	if (VFSTOUFS(mp)->um_fstype == UFS1)
   1801 		return (ufs_extattrctl(mp, cmd, vp, attrnamespace, attrname));
   1802 #endif
   1803 	return (vfs_stdextattrctl(mp, cmd, vp, attrnamespace, attrname));
   1804 }
   1805 
   1806 int
   1807 ffs_suspendctl(struct mount *mp, int cmd)
   1808 {
   1809 	int error;
   1810 	struct lwp *l = curlwp;
   1811 
   1812 	switch (cmd) {
   1813 	case SUSPEND_SUSPEND:
   1814 		if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
   1815 			return error;
   1816 		error = ffs_sync(mp, MNT_WAIT, l->l_proc->p_cred);
   1817 		if (error == 0)
   1818 			error = fstrans_setstate(mp, FSTRANS_SUSPENDED);
   1819 		if (error != 0) {
   1820 			(void) fstrans_setstate(mp, FSTRANS_NORMAL);
   1821 			return error;
   1822 		}
   1823 		return 0;
   1824 
   1825 	case SUSPEND_RESUME:
   1826 		return fstrans_setstate(mp, FSTRANS_NORMAL);
   1827 
   1828 	default:
   1829 		return EINVAL;
   1830 	}
   1831 }
   1832