      1 /*	$NetBSD: ffs_vfsops.c,v 1.215 2008/01/03 01:26:32 pooka Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1989, 1991, 1993, 1994
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. Neither the name of the University nor the names of its contributors
     16  *    may be used to endorse or promote products derived from this software
     17  *    without specific prior written permission.
     18  *
     19  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     20  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     21  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     22  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     23  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     24  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     25  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     26  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     27  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     28  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     29  * SUCH DAMAGE.
     30  *
     31  *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
     32  */
     33 
     34 #include <sys/cdefs.h>
     35 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.215 2008/01/03 01:26:32 pooka Exp $");
     36 
     37 #if defined(_KERNEL_OPT)
     38 #include "opt_ffs.h"
     39 #include "opt_quota.h"
     40 #include "opt_softdep.h"
     41 #endif
     42 
     43 #include <sys/param.h>
     44 #include <sys/systm.h>
     45 #include <sys/namei.h>
     46 #include <sys/proc.h>
     47 #include <sys/kernel.h>
     48 #include <sys/vnode.h>
     49 #include <sys/socket.h>
     50 #include <sys/mount.h>
     51 #include <sys/buf.h>
     52 #include <sys/device.h>
     53 #include <sys/mbuf.h>
     54 #include <sys/file.h>
     55 #include <sys/disklabel.h>
     56 #include <sys/ioctl.h>
     57 #include <sys/errno.h>
     58 #include <sys/malloc.h>
     59 #include <sys/pool.h>
     60 #include <sys/lock.h>
     61 #include <sys/sysctl.h>
     62 #include <sys/conf.h>
     63 #include <sys/kauth.h>
     64 #include <sys/fstrans.h>
     65 
     66 #include <miscfs/specfs/specdev.h>
     67 
     68 #include <ufs/ufs/quota.h>
     69 #include <ufs/ufs/ufsmount.h>
     70 #include <ufs/ufs/inode.h>
     71 #include <ufs/ufs/dir.h>
     72 #include <ufs/ufs/ufs_extern.h>
     73 #include <ufs/ufs/ufs_bswap.h>
     74 
     75 #include <ufs/ffs/fs.h>
     76 #include <ufs/ffs/ffs_extern.h>
     77 
     78 /* how many times ffs_init() was called */
     79 int ffs_initcount = 0;
     80 
     81 extern kmutex_t ufs_hashlock;
     82 
     83 extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
     84 extern const struct vnodeopv_desc ffs_specop_opv_desc;
     85 extern const struct vnodeopv_desc ffs_fifoop_opv_desc;
     86 
     87 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
     88 	&ffs_vnodeop_opv_desc,
     89 	&ffs_specop_opv_desc,
     90 	&ffs_fifoop_opv_desc,
     91 	NULL,
     92 };
     93 
     94 struct vfsops ffs_vfsops = {
     95 	MOUNT_FFS,
     96 	sizeof (struct ufs_args),
     97 	ffs_mount,
     98 	ufs_start,
     99 	ffs_unmount,
    100 	ufs_root,
    101 	ufs_quotactl,
    102 	ffs_statvfs,
    103 	ffs_sync,
    104 	ffs_vget,
    105 	ffs_fhtovp,
    106 	ffs_vptofh,
    107 	ffs_init,
    108 	ffs_reinit,
    109 	ffs_done,
    110 	ffs_mountroot,
    111 	ffs_snapshot,
    112 	ffs_extattrctl,
    113 	ffs_suspendctl,
    114 	ffs_vnodeopv_descs,
    115 	0,
    116 	{ NULL, NULL },
    117 };
    118 VFS_ATTACH(ffs_vfsops);
    119 
    120 static const struct genfs_ops ffs_genfsops = {
    121 	.gop_size = ffs_gop_size,
    122 	.gop_alloc = ufs_gop_alloc,
    123 	.gop_write = genfs_gop_write,
    124 	.gop_markupdate = ufs_gop_markupdate,
    125 };
    126 
    127 static const struct ufs_ops ffs_ufsops = {
    128 	.uo_itimes = ffs_itimes,
    129 	.uo_update = ffs_update,
    130 	.uo_truncate = ffs_truncate,
    131 	.uo_valloc = ffs_valloc,
    132 	.uo_vfree = ffs_vfree,
    133 	.uo_balloc = ffs_balloc,
    134 };
    135 
    136 struct pool ffs_inode_pool;
    137 struct pool ffs_dinode1_pool;
    138 struct pool ffs_dinode2_pool;
    139 
    140 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, daddr_t);
    141 static void ffs_oldfscompat_write(struct fs *, struct ufsmount *);
    142 
    143 /*
    144  * Called by main() when ffs is going to be mounted as root.
    145  */
    146 
    147 int
    148 ffs_mountroot(void)
    149 {
    150 	struct fs *fs;
    151 	struct mount *mp;
    152 	struct lwp *l = curlwp;			/* XXX */
    153 	struct ufsmount *ump;
    154 	int error;
    155 
    156 	if (device_class(root_device) != DV_DISK)
    157 		return (ENODEV);
    158 
    159 	if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
    160 		vrele(rootvp);
    161 		return (error);
    162 	}
    163 	if ((error = ffs_mountfs(rootvp, mp, l)) != 0) {
    164 		mp->mnt_op->vfs_refcount--;
    165 		vfs_unbusy(mp);
    166 		vfs_destroy(mp);
    167 		return (error);
    168 	}
    169 	mutex_enter(&mountlist_lock);
    170 	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
    171 	mutex_exit(&mountlist_lock);
    172 	ump = VFSTOUFS(mp);
    173 	fs = ump->um_fs;
    174 	memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
    175 	(void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
    176 	(void)ffs_statvfs(mp, &mp->mnt_stat);
    177 	vfs_unbusy(mp);
    178 	setrootfstime((time_t)fs->fs_time);
    179 	return (0);
    180 }
    181 
    182 /*
    183  * VFS Operations.
    184  *
    185  * mount system call
    186  */
    187 int
    188 ffs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
    189 {
    190 	struct lwp *l = curlwp;
    191 	struct nameidata nd;
    192 	struct vnode *devvp = NULL;
    193 	struct ufs_args *args = data;
    194 	struct ufsmount *ump = NULL;
    195 	struct fs *fs;
    196 	int error = 0, flags, update;
    197 	mode_t accessmode;
    198 
    199 	if (*data_len < sizeof *args)
    200 		return EINVAL;
    201 
    202 	if (mp->mnt_flag & MNT_GETARGS) {
    203 		ump = VFSTOUFS(mp);
    204 		if (ump == NULL)
    205 			return EIO;
    206 		args->fspec = NULL;
    207 		*data_len = sizeof *args;
    208 		return 0;
    209 	}
    210 
    211 #if !defined(SOFTDEP)
    212 	mp->mnt_flag &= ~MNT_SOFTDEP;
    213 #endif
    214 
    215 	update = mp->mnt_flag & MNT_UPDATE;
    216 
    217 	/* Check arguments */
    218 	if (args->fspec != NULL) {
    219 		/*
    220 		 * Look up the name and verify that it's sane.
    221 		 */
    222 		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, args->fspec);
    223 		if ((error = namei(&nd)) != 0)
    224 			return (error);
    225 		devvp = nd.ni_vp;
    226 
    227 		if (!update) {
    228 			/*
    229 			 * Be sure this is a valid block device
    230 			 */
    231 			if (devvp->v_type != VBLK)
    232 				error = ENOTBLK;
    233 			else if (bdevsw_lookup(devvp->v_rdev) == NULL)
    234 				error = ENXIO;
    235 		} else {
    236 			/*
    237 			 * Be sure we're still naming the same device
    238 			 * used for our initial mount
    239 			 */
    240 			ump = VFSTOUFS(mp);
    241 			if (devvp != ump->um_devvp) {
    242 				if (devvp->v_rdev != ump->um_devvp->v_rdev)
    243 					error = EINVAL;
    244 				else {
    245 					vrele(devvp);
    246 					devvp = ump->um_devvp;
    247 					vref(devvp);
    248 				}
    249 			}
    250 		}
    251 	} else {
    252 		if (!update) {
    253 			/* New mounts must have a filename for the device */
    254 			return (EINVAL);
    255 		} else {
    256 			/* Use the extant mount */
    257 			ump = VFSTOUFS(mp);
    258 			devvp = ump->um_devvp;
    259 			vref(devvp);
    260 		}
    261 	}
    262 
    263 	/*
    264 	 * If mount by non-root, then verify that user has necessary
    265 	 * permissions on the device.
    266 	 */
    267 	if (error == 0 && kauth_authorize_generic(l->l_cred,
    268 	    KAUTH_GENERIC_ISSUSER, NULL) != 0) {
    269 		accessmode = VREAD;
    270 		if (update ?
    271 		    (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
    272 		    (mp->mnt_flag & MNT_RDONLY) == 0)
    273 			accessmode |= VWRITE;
    274 		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    275 		error = VOP_ACCESS(devvp, accessmode, l->l_cred);
    276 		VOP_UNLOCK(devvp, 0);
    277 	}
    278 
    279 	if (error) {
    280 		vrele(devvp);
    281 		return (error);
    282 	}
    283 
    284 	if (!update) {
    285 		int xflags;
    286 
    287 		/*
    288 		 * Disallow multiple mounts of the same device.
    289 		 * Disallow mounting of a device that is currently in use
     290 		 * (except for root, which might share the swap device
     291 		 * for a miniroot).
    292 		 */
    293 		error = vfs_mountedon(devvp);
    294 		if (error)
    295 			goto fail;
    296 		if (vcount(devvp) > 1 && devvp != rootvp) {
    297 			error = EBUSY;
    298 			goto fail;
    299 		}
    300 		if (mp->mnt_flag & MNT_RDONLY)
    301 			xflags = FREAD;
    302 		else
    303 			xflags = FREAD|FWRITE;
    304 		error = VOP_OPEN(devvp, xflags, FSCRED);
    305 		if (error)
    306 			goto fail;
    307 		error = ffs_mountfs(devvp, mp, l);
    308 		if (error) {
    309 			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    310 			(void)VOP_CLOSE(devvp, xflags, NOCRED);
    311 			VOP_UNLOCK(devvp, 0);
    312 			goto fail;
    313 		}
    314 
    315 		ump = VFSTOUFS(mp);
    316 		fs = ump->um_fs;
    317 		if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
    318 		    (MNT_SOFTDEP | MNT_ASYNC)) {
    319 			printf("%s fs uses soft updates, "
    320 			    "ignoring async mode\n",
    321 			    fs->fs_fsmnt);
    322 			mp->mnt_flag &= ~MNT_ASYNC;
    323 		}
    324 	} else {
    325 		/*
    326 		 * Update the mount.
    327 		 */
    328 
    329 		/*
    330 		 * The initial mount got a reference on this
    331 		 * device, so drop the one obtained via
    332 		 * namei(), above.
    333 		 */
    334 		vrele(devvp);
    335 
    336 		ump = VFSTOUFS(mp);
    337 		fs = ump->um_fs;
    338 		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
    339 			/*
    340 			 * Changing from r/w to r/o
    341 			 */
    342 			flags = WRITECLOSE;
    343 			if (mp->mnt_flag & MNT_FORCE)
    344 				flags |= FORCECLOSE;
    345 			if (mp->mnt_flag & MNT_SOFTDEP)
    346 				error = softdep_flushfiles(mp, flags, l);
    347 			else
    348 				error = ffs_flushfiles(mp, flags, l);
    349 			if (fs->fs_pendingblocks != 0 ||
    350 			    fs->fs_pendinginodes != 0) {
    351 				printf("%s: update error: blocks %" PRId64
    352 				       " files %d\n",
    353 				    fs->fs_fsmnt, fs->fs_pendingblocks,
    354 				    fs->fs_pendinginodes);
    355 				fs->fs_pendingblocks = 0;
    356 				fs->fs_pendinginodes = 0;
    357 			}
    358 			if (error == 0 &&
    359 			    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
    360 			    fs->fs_clean & FS_WASCLEAN) {
    361 				if (mp->mnt_flag & MNT_SOFTDEP)
    362 					fs->fs_flags &= ~FS_DOSOFTDEP;
    363 				fs->fs_clean = FS_ISCLEAN;
    364 				(void) ffs_sbupdate(ump, MNT_WAIT);
    365 			}
    366 			if (error)
    367 				return (error);
    368 			fs->fs_ronly = 1;
    369 			fs->fs_fmod = 0;
    370 		}
    371 
    372 		/*
     373 		 * Flush soft dependencies if disabling them via an update
    374 		 * mount. This may leave some items to be processed,
    375 		 * so don't do this yet XXX.
    376 		 */
    377 		if ((fs->fs_flags & FS_DOSOFTDEP) &&
    378 		    !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
    379 #ifdef notyet
    380 			flags = WRITECLOSE;
    381 			if (mp->mnt_flag & MNT_FORCE)
    382 				flags |= FORCECLOSE;
    383 			error = softdep_flushfiles(mp, flags, l);
     384 			if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0) {
     385 				fs->fs_flags &= ~FS_DOSOFTDEP;
     386 				(void) ffs_sbupdate(ump, MNT_WAIT);
         			}
    387 #elif defined(SOFTDEP)
    388 			mp->mnt_flag |= MNT_SOFTDEP;
    389 #endif
    390 		}
    391 
    392 		/*
    393 		 * When upgrading to a softdep mount, we must first flush
    394 		 * all vnodes. (not done yet -- see above)
    395 		 */
    396 		if (!(fs->fs_flags & FS_DOSOFTDEP) &&
    397 		    (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
    398 #ifdef notyet
    399 			flags = WRITECLOSE;
    400 			if (mp->mnt_flag & MNT_FORCE)
    401 				flags |= FORCECLOSE;
    402 			error = ffs_flushfiles(mp, flags, l);
    403 #else
    404 			mp->mnt_flag &= ~MNT_SOFTDEP;
    405 #endif
    406 		}
    407 
    408 		if (mp->mnt_flag & MNT_RELOAD) {
    409 			error = ffs_reload(mp, l->l_cred, l);
    410 			if (error)
    411 				return (error);
    412 		}
    413 
    414 		if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
    415 			/*
    416 			 * Changing from read-only to read/write
    417 			 */
    418 			fs->fs_ronly = 0;
    419 			fs->fs_clean <<= 1;
    420 			fs->fs_fmod = 1;
    421 			if ((fs->fs_flags & FS_DOSOFTDEP)) {
    422 				error = softdep_mount(devvp, mp, fs,
    423 				    l->l_cred);
    424 				if (error)
    425 					return (error);
    426 			}
    427 			if (fs->fs_snapinum[0] != 0)
    428 				ffs_snapshot_mount(mp);
    429 		}
    430 		if (args->fspec == NULL)
    431 			return EINVAL;
    432 		if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
    433 		    (MNT_SOFTDEP | MNT_ASYNC)) {
    434 			printf("%s fs uses soft updates, ignoring async mode\n",
    435 			    fs->fs_fsmnt);
    436 			mp->mnt_flag &= ~MNT_ASYNC;
    437 		}
    438 	}
    439 
    440 	error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
    441 	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
    442 	if (error == 0)
    443 		(void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
    444 		    sizeof(fs->fs_fsmnt));
    445 	if (mp->mnt_flag & MNT_SOFTDEP)
    446 		fs->fs_flags |= FS_DOSOFTDEP;
    447 	else
    448 		fs->fs_flags &= ~FS_DOSOFTDEP;
    449 	if (fs->fs_fmod != 0) {	/* XXX */
    450 		fs->fs_fmod = 0;
    451 		if (fs->fs_clean & FS_WASCLEAN)
    452 			fs->fs_time = time_second;
    453 		else {
    454 			printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
    455 			    mp->mnt_stat.f_mntfromname, fs->fs_clean);
    456 			printf("%s: lost blocks %" PRId64 " files %d\n",
    457 			    mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
    458 			    fs->fs_pendinginodes);
    459 		}
    460 		(void) ffs_cgupdate(ump, MNT_WAIT);
    461 	}
    462 	return (error);
    463 
    464 fail:
    465 	vrele(devvp);
    466 	return (error);
    467 }
    468 
    469 /*
    470  * Reload all incore data for a filesystem (used after running fsck on
    471  * the root filesystem and finding things to fix). The filesystem must
    472  * be mounted read-only.
    473  *
    474  * Things to do to update the mount:
    475  *	1) invalidate all cached meta-data.
    476  *	2) re-read superblock from disk.
    477  *	3) re-read summary information from disk.
    478  *	4) invalidate all inactive vnodes.
    479  *	5) invalidate all cached file data.
    480  *	6) re-read inode data for all active vnodes.
    481  */
    482 int
    483 ffs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
    484 {
    485 	struct vnode *vp, *mvp, *devvp;
    486 	struct inode *ip;
    487 	void *space;
    488 	struct buf *bp;
    489 	struct fs *fs, *newfs;
    490 	struct partinfo dpart;
    491 	int i, blks, size, error;
    492 	int32_t *lp;
    493 	struct ufsmount *ump;
    494 	daddr_t sblockloc;
    495 
    496 	if ((mp->mnt_flag & MNT_RDONLY) == 0)
    497 		return (EINVAL);
    498 
    499 	ump = VFSTOUFS(mp);
    500 	/*
    501 	 * Step 1: invalidate all cached meta-data.
    502 	 */
    503 	devvp = ump->um_devvp;
    504 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    505 	error = vinvalbuf(devvp, 0, cred, l, 0, 0);
    506 	VOP_UNLOCK(devvp, 0);
    507 	if (error)
    508 		panic("ffs_reload: dirty1");
    509 	/*
    510 	 * Step 2: re-read superblock from disk.
    511 	 */
    512 	fs = ump->um_fs;
    513 	if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, NOCRED) != 0)
    514 		size = DEV_BSIZE;
    515 	else
    516 		size = dpart.disklab->d_secsize;
    517 	/* XXX we don't handle possibility that superblock moved. */
    518 	error = bread(devvp, fs->fs_sblockloc / size, fs->fs_sbsize,
    519 		      NOCRED, &bp);
    520 	if (error) {
    521 		brelse(bp, 0);
    522 		return (error);
    523 	}
    524 	newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
    525 	memcpy(newfs, bp->b_data, fs->fs_sbsize);
    526 #ifdef FFS_EI
    527 	if (ump->um_flags & UFS_NEEDSWAP) {
    528 		ffs_sb_swap((struct fs*)bp->b_data, newfs);
    529 		fs->fs_flags |= FS_SWAPPED;
    530 	} else
    531 #endif
    532 		fs->fs_flags &= ~FS_SWAPPED;
    533 	if ((newfs->fs_magic != FS_UFS1_MAGIC &&
    534 	     newfs->fs_magic != FS_UFS2_MAGIC)||
    535 	     newfs->fs_bsize > MAXBSIZE ||
    536 	     newfs->fs_bsize < sizeof(struct fs)) {
    537 		brelse(bp, 0);
    538 		free(newfs, M_UFSMNT);
    539 		return (EIO);		/* XXX needs translation */
    540 	}
     541 	/* Store off old fs_sblockloc for ffs_oldfscompat_read. */
    542 	sblockloc = fs->fs_sblockloc;
    543 	/*
    544 	 * Copy pointer fields back into superblock before copying in	XXX
    545 	 * new superblock. These should really be in the ufsmount.	XXX
    546 	 * Note that important parameters (eg fs_ncg) are unchanged.
    547 	 */
    548 	newfs->fs_csp = fs->fs_csp;
    549 	newfs->fs_maxcluster = fs->fs_maxcluster;
    550 	newfs->fs_contigdirs = fs->fs_contigdirs;
    551 	newfs->fs_ronly = fs->fs_ronly;
    552 	newfs->fs_active = fs->fs_active;
    553 	memcpy(fs, newfs, (u_int)fs->fs_sbsize);
    554 	brelse(bp, 0);
    555 	free(newfs, M_UFSMNT);
    556 
    557 	/* Recheck for apple UFS filesystem */
    558 	ump->um_flags &= ~UFS_ISAPPLEUFS;
    559 	/* First check to see if this is tagged as an Apple UFS filesystem
    560 	 * in the disklabel
    561 	 */
    562 	if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) == 0) &&
    563 		(dpart.part->p_fstype == FS_APPLEUFS)) {
    564 		ump->um_flags |= UFS_ISAPPLEUFS;
    565 	}
    566 #ifdef APPLE_UFS
    567 	else {
    568 		/* Manually look for an apple ufs label, and if a valid one
    569 		 * is found, then treat it like an Apple UFS filesystem anyway
    570 		 */
    571 		error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
    572 			APPLEUFS_LABEL_SIZE, cred, &bp);
    573 		if (error) {
    574 			brelse(bp, 0);
    575 			return (error);
    576 		}
    577 		error = ffs_appleufs_validate(fs->fs_fsmnt,
    578 			(struct appleufslabel *)bp->b_data,NULL);
    579 		if (error == 0)
    580 			ump->um_flags |= UFS_ISAPPLEUFS;
    581 		brelse(bp, 0);
    582 		bp = NULL;
    583 	}
    584 #else
    585 	if (ump->um_flags & UFS_ISAPPLEUFS)
    586 		return (EIO);
    587 #endif
    588 
    589 	if (UFS_MPISAPPLEUFS(ump)) {
    590 		/* see comment about NeXT below */
    591 		ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
    592 		ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
    593 		mp->mnt_iflag |= IMNT_DTYPE;
    594 	} else {
    595 		ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
    596 		ump->um_dirblksiz = DIRBLKSIZ;
    597 		if (ump->um_maxsymlinklen > 0)
    598 			mp->mnt_iflag |= IMNT_DTYPE;
    599 		else
    600 			mp->mnt_iflag &= ~IMNT_DTYPE;
    601 	}
    602 	ffs_oldfscompat_read(fs, ump, sblockloc);
    603 	mutex_enter(&ump->um_lock);
    604 	ump->um_maxfilesize = fs->fs_maxfilesize;
    605 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
    606 		fs->fs_pendingblocks = 0;
    607 		fs->fs_pendinginodes = 0;
    608 	}
    609 	mutex_exit(&ump->um_lock);
    610 
    611 	ffs_statvfs(mp, &mp->mnt_stat);
    612 	/*
    613 	 * Step 3: re-read summary information from disk.
    614 	 */
    615 	blks = howmany(fs->fs_cssize, fs->fs_fsize);
    616 	space = fs->fs_csp;
    617 	for (i = 0; i < blks; i += fs->fs_frag) {
    618 		size = fs->fs_bsize;
    619 		if (i + fs->fs_frag > blks)
    620 			size = (blks - i) * fs->fs_fsize;
    621 		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
    622 			      NOCRED, &bp);
    623 		if (error) {
    624 			brelse(bp, 0);
    625 			return (error);
    626 		}
    627 #ifdef FFS_EI
    628 		if (UFS_FSNEEDSWAP(fs))
    629 			ffs_csum_swap((struct csum *)bp->b_data,
    630 			    (struct csum *)space, size);
    631 		else
    632 #endif
    633 			memcpy(space, bp->b_data, (size_t)size);
    634 		space = (char *)space + size;
    635 		brelse(bp, 0);
    636 	}
    637 	if ((fs->fs_flags & FS_DOSOFTDEP))
    638 		softdep_mount(devvp, mp, fs, cred);
    639 	if (fs->fs_snapinum[0] != 0)
    640 		ffs_snapshot_mount(mp);
    641 	/*
    642 	 * We no longer know anything about clusters per cylinder group.
    643 	 */
    644 	if (fs->fs_contigsumsize > 0) {
    645 		lp = fs->fs_maxcluster;
    646 		for (i = 0; i < fs->fs_ncg; i++)
    647 			*lp++ = fs->fs_contigsumsize;
    648 	}
    649 
    650 	/* Allocate a marker vnode. */
    651 	if ((mvp = vnalloc(mp)) == NULL)
    652 		return ENOMEM;
    653 	/*
    654 	 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
    655 	 * and vclean() can be called indirectly
    656 	 */
    657 	mutex_enter(&mntvnode_lock);
    658  loop:
    659 	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
    660 		vmark(mvp, vp);
    661 		if (vp->v_mount != mp || vismarker(vp))
    662 			continue;
    663 		/*
    664 		 * Step 4: invalidate all inactive vnodes.
    665 		 */
    666 		if (vrecycle(vp, &mntvnode_lock, l)) {
    667 			mutex_enter(&mntvnode_lock);
    668 			(void)vunmark(mvp);
    669 			goto loop;
    670 		}
    671 		/*
    672 		 * Step 5: invalidate all cached file data.
    673 		 */
    674 		mutex_enter(&vp->v_interlock);
    675 		mutex_exit(&mntvnode_lock);
    676 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
    677 			(void)vunmark(mvp);
    678 			goto loop;
    679 		}
    680 		if (vinvalbuf(vp, 0, cred, l, 0, 0))
    681 			panic("ffs_reload: dirty2");
    682 		/*
    683 		 * Step 6: re-read inode data for all active vnodes.
    684 		 */
    685 		ip = VTOI(vp);
    686 		error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
    687 			      (int)fs->fs_bsize, NOCRED, &bp);
    688 		if (error) {
    689 			brelse(bp, 0);
    690 			vput(vp);
    691 			(void)vunmark(mvp);
    692 			break;
    693 		}
    694 		ffs_load_inode(bp, ip, fs, ip->i_number);
    695 		ip->i_ffs_effnlink = ip->i_nlink;
    696 		brelse(bp, 0);
    697 		vput(vp);
    698 		mutex_enter(&mntvnode_lock);
    699 	}
    700 	mutex_exit(&mntvnode_lock);
    701 	vnfree(mvp);
    702 	return (error);
    703 }
    704 
    705 /*
    706  * Possible superblock locations ordered from most to least likely.
    707  */
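         /*
          * The array from SBLOCKSEARCH is terminated by -1; the search loop
          * in ffs_mountfs() below stops when it reaches that sentinel.
          */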
    708 static const int sblock_try[] = SBLOCKSEARCH;
    709 
    710 /*
    711  * Common code for mount and mountroot
    712  */
    713 int
    714 ffs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
    715 {
    716 	struct ufsmount *ump;
    717 	struct buf *bp;
    718 	struct fs *fs;
    719 	dev_t dev;
    720 	struct partinfo dpart;
    721 	void *space;
    722 	daddr_t sblockloc, fsblockloc;
    723 	int blks, fstype;
    724 	int error, i, size, ronly, bset = 0;
    725 #ifdef FFS_EI
    726 	int needswap = 0;		/* keep gcc happy */
    727 #endif
    728 	int32_t *lp;
    729 	kauth_cred_t cred;
    730 	u_int32_t sbsize = 8192;	/* keep gcc happy*/
    731 
    732 	dev = devvp->v_rdev;
    733 	cred = l ? l->l_cred : NOCRED;
    734 
    735 	/* Flush out any old buffers remaining from a previous use. */
    736 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    737 	error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
    738 	VOP_UNLOCK(devvp, 0);
    739 	if (error)
    740 		return (error);
    741 
    742 	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
    743 	if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) != 0)
    744 		size = DEV_BSIZE;
    745 	else
    746 		size = dpart.disklab->d_secsize;
    747 
    748 	bp = NULL;
    749 	ump = NULL;
    750 	fs = NULL;
    751 	sblockloc = 0;
    752 	fstype = 0;
    753 
    754 	error = fstrans_mount(mp);
    755 	if (error)
    756 		return error;
    757 
    758 	/*
    759 	 * Try reading the superblock in each of its possible locations.
    760 	 */
    761 	for (i = 0; ; i++) {
    762 		if (bp != NULL) {
    763 			brelse(bp, BC_NOCACHE);
    764 			bp = NULL;
    765 		}
    766 		if (sblock_try[i] == -1) {
    767 			error = EINVAL;
    768 			fs = NULL;
    769 			goto out;
    770 		}
    771 		error = bread(devvp, sblock_try[i] / size, SBLOCKSIZE, cred,
    772 			      &bp);
    773 		if (error) {
    774 			fs = NULL;
    775 			goto out;
    776 		}
    777 		fs = (struct fs*)bp->b_data;
    778 		fsblockloc = sblockloc = sblock_try[i];
    779 		if (fs->fs_magic == FS_UFS1_MAGIC) {
    780 			sbsize = fs->fs_sbsize;
    781 			fstype = UFS1;
    782 #ifdef FFS_EI
    783 			needswap = 0;
    784 		} else if (fs->fs_magic == bswap32(FS_UFS1_MAGIC)) {
    785 			sbsize = bswap32(fs->fs_sbsize);
    786 			fstype = UFS1;
    787 			needswap = 1;
    788 #endif
    789 		} else if (fs->fs_magic == FS_UFS2_MAGIC) {
    790 			sbsize = fs->fs_sbsize;
    791 			fstype = UFS2;
    792 #ifdef FFS_EI
    793 			needswap = 0;
    794 		} else if (fs->fs_magic == bswap32(FS_UFS2_MAGIC)) {
    795 			sbsize = bswap32(fs->fs_sbsize);
    796 			fstype = UFS2;
    797 			needswap = 1;
    798 #endif
    799 		} else
    800 			continue;
    801 
    802 
    803 		/* fs->fs_sblockloc isn't defined for old filesystems */
    804 		if (fstype == UFS1 && !(fs->fs_old_flags & FS_FLAGS_UPDATED)) {
    805 			if (sblockloc == SBLOCK_UFS2)
    806 				/*
    807 				 * This is likely to be the first alternate
    808 				 * in a filesystem with 64k blocks.
    809 				 * Don't use it.
    810 				 */
    811 				continue;
    812 			fsblockloc = sblockloc;
    813 		} else {
    814 			fsblockloc = fs->fs_sblockloc;
    815 #ifdef FFS_EI
    816 			if (needswap)
    817 				fsblockloc = bswap64(fsblockloc);
    818 #endif
    819 		}
    820 
    821 		/* Check we haven't found an alternate superblock */
    822 		if (fsblockloc != sblockloc)
    823 			continue;
    824 
    825 		/* Validate size of superblock */
    826 		if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs))
    827 			continue;
    828 
     829 		/* OK, this seems to be a good superblock */
    830 		break;
    831 	}
    832 
    833 	fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
    834 	memcpy(fs, bp->b_data, sbsize);
    835 
    836 	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
    837 	memset(ump, 0, sizeof *ump);
    838 	mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);
    839 	ump->um_fs = fs;
    840 	ump->um_ops = &ffs_ufsops;
    841 
    842 #ifdef FFS_EI
    843 	if (needswap) {
    844 		ffs_sb_swap((struct fs*)bp->b_data, fs);
    845 		fs->fs_flags |= FS_SWAPPED;
    846 	} else
    847 #endif
    848 		fs->fs_flags &= ~FS_SWAPPED;
    849 
    850 	ffs_oldfscompat_read(fs, ump, sblockloc);
    851 	ump->um_maxfilesize = fs->fs_maxfilesize;
    852 
    853 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
    854 		fs->fs_pendingblocks = 0;
    855 		fs->fs_pendinginodes = 0;
    856 	}
    857 
    858 	ump->um_fstype = fstype;
    859 	if (fs->fs_sbsize < SBLOCKSIZE)
    860 		brelse(bp, BC_INVAL);
    861 	else
    862 		brelse(bp, 0);
    863 	bp = NULL;
    864 
    865 	/* First check to see if this is tagged as an Apple UFS filesystem
    866 	 * in the disklabel
    867 	 */
    868 	if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) == 0) &&
    869 		(dpart.part->p_fstype == FS_APPLEUFS)) {
    870 		ump->um_flags |= UFS_ISAPPLEUFS;
    871 	}
    872 #ifdef APPLE_UFS
    873 	else {
    874 		/* Manually look for an apple ufs label, and if a valid one
    875 		 * is found, then treat it like an Apple UFS filesystem anyway
    876 		 */
    877 		error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
    878 			APPLEUFS_LABEL_SIZE, cred, &bp);
    879 		if (error)
    880 			goto out;
    881 		error = ffs_appleufs_validate(fs->fs_fsmnt,
    882 			(struct appleufslabel *)bp->b_data,NULL);
    883 		if (error == 0) {
    884 			ump->um_flags |= UFS_ISAPPLEUFS;
    885 		}
    886 		brelse(bp, 0);
    887 		bp = NULL;
    888 	}
    889 #else
    890 	if (ump->um_flags & UFS_ISAPPLEUFS) {
    891 		error = EINVAL;
    892 		goto out;
    893 	}
    894 #endif
    895 
    896 	/*
    897 	 * verify that we can access the last block in the fs
    898 	 * if we're mounting read/write.
    899 	 */
    900 
    901 	if (!ronly) {
    902 		error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize,
    903 		    cred, &bp);
    904 		if (bp->b_bcount != fs->fs_fsize)
    905 			error = EINVAL;
    906 		if (error) {
    907 			bset = BC_INVAL;
    908 			goto out;
    909 		}
    910 		brelse(bp, BC_INVAL);
    911 		bp = NULL;
    912 	}
    913 
    914 	fs->fs_ronly = ronly;
    915 	if (ronly == 0) {
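         		/*
         		 * Shift the clean bits up: FS_ISCLEAN becomes FS_WASCLEAN,
         		 * marking the file system as active while remembering that
         		 * it was clean at mount time.  ffs_unmount() checks
         		 * FS_WASCLEAN before setting FS_ISCLEAN again.
         		 */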
    916 		fs->fs_clean <<= 1;
    917 		fs->fs_fmod = 1;
    918 	}
    919 	size = fs->fs_cssize;
    920 	blks = howmany(size, fs->fs_fsize);
    921 	if (fs->fs_contigsumsize > 0)
    922 		size += fs->fs_ncg * sizeof(int32_t);
    923 	size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
    924 	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
    925 	fs->fs_csp = space;
    926 	for (i = 0; i < blks; i += fs->fs_frag) {
    927 		size = fs->fs_bsize;
    928 		if (i + fs->fs_frag > blks)
    929 			size = (blks - i) * fs->fs_fsize;
    930 		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
    931 			      cred, &bp);
    932 		if (error) {
    933 			free(fs->fs_csp, M_UFSMNT);
    934 			goto out;
    935 		}
    936 #ifdef FFS_EI
    937 		if (needswap)
    938 			ffs_csum_swap((struct csum *)bp->b_data,
    939 				(struct csum *)space, size);
    940 		else
    941 #endif
    942 			memcpy(space, bp->b_data, (u_int)size);
    943 
    944 		space = (char *)space + size;
    945 		brelse(bp, 0);
    946 		bp = NULL;
    947 	}
    948 	if (fs->fs_contigsumsize > 0) {
    949 		fs->fs_maxcluster = lp = space;
    950 		for (i = 0; i < fs->fs_ncg; i++)
    951 			*lp++ = fs->fs_contigsumsize;
    952 		space = lp;
    953 	}
    954 	size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
    955 	fs->fs_contigdirs = space;
    956 	space = (char *)space + size;
    957 	memset(fs->fs_contigdirs, 0, size);
     958 	/* Compatibility for old filesystems - XXX */
    959 	if (fs->fs_avgfilesize <= 0)
    960 		fs->fs_avgfilesize = AVFILESIZ;
    961 	if (fs->fs_avgfpdir <= 0)
    962 		fs->fs_avgfpdir = AFPDIR;
    963 	fs->fs_active = NULL;
    964 	mp->mnt_data = ump;
    965 	mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
    966 	mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_FFS);
    967 	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
    968 	mp->mnt_stat.f_namemax = FFS_MAXNAMLEN;
    969 	if (UFS_MPISAPPLEUFS(ump)) {
    970 		/* NeXT used to keep short symlinks in the inode even
    971 		 * when using FS_42INODEFMT.  In that case fs->fs_maxsymlinklen
    972 		 * is probably -1, but we still need to be able to identify
    973 		 * short symlinks.
    974 		 */
    975 		ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
    976 		ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
    977 		mp->mnt_iflag |= IMNT_DTYPE;
    978 	} else {
    979 		ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
    980 		ump->um_dirblksiz = DIRBLKSIZ;
    981 		if (ump->um_maxsymlinklen > 0)
    982 			mp->mnt_iflag |= IMNT_DTYPE;
    983 		else
    984 			mp->mnt_iflag &= ~IMNT_DTYPE;
    985 	}
    986 	mp->mnt_fs_bshift = fs->fs_bshift;
    987 	mp->mnt_dev_bshift = DEV_BSHIFT;	/* XXX */
    988 	mp->mnt_flag |= MNT_LOCAL;
    989 	mp->mnt_iflag |= IMNT_MPSAFE;
    990 #ifdef FFS_EI
    991 	if (needswap)
    992 		ump->um_flags |= UFS_NEEDSWAP;
    993 #endif
    994 	ump->um_mountp = mp;
    995 	ump->um_dev = dev;
    996 	ump->um_devvp = devvp;
    997 	ump->um_nindir = fs->fs_nindir;
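         	/*
         	 * Note: ffs() below is the find-first-set-bit function, not the
         	 * file system.  fs_nindir is a power of two, so ffs(fs_nindir) - 1
         	 * is log2 of the number of block pointers per indirect block.
         	 */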
    998 	ump->um_lognindir = ffs(fs->fs_nindir) - 1;
    999 	ump->um_bptrtodb = fs->fs_fsbtodb;
   1000 	ump->um_seqinc = fs->fs_frag;
   1001 	for (i = 0; i < MAXQUOTAS; i++)
   1002 		ump->um_quotas[i] = NULLVP;
   1003 	devvp->v_specmountpoint = mp;
   1004 	if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
   1005 		error = softdep_mount(devvp, mp, fs, cred);
   1006 		if (error) {
   1007 			free(fs->fs_csp, M_UFSMNT);
   1008 			goto out;
   1009 		}
   1010 	}
   1011 	if (ronly == 0 && fs->fs_snapinum[0] != 0)
   1012 		ffs_snapshot_mount(mp);
   1013 #ifdef UFS_EXTATTR
   1014 	/*
   1015 	 * Initialize file-backed extended attributes on UFS1 file
   1016 	 * systems.
   1017 	 */
   1018 	if (ump->um_fstype == UFS1) {
   1019 		ufs_extattr_uepm_init(&ump->um_extattr);
   1020 #ifdef UFS_EXTATTR_AUTOSTART
   1021 		/*
   1022 		 * XXX Just ignore errors.  Not clear that we should
   1023 		 * XXX fail the mount in this case.
   1024 		 */
   1025 		(void) ufs_extattr_autostart(mp, l);
   1026 #endif
   1027 	}
   1028 #endif /* UFS_EXTATTR */
   1029 	return (0);
   1030 out:
   1031 	fstrans_unmount(mp);
   1032 	if (fs)
   1033 		free(fs, M_UFSMNT);
   1034 	devvp->v_specmountpoint = NULL;
   1035 	if (bp)
   1036 		brelse(bp, bset);
   1037 	if (ump) {
   1038 		if (ump->um_oldfscompat)
   1039 			free(ump->um_oldfscompat, M_UFSMNT);
   1040 		mutex_destroy(&ump->um_lock);
   1041 		free(ump, M_UFSMNT);
   1042 		mp->mnt_data = NULL;
   1043 	}
   1044 	return (error);
   1045 }
   1046 
   1047 /*
   1048  * Sanity checks for loading old filesystem superblocks.
   1049  * See ffs_oldfscompat_write below for unwound actions.
   1050  *
   1051  * XXX - Parts get retired eventually.
   1052  * Unfortunately new bits get added.
   1053  */
   1054 static void
   1055 ffs_oldfscompat_read(struct fs *fs, struct ufsmount *ump, daddr_t sblockloc)
   1056 {
   1057 	off_t maxfilesize;
   1058 	int32_t *extrasave;
   1059 
   1060 	if ((fs->fs_magic != FS_UFS1_MAGIC) ||
   1061 	    (fs->fs_old_flags & FS_FLAGS_UPDATED))
   1062 		return;
   1063 
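         	/*
         	 * Save a copy of the 512 bytes at fs_old_postbl_start plus the
         	 * old npsect/interleave/trackskew fields, so that
         	 * ffs_oldfscompat_write() can restore the original values
         	 * before the superblock goes back to disk.
         	 */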
   1064 	if (!ump->um_oldfscompat)
   1065 		ump->um_oldfscompat = malloc(512 + 3*sizeof(int32_t),
   1066 		    M_UFSMNT, M_WAITOK);
   1067 
   1068 	memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
   1069 	extrasave = ump->um_oldfscompat;
   1070 	extrasave += 512/sizeof(int32_t);
   1071 	extrasave[0] = fs->fs_old_npsect;
   1072 	extrasave[1] = fs->fs_old_interleave;
   1073 	extrasave[2] = fs->fs_old_trackskew;
   1074 
   1075 	/* These fields will be overwritten by their
    1076 	 * original values in ffs_oldfscompat_write, so it is harmless
   1077 	 * to modify them here.
   1078 	 */
   1079 	fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
   1080 	fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
   1081 	fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
   1082 	fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
   1083 
   1084 	fs->fs_maxbsize = fs->fs_bsize;
   1085 	fs->fs_time = fs->fs_old_time;
   1086 	fs->fs_size = fs->fs_old_size;
   1087 	fs->fs_dsize = fs->fs_old_dsize;
   1088 	fs->fs_csaddr = fs->fs_old_csaddr;
   1089 	fs->fs_sblockloc = sblockloc;
   1090 
    1091 	fs->fs_flags = fs->fs_old_flags | (fs->fs_flags & FS_INTERNAL);
   1092 
   1093 	if (fs->fs_old_postblformat == FS_42POSTBLFMT) {
   1094 		fs->fs_old_nrpos = 8;
   1095 		fs->fs_old_npsect = fs->fs_old_nsect;
   1096 		fs->fs_old_interleave = 1;
   1097 		fs->fs_old_trackskew = 0;
   1098 	}
   1099 
   1100 	if (fs->fs_old_inodefmt < FS_44INODEFMT) {
   1101 		fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
   1102 		fs->fs_qbmask = ~fs->fs_bmask;
   1103 		fs->fs_qfmask = ~fs->fs_fmask;
   1104 	}
   1105 
   1106 	maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;
   1107 	if (fs->fs_maxfilesize > maxfilesize)
   1108 		fs->fs_maxfilesize = maxfilesize;
   1109 
   1110 	/* Compatibility for old filesystems */
   1111 	if (fs->fs_avgfilesize <= 0)
   1112 		fs->fs_avgfilesize = AVFILESIZ;
   1113 	if (fs->fs_avgfpdir <= 0)
   1114 		fs->fs_avgfpdir = AFPDIR;
   1115 
   1116 #if 0
   1117 	if (bigcgs) {
   1118 		fs->fs_save_cgsize = fs->fs_cgsize;
   1119 		fs->fs_cgsize = fs->fs_bsize;
   1120 	}
   1121 #endif
   1122 }
   1123 
   1124 /*
   1125  * Unwinding superblock updates for old filesystems.
   1126  * See ffs_oldfscompat_read above for details.
   1127  *
   1128  * XXX - Parts get retired eventually.
   1129  * Unfortunately new bits get added.
   1130  */
   1131 static void
   1132 ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
   1133 {
   1134 	int32_t *extrasave;
   1135 
   1136 	if ((fs->fs_magic != FS_UFS1_MAGIC) ||
   1137 	    (fs->fs_old_flags & FS_FLAGS_UPDATED))
   1138 		return;
   1139 
   1140 	fs->fs_old_time = fs->fs_time;
   1141 	fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
   1142 	fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
   1143 	fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
   1144 	fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
   1145 	fs->fs_old_flags = fs->fs_flags;
   1146 
   1147 #if 0
   1148 	if (bigcgs) {
   1149 		fs->fs_cgsize = fs->fs_save_cgsize;
   1150 	}
   1151 #endif
   1152 
   1153 	memcpy(&fs->fs_old_postbl_start, ump->um_oldfscompat, 512);
   1154 	extrasave = ump->um_oldfscompat;
   1155 	extrasave += 512/sizeof(int32_t);
   1156 	fs->fs_old_npsect = extrasave[0];
   1157 	fs->fs_old_interleave = extrasave[1];
   1158 	fs->fs_old_trackskew = extrasave[2];
   1159 
   1160 }
   1161 
   1162 /*
   1163  * unmount system call
   1164  */
   1165 int
   1166 ffs_unmount(struct mount *mp, int mntflags)
   1167 {
   1168 	struct lwp *l = curlwp;
   1169 	struct ufsmount *ump = VFSTOUFS(mp);
   1170 	struct fs *fs = ump->um_fs;
   1171 	int error, flags, penderr;
   1172 
   1173 	penderr = 0;
   1174 	flags = 0;
   1175 	if (mntflags & MNT_FORCE)
   1176 		flags |= FORCECLOSE;
   1177 #ifdef UFS_EXTATTR
   1178 	if (ump->um_fstype == UFS1) {
   1179 		error = ufs_extattr_stop(mp, l);
   1180 		if (error) {
   1181 			if (error != EOPNOTSUPP)
   1182 				printf("%s: ufs_extattr_stop returned %d\n",
   1183 				    fs->fs_fsmnt, error);
   1184 		} else
   1185 			ufs_extattr_uepm_destroy(&ump->um_extattr);
   1186 	}
   1187 #endif /* UFS_EXTATTR */
   1188 	if (mp->mnt_flag & MNT_SOFTDEP) {
   1189 		if ((error = softdep_flushfiles(mp, flags, l)) != 0)
   1190 			return (error);
   1191 	} else {
   1192 		if ((error = ffs_flushfiles(mp, flags, l)) != 0)
   1193 			return (error);
   1194 	}
   1195 	mutex_enter(&ump->um_lock);
   1196 	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
   1197 		printf("%s: unmount pending error: blocks %" PRId64
   1198 		       " files %d\n",
   1199 		    fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
   1200 		fs->fs_pendingblocks = 0;
   1201 		fs->fs_pendinginodes = 0;
   1202 		penderr = 1;
   1203 	}
   1204 	mutex_exit(&ump->um_lock);
   1205 	if (fs->fs_ronly == 0 &&
   1206 	    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
   1207 	    fs->fs_clean & FS_WASCLEAN) {
   1208 		/*
   1209 		 * XXXX don't mark fs clean in the case of softdep
   1210 		 * pending block errors, until they are fixed.
   1211 		 */
   1212 		if (penderr == 0) {
   1213 			if (mp->mnt_flag & MNT_SOFTDEP)
   1214 				fs->fs_flags &= ~FS_DOSOFTDEP;
   1215 			fs->fs_clean = FS_ISCLEAN;
   1216 		}
   1217 		fs->fs_fmod = 0;
   1218 		(void) ffs_sbupdate(ump, MNT_WAIT);
   1219 	}
   1220 	if (ump->um_devvp->v_type != VBAD)
   1221 		ump->um_devvp->v_specmountpoint = NULL;
   1222 	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
   1223 	(void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
   1224 		NOCRED);
   1225 	vput(ump->um_devvp);
   1226 	free(fs->fs_csp, M_UFSMNT);
   1227 	free(fs, M_UFSMNT);
   1228 	if (ump->um_oldfscompat != NULL)
   1229 		free(ump->um_oldfscompat, M_UFSMNT);
   1230 	softdep_unmount(mp);
   1231 	mutex_destroy(&ump->um_lock);
   1232 	free(ump, M_UFSMNT);
   1233 	mp->mnt_data = NULL;
   1234 	mp->mnt_flag &= ~MNT_LOCAL;
   1235 	fstrans_unmount(mp);
   1236 	return (0);
   1237 }
   1238 
   1239 /*
   1240  * Flush out all the files in a filesystem.
   1241  */
   1242 int
   1243 ffs_flushfiles(struct mount *mp, int flags, struct lwp *l)
   1244 {
   1245 	extern int doforce;
   1246 	struct ufsmount *ump;
   1247 	int error;
   1248 
   1249 	if (!doforce)
   1250 		flags &= ~FORCECLOSE;
   1251 	ump = VFSTOUFS(mp);
   1252 #ifdef QUOTA
   1253 	if (mp->mnt_flag & MNT_QUOTA) {
   1254 		int i;
   1255 		if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
   1256 			return (error);
   1257 		for (i = 0; i < MAXQUOTAS; i++) {
   1258 			if (ump->um_quotas[i] == NULLVP)
   1259 				continue;
   1260 			quotaoff(l, mp, i);
   1261 		}
   1262 		/*
   1263 		 * Here we fall through to vflush again to ensure
   1264 		 * that we have gotten rid of all the system vnodes.
   1265 		 */
   1266 	}
   1267 #endif
   1268 	if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
   1269 		return (error);
   1270 	ffs_snapshot_unmount(mp);
   1271 	/*
   1272 	 * Flush all the files.
   1273 	 */
   1274 	error = vflush(mp, NULLVP, flags);
   1275 	if (error)
   1276 		return (error);
   1277 	/*
   1278 	 * Flush filesystem metadata.
   1279 	 */
   1280 	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
   1281 	error = VOP_FSYNC(ump->um_devvp, l->l_cred, FSYNC_WAIT, 0, 0);
   1282 	VOP_UNLOCK(ump->um_devvp, 0);
   1283 	return (error);
   1284 }
   1285 
   1286 /*
   1287  * Get file system statistics.
   1288  */
   1289 int
   1290 ffs_statvfs(struct mount *mp, struct statvfs *sbp)
   1291 {
   1292 	struct ufsmount *ump;
   1293 	struct fs *fs;
   1294 
   1295 	ump = VFSTOUFS(mp);
   1296 	fs = ump->um_fs;
   1297 	mutex_enter(&ump->um_lock);
   1298 	sbp->f_bsize = fs->fs_bsize;
   1299 	sbp->f_frsize = fs->fs_fsize;
   1300 	sbp->f_iosize = fs->fs_bsize;
   1301 	sbp->f_blocks = fs->fs_dsize;
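         	/*
         	 * Blocks and inodes whose release is still pending
         	 * (fs_pendingblocks, fs_pendinginodes) are counted as free.
         	 */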
   1302 	sbp->f_bfree = blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
   1303 		fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
   1304 	sbp->f_bresvd = ((u_int64_t) fs->fs_dsize * (u_int64_t)
   1305 	    fs->fs_minfree) / (u_int64_t) 100;
   1306 	if (sbp->f_bfree > sbp->f_bresvd)
   1307 		sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
   1308 	else
   1309 		sbp->f_bavail = 0;
   1310 	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
   1311 	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
   1312 	sbp->f_favail = sbp->f_ffree;
   1313 	sbp->f_fresvd = 0;
   1314 	mutex_exit(&ump->um_lock);
   1315 	copy_statvfs_info(sbp, mp);
   1316 
   1317 	return (0);
   1318 }
   1319 
   1320 /*
   1321  * Go through the disk queues to initiate sandbagged IO;
   1322  * go through the inodes to write those that have been modified;
   1323  * initiate the writing of the super block if it has been modified.
   1324  *
   1325  * Note: we are always called with the filesystem marked `MPBUSY'.
   1326  */
   1327 int
   1328 ffs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
   1329 {
   1330 	struct lwp *l = curlwp;
   1331 	struct vnode *vp, *mvp;
   1332 	struct inode *ip;
   1333 	struct ufsmount *ump = VFSTOUFS(mp);
   1334 	struct fs *fs;
   1335 	int error, count, allerror = 0;
   1336 
   1337 	fs = ump->um_fs;
   1338 	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
   1339 		printf("fs = %s\n", fs->fs_fsmnt);
   1340 		panic("update: rofs mod");
   1341 	}
   1342 
   1343 	/* Allocate a marker vnode. */
   1344 	if ((mvp = vnalloc(mp)) == NULL)
   1345 		return (ENOMEM);
   1346 
   1347 	fstrans_start(mp, FSTRANS_SHARED);
   1348 	/*
   1349 	 * Write back each (modified) inode.
   1350 	 */
   1351 	mutex_enter(&mntvnode_lock);
   1352 loop:
   1353 	/*
   1354 	 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
   1355 	 * and vclean() can be called indirectly
   1356 	 */
   1357 	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
   1358 		vmark(mvp, vp);
   1359 		/*
   1360 		 * If the vnode that we are about to sync is no longer
   1361 		 * associated with this mount point, start over.
   1362 		 */
   1363 		if (vp->v_mount != mp || vismarker(vp))
   1364 			continue;
   1365 		mutex_enter(&vp->v_interlock);
   1366 		ip = VTOI(vp);
   1367 		if (ip == NULL || (vp->v_iflag & (VI_XLOCK|VI_CLEAN)) != 0 ||
   1368 		    vp->v_type == VNON || ((ip->i_flag &
   1369 		    (IN_CHANGE | IN_UPDATE | IN_MODIFIED)) == 0 &&
   1370 		    LIST_EMPTY(&vp->v_dirtyblkhd) &&
   1371 		    UVM_OBJ_IS_CLEAN(&vp->v_uobj)))
   1372 		{
   1373 			mutex_exit(&vp->v_interlock);
   1374 			continue;
   1375 		}
   1376 		if (vp->v_type == VBLK &&
   1377 		    fstrans_getstate(mp) == FSTRANS_SUSPENDING) {
   1378 			mutex_exit(&vp->v_interlock);
   1379 			continue;
   1380 		}
   1381 		mutex_exit(&mntvnode_lock);
   1382 		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
   1383 		if (error) {
   1384 			mutex_enter(&mntvnode_lock);
   1385 			if (error == ENOENT) {
   1386 				(void)vunmark(mvp);
   1387 				goto loop;
   1388 			}
   1389 			continue;
   1390 		}
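         		/*
         		 * With MNT_LAZY only the on-disk inode is updated for
         		 * regular files; otherwise do a full VOP_FSYNC().
         		 */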
   1391 		if (vp->v_type == VREG && waitfor == MNT_LAZY)
   1392 			error = ffs_update(vp, NULL, NULL, 0);
   1393 		else
   1394 			error = VOP_FSYNC(vp, cred,
   1395 			    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0);
   1396 		if (error)
   1397 			allerror = error;
   1398 		vput(vp);
   1399 		mutex_enter(&mntvnode_lock);
   1400 	}
   1401 	mutex_exit(&mntvnode_lock);
   1402 	/*
   1403 	 * Force stale file system control information to be flushed.
   1404 	 */
   1405 	if (waitfor == MNT_WAIT && (ump->um_mountp->mnt_flag & MNT_SOFTDEP)) {
   1406 		if ((error = softdep_flushworklist(ump->um_mountp, &count, l)))
   1407 			allerror = error;
   1408 		/* Flushed work items may create new vnodes to clean */
   1409 		if (allerror == 0 && count) {
   1410 			mutex_enter(&mntvnode_lock);
   1411 			goto loop;
   1412 		}
   1413 	}
   1414 	if (waitfor != MNT_LAZY && (ump->um_devvp->v_numoutput > 0 ||
   1415 	    !LIST_EMPTY(&ump->um_devvp->v_dirtyblkhd))) {
   1416 		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
   1417 		if ((error = VOP_FSYNC(ump->um_devvp, cred,
   1418 		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0)) != 0)
   1419 			allerror = error;
   1420 		VOP_UNLOCK(ump->um_devvp, 0);
   1421 		if (allerror == 0 && waitfor == MNT_WAIT) {
   1422 			mutex_enter(&mntvnode_lock);
   1423 			goto loop;
   1424 		}
   1425 	}
   1426 #ifdef QUOTA
   1427 	qsync(mp);
   1428 #endif
   1429 	/*
   1430 	 * Write back modified superblock.
   1431 	 */
   1432 	if (fs->fs_fmod != 0) {
   1433 		fs->fs_fmod = 0;
   1434 		fs->fs_time = time_second;
   1435 		if ((error = ffs_cgupdate(ump, waitfor)))
   1436 			allerror = error;
   1437 	}
   1438 	fstrans_done(mp);
   1439 	vnfree(mvp);
   1440 	return (allerror);
   1441 }
   1442 
   1443 /*
   1444  * Look up a FFS dinode number to find its incore vnode, otherwise read it
   1445  * in from disk.  If it is in core, wait for the lock bit to clear, then
   1446  * return the inode locked.  Detection and handling of mount points must be
   1447  * done by the calling routine.
   1448  */
   1449 int
   1450 ffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
   1451 {
   1452 	struct fs *fs;
   1453 	struct inode *ip;
   1454 	struct ufsmount *ump;
   1455 	struct buf *bp;
   1456 	struct vnode *vp;
   1457 	dev_t dev;
   1458 	int error;
   1459 
   1460 	ump = VFSTOUFS(mp);
   1461 	dev = ump->um_dev;
   1462 
   1463  retry:
   1464 	if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
   1465 		return (0);
   1466 
   1467 	/* Allocate a new vnode/inode. */
   1468 	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
   1469 		*vpp = NULL;
   1470 		return (error);
   1471 	}
   1472 	ip = pool_get(&ffs_inode_pool, PR_WAITOK);
   1473 
   1474 	/*
   1475 	 * If someone beat us to it, put back the freshly allocated
   1476 	 * vnode/inode pair and retry.
   1477 	 */
   1478 	mutex_enter(&ufs_hashlock);
   1479 	if (ufs_ihashget(dev, ino, 0) != NULL) {
   1480 		mutex_exit(&ufs_hashlock);
   1481 		ungetnewvnode(vp);
   1482 		pool_put(&ffs_inode_pool, ip);
   1483 		goto retry;
   1484 	}
   1485 
   1486 	vp->v_vflag |= VV_LOCKSWORK;
   1487 
   1488 	/*
   1489 	 * XXX MFS ends up here, too, to allocate an inode.  Should we
   1490 	 * XXX create another pool for MFS inodes?
   1491 	 */
   1492 
   1493 	memset(ip, 0, sizeof(struct inode));
   1494 	vp->v_data = ip;
   1495 	ip->i_vnode = vp;
   1496 	ip->i_ump = ump;
   1497 	ip->i_fs = fs = ump->um_fs;
   1498 	ip->i_dev = dev;
   1499 	ip->i_number = ino;
   1500 	LIST_INIT(&ip->i_pcbufhd);
   1501 #ifdef QUOTA
   1502 	ufsquota_init(ip);
   1503 #endif
   1504 
   1505 	/*
   1506 	 * Initialize genfs node, we might proceed to destroy it in
   1507 	 * error branches.
   1508 	 */
   1509 	genfs_node_init(vp, &ffs_genfsops);
   1510 
   1511 	/*
   1512 	 * Put it onto its hash chain and lock it so that other requests for
   1513 	 * this inode will block if they arrive while we are sleeping waiting
   1514 	 * for old data structures to be purged or for the contents of the
   1515 	 * disk portion of this inode to be read.
   1516 	 */
   1517 
   1518 	ufs_ihashins(ip);
   1519 	mutex_exit(&ufs_hashlock);
   1520 
   1521 	/* Read in the disk contents for the inode, copy into the inode. */
   1522 	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
   1523 		      (int)fs->fs_bsize, NOCRED, &bp);
   1524 	if (error) {
   1525 
   1526 		/*
   1527 		 * The inode does not contain anything useful, so it would
   1528 		 * be misleading to leave it on its hash chain. With mode
   1529 		 * still zero, it will be unlinked and returned to the free
   1530 		 * list by vput().
   1531 		 */
   1532 
   1533 		vput(vp);
   1534 		brelse(bp, 0);
   1535 		*vpp = NULL;
   1536 		return (error);
   1537 	}
   1538 	if (ip->i_ump->um_fstype == UFS1)
   1539 		ip->i_din.ffs1_din = pool_get(&ffs_dinode1_pool, PR_WAITOK);
   1540 	else
   1541 		ip->i_din.ffs2_din = pool_get(&ffs_dinode2_pool, PR_WAITOK);
   1542 	ffs_load_inode(bp, ip, fs, ino);
   1543 	if (DOINGSOFTDEP(vp))
   1544 		softdep_load_inodeblock(ip);
   1545 	else
   1546 		ip->i_ffs_effnlink = ip->i_nlink;
   1547 	brelse(bp, 0);
   1548 
   1549 	/*
   1550 	 * Initialize the vnode from the inode, check for aliases.
   1551 	 * Note that the underlying vnode may have changed.
   1552 	 */
   1553 
   1554 	ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
   1555 
   1556 	/*
   1557 	 * Finish inode initialization now that aliasing has been resolved.
   1558 	 */
   1559 
   1560 	ip->i_devvp = ump->um_devvp;
   1561 	VREF(ip->i_devvp);
   1562 
   1563 	/*
   1564 	 * Ensure that uid and gid are correct. This is a temporary
   1565 	 * fix until fsck has been changed to do the update.
   1566 	 */
   1567 
   1568 	if (fs->fs_old_inodefmt < FS_44INODEFMT) {		/* XXX */
   1569 		ip->i_uid = ip->i_ffs1_ouid;			/* XXX */
   1570 		ip->i_gid = ip->i_ffs1_ogid;			/* XXX */
   1571 	}							/* XXX */
   1572 	uvm_vnp_setsize(vp, ip->i_size);
   1573 	*vpp = vp;
   1574 	return (0);
   1575 }
   1576 
   1577 /*
   1578  * File handle to vnode
   1579  *
   1580  * Have to be really careful about stale file handles:
   1581  * - check that the inode number is valid
   1582  * - call ffs_vget() to get the locked inode
   1583  * - check for an unallocated inode (i_mode == 0)
   1584  * - check that the given client host has export rights and return
    1585  *   those rights via exflagsp and credanonp
   1586  */
   1587 int
   1588 ffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
   1589 {
   1590 	struct ufid ufh;
   1591 	struct fs *fs;
   1592 
   1593 	if (fhp->fid_len != sizeof(struct ufid))
   1594 		return EINVAL;
   1595 
   1596 	memcpy(&ufh, fhp, sizeof(ufh));
   1597 	fs = VFSTOUFS(mp)->um_fs;
   1598 	if (ufh.ufid_ino < ROOTINO ||
   1599 	    ufh.ufid_ino >= fs->fs_ncg * fs->fs_ipg)
   1600 		return (ESTALE);
   1601 	return (ufs_fhtovp(mp, &ufh, vpp));
   1602 }
   1603 
   1604 /*
   1605  * Vnode pointer to File handle
   1606  */
   1607 /* ARGSUSED */
   1608 int
   1609 ffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
   1610 {
   1611 	struct inode *ip;
   1612 	struct ufid ufh;
   1613 
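         	/*
         	 * If the caller's buffer is too small, report the size needed
         	 * in *fh_size so the caller knows how much space to provide.
         	 */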
   1614 	if (*fh_size < sizeof(struct ufid)) {
   1615 		*fh_size = sizeof(struct ufid);
   1616 		return E2BIG;
   1617 	}
   1618 	ip = VTOI(vp);
   1619 	*fh_size = sizeof(struct ufid);
   1620 	memset(&ufh, 0, sizeof(ufh));
   1621 	ufh.ufid_len = sizeof(struct ufid);
   1622 	ufh.ufid_ino = ip->i_number;
   1623 	ufh.ufid_gen = ip->i_gen;
   1624 	memcpy(fhp, &ufh, sizeof(ufh));
   1625 	return (0);
   1626 }
   1627 
   1628 void
   1629 ffs_init(void)
   1630 {
   1631 	if (ffs_initcount++ > 0)
   1632 		return;
   1633 
   1634 	pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0,
   1635 		  "ffsinopl", &pool_allocator_nointr, IPL_NONE);
   1636 	pool_init(&ffs_dinode1_pool, sizeof(struct ufs1_dinode), 0, 0, 0,
   1637 		  "dino1pl", &pool_allocator_nointr, IPL_NONE);
   1638 	pool_init(&ffs_dinode2_pool, sizeof(struct ufs2_dinode), 0, 0, 0,
   1639 		  "dino2pl", &pool_allocator_nointr, IPL_NONE);
   1640 	softdep_initialize();
   1641 	ffs_snapshot_init();
   1642 	ufs_init();
   1643 }
   1644 
   1645 void
   1646 ffs_reinit(void)
   1647 {
   1648 	softdep_reinitialize();
   1649 	ufs_reinit();
   1650 }
   1651 
   1652 void
   1653 ffs_done(void)
   1654 {
   1655 	if (--ffs_initcount > 0)
   1656 		return;
   1657 
   1658 	/* XXX softdep cleanup ? */
   1659 	ffs_snapshot_fini();
   1660 	ufs_done();
   1661 	pool_destroy(&ffs_dinode2_pool);
   1662 	pool_destroy(&ffs_dinode1_pool);
   1663 	pool_destroy(&ffs_inode_pool);
   1664 }
   1665 
   1666 SYSCTL_SETUP(sysctl_vfs_ffs_setup, "sysctl vfs.ffs subtree setup")
   1667 {
   1668 #if 0
   1669 	extern int doasyncfree;
   1670 #endif
   1671 	extern int ffs_log_changeopt;
   1672 
   1673 	sysctl_createv(clog, 0, NULL, NULL,
   1674 		       CTLFLAG_PERMANENT,
   1675 		       CTLTYPE_NODE, "vfs", NULL,
   1676 		       NULL, 0, NULL, 0,
   1677 		       CTL_VFS, CTL_EOL);
   1678 	sysctl_createv(clog, 0, NULL, NULL,
   1679 		       CTLFLAG_PERMANENT,
   1680 		       CTLTYPE_NODE, "ffs",
   1681 		       SYSCTL_DESCR("Berkeley Fast File System"),
   1682 		       NULL, 0, NULL, 0,
   1683 		       CTL_VFS, 1, CTL_EOL);
   1684 
   1685 	/*
   1686 	 * @@@ should we even bother with these first three?
   1687 	 */
   1688 	sysctl_createv(clog, 0, NULL, NULL,
   1689 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1690 		       CTLTYPE_INT, "doclusterread", NULL,
   1691 		       sysctl_notavail, 0, NULL, 0,
   1692 		       CTL_VFS, 1, FFS_CLUSTERREAD, CTL_EOL);
   1693 	sysctl_createv(clog, 0, NULL, NULL,
   1694 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1695 		       CTLTYPE_INT, "doclusterwrite", NULL,
   1696 		       sysctl_notavail, 0, NULL, 0,
   1697 		       CTL_VFS, 1, FFS_CLUSTERWRITE, CTL_EOL);
   1698 	sysctl_createv(clog, 0, NULL, NULL,
   1699 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1700 		       CTLTYPE_INT, "doreallocblks", NULL,
   1701 		       sysctl_notavail, 0, NULL, 0,
   1702 		       CTL_VFS, 1, FFS_REALLOCBLKS, CTL_EOL);
   1703 #if 0
   1704 	sysctl_createv(clog, 0, NULL, NULL,
   1705 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1706 		       CTLTYPE_INT, "doasyncfree",
   1707 		       SYSCTL_DESCR("Release dirty blocks asynchronously"),
   1708 		       NULL, 0, &doasyncfree, 0,
   1709 		       CTL_VFS, 1, FFS_ASYNCFREE, CTL_EOL);
   1710 #endif
   1711 	sysctl_createv(clog, 0, NULL, NULL,
   1712 		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
   1713 		       CTLTYPE_INT, "log_changeopt",
   1714 		       SYSCTL_DESCR("Log changes in optimization strategy"),
   1715 		       NULL, 0, &ffs_log_changeopt, 0,
   1716 		       CTL_VFS, 1, FFS_LOG_CHANGEOPT, CTL_EOL);
   1717 }
   1718 
   1719 /*
   1720  * Write a superblock and associated information back to disk.
   1721  */
   1722 int
   1723 ffs_sbupdate(struct ufsmount *mp, int waitfor)
   1724 {
   1725 	struct fs *fs = mp->um_fs;
   1726 	struct buf *bp;
   1727 	int error = 0;
   1728 	u_int32_t saveflag;
   1729 
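         	/*
         	 * Convert the superblock's byte offset to a device block number:
         	 * shifting right by fs_fshift yields the fragment number and
         	 * shifting left by fs_fsbtodb converts fragments to DEV_BSIZE
         	 * blocks, hence the combined right shift below.
         	 */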
   1730 	bp = getblk(mp->um_devvp,
   1731 	    fs->fs_sblockloc >> (fs->fs_fshift - fs->fs_fsbtodb),
   1732 	    (int)fs->fs_sbsize, 0, 0);
   1733 	saveflag = fs->fs_flags & FS_INTERNAL;
   1734 	fs->fs_flags &= ~FS_INTERNAL;
   1735 
   1736 	memcpy(bp->b_data, fs, fs->fs_sbsize);
   1737 
   1738 	ffs_oldfscompat_write((struct fs *)bp->b_data, mp);
   1739 #ifdef FFS_EI
   1740 	if (mp->um_flags & UFS_NEEDSWAP)
   1741 		ffs_sb_swap((struct fs *)bp->b_data, (struct fs *)bp->b_data);
   1742 #endif
   1743 	fs->fs_flags |= saveflag;
   1744 
   1745 	if (waitfor == MNT_WAIT)
   1746 		error = bwrite(bp);
   1747 	else
   1748 		bawrite(bp);
   1749 	return (error);
   1750 }
   1751 
   1752 int
   1753 ffs_cgupdate(struct ufsmount *mp, int waitfor)
   1754 {
   1755 	struct fs *fs = mp->um_fs;
   1756 	struct buf *bp;
   1757 	int blks;
   1758 	void *space;
   1759 	int i, size, error = 0, allerror = 0;
   1760 
   1761 	allerror = ffs_sbupdate(mp, waitfor);
   1762 	blks = howmany(fs->fs_cssize, fs->fs_fsize);
   1763 	space = fs->fs_csp;
   1764 	for (i = 0; i < blks; i += fs->fs_frag) {
   1765 		size = fs->fs_bsize;
   1766 		if (i + fs->fs_frag > blks)
   1767 			size = (blks - i) * fs->fs_fsize;
   1768 		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
   1769 		    size, 0, 0);
   1770 #ifdef FFS_EI
   1771 		if (mp->um_flags & UFS_NEEDSWAP)
   1772 			ffs_csum_swap((struct csum*)space,
   1773 			    (struct csum*)bp->b_data, size);
   1774 		else
   1775 #endif
   1776 			memcpy(bp->b_data, space, (u_int)size);
   1777 		space = (char *)space + size;
   1778 		if (waitfor == MNT_WAIT)
   1779 			error = bwrite(bp);
   1780 		else
   1781 			bawrite(bp);
   1782 	}
   1783 	if (!allerror && error)
   1784 		allerror = error;
   1785 	return (allerror);
   1786 }
   1787 
   1788 int
   1789 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
   1790     int attrnamespace, const char *attrname)
   1791 {
   1792 #ifdef UFS_EXTATTR
   1793 	/*
   1794 	 * File-backed extended attributes are only supported on UFS1.
   1795 	 * UFS2 has native extended attributes.
   1796 	 */
   1797 	if (VFSTOUFS(mp)->um_fstype == UFS1)
   1798 		return (ufs_extattrctl(mp, cmd, vp, attrnamespace, attrname));
   1799 #endif
   1800 	return (vfs_stdextattrctl(mp, cmd, vp, attrnamespace, attrname));
   1801 }
   1802 
   1803 int
   1804 ffs_suspendctl(struct mount *mp, int cmd)
   1805 {
   1806 	int error;
   1807 	struct lwp *l = curlwp;
   1808 
   1809 	switch (cmd) {
   1810 	case SUSPEND_SUSPEND:
   1811 		if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
   1812 			return error;
   1813 		error = ffs_sync(mp, MNT_WAIT, l->l_proc->p_cred);
   1814 		if (error == 0)
   1815 			error = fstrans_setstate(mp, FSTRANS_SUSPENDED);
   1816 		if (error != 0) {
   1817 			(void) fstrans_setstate(mp, FSTRANS_NORMAL);
   1818 			return error;
   1819 		}
   1820 		return 0;
   1821 
   1822 	case SUSPEND_RESUME:
   1823 		return fstrans_setstate(mp, FSTRANS_NORMAL);
   1824 
   1825 	default:
   1826 		return EINVAL;
   1827 	}
   1828 }
   1829