      1 /*	$NetBSD: ffs_vfsops.c,v 1.59 2000/03/16 18:20:06 jdolecek Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1989, 1991, 1993, 1994
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. All advertising materials mentioning features or use of this software
     16  *    must display the following acknowledgement:
     17  *	This product includes software developed by the University of
     18  *	California, Berkeley and its contributors.
     19  * 4. Neither the name of the University nor the names of its contributors
     20  *    may be used to endorse or promote products derived from this software
     21  *    without specific prior written permission.
     22  *
     23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     33  * SUCH DAMAGE.
     34  *
     35  *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
     36  */
     37 
     38 #if defined(_KERNEL) && !defined(_LKM)
     39 #include "opt_ffs.h"
     40 #include "opt_quota.h"
     41 #include "opt_compat_netbsd.h"
     42 #endif
     43 
     44 #include <sys/param.h>
     45 #include <sys/systm.h>
     46 #include <sys/namei.h>
     47 #include <sys/proc.h>
     48 #include <sys/kernel.h>
     49 #include <sys/vnode.h>
     50 #include <sys/socket.h>
     51 #include <sys/mount.h>
     52 #include <sys/buf.h>
     53 #include <sys/device.h>
     54 #include <sys/mbuf.h>
     55 #include <sys/file.h>
     56 #include <sys/disklabel.h>
     57 #include <sys/ioctl.h>
     58 #include <sys/errno.h>
     59 #include <sys/malloc.h>
     60 #include <sys/pool.h>
     61 #include <sys/lock.h>
     62 #include <vm/vm.h>
     63 #include <sys/sysctl.h>
     64 
     65 #include <miscfs/specfs/specdev.h>
     66 
     67 #include <ufs/ufs/quota.h>
     68 #include <ufs/ufs/ufsmount.h>
     69 #include <ufs/ufs/inode.h>
     70 #include <ufs/ufs/dir.h>
     71 #include <ufs/ufs/ufs_extern.h>
     72 #include <ufs/ufs/ufs_bswap.h>
     73 
     74 #include <ufs/ffs/fs.h>
     75 #include <ufs/ffs/ffs_extern.h>
     76 
     77 /* how many times ffs_init() was called */
     78 int ffs_initcount = 0;
     79 
     80 extern struct lock ufs_hashlock;
     81 
     82 int ffs_sbupdate __P((struct ufsmount *, int));
     83 
     84 extern struct vnodeopv_desc ffs_vnodeop_opv_desc;
     85 extern struct vnodeopv_desc ffs_specop_opv_desc;
     86 extern struct vnodeopv_desc ffs_fifoop_opv_desc;
     87 
     88 struct vnodeopv_desc *ffs_vnodeopv_descs[] = {
     89 	&ffs_vnodeop_opv_desc,
     90 	&ffs_specop_opv_desc,
     91 	&ffs_fifoop_opv_desc,
     92 	NULL,
     93 };
     94 
     95 struct vfsops ffs_vfsops = {
     96 	MOUNT_FFS,
     97 	ffs_mount,
     98 	ufs_start,
     99 	ffs_unmount,
    100 	ufs_root,
    101 	ufs_quotactl,
    102 	ffs_statfs,
    103 	ffs_sync,
    104 	ffs_vget,
    105 	ffs_fhtovp,
    106 	ffs_vptofh,
    107 	ffs_init,
    108 	ffs_done,
    109 	ffs_sysctl,
    110 	ffs_mountroot,
    111 	ufs_check_export,
    112 	ffs_vnodeopv_descs,
    113 };
    114 
    115 struct pool ffs_inode_pool;
    116 
    117 /*
    118  * Called by main() when ffs is going to be mounted as root.
    119  */
    120 
    121 int
    122 ffs_mountroot()
    123 {
    124 	extern struct vnode *rootvp;
    125 	struct fs *fs;
    126 	struct mount *mp;
    127 	struct proc *p = curproc;	/* XXX */
    128 	struct ufsmount *ump;
    129 	int error;
    130 
    131 	if (root_device->dv_class != DV_DISK)
    132 		return (ENODEV);
    133 
    134 	/*
     135 	 * Get a vnode for rootdev.
    136 	 */
    137 	if (bdevvp(rootdev, &rootvp))
    138 		panic("ffs_mountroot: can't setup bdevvp's");
    139 
    140 	if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
    141 		vrele(rootvp);
    142 		return (error);
    143 	}
    144 	if ((error = ffs_mountfs(rootvp, mp, p)) != 0) {
    145 		mp->mnt_op->vfs_refcount--;
    146 		vfs_unbusy(mp);
    147 		free(mp, M_MOUNT);
    148 		vrele(rootvp);
    149 		return (error);
    150 	}
    151 	simple_lock(&mountlist_slock);
    152 	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
    153 	simple_unlock(&mountlist_slock);
    154 	ump = VFSTOUFS(mp);
    155 	fs = ump->um_fs;
    156 	memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
    157 	(void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
    158 	(void)ffs_statfs(mp, &mp->mnt_stat, p);
    159 	vfs_unbusy(mp);
    160 	inittodr(fs->fs_time);
    161 	return (0);
    162 }
    163 
    164 /*
    165  * VFS Operations.
    166  *
    167  * mount system call
    168  */
    169 int
    170 ffs_mount(mp, path, data, ndp, p)
    171 	register struct mount *mp;
    172 	const char *path;
    173 	void *data;
    174 	struct nameidata *ndp;
    175 	struct proc *p;
    176 {
    177 	struct vnode *devvp;
    178 	struct ufs_args args;
    179 	struct ufsmount *ump = NULL;
    180 	register struct fs *fs;
    181 	size_t size;
    182 	int error, flags;
    183 	mode_t accessmode;
    184 
    185 	error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args));
    186 	if (error)
    187 		return (error);
    188 	/*
    189 	 * If updating, check whether changing from read-only to
    190 	 * read/write; if there is no device name, that's all we do.
    191 	 */
    192 	if (mp->mnt_flag & MNT_UPDATE) {
    193 		ump = VFSTOUFS(mp);
    194 		fs = ump->um_fs;
    195 		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
    196 			flags = WRITECLOSE;
    197 			if (mp->mnt_flag & MNT_FORCE)
    198 				flags |= FORCECLOSE;
    199 			if (mp->mnt_flag & MNT_SOFTDEP)
    200 				error = softdep_flushfiles(mp, flags, p);
    201 			else
    202 				error = ffs_flushfiles(mp, flags, p);
    203 			if (error == 0 &&
    204 			    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
    205 			    fs->fs_clean & FS_WASCLEAN) {
    206 				fs->fs_clean = FS_ISCLEAN;
    207 				(void) ffs_sbupdate(ump, MNT_WAIT);
    208 			}
    209 			if (error)
    210 				return (error);
    211 			fs->fs_ronly = 1;
    212 		}
    213 		if (mp->mnt_flag & MNT_RELOAD) {
    214 			error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
    215 			if (error)
    216 				return (error);
    217 		}
    218 		if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
    219 			/*
     220 			 * If upgrading to read/write as non-root, verify that
     221 			 * the user has the necessary permissions on the device.
    222 			 */
    223 			devvp = ump->um_devvp;
    224 			if (p->p_ucred->cr_uid != 0) {
    225 				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    226 				error = VOP_ACCESS(devvp, VREAD | VWRITE,
    227 						   p->p_ucred, p);
    228 				VOP_UNLOCK(devvp, 0);
    229 				if (error)
    230 					return (error);
    231 			}
    232 			fs->fs_ronly = 0;
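         			/*
         			 * Shifting the clean flag left records the previous
         			 * state: FS_ISCLEAN becomes FS_WASCLEAN (assuming the
         			 * usual flag values, FS_WASCLEAN == FS_ISCLEAN << 1),
         			 * so a later clean downgrade or unmount can restore
         			 * FS_ISCLEAN after a successful ffs_cgupdate().
         			 */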
    233 			fs->fs_clean <<= 1;
    234 			fs->fs_fmod = 1;
    235 			if ((fs->fs_flags & FS_DOSOFTDEP)) {
    236 				error = softdep_mount(devvp, mp, fs,
    237 				    p->p_ucred);
    238 				if (error)
    239 					return (error);
    240 			} else
    241 				mp->mnt_flag &= ~MNT_SOFTDEP;
    242 		}
    243 		if (args.fspec == 0) {
    244 			/*
    245 			 * Process export requests.
    246 			 */
    247 			return (vfs_export(mp, &ump->um_export, &args.export));
    248 		}
    249 		if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
    250 		    (MNT_SOFTDEP | MNT_ASYNC)) {
    251 			printf("%s fs uses soft updates, ignoring async mode\n",
    252 			    fs->fs_fsmnt);
    253 			mp->mnt_flag &= ~MNT_ASYNC;
    254 		}
    255 	}
    256 	/*
    257 	 * Not an update, or updating the name: look up the name
    258 	 * and verify that it refers to a sensible block device.
    259 	 */
    260 	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
    261 	if ((error = namei(ndp)) != 0)
    262 		return (error);
    263 	devvp = ndp->ni_vp;
    264 
    265 	if (devvp->v_type != VBLK) {
    266 		vrele(devvp);
    267 		return (ENOTBLK);
    268 	}
    269 	if (major(devvp->v_rdev) >= nblkdev) {
    270 		vrele(devvp);
    271 		return (ENXIO);
    272 	}
    273 	/*
     274 	 * If mounting as non-root, verify that the user has the
     275 	 * necessary permissions on the device.
    276 	 */
    277 	if (p->p_ucred->cr_uid != 0) {
    278 		accessmode = VREAD;
    279 		if ((mp->mnt_flag & MNT_RDONLY) == 0)
    280 			accessmode |= VWRITE;
    281 		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    282 		error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
    283 		VOP_UNLOCK(devvp, 0);
    284 		if (error) {
    285 			vrele(devvp);
    286 			return (error);
    287 		}
    288 	}
    289 	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
    290 		error = ffs_mountfs(devvp, mp, p);
    291 		if (!error) {
    292 			ump = VFSTOUFS(mp);
    293 			fs = ump->um_fs;
    294 			if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
    295 			    (MNT_SOFTDEP | MNT_ASYNC)) {
    296 				printf("%s fs uses soft updates, "
    297 				       "ignoring async mode\n",
    298 				    fs->fs_fsmnt);
    299 				mp->mnt_flag &= ~MNT_ASYNC;
    300 			}
    301 		}
    302 	}
    303 	else {
    304 		if (devvp != ump->um_devvp)
    305 			error = EINVAL;	/* needs translation */
    306 		else
    307 			vrele(devvp);
    308 	}
    309 	if (error) {
    310 		vrele(devvp);
    311 		return (error);
    312 	}
    313 	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
    314 	memset(fs->fs_fsmnt + size, 0, sizeof(fs->fs_fsmnt) - size);
    315 	memcpy(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN);
    316 	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
    317 	    &size);
    318 	memset(mp->mnt_stat.f_mntfromname + size, 0, MNAMELEN - size);
    319 	if (fs->fs_fmod != 0) {	/* XXX */
    320 		fs->fs_fmod = 0;
    321 		if (fs->fs_clean & FS_WASCLEAN)
    322 			fs->fs_time = time.tv_sec;
    323 		else
     324 			printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
    325 			    mp->mnt_stat.f_mntfromname, fs->fs_clean);
    326 		(void) ffs_cgupdate(ump, MNT_WAIT);
    327 	}
    328 	return (0);
    329 }
    330 
    331 /*
    332  * Reload all incore data for a filesystem (used after running fsck on
    333  * the root filesystem and finding things to fix). The filesystem must
    334  * be mounted read-only.
    335  *
    336  * Things to do to update the mount:
    337  *	1) invalidate all cached meta-data.
    338  *	2) re-read superblock from disk.
    339  *	3) re-read summary information from disk.
    340  *	4) invalidate all inactive vnodes.
    341  *	5) invalidate all cached file data.
    342  *	6) re-read inode data for all active vnodes.
    343  */
    344 int
    345 ffs_reload(mountp, cred, p)
    346 	register struct mount *mountp;
    347 	struct ucred *cred;
    348 	struct proc *p;
    349 {
    350 	register struct vnode *vp, *nvp, *devvp;
    351 	struct inode *ip;
    352 	struct buf *bp;
    353 	struct fs *fs, *newfs;
    354 	struct partinfo dpart;
    355 	int i, blks, size, error;
    356 	int32_t *lp;
    357 	caddr_t cp;
    358 
    359 	if ((mountp->mnt_flag & MNT_RDONLY) == 0)
    360 		return (EINVAL);
    361 	/*
    362 	 * Step 1: invalidate all cached meta-data.
    363 	 */
    364 	devvp = VFSTOUFS(mountp)->um_devvp;
    365 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    366 	error = vinvalbuf(devvp, 0, cred, p, 0, 0);
    367 	VOP_UNLOCK(devvp, 0);
    368 	if (error)
    369 		panic("ffs_reload: dirty1");
    370 	/*
    371 	 * Step 2: re-read superblock from disk.
    372 	 */
    373 	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
    374 		size = DEV_BSIZE;
    375 	else
    376 		size = dpart.disklab->d_secsize;
    377 	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, NOCRED, &bp);
    378 	if (error) {
    379 		brelse(bp);
    380 		return (error);
    381 	}
    382 	fs = VFSTOUFS(mountp)->um_fs;
    383 	newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
    384 	memcpy(newfs, bp->b_data, fs->fs_sbsize);
    385 #ifdef FFS_EI
    386 	if (VFSTOUFS(mountp)->um_flags & UFS_NEEDSWAP) {
    387 		ffs_sb_swap((struct fs*)bp->b_data, newfs, 0);
    388 		fs->fs_flags |= FS_SWAPPED;
    389 	}
    390 #endif
    391 	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
    392 	    newfs->fs_bsize < sizeof(struct fs)) {
    393 		brelse(bp);
    394 		free(newfs, M_UFSMNT);
    395 		return (EIO);		/* XXX needs translation */
    396 	}
    397 	/*
    398 	 * Copy pointer fields back into superblock before copying in	XXX
    399 	 * new superblock. These should really be in the ufsmount.	XXX
     400 	 * Note that important parameters (e.g. fs_ncg) are unchanged.
    401 	 */
    402 	memcpy(&newfs->fs_csp[0], &fs->fs_csp[0], sizeof(fs->fs_csp));
    403 	newfs->fs_maxcluster = fs->fs_maxcluster;
    404 	memcpy(fs, newfs, (u_int)fs->fs_sbsize);
    405 	if (fs->fs_sbsize < SBSIZE)
    406 		bp->b_flags |= B_INVAL;
    407 	brelse(bp);
    408 	free(newfs, M_UFSMNT);
    409 	mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
    410 	ffs_oldfscompat(fs);
    411 	ffs_statfs(mountp, &mountp->mnt_stat, p);
    412 	/*
    413 	 * Step 3: re-read summary information from disk.
    414 	 */
    415 	blks = howmany(fs->fs_cssize, fs->fs_fsize);
    416 	for (i = 0; i < blks; i += fs->fs_frag) {
    417 		size = fs->fs_bsize;
    418 		if (i + fs->fs_frag > blks)
    419 			size = (blks - i) * fs->fs_fsize;
    420 		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
    421 			      NOCRED, &bp);
    422 		if (error) {
    423 			brelse(bp);
    424 			return (error);
    425 		}
    426 #ifdef FFS_EI
    427 		if (UFS_FSNEEDSWAP(fs))
    428 			ffs_csum_swap((struct csum*)bp->b_data,
    429 			    (struct csum*)fs->fs_csp[fragstoblks(fs, i)], size);
    430 		else
    431 #endif
    432 			memcpy(fs->fs_csp[fragstoblks(fs, i)], bp->b_data,
    433 			    (size_t)size);
    434 		brelse(bp);
    435 	}
    436 	if ((fs->fs_flags & FS_DOSOFTDEP))
    437 		softdep_mount(devvp, mountp, fs, cred);
    438 	else
    439 		mountp->mnt_flag &= ~MNT_SOFTDEP;
    440 	/*
    441 	 * We no longer know anything about clusters per cylinder group.
    442 	 */
    443 	if (fs->fs_contigsumsize > 0) {
    444 		lp = fs->fs_maxcluster;
    445 		for (i = 0; i < fs->fs_ncg; i++)
    446 			*lp++ = fs->fs_contigsumsize;
    447 	}
    448 
    449 loop:
    450 	simple_lock(&mntvnode_slock);
    451 	for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
    452 		if (vp->v_mount != mountp) {
    453 			simple_unlock(&mntvnode_slock);
    454 			goto loop;
    455 		}
    456 		nvp = vp->v_mntvnodes.le_next;
    457 		/*
    458 		 * Step 4: invalidate all inactive vnodes.
    459 		 */
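         		/*
         		 * vrecycle() succeeds only for vnodes with no active
         		 * references, which disposes of the inactive vnodes
         		 * here; busy vnodes fall through to steps 5 and 6 below.
         		 */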
    460 		if (vrecycle(vp, &mntvnode_slock, p))
    461 			goto loop;
    462 		/*
    463 		 * Step 5: invalidate all cached file data.
    464 		 */
    465 		simple_lock(&vp->v_interlock);
    466 		simple_unlock(&mntvnode_slock);
    467 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
    468 			goto loop;
    469 		if (vinvalbuf(vp, 0, cred, p, 0, 0))
    470 			panic("ffs_reload: dirty2");
    471 		/*
    472 		 * Step 6: re-read inode data for all active vnodes.
    473 		 */
    474 		ip = VTOI(vp);
    475 		error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
    476 			      (int)fs->fs_bsize, NOCRED, &bp);
    477 		if (error) {
    478 			brelse(bp);
    479 			vput(vp);
    480 			return (error);
    481 		}
    482 		cp = (caddr_t)bp->b_data +
    483 		    (ino_to_fsbo(fs, ip->i_number) * DINODE_SIZE);
    484 #ifdef FFS_EI
    485 		if (UFS_FSNEEDSWAP(fs))
    486 			ffs_dinode_swap((struct dinode *)cp,
    487 			    &ip->i_din.ffs_din);
    488 		else
    489 #endif
    490 			memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
    491 		ip->i_ffs_effnlink = ip->i_ffs_nlink;
    492 		brelse(bp);
    493 		vput(vp);
    494 		simple_lock(&mntvnode_slock);
    495 	}
    496 	simple_unlock(&mntvnode_slock);
    497 	return (0);
    498 }
    499 
    500 /*
    501  * Common code for mount and mountroot
    502  */
    503 int
    504 ffs_mountfs(devvp, mp, p)
    505 	register struct vnode *devvp;
    506 	struct mount *mp;
    507 	struct proc *p;
    508 {
    509 	struct ufsmount *ump;
    510 	struct buf *bp;
    511 	struct fs *fs;
    512 	dev_t dev;
    513 	struct partinfo dpart;
    514 	caddr_t base, space;
    515 	int blks;
    516 	int error, i, size, ronly;
    517 #ifdef FFS_EI
    518 	int needswap;
    519 #endif
    520 	int32_t *lp;
    521 	struct ucred *cred;
    522 	extern struct vnode *rootvp;
    523 	u_int64_t maxfilesize;					/* XXX */
    524 	u_int32_t sbsize;
    525 
    526 	dev = devvp->v_rdev;
    527 	cred = p ? p->p_ucred : NOCRED;
    528 	/*
    529 	 * Disallow multiple mounts of the same device.
    530 	 * Disallow mounting of a device that is currently in use
    531 	 * (except for root, which might share swap device for miniroot).
    532 	 * Flush out any old buffers remaining from a previous use.
    533 	 */
    534 	if ((error = vfs_mountedon(devvp)) != 0)
    535 		return (error);
    536 	if (vcount(devvp) > 1 && devvp != rootvp)
    537 		return (EBUSY);
    538 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    539 	error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
    540 	VOP_UNLOCK(devvp, 0);
    541 	if (error)
    542 		return (error);
    543 
    544 	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
    545 	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
    546 	if (error)
    547 		return (error);
    548 	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
    549 		size = DEV_BSIZE;
    550 	else
    551 		size = dpart.disklab->d_secsize;
    552 
    553 	bp = NULL;
    554 	ump = NULL;
    555 	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, cred, &bp);
    556 	if (error)
    557 		goto out;
    558 
    559 	fs = (struct fs*)bp->b_data;
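         	/*
         	 * Check the superblock magic number.  With the endian-
         	 * independent FFS option (FFS_EI), a magic number that
         	 * matches only after byte-swapping marks a file system
         	 * written with the opposite byte order; its metadata is
         	 * then swapped on the fly (UFS_NEEDSWAP / FS_SWAPPED).
         	 */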
    560 	if (fs->fs_magic == FS_MAGIC) {
    561 		sbsize = fs->fs_sbsize;
    562 #ifdef FFS_EI
    563 		needswap = 0;
    564 	} else if (fs->fs_magic == bswap32(FS_MAGIC)) {
    565 		sbsize = bswap32(fs->fs_sbsize);
    566 		needswap = 1;
    567 #endif
    568 	} else {
    569 		error = EINVAL;
    570 		goto out;
    571 	}
    572 	if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs)) {
    573 		error = EINVAL;
    574 		goto out;
    575 	}
    576 
    577 	fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
    578 	memcpy(fs, bp->b_data, sbsize);
    579 #ifdef FFS_EI
    580 	if (needswap) {
    581 		ffs_sb_swap((struct fs*)bp->b_data, fs, 0);
    582 		fs->fs_flags |= FS_SWAPPED;
    583 	}
    584 #endif
    585 	ffs_oldfscompat(fs);
    586 
    587 	if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < sizeof(struct fs)) {
    588 		error = EINVAL;
    589 		goto out;
    590 	}
     591 	/* Make sure the cylinder group summary area is a reasonable size. */
    592 	if (fs->fs_cgsize == 0 || fs->fs_cpg == 0 ||
    593 	    fs->fs_ncg > fs->fs_ncyl / fs->fs_cpg + 1 ||
    594 	    fs->fs_cssize >
    595 	    fragroundup(fs, fs->fs_ncg * sizeof(struct csum))) {
    596 		error = EINVAL;		/* XXX needs translation */
    597 		goto out2;
    598 	}
    599 	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
    600 	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
    601 		error = EROFS;		/* XXX what should be returned? */
    602 		goto out2;
    603 	}
    604 
    605 	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
    606 	memset((caddr_t)ump, 0, sizeof *ump);
    607 	ump->um_fs = fs;
    608 	if (fs->fs_sbsize < SBSIZE)
    609 		bp->b_flags |= B_INVAL;
    610 	brelse(bp);
    611 	bp = NULL;
    612 	fs->fs_ronly = ronly;
    613 	if (ronly == 0) {
    614 		fs->fs_clean <<= 1;
    615 		fs->fs_fmod = 1;
    616 	}
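         	/*
         	 * Read in the cylinder group summary information.  The
         	 * buffer allocated here also leaves room, when clustering
         	 * is enabled (fs_contigsumsize > 0), for the per-cylinder-
         	 * group cluster summary array that fs_maxcluster is pointed
         	 * at below.
         	 */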
    617 	size = fs->fs_cssize;
    618 	blks = howmany(size, fs->fs_fsize);
    619 	if (fs->fs_contigsumsize > 0)
    620 		size += fs->fs_ncg * sizeof(int32_t);
    621 	base = space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
    622 	for (i = 0; i < blks; i += fs->fs_frag) {
    623 		size = fs->fs_bsize;
    624 		if (i + fs->fs_frag > blks)
    625 			size = (blks - i) * fs->fs_fsize;
    626 		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
    627 			      cred, &bp);
    628 		if (error) {
    629 			free(base, M_UFSMNT);
    630 			goto out2;
    631 		}
    632 #ifdef FFS_EI
    633 		if (needswap)
    634 			ffs_csum_swap((struct csum*)bp->b_data,
    635 				(struct csum*)space, size);
    636 		else
    637 #endif
    638 			memcpy(space, bp->b_data, (u_int)size);
    639 
    640 		fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space;
    641 		space += size;
    642 		brelse(bp);
    643 		bp = NULL;
    644 	}
    645 	if (fs->fs_contigsumsize > 0) {
    646 		fs->fs_maxcluster = lp = (int32_t *)space;
    647 		for (i = 0; i < fs->fs_ncg; i++)
    648 			*lp++ = fs->fs_contigsumsize;
    649 	}
    650 	mp->mnt_data = (qaddr_t)ump;
    651 	mp->mnt_stat.f_fsid.val[0] = (long)dev;
    652 	mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_FFS);
    653 	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
    654 	mp->mnt_flag |= MNT_LOCAL;
    655 #ifdef FFS_EI
    656 	if (needswap)
    657 		ump->um_flags |= UFS_NEEDSWAP;
    658 #endif
    659 	ump->um_mountp = mp;
    660 	ump->um_dev = dev;
    661 	ump->um_devvp = devvp;
    662 	ump->um_nindir = fs->fs_nindir;
    663 	ump->um_bptrtodb = fs->fs_fsbtodb;
    664 	ump->um_seqinc = fs->fs_frag;
    665 	for (i = 0; i < MAXQUOTAS; i++)
    666 		ump->um_quotas[i] = NULLVP;
    667 	devvp->v_specmountpoint = mp;
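         	/*
         	 * Save the on-disk fs_maxfilesize and clamp the in-core
         	 * copy to (2^31 * fs_bsize) - 1, presumably so that file
         	 * block numbers stay within the signed 32-bit ufs_daddr_t
         	 * range; ffs_sbupdate() writes the saved value back out.
         	 */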
    668 	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
    669 	maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;	/* XXX */
    670 	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
    671 		fs->fs_maxfilesize = maxfilesize;		/* XXX */
    672 	if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
    673 		error = softdep_mount(devvp, mp, fs, cred);
    674 		if (error) {
    675 			free(base, M_UFSMNT);
    676 			goto out;
    677 		}
    678 	}
    679 	return (0);
    680 out2:
    681 	free(fs, M_UFSMNT);
    682 out:
    683 	devvp->v_specmountpoint = NULL;
    684 	if (bp)
    685 		brelse(bp);
    686 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    687 	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
    688 	VOP_UNLOCK(devvp, 0);
    689 	if (ump) {
    690 		free(ump, M_UFSMNT);
    691 		mp->mnt_data = (qaddr_t)0;
    692 	}
    693 	return (error);
    694 }
    695 
    696 /*
    697  * Sanity checks for old file systems.
    698  *
    699  * XXX - goes away some day.
    700  */
    701 int
    702 ffs_oldfscompat(fs)
    703 	struct fs *fs;
    704 {
    705 	int i;
    706 
    707 	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
    708 	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
    709 	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
    710 		fs->fs_nrpos = 8;				/* XXX */
    711 	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
    712 		u_int64_t sizepb = fs->fs_bsize;		/* XXX */
    713 								/* XXX */
    714 		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
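         		/*
         		 * Each pass of the loop below adds the bytes addressable
         		 * through one more level of indirection: NINDIR(fs)^i
         		 * blocks of fs_bsize bytes at level i, on top of the
         		 * NDADDR direct blocks accounted for above.
         		 */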
    715 		for (i = 0; i < NIADDR; i++) {			/* XXX */
    716 			sizepb *= NINDIR(fs);			/* XXX */
    717 			fs->fs_maxfilesize += sizepb;		/* XXX */
    718 		}						/* XXX */
    719 		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
    720 		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
    721 	}							/* XXX */
    722 	return (0);
    723 }
    724 
    725 /*
    726  * unmount system call
    727  */
    728 int
    729 ffs_unmount(mp, mntflags, p)
    730 	struct mount *mp;
    731 	int mntflags;
    732 	struct proc *p;
    733 {
    734 	register struct ufsmount *ump;
    735 	register struct fs *fs;
    736 	int error, flags;
    737 
    738 	flags = 0;
    739 	if (mntflags & MNT_FORCE)
    740 		flags |= FORCECLOSE;
    741 	if (mp->mnt_flag & MNT_SOFTDEP) {
    742 		if ((error = softdep_flushfiles(mp, flags, p)) != 0)
    743 			return (error);
    744 	} else {
    745 		if ((error = ffs_flushfiles(mp, flags, p)) != 0)
    746 			return (error);
    747 	}
    748 	ump = VFSTOUFS(mp);
    749 	fs = ump->um_fs;
    750 	if (fs->fs_ronly == 0 &&
    751 	    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
    752 	    fs->fs_clean & FS_WASCLEAN) {
    753 		fs->fs_clean = FS_ISCLEAN;
    754 		(void) ffs_sbupdate(ump, MNT_WAIT);
    755 	}
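         	/*
         	 * Clear the mounted-on marker on the device vnode unless it
         	 * has been revoked (VBAD), in which case it presumably no
         	 * longer carries valid special-device information.
         	 */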
    756 	if (ump->um_devvp->v_type != VBAD)
    757 		ump->um_devvp->v_specmountpoint = NULL;
    758 	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
    759 	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
    760 		NOCRED, p);
    761 	vput(ump->um_devvp);
    762 	free(fs->fs_csp[0], M_UFSMNT);
    763 	free(fs, M_UFSMNT);
    764 	free(ump, M_UFSMNT);
    765 	mp->mnt_data = (qaddr_t)0;
    766 	mp->mnt_flag &= ~MNT_LOCAL;
    767 	return (error);
    768 }
    769 
    770 /*
    771  * Flush out all the files in a filesystem.
    772  */
    773 int
    774 ffs_flushfiles(mp, flags, p)
    775 	register struct mount *mp;
    776 	int flags;
    777 	struct proc *p;
    778 {
    779 	extern int doforce;
    780 	register struct ufsmount *ump;
    781 	int error;
    782 
    783 	if (!doforce)
    784 		flags &= ~FORCECLOSE;
    785 	ump = VFSTOUFS(mp);
    786 #ifdef QUOTA
    787 	if (mp->mnt_flag & MNT_QUOTA) {
    788 		int i;
    789 		if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
    790 			return (error);
    791 		for (i = 0; i < MAXQUOTAS; i++) {
    792 			if (ump->um_quotas[i] == NULLVP)
    793 				continue;
    794 			quotaoff(p, mp, i);
    795 		}
    796 		/*
    797 		 * Here we fall through to vflush again to ensure
    798 		 * that we have gotten rid of all the system vnodes.
    799 		 */
    800 	}
    801 #endif
    802 	/*
    803 	 * Flush all the files.
    804 	 */
    805 	error = vflush(mp, NULLVP, flags);
    806 	if (error)
    807 		return (error);
    808 	/*
    809 	 * Flush filesystem metadata.
    810 	 */
    811 	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
    812 	error = VOP_FSYNC(ump->um_devvp, p->p_ucred, FSYNC_WAIT, p);
    813 	VOP_UNLOCK(ump->um_devvp, 0);
    814 	return (error);
    815 }
    816 
    817 /*
    818  * Get file system statistics.
    819  */
    820 int
    821 ffs_statfs(mp, sbp, p)
    822 	struct mount *mp;
    823 	register struct statfs *sbp;
    824 	struct proc *p;
    825 {
    826 	register struct ufsmount *ump;
    827 	register struct fs *fs;
    828 
    829 	ump = VFSTOUFS(mp);
    830 	fs = ump->um_fs;
    831 	if (fs->fs_magic != FS_MAGIC)
    832 		panic("ffs_statfs");
    833 #ifdef COMPAT_09
    834 	sbp->f_type = 1;
    835 #else
    836 	sbp->f_type = 0;
    837 #endif
    838 	sbp->f_bsize = fs->fs_fsize;
    839 	sbp->f_iosize = fs->fs_bsize;
    840 	sbp->f_blocks = fs->fs_dsize;
    841 	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
    842 		fs->fs_cstotal.cs_nffree;
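         	/*
         	 * Blocks available to unprivileged users: total free space
         	 * minus the fs_minfree reserve, i.e. roughly
         	 * f_bfree - (fs_dsize * fs_minfree / 100), expressed below
         	 * as dsize * (100 - minfree) / 100 - (dsize - bfree).
         	 */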
    843 	sbp->f_bavail = (long) (((u_int64_t) fs->fs_dsize * (u_int64_t)
    844 	    (100 - fs->fs_minfree) / (u_int64_t) 100) -
    845 	    (u_int64_t) (fs->fs_dsize - sbp->f_bfree));
    846 	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
    847 	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
    848 	if (sbp != &mp->mnt_stat) {
    849 		memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
    850 		memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
    851 	}
    852 	strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
    853 	return (0);
    854 }
    855 
    856 /*
    857  * Go through the disk queues to initiate sandbagged IO;
    858  * go through the inodes to write those that have been modified;
    859  * initiate the writing of the super block if it has been modified.
    860  *
    861  * Note: we are always called with the filesystem marked `MPBUSY'.
    862  */
    863 int
    864 ffs_sync(mp, waitfor, cred, p)
    865 	struct mount *mp;
    866 	int waitfor;
    867 	struct ucred *cred;
    868 	struct proc *p;
    869 {
    870 	struct vnode *vp, *nvp;
    871 	struct inode *ip;
    872 	struct ufsmount *ump = VFSTOUFS(mp);
    873 	struct fs *fs;
    874 	int error, allerror = 0;
    875 
    876 	fs = ump->um_fs;
    877 	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
    878 		printf("fs = %s\n", fs->fs_fsmnt);
    879 		panic("update: rofs mod");
    880 	}
    881 	/*
    882 	 * Write back each (modified) inode.
    883 	 */
    884 	simple_lock(&mntvnode_slock);
    885 loop:
    886 	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
    887 		/*
    888 		 * If the vnode that we are about to sync is no longer
    889 		 * associated with this mount point, start over.
    890 		 */
    891 		if (vp->v_mount != mp)
    892 			goto loop;
    893 		simple_lock(&vp->v_interlock);
    894 		nvp = vp->v_mntvnodes.le_next;
    895 		ip = VTOI(vp);
    896 		if (vp->v_type == VNON ||
    897 		    ((ip->i_flag &
    898 		      (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
    899 		     vp->v_dirtyblkhd.lh_first == NULL))
    900 		{
    901 			simple_unlock(&vp->v_interlock);
    902 			continue;
    903 		}
    904 		simple_unlock(&mntvnode_slock);
    905 		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
    906 		if (error) {
    907 			simple_lock(&mntvnode_slock);
    908 			if (error == ENOENT)
    909 				goto loop;
    910 			continue;
    911 		}
    912 		if ((error = VOP_FSYNC(vp, cred,
    913 		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, p)) != 0)
    914 			allerror = error;
    915 		vput(vp);
    916 		simple_lock(&mntvnode_slock);
    917 	}
    918 	simple_unlock(&mntvnode_slock);
    919 	/*
    920 	 * Force stale file system control information to be flushed.
    921 	 */
    922 	if (waitfor != MNT_LAZY) {
    923 		if (ump->um_mountp->mnt_flag & MNT_SOFTDEP)
    924 			waitfor = MNT_NOWAIT;
    925 		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
    926 		if ((error = VOP_FSYNC(ump->um_devvp, cred,
    927 		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, p)) != 0)
    928 			allerror = error;
    929 		VOP_UNLOCK(ump->um_devvp, 0);
    930 	}
    931 #ifdef QUOTA
    932 	qsync(mp);
    933 #endif
    934 	/*
    935 	 * Write back modified superblock.
    936 	 */
    937 	if (fs->fs_fmod != 0) {
    938 		fs->fs_fmod = 0;
    939 		fs->fs_time = time.tv_sec;
    940 		allerror = ffs_cgupdate(ump, waitfor);
    941 	}
    942 	return (allerror);
    943 }
    944 
    945 /*
     946  * Look up an FFS dinode number to find its incore vnode, otherwise read it
    947  * in from disk.  If it is in core, wait for the lock bit to clear, then
    948  * return the inode locked.  Detection and handling of mount points must be
    949  * done by the calling routine.
    950  */
    951 int
    952 ffs_vget(mp, ino, vpp)
    953 	struct mount *mp;
    954 	ino_t ino;
    955 	struct vnode **vpp;
    956 {
    957 	struct fs *fs;
    958 	struct inode *ip;
    959 	struct ufsmount *ump;
    960 	struct buf *bp;
    961 	struct vnode *vp;
    962 	dev_t dev;
    963 	int error;
    964 	caddr_t cp;
    965 
    966 	ump = VFSTOUFS(mp);
    967 	dev = ump->um_dev;
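         	/*
         	 * Look for the inode in the hash table first.  If it is not
         	 * there, take ufs_hashlock to serialize creation of the new
         	 * vnode; LK_SLEEPFAIL makes lockmgr() fail after sleeping,
         	 * so the hash is rechecked whenever we had to wait.
         	 */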
    968 	do {
    969 		if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
    970 			return (0);
    971 	} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));
    972 
    973 	/* Allocate a new vnode/inode. */
    974 	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
    975 		*vpp = NULL;
    976 		lockmgr(&ufs_hashlock, LK_RELEASE, 0);
    977 		return (error);
    978 	}
    979 	/*
    980 	 * XXX MFS ends up here, too, to allocate an inode.  Should we
    981 	 * XXX create another pool for MFS inodes?
    982 	 */
    983 	ip = pool_get(&ffs_inode_pool, PR_WAITOK);
    984 	memset((caddr_t)ip, 0, sizeof(struct inode));
    985 	vp->v_data = ip;
    986 	ip->i_vnode = vp;
    987 	ip->i_fs = fs = ump->um_fs;
    988 	ip->i_dev = dev;
    989 	ip->i_number = ino;
    990 #ifdef QUOTA
    991 	{
    992 		int i;
    993 
    994 		for (i = 0; i < MAXQUOTAS; i++)
    995 			ip->i_dquot[i] = NODQUOT;
    996 	}
    997 #endif
    998 	/*
    999 	 * Put it onto its hash chain and lock it so that other requests for
   1000 	 * this inode will block if they arrive while we are sleeping waiting
   1001 	 * for old data structures to be purged or for the contents of the
   1002 	 * disk portion of this inode to be read.
   1003 	 */
   1004 	ufs_ihashins(ip);
   1005 	lockmgr(&ufs_hashlock, LK_RELEASE, 0);
   1006 
   1007 	/* Read in the disk contents for the inode, copy into the inode. */
   1008 	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
   1009 		      (int)fs->fs_bsize, NOCRED, &bp);
   1010 	if (error) {
   1011 		/*
   1012 		 * The inode does not contain anything useful, so it would
   1013 		 * be misleading to leave it on its hash chain. With mode
   1014 		 * still zero, it will be unlinked and returned to the free
   1015 		 * list by vput().
   1016 		 */
   1017 		vput(vp);
   1018 		brelse(bp);
   1019 		*vpp = NULL;
   1020 		return (error);
   1021 	}
   1022 	cp = (caddr_t)bp->b_data + (ino_to_fsbo(fs, ino) * DINODE_SIZE);
   1023 #ifdef FFS_EI
   1024 	if (UFS_FSNEEDSWAP(fs))
   1025 		ffs_dinode_swap((struct dinode *)cp, &ip->i_din.ffs_din);
   1026 	else
   1027 #endif
   1028 		memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
   1029 	if (DOINGSOFTDEP(vp))
   1030 		softdep_load_inodeblock(ip);
   1031 	else
   1032 		ip->i_ffs_effnlink = ip->i_ffs_nlink;
   1033 	brelse(bp);
   1034 
   1035 	/*
   1036 	 * Initialize the vnode from the inode, check for aliases.
   1037 	 * Note that the underlying vnode may have changed.
   1038 	 */
   1039 	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
   1040 	if (error) {
   1041 		vput(vp);
   1042 		*vpp = NULL;
   1043 		return (error);
   1044 	}
   1045 	/*
   1046 	 * Finish inode initialization now that aliasing has been resolved.
   1047 	 */
   1048 	ip->i_devvp = ump->um_devvp;
   1049 	VREF(ip->i_devvp);
   1050 	/*
   1051 	 * Ensure that uid and gid are correct. This is a temporary
   1052 	 * fix until fsck has been changed to do the update.
   1053 	 */
   1054 	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
   1055 		ip->i_ffs_uid = ip->i_din.ffs_din.di_ouid;	/* XXX */
   1056 		ip->i_ffs_gid = ip->i_din.ffs_din.di_ogid;	/* XXX */
   1057 	}							/* XXX */
   1058 
   1059 	*vpp = vp;
   1060 	return (0);
   1061 }
   1062 
   1063 /*
   1064  * File handle to vnode
   1065  *
   1066  * Have to be really careful about stale file handles:
   1067  * - check that the inode number is valid
   1068  * - call ffs_vget() to get the locked inode
   1069  * - check for an unallocated inode (i_mode == 0)
   1070  * - check that the given client host has export rights and return
    1071  *   those rights via exflagsp and credanonp
   1072  */
   1073 int
   1074 ffs_fhtovp(mp, fhp, vpp)
   1075 	register struct mount *mp;
   1076 	struct fid *fhp;
   1077 	struct vnode **vpp;
   1078 {
   1079 	register struct ufid *ufhp;
   1080 	struct fs *fs;
   1081 
   1082 	ufhp = (struct ufid *)fhp;
   1083 	fs = VFSTOUFS(mp)->um_fs;
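         	/*
         	 * Inode numbers handed out by the file system run from
         	 * ROOTINO up to fs_ncg * fs_ipg - 1; anything outside that
         	 * range can only come from a stale or corrupted handle.
         	 */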
   1084 	if (ufhp->ufid_ino < ROOTINO ||
   1085 	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
   1086 		return (ESTALE);
   1087 	return (ufs_fhtovp(mp, ufhp, vpp));
   1088 }
   1089 
   1090 /*
   1091  * Vnode pointer to File handle
   1092  */
   1093 /* ARGSUSED */
   1094 int
   1095 ffs_vptofh(vp, fhp)
   1096 	struct vnode *vp;
   1097 	struct fid *fhp;
   1098 {
   1099 	register struct inode *ip;
   1100 	register struct ufid *ufhp;
   1101 
   1102 	ip = VTOI(vp);
   1103 	ufhp = (struct ufid *)fhp;
   1104 	ufhp->ufid_len = sizeof(struct ufid);
   1105 	ufhp->ufid_ino = ip->i_number;
   1106 	ufhp->ufid_gen = ip->i_ffs_gen;
   1107 	return (0);
   1108 }
   1109 
   1110 void
   1111 ffs_init()
   1112 {
   1113 	if (ffs_initcount++ > 0)
   1114 		return;
   1115 
   1116 	softdep_initialize();
   1117 	ufs_init();
   1118 
   1119 	pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
   1120 	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_FFSNODE);
   1121 }
   1122 
   1123 void
   1124 ffs_done()
   1125 {
   1126 	if (--ffs_initcount > 0)
   1127 		return;
   1128 
   1129 	/* XXX softdep cleanup ? */
   1130 	ufs_done();
   1131 	pool_destroy(&ffs_inode_pool);
   1132 }
   1133 
   1134 int
   1135 ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
   1136 	int *name;
   1137 	u_int namelen;
   1138 	void *oldp;
   1139 	size_t *oldlenp;
   1140 	void *newp;
   1141 	size_t newlen;
   1142 	struct proc *p;
   1143 {
   1144 	extern int doclusterread, doclusterwrite, doreallocblks, doasyncfree;
   1145 
   1146 	/* all sysctl names at this level are terminal */
   1147 	if (namelen != 1)
   1148 		return (ENOTDIR);		/* overloaded */
   1149 
   1150 	switch (name[0]) {
   1151 	case FFS_CLUSTERREAD:
   1152 		return (sysctl_int(oldp, oldlenp, newp, newlen,
   1153 		    &doclusterread));
   1154 	case FFS_CLUSTERWRITE:
   1155 		return (sysctl_int(oldp, oldlenp, newp, newlen,
   1156 		    &doclusterwrite));
   1157 	case FFS_REALLOCBLKS:
   1158 		return (sysctl_int(oldp, oldlenp, newp, newlen,
   1159 		    &doreallocblks));
   1160 	case FFS_ASYNCFREE:
   1161 		return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
   1162 	default:
   1163 		return (EOPNOTSUPP);
   1164 	}
   1165 	/* NOTREACHED */
   1166 }
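
         /*
          * These variables are typically reachable from userland via
          * sysctl(8) as vfs.ffs.doclusterread, vfs.ffs.doclusterwrite,
          * vfs.ffs.doreallocblks and vfs.ffs.doasyncfree (assuming the
          * standard FFS_NAMES mapping in ffs_extern.h).
          */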
   1167 
   1168 /*
   1169  * Write a superblock and associated information back to disk.
   1170  */
   1171 int
   1172 ffs_sbupdate(mp, waitfor)
   1173 	struct ufsmount *mp;
   1174 	int waitfor;
   1175 {
   1176 	register struct fs *fs = mp->um_fs;
   1177 	register struct buf *bp;
   1178 	int i, error = 0;
   1179 	int32_t saved_nrpos = fs->fs_nrpos;
   1180 	int64_t saved_qbmask = fs->fs_qbmask;
   1181 	int64_t saved_qfmask = fs->fs_qfmask;
   1182 	u_int64_t saved_maxfilesize = fs->fs_maxfilesize;
   1183 	u_int8_t saveflag;
   1184 
   1185 	/* Restore compatibility to old file systems.		   XXX */
   1186 	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
   1187 		fs->fs_nrpos = -1;		/* XXX */
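         	/*
         	 * Pre-FS_44INODEFMT superblocks evidently kept the five
         	 * 32-bit words starting at fs_qbmask in a different on-disk
         	 * order; rotate them into that order for the copy written
         	 * below (the in-core values are restored afterwards).
         	 */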
   1188 	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
   1189 		int32_t *lp, tmp;				/* XXX */
   1190 								/* XXX */
   1191 		lp = (int32_t *)&fs->fs_qbmask;	/* XXX nuke qfmask too */
   1192 		tmp = lp[4];					/* XXX */
   1193 		for (i = 4; i > 0; i--)				/* XXX */
   1194 			lp[i] = lp[i-1];			/* XXX */
   1195 		lp[0] = tmp;					/* XXX */
   1196 	}							/* XXX */
   1197 	fs->fs_maxfilesize = mp->um_savedmaxfilesize;	/* XXX */
   1198 
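         	/*
         	 * SBOFF >> (fs_fshift - fs_fsbtodb) is the superblock's
         	 * device block number: with fs_fshift == log2(fs_fsize) and
         	 * fs_fsbtodb == log2(fs_fsize / DEV_BSIZE), the shift
         	 * reduces to SBOFF / DEV_BSIZE.
         	 */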
   1199 	bp = getblk(mp->um_devvp, SBOFF >> (fs->fs_fshift - fs->fs_fsbtodb),
   1200 	    (int)fs->fs_sbsize, 0, 0);
   1201 	saveflag = fs->fs_flags & FS_INTERNAL;
   1202 	fs->fs_flags &= ~FS_INTERNAL;
   1203 	memcpy(bp->b_data, fs, fs->fs_sbsize);
   1204 #ifdef FFS_EI
   1205 	if (mp->um_flags & UFS_NEEDSWAP)
   1206 		ffs_sb_swap(fs, (struct fs*)bp->b_data, 1);
   1207 #endif
   1208 
   1209 	fs->fs_flags |= saveflag;
   1210 	fs->fs_nrpos = saved_nrpos; /* XXX */
   1211 	fs->fs_qbmask = saved_qbmask; /* XXX */
   1212 	fs->fs_qfmask = saved_qfmask; /* XXX */
   1213 	fs->fs_maxfilesize = saved_maxfilesize; /* XXX */
   1214 
   1215 	if (waitfor == MNT_WAIT)
   1216 		error = bwrite(bp);
   1217 	else
   1218 		bawrite(bp);
   1219 	return (error);
   1220 }
   1221 
   1222 int
   1223 ffs_cgupdate(mp, waitfor)
   1224 	struct ufsmount *mp;
   1225 	int waitfor;
   1226 {
   1227 	register struct fs *fs = mp->um_fs;
   1228 	register struct buf *bp;
   1229 	int blks;
   1230 	caddr_t space;
   1231 	int i, size, error = 0, allerror = 0;
   1232 
   1233 	allerror = ffs_sbupdate(mp, waitfor);
   1234 	blks = howmany(fs->fs_cssize, fs->fs_fsize);
   1235 	space = (caddr_t)fs->fs_csp[0];
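         	/*
         	 * Write the summary information back a full block (fs_frag
         	 * fragments) at a time; the final transfer may be shorter.
         	 */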
   1236 	for (i = 0; i < blks; i += fs->fs_frag) {
   1237 		size = fs->fs_bsize;
   1238 		if (i + fs->fs_frag > blks)
   1239 			size = (blks - i) * fs->fs_fsize;
   1240 		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
   1241 		    size, 0, 0);
   1242 #ifdef FFS_EI
   1243 		if (mp->um_flags & UFS_NEEDSWAP)
   1244 			ffs_csum_swap((struct csum*)space,
   1245 			    (struct csum*)bp->b_data, size);
   1246 		else
   1247 #endif
   1248 			memcpy(bp->b_data, space, (u_int)size);
   1249 		space += size;
   1250 		if (waitfor == MNT_WAIT)
   1251 			error = bwrite(bp);
   1252 		else
   1253 			bawrite(bp);
   1254 	}
   1255 	if (!allerror && error)
   1256 		allerror = error;
   1257 	return (allerror);
   1258 }
   1259