      1 /*	$NetBSD: ffs_vfsops.c,v 1.80.2.4 2001/09/21 22:37:05 nathanw Exp $	*/
      2 
      3 /*
      4  * Copyright (c) 1989, 1991, 1993, 1994
      5  *	The Regents of the University of California.  All rights reserved.
      6  *
      7  * Redistribution and use in source and binary forms, with or without
      8  * modification, are permitted provided that the following conditions
      9  * are met:
     10  * 1. Redistributions of source code must retain the above copyright
     11  *    notice, this list of conditions and the following disclaimer.
     12  * 2. Redistributions in binary form must reproduce the above copyright
     13  *    notice, this list of conditions and the following disclaimer in the
     14  *    documentation and/or other materials provided with the distribution.
     15  * 3. All advertising materials mentioning features or use of this software
     16  *    must display the following acknowledgement:
     17  *	This product includes software developed by the University of
     18  *	California, Berkeley and its contributors.
     19  * 4. Neither the name of the University nor the names of its contributors
     20  *    may be used to endorse or promote products derived from this software
     21  *    without specific prior written permission.
     22  *
     23  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     33  * SUCH DAMAGE.
     34  *
     35  *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
     36  */
     37 
     38 #if defined(_KERNEL_OPT)
     39 #include "opt_ffs.h"
     40 #include "opt_quota.h"
     41 #include "opt_compat_netbsd.h"
     42 #include "opt_softdep.h"
     43 #endif
     44 
     45 #include <sys/param.h>
     46 #include <sys/systm.h>
     47 #include <sys/namei.h>
     48 #include <sys/lwp.h>
     49 #include <sys/proc.h>
     50 #include <sys/kernel.h>
     51 #include <sys/vnode.h>
     52 #include <sys/socket.h>
     53 #include <sys/mount.h>
     54 #include <sys/buf.h>
     55 #include <sys/device.h>
     56 #include <sys/mbuf.h>
     57 #include <sys/file.h>
     58 #include <sys/disklabel.h>
     59 #include <sys/ioctl.h>
     60 #include <sys/errno.h>
     61 #include <sys/malloc.h>
     62 #include <sys/pool.h>
     63 #include <sys/lock.h>
     64 #include <sys/sysctl.h>
     65 
     66 #include <miscfs/specfs/specdev.h>
     67 
     68 #include <ufs/ufs/quota.h>
     69 #include <ufs/ufs/ufsmount.h>
     70 #include <ufs/ufs/inode.h>
     71 #include <ufs/ufs/dir.h>
     72 #include <ufs/ufs/ufs_extern.h>
     73 #include <ufs/ufs/ufs_bswap.h>
     74 
     75 #include <ufs/ffs/fs.h>
     76 #include <ufs/ffs/ffs_extern.h>
     77 
     78 /* how many times ffs_init() was called */
     79 int ffs_initcount = 0;
     80 
     81 extern struct lock ufs_hashlock;
     82 
     83 extern struct vnodeopv_desc ffs_vnodeop_opv_desc;
     84 extern struct vnodeopv_desc ffs_specop_opv_desc;
     85 extern struct vnodeopv_desc ffs_fifoop_opv_desc;
     86 
     87 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
     88 	&ffs_vnodeop_opv_desc,
     89 	&ffs_specop_opv_desc,
     90 	&ffs_fifoop_opv_desc,
     91 	NULL,
     92 };
     93 
     94 struct vfsops ffs_vfsops = {
     95 	MOUNT_FFS,
     96 	ffs_mount,
     97 	ufs_start,
     98 	ffs_unmount,
     99 	ufs_root,
    100 	ufs_quotactl,
    101 	ffs_statfs,
    102 	ffs_sync,
    103 	ffs_vget,
    104 	ffs_fhtovp,
    105 	ffs_vptofh,
    106 	ffs_init,
    107 	ffs_reinit,
    108 	ffs_done,
    109 	ffs_sysctl,
    110 	ffs_mountroot,
    111 	ufs_check_export,
    112 	ffs_vnodeopv_descs,
    113 };
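
/*
 * Note: the initializers in ffs_vfsops above (and in ffs_genfsops below)
 * are positional, so they must stay in the same order as the members of
 * struct vfsops and struct genfs_ops respectively (see <sys/mount.h>).
 */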
    114 
    115 struct genfs_ops ffs_genfsops = {
    116 	ffs_gop_size,
    117 	ffs_gop_alloc,
    118 	genfs_gop_write,
    119 };
    120 
    121 struct pool ffs_inode_pool;
    122 
    123 /*
    124  * Called by main() when ffs is going to be mounted as root.
    125  */
    126 
    127 int
    128 ffs_mountroot()
    129 {
    130 	struct fs *fs;
    131 	struct mount *mp;
    132 	struct proc *p = curproc->l_proc;	/* XXX */
    133 	struct ufsmount *ump;
    134 	int error;
    135 
    136 	if (root_device->dv_class != DV_DISK)
    137 		return (ENODEV);
    138 
    139 	/*
    140 	 * Get vnodes for rootdev.
    141 	 */
    142 	if (bdevvp(rootdev, &rootvp))
    143 		panic("ffs_mountroot: can't setup bdevvp's");
    144 
    145 	if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
    146 		vrele(rootvp);
    147 		return (error);
    148 	}
    149 	if ((error = ffs_mountfs(rootvp, mp, p)) != 0) {
    150 		mp->mnt_op->vfs_refcount--;
    151 		vfs_unbusy(mp);
    152 		free(mp, M_MOUNT);
    153 		vrele(rootvp);
    154 		return (error);
    155 	}
    156 	simple_lock(&mountlist_slock);
    157 	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
    158 	simple_unlock(&mountlist_slock);
    159 	ump = VFSTOUFS(mp);
    160 	fs = ump->um_fs;
    161 	memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
    162 	(void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
    163 	(void)ffs_statfs(mp, &mp->mnt_stat, p);
    164 	vfs_unbusy(mp);
    165 	inittodr(fs->fs_time);
    166 	return (0);
    167 }
    168 
    169 /*
    170  * VFS Operations.
    171  *
    172  * mount system call
    173  */
    174 int
    175 ffs_mount(mp, path, data, ndp, p)
    176 	struct mount *mp;
    177 	const char *path;
    178 	void *data;
    179 	struct nameidata *ndp;
    180 	struct proc *p;
    181 {
    182 	struct vnode *devvp;
    183 	struct ufs_args args;
    184 	struct ufsmount *ump = NULL;
    185 	struct fs *fs;
    186 	size_t size;
    187 	int error, flags;
    188 	mode_t accessmode;
    189 
    190 	error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args));
    191 	if (error)
    192 		return (error);
    193 
    194 #if !defined(SOFTDEP)
    195 	mp->mnt_flag &= ~MNT_SOFTDEP;
    196 #endif
    197 
    198 	/*
    199 	 * If updating, check whether changing from read-only to
    200 	 * read/write; if there is no device name, that's all we do.
    201 	 */
    202 	if (mp->mnt_flag & MNT_UPDATE) {
    203 		ump = VFSTOUFS(mp);
    204 		fs = ump->um_fs;
    205 		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
    206 			flags = WRITECLOSE;
    207 			if (mp->mnt_flag & MNT_FORCE)
    208 				flags |= FORCECLOSE;
    209 			if (mp->mnt_flag & MNT_SOFTDEP)
    210 				error = softdep_flushfiles(mp, flags, p);
    211 			else
    212 				error = ffs_flushfiles(mp, flags, p);
    213 			if (error == 0 &&
    214 			    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
    215 			    fs->fs_clean & FS_WASCLEAN) {
    216 				if (mp->mnt_flag & MNT_SOFTDEP)
    217 					fs->fs_flags &= ~FS_DOSOFTDEP;
    218 				fs->fs_clean = FS_ISCLEAN;
    219 				(void) ffs_sbupdate(ump, MNT_WAIT);
    220 			}
    221 			if (error)
    222 				return (error);
    223 			fs->fs_ronly = 1;
    224 			fs->fs_fmod = 0;
    225 		}
    226 
    227 		/*
    228 		 * Flush soft dependencies if they are being disabled via an
    229 		 * update mount. This may leave some items to be processed,
    230 		 * so don't do this yet XXX.
    231 		 */
    232 		if ((fs->fs_flags & FS_DOSOFTDEP) &&
    233 		    !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
    234 #ifdef notyet
    235 			flags = WRITECLOSE;
    236 			if (mp->mnt_flag & MNT_FORCE)
    237 				flags |= FORCECLOSE;
    238 			error = softdep_flushfiles(mp, flags, p);
    239 			if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
    240 				fs->fs_flags &= ~FS_DOSOFTDEP;
    241 				(void) ffs_sbupdate(ump, MNT_WAIT);
    242 #elif defined(SOFTDEP)
    243 			mp->mnt_flag |= MNT_SOFTDEP;
    244 #endif
    245 		}
    246 
    247 		/*
    248 		 * When upgrading to a softdep mount, we must first flush
    249 		 * all vnodes. (not done yet -- see above)
    250 		 */
    251 		if (!(fs->fs_flags & FS_DOSOFTDEP) &&
    252 		    (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
    253 #ifdef notyet
    254 			flags = WRITECLOSE;
    255 			if (mp->mnt_flag & MNT_FORCE)
    256 				flags |= FORCECLOSE;
    257 			error = ffs_flushfiles(mp, flags, p);
    258 #else
    259 			mp->mnt_flag &= ~MNT_SOFTDEP;
    260 #endif
    261 		}
    262 
    263 		if (mp->mnt_flag & MNT_RELOAD) {
    264 			error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
    265 			if (error)
    266 				return (error);
    267 		}
    268 		if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
    269 			/*
    270 			 * If upgrading to read-write as non-root, verify that
    271 			 * the user has the necessary permissions on the device.
    272 			 */
    273 			devvp = ump->um_devvp;
    274 			if (p->p_ucred->cr_uid != 0) {
    275 				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    276 				error = VOP_ACCESS(devvp, VREAD | VWRITE,
    277 						   p->p_ucred, p);
    278 				VOP_UNLOCK(devvp, 0);
    279 				if (error)
    280 					return (error);
    281 			}
    282 			fs->fs_ronly = 0;
    283 			fs->fs_clean <<= 1;
    284 			fs->fs_fmod = 1;
    285 			if ((fs->fs_flags & FS_DOSOFTDEP)) {
    286 				error = softdep_mount(devvp, mp, fs,
    287 				    p->p_ucred);
    288 				if (error)
    289 					return (error);
    290 			}
    291 		}
    292 		if (args.fspec == 0) {
    293 			/*
    294 			 * Process export requests.
    295 			 */
    296 			return (vfs_export(mp, &ump->um_export, &args.export));
    297 		}
    298 		if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
    299 		    (MNT_SOFTDEP | MNT_ASYNC)) {
    300 			printf("%s fs uses soft updates, ignoring async mode\n",
    301 			    fs->fs_fsmnt);
    302 			mp->mnt_flag &= ~MNT_ASYNC;
    303 		}
    304 	}
    305 	/*
    306 	 * Not an update, or updating the name: look up the name
    307 	 * and verify that it refers to a sensible block device.
    308 	 */
    309 	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
    310 	if ((error = namei(ndp)) != 0)
    311 		return (error);
    312 	devvp = ndp->ni_vp;
    313 
    314 	if (devvp->v_type != VBLK) {
    315 		vrele(devvp);
    316 		return (ENOTBLK);
    317 	}
    318 	if (major(devvp->v_rdev) >= nblkdev) {
    319 		vrele(devvp);
    320 		return (ENXIO);
    321 	}
    322 	/*
    323 	 * If mounting as non-root, verify that the user has the
    324 	 * necessary permissions on the device.
    325 	 */
    326 	if (p->p_ucred->cr_uid != 0) {
    327 		accessmode = VREAD;
    328 		if ((mp->mnt_flag & MNT_RDONLY) == 0)
    329 			accessmode |= VWRITE;
    330 		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    331 		error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
    332 		VOP_UNLOCK(devvp, 0);
    333 		if (error) {
    334 			vrele(devvp);
    335 			return (error);
    336 		}
    337 	}
    338 	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
    339 		error = ffs_mountfs(devvp, mp, p);
    340 		if (!error) {
    341 			ump = VFSTOUFS(mp);
    342 			fs = ump->um_fs;
    343 			if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
    344 			    (MNT_SOFTDEP | MNT_ASYNC)) {
    345 				printf("%s fs uses soft updates, "
    346 				       "ignoring async mode\n",
    347 				    fs->fs_fsmnt);
    348 				mp->mnt_flag &= ~MNT_ASYNC;
    349 			}
    350 		}
    351 	}
    352 	else {
    353 		if (devvp != ump->um_devvp)
    354 			error = EINVAL;	/* needs translation */
    355 		else
    356 			vrele(devvp);
    357 	}
    358 	if (error) {
    359 		vrele(devvp);
    360 		return (error);
    361 	}
    362 	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
    363 	memset(fs->fs_fsmnt + size, 0, sizeof(fs->fs_fsmnt) - size);
    364 	memcpy(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN);
    365 	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
    366 	    &size);
    367 	memset(mp->mnt_stat.f_mntfromname + size, 0, MNAMELEN - size);
    368 	if (mp->mnt_flag & MNT_SOFTDEP)
    369 		fs->fs_flags |= FS_DOSOFTDEP;
    370 	else
    371 		fs->fs_flags &= ~FS_DOSOFTDEP;
    372 	if (fs->fs_fmod != 0) {	/* XXX */
    373 		fs->fs_fmod = 0;
    374 		if (fs->fs_clean & FS_WASCLEAN)
    375 			fs->fs_time = time.tv_sec;
    376 		else
    377 			printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
    378 			    mp->mnt_stat.f_mntfromname, fs->fs_clean);
    379 		(void) ffs_cgupdate(ump, MNT_WAIT);
    380 	}
    381 	return (0);
    382 }
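
/*
 * Usage sketch: ffs_mount() above is reached through mount(2) with a
 * struct ufs_args whose fspec names the block device.  A minimal caller
 * looks roughly like the following (the device path and mount point are
 * only illustrative):
 *
 *	struct ufs_args args;
 *
 *	memset(&args, 0, sizeof(args));
 *	args.fspec = "/dev/sd0a";
 *	if (mount(MOUNT_FFS, "/mnt", 0, &args) == -1)
 *		err(1, "mount");
 */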
    383 
    384 /*
    385  * Reload all incore data for a filesystem (used after running fsck on
    386  * the root filesystem and finding things to fix). The filesystem must
    387  * be mounted read-only.
    388  *
    389  * Things to do to update the mount:
    390  *	1) invalidate all cached meta-data.
    391  *	2) re-read superblock from disk.
    392  *	3) re-read summary information from disk.
    393  *	4) invalidate all inactive vnodes.
    394  *	5) invalidate all cached file data.
    395  *	6) re-read inode data for all active vnodes.
    396  */
    397 int
    398 ffs_reload(mountp, cred, p)
    399 	struct mount *mountp;
    400 	struct ucred *cred;
    401 	struct proc *p;
    402 {
    403 	struct vnode *vp, *nvp, *devvp;
    404 	struct inode *ip;
    405 	void *space;
    406 	struct buf *bp;
    407 	struct fs *fs, *newfs;
    408 	struct partinfo dpart;
    409 	int i, blks, size, error;
    410 	int32_t *lp;
    411 	caddr_t cp;
    412 
    413 	if ((mountp->mnt_flag & MNT_RDONLY) == 0)
    414 		return (EINVAL);
    415 	/*
    416 	 * Step 1: invalidate all cached meta-data.
    417 	 */
    418 	devvp = VFSTOUFS(mountp)->um_devvp;
    419 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    420 	error = vinvalbuf(devvp, 0, cred, p, 0, 0);
    421 	VOP_UNLOCK(devvp, 0);
    422 	if (error)
    423 		panic("ffs_reload: dirty1");
    424 	/*
    425 	 * Step 2: re-read superblock from disk.
    426 	 */
    427 	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
    428 		size = DEV_BSIZE;
    429 	else
    430 		size = dpart.disklab->d_secsize;
    431 	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, NOCRED, &bp);
    432 	if (error) {
    433 		brelse(bp);
    434 		return (error);
    435 	}
    436 	fs = VFSTOUFS(mountp)->um_fs;
    437 	newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
    438 	memcpy(newfs, bp->b_data, fs->fs_sbsize);
    439 #ifdef FFS_EI
    440 	if (VFSTOUFS(mountp)->um_flags & UFS_NEEDSWAP) {
    441 		ffs_sb_swap((struct fs*)bp->b_data, newfs);
    442 		fs->fs_flags |= FS_SWAPPED;
    443 	}
    444 #endif
    445 	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
    446 	    newfs->fs_bsize < sizeof(struct fs)) {
    447 		brelse(bp);
    448 		free(newfs, M_UFSMNT);
    449 		return (EIO);		/* XXX needs translation */
    450 	}
    451 	/*
    452 	 * Copy pointer fields back into superblock before copying in	XXX
    453 	 * new superblock. These should really be in the ufsmount.	XXX
    454 	 * Note that important parameters (eg fs_ncg) are unchanged.
    455 	 */
    456 	newfs->fs_csp = fs->fs_csp;
    457 	newfs->fs_maxcluster = fs->fs_maxcluster;
    458 	newfs->fs_contigdirs = fs->fs_contigdirs;
    459 	newfs->fs_ronly = fs->fs_ronly;
    460 	memcpy(fs, newfs, (u_int)fs->fs_sbsize);
    461 	if (fs->fs_sbsize < SBSIZE)
    462 		bp->b_flags |= B_INVAL;
    463 	brelse(bp);
    464 	free(newfs, M_UFSMNT);
    465 	mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
    466 	ffs_oldfscompat(fs);
    467 	/* An old fsck may have zeroed these fields, so recheck them. */
    468 	if (fs->fs_avgfilesize <= 0)
    469 		fs->fs_avgfilesize = AVFILESIZ;
    470 	if (fs->fs_avgfpdir <= 0)
    471 		fs->fs_avgfpdir = AFPDIR;
    472 
    473 	ffs_statfs(mountp, &mountp->mnt_stat, p);
    474 	/*
    475 	 * Step 3: re-read summary information from disk.
    476 	 */
    477 	blks = howmany(fs->fs_cssize, fs->fs_fsize);
    478 	space = fs->fs_csp;
    479 	for (i = 0; i < blks; i += fs->fs_frag) {
    480 		size = fs->fs_bsize;
    481 		if (i + fs->fs_frag > blks)
    482 			size = (blks - i) * fs->fs_fsize;
    483 		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
    484 			      NOCRED, &bp);
    485 		if (error) {
    486 			brelse(bp);
    487 			return (error);
    488 		}
    489 #ifdef FFS_EI
    490 		if (UFS_FSNEEDSWAP(fs))
    491 			ffs_csum_swap((struct csum *)bp->b_data,
    492 			    (struct csum *)space, size);
    493 		else
    494 #endif
    495 			memcpy(space, bp->b_data, (size_t)size);
    496 		space = (char *)space + size;
    497 		brelse(bp);
    498 	}
    499 	if ((fs->fs_flags & FS_DOSOFTDEP))
    500 		softdep_mount(devvp, mountp, fs, cred);
    501 	/*
    502 	 * We no longer know anything about clusters per cylinder group.
    503 	 */
    504 	if (fs->fs_contigsumsize > 0) {
    505 		lp = fs->fs_maxcluster;
    506 		for (i = 0; i < fs->fs_ncg; i++)
    507 			*lp++ = fs->fs_contigsumsize;
    508 	}
    509 
    510 loop:
    511 	simple_lock(&mntvnode_slock);
    512 	for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
    513 		if (vp->v_mount != mountp) {
    514 			simple_unlock(&mntvnode_slock);
    515 			goto loop;
    516 		}
    517 		nvp = vp->v_mntvnodes.le_next;
    518 		/*
    519 		 * Step 4: invalidate all inactive vnodes.
    520 		 */
    521 		if (vrecycle(vp, &mntvnode_slock, p))
    522 			goto loop;
    523 		/*
    524 		 * Step 5: invalidate all cached file data.
    525 		 */
    526 		simple_lock(&vp->v_interlock);
    527 		simple_unlock(&mntvnode_slock);
    528 		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
    529 			goto loop;
    530 		if (vinvalbuf(vp, 0, cred, p, 0, 0))
    531 			panic("ffs_reload: dirty2");
    532 		/*
    533 		 * Step 6: re-read inode data for all active vnodes.
    534 		 */
    535 		ip = VTOI(vp);
    536 		error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
    537 			      (int)fs->fs_bsize, NOCRED, &bp);
    538 		if (error) {
    539 			brelse(bp);
    540 			vput(vp);
    541 			return (error);
    542 		}
    543 		cp = (caddr_t)bp->b_data +
    544 		    (ino_to_fsbo(fs, ip->i_number) * DINODE_SIZE);
    545 #ifdef FFS_EI
    546 		if (UFS_FSNEEDSWAP(fs))
    547 			ffs_dinode_swap((struct dinode *)cp,
    548 			    &ip->i_din.ffs_din);
    549 		else
    550 #endif
    551 			memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
    552 		ip->i_ffs_effnlink = ip->i_ffs_nlink;
    553 		brelse(bp);
    554 		vput(vp);
    555 		simple_lock(&mntvnode_slock);
    556 	}
    557 	simple_unlock(&mntvnode_slock);
    558 	return (0);
    559 }
    560 
    561 /*
    562  * Common code for mount and mountroot
    563  */
    564 int
    565 ffs_mountfs(devvp, mp, p)
    566 	struct vnode *devvp;
    567 	struct mount *mp;
    568 	struct proc *p;
    569 {
    570 	struct ufsmount *ump;
    571 	struct buf *bp;
    572 	struct fs *fs;
    573 	dev_t dev;
    574 	struct partinfo dpart;
    575 	void *space;
    576 	int blks;
    577 	int error, i, size, ronly;
    578 #ifdef FFS_EI
    579 	int needswap;
    580 #endif
    581 	int32_t *lp;
    582 	struct ucred *cred;
    583 	u_int64_t maxfilesize;					/* XXX */
    584 	u_int32_t sbsize;
    585 
    586 	dev = devvp->v_rdev;
    587 	cred = p ? p->p_ucred : NOCRED;
    588 	/*
    589 	 * Disallow multiple mounts of the same device.
    590 	 * Disallow mounting of a device that is currently in use
    591 	 * (except for root, which might share swap device for miniroot).
    592 	 * Flush out any old buffers remaining from a previous use.
    593 	 */
    594 	if ((error = vfs_mountedon(devvp)) != 0)
    595 		return (error);
    596 	if (vcount(devvp) > 1 && devvp != rootvp)
    597 		return (EBUSY);
    598 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    599 	error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
    600 	VOP_UNLOCK(devvp, 0);
    601 	if (error)
    602 		return (error);
    603 
    604 	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
    605 	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
    606 	if (error)
    607 		return (error);
    608 	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
    609 		size = DEV_BSIZE;
    610 	else
    611 		size = dpart.disklab->d_secsize;
    612 
    613 	bp = NULL;
    614 	ump = NULL;
    615 	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, cred, &bp);
    616 	if (error)
    617 		goto out;
    618 
    619 	fs = (struct fs*)bp->b_data;
    620 	if (fs->fs_magic == FS_MAGIC) {
    621 		sbsize = fs->fs_sbsize;
    622 #ifdef FFS_EI
    623 		needswap = 0;
    624 	} else if (fs->fs_magic == bswap32(FS_MAGIC)) {
    625 		sbsize = bswap32(fs->fs_sbsize);
    626 		needswap = 1;
    627 #endif
    628 	} else {
    629 		error = EINVAL;
    630 		goto out;
    631 	}
    632 	if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs)) {
    633 		error = EINVAL;
    634 		goto out;
    635 	}
    636 
    637 	fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
    638 	memcpy(fs, bp->b_data, sbsize);
    639 #ifdef FFS_EI
    640 	if (needswap) {
    641 		ffs_sb_swap((struct fs*)bp->b_data, fs);
    642 		fs->fs_flags |= FS_SWAPPED;
    643 	}
    644 #endif
    645 	ffs_oldfscompat(fs);
    646 
    647 	if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < sizeof(struct fs)) {
    648 		error = EINVAL;
    649 		goto out;
    650 	}
    651 	/* Make sure the cylinder group summary area is a reasonable size. */
    652 	if (fs->fs_cgsize == 0 || fs->fs_cpg == 0 ||
    653 	    fs->fs_ncg > fs->fs_ncyl / fs->fs_cpg + 1 ||
    654 	    fs->fs_cssize >
    655 	    fragroundup(fs, fs->fs_ncg * sizeof(struct csum))) {
    656 		error = EINVAL;		/* XXX needs translation */
    657 		goto out2;
    658 	}
    659 	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
    660 	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
    661 		error = EROFS;		/* XXX what should be returned? */
    662 		goto out2;
    663 	}
    664 
    665 	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
    666 	memset((caddr_t)ump, 0, sizeof *ump);
    667 	ump->um_fs = fs;
    668 	if (fs->fs_sbsize < SBSIZE)
    669 		bp->b_flags |= B_INVAL;
    670 	brelse(bp);
    671 	bp = NULL;
    672 	fs->fs_ronly = ronly;
    673 	if (ronly == 0) {
    674 		fs->fs_clean <<= 1;
    675 		fs->fs_fmod = 1;
    676 	}
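        	/*
        	 * The left shift above relies on FS_WASCLEAN being
        	 * FS_ISCLEAN << 1: a clean flag becomes "was clean" while a
        	 * dirty 0 stays 0, so the unmount and read-only downgrade
        	 * paths can test FS_WASCLEAN before marking the file system
        	 * FS_ISCLEAN again.
        	 */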
    677 	size = fs->fs_cssize;
    678 	blks = howmany(size, fs->fs_fsize);
    679 	if (fs->fs_contigsumsize > 0)
    680 		size += fs->fs_ncg * sizeof(int32_t);
    681 	size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
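        	/*
        	 * A single allocation holds the cylinder group summary
        	 * blocks, the per-cg cluster summary array (fs_maxcluster,
        	 * only when clustering is enabled) and the per-cg
        	 * fs_contigdirs counts; the code below carves it up in
        	 * that order.
        	 */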
    682 	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
    683 	fs->fs_csp = space;
    684 	for (i = 0; i < blks; i += fs->fs_frag) {
    685 		size = fs->fs_bsize;
    686 		if (i + fs->fs_frag > blks)
    687 			size = (blks - i) * fs->fs_fsize;
    688 		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
    689 			      cred, &bp);
    690 		if (error) {
    691 			free(fs->fs_csp, M_UFSMNT);
    692 			goto out2;
    693 		}
    694 #ifdef FFS_EI
    695 		if (needswap)
    696 			ffs_csum_swap((struct csum *)bp->b_data,
    697 				(struct csum *)space, size);
    698 		else
    699 #endif
    700 			memcpy(space, bp->b_data, (u_int)size);
    701 
    702 		space = (char *)space + size;
    703 		brelse(bp);
    704 		bp = NULL;
    705 	}
    706 	if (fs->fs_contigsumsize > 0) {
    707 		fs->fs_maxcluster = lp = space;
    708 		for (i = 0; i < fs->fs_ncg; i++)
    709 			*lp++ = fs->fs_contigsumsize;
    710 		space = lp;
    711 	}
    712 	size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
    713 	fs->fs_contigdirs = space;
    714 	space = (char *)space + size;
    715 	memset(fs->fs_contigdirs, 0, size);
    716 	/* Compatibility for old filesystems - XXX */
    717 	if (fs->fs_avgfilesize <= 0)
    718 		fs->fs_avgfilesize = AVFILESIZ;
    719 	if (fs->fs_avgfpdir <= 0)
    720 		fs->fs_avgfpdir = AFPDIR;
    721 	mp->mnt_data = (qaddr_t)ump;
    722 	mp->mnt_stat.f_fsid.val[0] = (long)dev;
    723 	mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_FFS);
    724 	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
    725 	mp->mnt_fs_bshift = fs->fs_bshift;
    726 	mp->mnt_dev_bshift = DEV_BSHIFT;	/* XXX */
    727 	mp->mnt_flag |= MNT_LOCAL;
    728 #ifdef FFS_EI
    729 	if (needswap)
    730 		ump->um_flags |= UFS_NEEDSWAP;
    731 #endif
    732 	ump->um_mountp = mp;
    733 	ump->um_dev = dev;
    734 	ump->um_devvp = devvp;
    735 	ump->um_nindir = fs->fs_nindir;
    736 	ump->um_lognindir = ffs(fs->fs_nindir) - 1;
    737 	ump->um_bptrtodb = fs->fs_fsbtodb;
    738 	ump->um_seqinc = fs->fs_frag;
    739 	for (i = 0; i < MAXQUOTAS; i++)
    740 		ump->um_quotas[i] = NULLVP;
    741 	devvp->v_specmountpoint = mp;
    742 	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
    743 	maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;	/* XXX */
    744 	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
    745 		fs->fs_maxfilesize = maxfilesize;		/* XXX */
    746 	if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
    747 		error = softdep_mount(devvp, mp, fs, cred);
    748 		if (error) {
    749 			free(fs->fs_csp, M_UFSMNT);
    750 			goto out;
    751 		}
    752 	}
    753 	return (0);
    754 out2:
    755 	free(fs, M_UFSMNT);
    756 out:
    757 	devvp->v_specmountpoint = NULL;
    758 	if (bp)
    759 		brelse(bp);
    760 	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
    761 	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
    762 	VOP_UNLOCK(devvp, 0);
    763 	if (ump) {
    764 		free(ump, M_UFSMNT);
    765 		mp->mnt_data = (qaddr_t)0;
    766 	}
    767 	return (error);
    768 }
    769 
    770 /*
    771  * Sanity checks for old file systems.
    772  *
    773  * XXX - goes away some day.
    774  */
    775 int
    776 ffs_oldfscompat(fs)
    777 	struct fs *fs;
    778 {
    779 	int i;
    780 
    781 	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
    782 	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
    783 	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
    784 		fs->fs_nrpos = 8;				/* XXX */
    785 	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
    786 		u_int64_t sizepb = fs->fs_bsize;		/* XXX */
    787 								/* XXX */
    788 		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
    789 		for (i = 0; i < NIADDR; i++) {			/* XXX */
    790 			sizepb *= NINDIR(fs);			/* XXX */
    791 			fs->fs_maxfilesize += sizepb;		/* XXX */
    792 		}						/* XXX */
    793 		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
    794 		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
    795 	}							/* XXX */
    796 	return (0);
    797 }
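
/*
 * For reference, with the usual NDADDR == 12 and NIADDR == 3 the loop above
 * works out to
 *
 *	fs_maxfilesize = NDADDR * bsize - 1
 *			 + bsize * NINDIR(fs)
 *			 + bsize * NINDIR(fs)^2
 *			 + bsize * NINDIR(fs)^3
 *
 * e.g. with 8 KB blocks and 4-byte block pointers (NINDIR == 2048): 96 KB
 * of direct blocks plus about 16 MB, 32 GB and 64 TB reachable through the
 * single, double and triple indirect blocks.
 */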
    798 
    799 /*
    800  * unmount system call
    801  */
    802 int
    803 ffs_unmount(mp, mntflags, p)
    804 	struct mount *mp;
    805 	int mntflags;
    806 	struct proc *p;
    807 {
    808 	struct ufsmount *ump;
    809 	struct fs *fs;
    810 	int error, flags;
    811 
    812 	flags = 0;
    813 	if (mntflags & MNT_FORCE)
    814 		flags |= FORCECLOSE;
    815 	if (mp->mnt_flag & MNT_SOFTDEP) {
    816 		if ((error = softdep_flushfiles(mp, flags, p)) != 0)
    817 			return (error);
    818 	} else {
    819 		if ((error = ffs_flushfiles(mp, flags, p)) != 0)
    820 			return (error);
    821 	}
    822 	ump = VFSTOUFS(mp);
    823 	fs = ump->um_fs;
    824 	if (fs->fs_ronly == 0 &&
    825 	    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
    826 	    fs->fs_clean & FS_WASCLEAN) {
    827 		if (mp->mnt_flag & MNT_SOFTDEP)
    828 			fs->fs_flags &= ~FS_DOSOFTDEP;
    829 		fs->fs_clean = FS_ISCLEAN;
    830 		(void) ffs_sbupdate(ump, MNT_WAIT);
    831 	}
    832 	if (ump->um_devvp->v_type != VBAD)
    833 		ump->um_devvp->v_specmountpoint = NULL;
    834 	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
    835 	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
    836 		NOCRED, p);
    837 	vput(ump->um_devvp);
    838 	free(fs->fs_csp, M_UFSMNT);
    839 	free(fs, M_UFSMNT);
    840 	free(ump, M_UFSMNT);
    841 	mp->mnt_data = (qaddr_t)0;
    842 	mp->mnt_flag &= ~MNT_LOCAL;
    843 	return (error);
    844 }
    845 
    846 /*
    847  * Flush out all the files in a filesystem.
    848  */
    849 int
    850 ffs_flushfiles(mp, flags, p)
    851 	struct mount *mp;
    852 	int flags;
    853 	struct proc *p;
    854 {
    855 	extern int doforce;
    856 	struct ufsmount *ump;
    857 	int error;
    858 
    859 	if (!doforce)
    860 		flags &= ~FORCECLOSE;
    861 	ump = VFSTOUFS(mp);
    862 #ifdef QUOTA
    863 	if (mp->mnt_flag & MNT_QUOTA) {
    864 		int i;
    865 		if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
    866 			return (error);
    867 		for (i = 0; i < MAXQUOTAS; i++) {
    868 			if (ump->um_quotas[i] == NULLVP)
    869 				continue;
    870 			quotaoff(p, mp, i);
    871 		}
    872 		/*
    873 		 * Here we fall through to vflush again to ensure
    874 		 * that we have gotten rid of all the system vnodes.
    875 		 */
    876 	}
    877 #endif
    878 	/*
    879 	 * Flush all the files.
    880 	 */
    881 	error = vflush(mp, NULLVP, flags);
    882 	if (error)
    883 		return (error);
    884 	/*
    885 	 * Flush filesystem metadata.
    886 	 */
    887 	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
    888 	error = VOP_FSYNC(ump->um_devvp, p->p_ucred, FSYNC_WAIT, 0, 0, p);
    889 	VOP_UNLOCK(ump->um_devvp, 0);
    890 	return (error);
    891 }
    892 
    893 /*
    894  * Get file system statistics.
    895  */
    896 int
    897 ffs_statfs(mp, sbp, p)
    898 	struct mount *mp;
    899 	struct statfs *sbp;
    900 	struct proc *p;
    901 {
    902 	struct ufsmount *ump;
    903 	struct fs *fs;
    904 
    905 	ump = VFSTOUFS(mp);
    906 	fs = ump->um_fs;
    907 	if (fs->fs_magic != FS_MAGIC)
    908 		panic("ffs_statfs");
    909 #ifdef COMPAT_09
    910 	sbp->f_type = 1;
    911 #else
    912 	sbp->f_type = 0;
    913 #endif
    914 	sbp->f_bsize = fs->fs_fsize;
    915 	sbp->f_iosize = fs->fs_bsize;
    916 	sbp->f_blocks = fs->fs_dsize;
    917 	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
    918 		fs->fs_cstotal.cs_nffree;
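        	/*
        	 * f_bavail below is what an unprivileged user may still
        	 * allocate: the part of fs_dsize above the minfree reserve,
        	 * minus the blocks already in use.  It goes negative (the
        	 * familiar negative "Avail" in df) once the file system is
        	 * filled into the reserve.
        	 */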
    919 	sbp->f_bavail = (long) (((u_int64_t) fs->fs_dsize * (u_int64_t)
    920 	    (100 - fs->fs_minfree) / (u_int64_t) 100) -
    921 	    (u_int64_t) (fs->fs_dsize - sbp->f_bfree));
    922 	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
    923 	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
    924 	if (sbp != &mp->mnt_stat) {
    925 		memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
    926 		memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
    927 	}
    928 	strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
    929 	return (0);
    930 }
    931 
    932 /*
    933  * Go through the disk queues to initiate sandbagged IO;
    934  * go through the inodes to write those that have been modified;
    935  * initiate the writing of the super block if it has been modified.
    936  *
    937  * Note: we are always called with the filesystem marked `MPBUSY'.
    938  */
    939 int
    940 ffs_sync(mp, waitfor, cred, p)
    941 	struct mount *mp;
    942 	int waitfor;
    943 	struct ucred *cred;
    944 	struct proc *p;
    945 {
    946 	struct vnode *vp, *nvp;
    947 	struct inode *ip;
    948 	struct ufsmount *ump = VFSTOUFS(mp);
    949 	struct fs *fs;
    950 	int error, allerror = 0;
    951 
    952 	fs = ump->um_fs;
    953 	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
    954 		printf("fs = %s\n", fs->fs_fsmnt);
    955 		panic("update: rofs mod");
    956 	}
    957 	/*
    958 	 * Write back each (modified) inode.
    959 	 */
    960 	simple_lock(&mntvnode_slock);
    961 loop:
    962 	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
    963 		/*
    964 		 * If the vnode that we are about to sync is no longer
    965 		 * associated with this mount point, start over.
    966 		 */
    967 		if (vp->v_mount != mp)
    968 			goto loop;
    969 		simple_lock(&vp->v_interlock);
    970 		nvp = LIST_NEXT(vp, v_mntvnodes);
    971 		ip = VTOI(vp);
    972 		if (vp->v_type == VNON ||
    973 		    ((ip->i_flag &
    974 		      (IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFIED | IN_ACCESSED)) == 0 &&
    975 		     LIST_EMPTY(&vp->v_dirtyblkhd) &&
    976 		     vp->v_uobj.uo_npages == 0))
    977 		{
    978 			simple_unlock(&vp->v_interlock);
    979 			continue;
    980 		}
    981 		simple_unlock(&mntvnode_slock);
    982 		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
    983 		if (error) {
    984 			simple_lock(&mntvnode_slock);
    985 			if (error == ENOENT)
    986 				goto loop;
    987 			continue;
    988 		}
    989 		if ((error = VOP_FSYNC(vp, cred,
    990 		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p)) != 0)
    991 			allerror = error;
    992 		vput(vp);
    993 		simple_lock(&mntvnode_slock);
    994 	}
    995 	simple_unlock(&mntvnode_slock);
    996 	/*
    997 	 * Force stale file system control information to be flushed.
    998 	 */
    999 	if (waitfor != MNT_LAZY) {
   1000 		if (ump->um_mountp->mnt_flag & MNT_SOFTDEP)
   1001 			waitfor = MNT_NOWAIT;
   1002 		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
   1003 		if ((error = VOP_FSYNC(ump->um_devvp, cred,
   1004 		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p)) != 0)
   1005 			allerror = error;
   1006 		VOP_UNLOCK(ump->um_devvp, 0);
   1007 	}
   1008 #ifdef QUOTA
   1009 	qsync(mp);
   1010 #endif
   1011 	/*
   1012 	 * Write back modified superblock.
   1013 	 */
   1014 	if (fs->fs_fmod != 0) {
   1015 		fs->fs_fmod = 0;
   1016 		fs->fs_time = time.tv_sec;
   1017 		if ((error = ffs_cgupdate(ump, waitfor)))
   1018 			allerror = error;
   1019 	}
   1020 	return (allerror);
   1021 }
   1022 
   1023 /*
   1024  * Look up a FFS dinode number to find its incore vnode, otherwise read it
   1025  * in from disk.  If it is in core, wait for the lock bit to clear, then
   1026  * return the inode locked.  Detection and handling of mount points must be
   1027  * done by the calling routine.
   1028  */
   1029 int
   1030 ffs_vget(mp, ino, vpp)
   1031 	struct mount *mp;
   1032 	ino_t ino;
   1033 	struct vnode **vpp;
   1034 {
   1035 	struct fs *fs;
   1036 	struct inode *ip;
   1037 	struct ufsmount *ump;
   1038 	struct buf *bp;
   1039 	struct vnode *vp;
   1040 	dev_t dev;
   1041 	int error;
   1042 	caddr_t cp;
   1043 
   1044 	ump = VFSTOUFS(mp);
   1045 	dev = ump->um_dev;
   1046 
   1047 	if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
   1048 		return (0);
   1049 
   1050 	/* Allocate a new vnode/inode. */
   1051 	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
   1052 		*vpp = NULL;
   1053 		return (error);
   1054 	}
   1055 
   1056 	/*
   1057 	 * If someone beat us to it while sleeping in getnewvnode(),
   1058 	 * push back the freshly allocated vnode we don't need, and return.
   1059 	 */
   1060 
   1061 	do {
   1062 		if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
   1063 			ungetnewvnode(vp);
   1064 			return (0);
   1065 		}
   1066 	} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));
   1067 
   1068 	/*
   1069 	 * XXX MFS ends up here, too, to allocate an inode.  Should we
   1070 	 * XXX create another pool for MFS inodes?
   1071 	 */
   1072 
   1073 	ip = pool_get(&ffs_inode_pool, PR_WAITOK);
   1074 	memset(ip, 0, sizeof(struct inode));
   1075 	vp->v_data = ip;
   1076 	ip->i_vnode = vp;
   1077 	ip->i_fs = fs = ump->um_fs;
   1078 	ip->i_dev = dev;
   1079 	ip->i_number = ino;
   1080 	LIST_INIT(&ip->i_pcbufhd);
   1081 #ifdef QUOTA
   1082 	{
   1083 		int i;
   1084 
   1085 		for (i = 0; i < MAXQUOTAS; i++)
   1086 			ip->i_dquot[i] = NODQUOT;
   1087 	}
   1088 #endif
   1089 
   1090 	/*
   1091 	 * Put it onto its hash chain and lock it so that other requests for
   1092 	 * this inode will block if they arrive while we are sleeping waiting
   1093 	 * for old data structures to be purged or for the contents of the
   1094 	 * disk portion of this inode to be read.
   1095 	 */
   1096 
   1097 	ufs_ihashins(ip);
   1098 	lockmgr(&ufs_hashlock, LK_RELEASE, 0);
   1099 
   1100 	/* Read in the disk contents for the inode, copy into the inode. */
   1101 	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
   1102 		      (int)fs->fs_bsize, NOCRED, &bp);
   1103 	if (error) {
   1104 
   1105 		/*
   1106 		 * The inode does not contain anything useful, so it would
   1107 		 * be misleading to leave it on its hash chain. With mode
   1108 		 * still zero, it will be unlinked and returned to the free
   1109 		 * list by vput().
   1110 		 */
   1111 
   1112 		vput(vp);
   1113 		brelse(bp);
   1114 		*vpp = NULL;
   1115 		return (error);
   1116 	}
   1117 	cp = (caddr_t)bp->b_data + (ino_to_fsbo(fs, ino) * DINODE_SIZE);
   1118 #ifdef FFS_EI
   1119 	if (UFS_FSNEEDSWAP(fs))
   1120 		ffs_dinode_swap((struct dinode *)cp, &ip->i_din.ffs_din);
   1121 	else
   1122 #endif
   1123 		memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
   1124 	if (DOINGSOFTDEP(vp))
   1125 		softdep_load_inodeblock(ip);
   1126 	else
   1127 		ip->i_ffs_effnlink = ip->i_ffs_nlink;
   1128 	brelse(bp);
   1129 
   1130 	/*
   1131 	 * Initialize the vnode from the inode, check for aliases.
   1132 	 * Note that the underlying vnode may have changed.
   1133 	 */
   1134 
   1135 	ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
   1136 
   1137 	/*
   1138 	 * Finish inode initialization now that aliasing has been resolved.
   1139 	 */
   1140 
   1141 	genfs_node_init(vp, &ffs_genfsops);
   1142 	ip->i_devvp = ump->um_devvp;
   1143 	VREF(ip->i_devvp);
   1144 
   1145 	/*
   1146 	 * Ensure that uid and gid are correct. This is a temporary
   1147 	 * fix until fsck has been changed to do the update.
   1148 	 */
   1149 
   1150 	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
   1151 		ip->i_ffs_uid = ip->i_din.ffs_din.di_ouid;	/* XXX */
   1152 		ip->i_ffs_gid = ip->i_din.ffs_din.di_ogid;	/* XXX */
   1153 	}							/* XXX */
   1154 	uvm_vnp_setsize(vp, ip->i_ffs_size);
   1155 	*vpp = vp;
   1156 	return (0);
   1157 }
   1158 
   1159 /*
   1160  * File handle to vnode
   1161  *
   1162  * Have to be really careful about stale file handles:
   1163  * - check that the inode number is valid
   1164  * - call ffs_vget() to get the locked inode
   1165  * - check for an unallocated inode (i_mode == 0)
   1166  * - check that the given client host has export rights; this is
   1167  *   now handled separately via ufs_check_export()
   1168  */
   1169 int
   1170 ffs_fhtovp(mp, fhp, vpp)
   1171 	struct mount *mp;
   1172 	struct fid *fhp;
   1173 	struct vnode **vpp;
   1174 {
   1175 	struct ufid *ufhp;
   1176 	struct fs *fs;
   1177 
   1178 	ufhp = (struct ufid *)fhp;
   1179 	fs = VFSTOUFS(mp)->um_fs;
   1180 	if (ufhp->ufid_ino < ROOTINO ||
   1181 	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
   1182 		return (ESTALE);
   1183 	return (ufs_fhtovp(mp, ufhp, vpp));
   1184 }
   1185 
   1186 /*
   1187  * Vnode pointer to File handle
   1188  */
   1189 /* ARGSUSED */
   1190 int
   1191 ffs_vptofh(vp, fhp)
   1192 	struct vnode *vp;
   1193 	struct fid *fhp;
   1194 {
   1195 	struct inode *ip;
   1196 	struct ufid *ufhp;
   1197 
   1198 	ip = VTOI(vp);
   1199 	ufhp = (struct ufid *)fhp;
   1200 	ufhp->ufid_len = sizeof(struct ufid);
   1201 	ufhp->ufid_ino = ip->i_number;
   1202 	ufhp->ufid_gen = ip->i_ffs_gen;
   1203 	return (0);
   1204 }
   1205 
   1206 void
   1207 ffs_init()
   1208 {
   1209 	if (ffs_initcount++ > 0)
   1210 		return;
   1211 
   1212 	softdep_initialize();
   1213 	ufs_init();
   1214 
   1215 	pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
   1216 	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_FFSNODE);
   1217 }
   1218 
   1219 void
   1220 ffs_reinit()
   1221 {
   1222 	softdep_reinitialize();
   1223 	ufs_reinit();
   1224 }
   1225 
   1226 void
   1227 ffs_done()
   1228 {
   1229 	if (--ffs_initcount > 0)
   1230 		return;
   1231 
   1232 	/* XXX softdep cleanup ? */
   1233 	ufs_done();
   1234 	pool_destroy(&ffs_inode_pool);
   1235 }
   1236 
   1237 int
   1238 ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
   1239 	int *name;
   1240 	u_int namelen;
   1241 	void *oldp;
   1242 	size_t *oldlenp;
   1243 	void *newp;
   1244 	size_t newlen;
   1245 	struct proc *p;
   1246 {
   1247 	extern int doasyncfree;
   1248 	extern int ffs_log_changeopt;
   1249 
   1250 	/* all sysctl names at this level are terminal */
   1251 	if (namelen != 1)
   1252 		return (ENOTDIR);		/* overloaded */
   1253 
   1254 	switch (name[0]) {
   1255 	case FFS_ASYNCFREE:
   1256 		return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
   1257 	case FFS_LOG_CHANGEOPT:
   1258 		return (sysctl_int(oldp, oldlenp, newp, newlen,
   1259 			&ffs_log_changeopt));
   1260 	default:
   1261 		return (EOPNOTSUPP);
   1262 	}
   1263 	/* NOTREACHED */
   1264 }
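
/*
 * Usage sketch: these nodes sit under the vfs sysctl tree, so a userland
 * query looks roughly like the code below.  The second mib component is
 * ffs's vfs type number; "ffs_typenum" is a placeholder for however the
 * caller obtains it, and only CTL_VFS and the FFS_* names above are taken
 * from real headers.
 *
 *	int mib[3], val;
 *	size_t len = sizeof(val);
 *
 *	mib[0] = CTL_VFS;
 *	mib[1] = ffs_typenum;
 *	mib[2] = FFS_ASYNCFREE;
 *	if (sysctl(mib, 3, &val, &len, NULL, 0) == -1)
 *		err(1, "sysctl");
 */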
   1265 
   1266 /*
   1267  * Write a superblock and associated information back to disk.
   1268  */
   1269 int
   1270 ffs_sbupdate(mp, waitfor)
   1271 	struct ufsmount *mp;
   1272 	int waitfor;
   1273 {
   1274 	struct fs *fs = mp->um_fs;
   1275 	struct buf *bp;
   1276 	int i, error = 0;
   1277 	int32_t saved_nrpos = fs->fs_nrpos;
   1278 	int64_t saved_qbmask = fs->fs_qbmask;
   1279 	int64_t saved_qfmask = fs->fs_qfmask;
   1280 	u_int64_t saved_maxfilesize = fs->fs_maxfilesize;
   1281 	u_int8_t saveflag;
   1282 
   1283 	/* Restore compatibility to old file systems.		   XXX */
   1284 	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
   1285 		fs->fs_nrpos = -1;		/* XXX */
   1286 	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
   1287 		int32_t *lp, tmp;				/* XXX */
   1288 								/* XXX */
   1289 		lp = (int32_t *)&fs->fs_qbmask;	/* XXX nuke qfmask too */
   1290 		tmp = lp[4];					/* XXX */
   1291 		for (i = 4; i > 0; i--)				/* XXX */
   1292 			lp[i] = lp[i-1];			/* XXX */
   1293 		lp[0] = tmp;					/* XXX */
   1294 	}							/* XXX */
   1295 	fs->fs_maxfilesize = mp->um_savedmaxfilesize;	/* XXX */
   1296 
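        	/*
        	 * SBOFF >> (fs_fshift - fs_fsbtodb) is just
        	 * fsbtodb(fs, SBOFF / fs_fsize), i.e. the disk address of
        	 * the superblock.
        	 */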
   1297 	bp = getblk(mp->um_devvp, SBOFF >> (fs->fs_fshift - fs->fs_fsbtodb),
   1298 	    (int)fs->fs_sbsize, 0, 0);
   1299 	saveflag = fs->fs_flags & FS_INTERNAL;
   1300 	fs->fs_flags &= ~FS_INTERNAL;
   1301 	memcpy(bp->b_data, fs, fs->fs_sbsize);
   1302 #ifdef FFS_EI
   1303 	if (mp->um_flags & UFS_NEEDSWAP)
   1304 		ffs_sb_swap(fs, (struct fs*)bp->b_data);
   1305 #endif
   1306 
   1307 	fs->fs_flags |= saveflag;
   1308 	fs->fs_nrpos = saved_nrpos; /* XXX */
   1309 	fs->fs_qbmask = saved_qbmask; /* XXX */
   1310 	fs->fs_qfmask = saved_qfmask; /* XXX */
   1311 	fs->fs_maxfilesize = saved_maxfilesize; /* XXX */
   1312 
   1313 	if (waitfor == MNT_WAIT)
   1314 		error = bwrite(bp);
   1315 	else
   1316 		bawrite(bp);
   1317 	return (error);
   1318 }
   1319 
   1320 int
   1321 ffs_cgupdate(mp, waitfor)
   1322 	struct ufsmount *mp;
   1323 	int waitfor;
   1324 {
   1325 	struct fs *fs = mp->um_fs;
   1326 	struct buf *bp;
   1327 	int blks;
   1328 	void *space;
   1329 	int i, size, error = 0, allerror = 0;
   1330 
   1331 	allerror = ffs_sbupdate(mp, waitfor);
   1332 	blks = howmany(fs->fs_cssize, fs->fs_fsize);
   1333 	space = fs->fs_csp;
   1334 	for (i = 0; i < blks; i += fs->fs_frag) {
   1335 		size = fs->fs_bsize;
   1336 		if (i + fs->fs_frag > blks)
   1337 			size = (blks - i) * fs->fs_fsize;
   1338 		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
   1339 		    size, 0, 0);
   1340 #ifdef FFS_EI
   1341 		if (mp->um_flags & UFS_NEEDSWAP)
   1342 			ffs_csum_swap((struct csum*)space,
   1343 			    (struct csum*)bp->b_data, size);
   1344 		else
   1345 #endif
   1346 			memcpy(bp->b_data, space, (u_int)size);
   1347 		space = (char *)space + size;
   1348 		if (waitfor == MNT_WAIT)
   1349 			error = bwrite(bp);
   1350 		else
   1351 			bawrite(bp);
   1352 	}
   1353 	if (!allerror && error)
   1354 		allerror = error;
   1355 	return (allerror);
   1356 }
   1357