/*	$NetBSD: ffs_vfsops.c,v 1.86 2001/09/15 16:13:05 chs Exp $	*/

/*
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#if defined(_KERNEL_OPT)
#include "opt_ffs.h"
#include "opt_quota.h"
#include "opt_compat_netbsd.h"
#include "opt_softdep.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufs_bswap.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

/* how many times ffs_init() was called */
int ffs_initcount = 0;

extern struct lock ufs_hashlock;

extern struct vnodeopv_desc ffs_vnodeop_opv_desc;
extern struct vnodeopv_desc ffs_specop_opv_desc;
extern struct vnodeopv_desc ffs_fifoop_opv_desc;

const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
	&ffs_vnodeop_opv_desc,
	&ffs_specop_opv_desc,
	&ffs_fifoop_opv_desc,
	NULL,
};

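/*
 * VFS operations vector for FFS.  Operations with no FFS-specific
 * behaviour (start, root, quotactl, check_export) are shared with the
 * generic UFS code.
 */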
struct vfsops ffs_vfsops = {
	MOUNT_FFS,
	ffs_mount,
	ufs_start,
	ffs_unmount,
	ufs_root,
	ufs_quotactl,
	ffs_statfs,
	ffs_sync,
	ffs_vget,
	ffs_fhtovp,
	ffs_vptofh,
	ffs_init,
	ffs_reinit,
	ffs_done,
	ffs_sysctl,
	ffs_mountroot,
	ufs_check_export,
	ffs_vnodeopv_descs,
};

struct pool ffs_inode_pool;

/*
 * Called by main() when ffs is going to be mounted as root.
 */

int
ffs_mountroot()
{
	struct fs *fs;
	struct mount *mp;
	struct proc *p = curproc;	/* XXX */
	struct ufsmount *ump;
	int error;

	if (root_device->dv_class != DV_DISK)
		return (ENODEV);

	/*
	 * Get vnodes for rootdev.
	 */
	if (bdevvp(rootdev, &rootvp))
		panic("ffs_mountroot: can't setup bdevvp's");

	if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
		vrele(rootvp);
		return (error);
	}
	if ((error = ffs_mountfs(rootvp, mp, p)) != 0) {
		mp->mnt_op->vfs_refcount--;
		vfs_unbusy(mp);
		free(mp, M_MOUNT);
		vrele(rootvp);
		return (error);
	}
	simple_lock(&mountlist_slock);
	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	simple_unlock(&mountlist_slock);
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
	(void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
	(void)ffs_statfs(mp, &mp->mnt_stat, p);
	vfs_unbusy(mp);
	inittodr(fs->fs_time);
	return (0);
}

/*
 * VFS Operations.
 *
 * mount system call
 */
int
ffs_mount(mp, path, data, ndp, p)
	struct mount *mp;
	const char *path;
	void *data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	size_t size;
	int error, flags;
	mode_t accessmode;

	error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args));
	if (error)
		return (error);

#if !defined(SOFTDEP)
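	/* Soft dependencies are not compiled into this kernel; ignore the option. */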
	mp->mnt_flag &= ~MNT_SOFTDEP;
#endif

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mp->mnt_flag & MNT_SOFTDEP)
				error = softdep_flushfiles(mp, flags, p);
			else
				error = ffs_flushfiles(mp, flags, p);
			if (error == 0 &&
			    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
			    fs->fs_clean & FS_WASCLEAN) {
				if (mp->mnt_flag & MNT_SOFTDEP)
					fs->fs_flags &= ~FS_DOSOFTDEP;
				fs->fs_clean = FS_ISCLEAN;
				(void) ffs_sbupdate(ump, MNT_WAIT);
			}
			if (error)
				return (error);
			fs->fs_ronly = 1;
			fs->fs_fmod = 0;
		}

		/*
		 * Flush soft dependencies if disabling it via an update
		 * mount. This may leave some items to be processed,
		 * so don't do this yet XXX.
		 */
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
#ifdef notyet
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = softdep_flushfiles(mp, flags, p);
			if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
				fs->fs_flags &= ~FS_DOSOFTDEP;
			(void) ffs_sbupdate(ump, MNT_WAIT);
#elif defined(SOFTDEP)
			mp->mnt_flag |= MNT_SOFTDEP;
#endif
		}

		/*
		 * When upgrading to a softdep mount, we must first flush
		 * all vnodes. (not done yet -- see above)
		 */
		if (!(fs->fs_flags & FS_DOSOFTDEP) &&
		    (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
#ifdef notyet
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ffs_flushfiles(mp, flags, p);
#else
			mp->mnt_flag &= ~MNT_SOFTDEP;
#endif
		}

		if (mp->mnt_flag & MNT_RELOAD) {
			error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
			if (error)
				return (error);
		}
		if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			devvp = ump->um_devvp;
			if (p->p_ucred->cr_uid != 0) {
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
				error = VOP_ACCESS(devvp, VREAD | VWRITE,
				    p->p_ucred, p);
				VOP_UNLOCK(devvp, 0);
				if (error)
					return (error);
			}
			fs->fs_ronly = 0;
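			/*
			 * Shifting fs_clean turns FS_ISCLEAN into
			 * FS_WASCLEAN: the file system is no longer marked
			 * clean, but we can still tell later that it was
			 * clean when writing began.
			 */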
			fs->fs_clean <<= 1;
			fs->fs_fmod = 1;
			if ((fs->fs_flags & FS_DOSOFTDEP)) {
				error = softdep_mount(devvp, mp, fs,
				    p->p_ucred);
				if (error)
					return (error);
			}
		}
		if (args.fspec == 0) {
			/*
			 * Process export requests.
			 */
			return (vfs_export(mp, &ump->um_export, &args.export));
		}
		if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
		    (MNT_SOFTDEP | MNT_ASYNC)) {
			printf("%s fs uses soft updates, ignoring async mode\n",
			    fs->fs_fsmnt);
			mp->mnt_flag &= ~MNT_ASYNC;
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	if ((error = namei(ndp)) != 0)
		return (error);
	devvp = ndp->ni_vp;

	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}
	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (p->p_ucred->cr_uid != 0) {
		accessmode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
		VOP_UNLOCK(devvp, 0);
		if (error) {
			vrele(devvp);
			return (error);
		}
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		error = ffs_mountfs(devvp, mp, p);
		if (!error) {
			ump = VFSTOUFS(mp);
			fs = ump->um_fs;
			if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
			    (MNT_SOFTDEP | MNT_ASYNC)) {
				printf("%s fs uses soft updates, "
				    "ignoring async mode\n",
				    fs->fs_fsmnt);
				mp->mnt_flag &= ~MNT_ASYNC;
			}
		}
	} else {
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	memset(fs->fs_fsmnt + size, 0, sizeof(fs->fs_fsmnt) - size);
	memcpy(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	memset(mp->mnt_stat.f_mntfromname + size, 0, MNAMELEN - size);
	if (mp->mnt_flag & MNT_SOFTDEP)
		fs->fs_flags |= FS_DOSOFTDEP;
	else
		fs->fs_flags &= ~FS_DOSOFTDEP;
	if (fs->fs_fmod != 0) {	/* XXX */
		fs->fs_fmod = 0;
		if (fs->fs_clean & FS_WASCLEAN)
			fs->fs_time = time.tv_sec;
		else
			printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
			    mp->mnt_stat.f_mntfromname, fs->fs_clean);
		(void) ffs_cgupdate(ump, MNT_WAIT);
	}
	return (0);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 * 1) invalidate all cached meta-data.
 * 2) re-read superblock from disk.
 * 3) re-read summary information from disk.
 * 4) invalidate all inactive vnodes.
 * 5) invalidate all cached file data.
 * 6) re-read inode data for all active vnodes.
 */
int
ffs_reload(mountp, cred, p)
	struct mount *mountp;
	struct ucred *cred;
	struct proc *p;
{
	struct vnode *vp, *nvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct partinfo dpart;
	int i, blks, size, error;
	int32_t *lp;
	caddr_t cp;

	if ((mountp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mountp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, 0, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0);
	if (error)
		panic("ffs_reload: dirty1");
	/*
	 * Step 2: re-read superblock from disk.
	 */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;
	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	fs = VFSTOUFS(mountp)->um_fs;
	newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
	memcpy(newfs, bp->b_data, fs->fs_sbsize);
#ifdef FFS_EI
	if (VFSTOUFS(mountp)->um_flags & UFS_NEEDSWAP) {
		ffs_sb_swap((struct fs*)bp->b_data, newfs);
		fs->fs_flags |= FS_SWAPPED;
	}
#endif
	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		free(newfs, M_UFSMNT);
		return (EIO);		/* XXX needs translation */
	}
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	newfs->fs_csp = fs->fs_csp;
	newfs->fs_maxcluster = fs->fs_maxcluster;
	newfs->fs_contigdirs = fs->fs_contigdirs;
	newfs->fs_ronly = fs->fs_ronly;
	memcpy(fs, newfs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	free(newfs, M_UFSMNT);
	mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat(fs);
	/* An old fsck may have zeroed these fields, so recheck them. */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;

	ffs_statfs(mountp, &mountp->mnt_stat, p);
	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = fs->fs_csp;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error) {
			brelse(bp);
			return (error);
		}
#ifdef FFS_EI
		if (UFS_FSNEEDSWAP(fs))
			ffs_csum_swap((struct csum *)bp->b_data,
			    (struct csum *)space, size);
		else
#endif
			memcpy(space, bp->b_data, (size_t)size);
		space = (char *)space + size;
		brelse(bp);
	}
	if ((fs->fs_flags & FS_DOSOFTDEP))
		softdep_mount(devvp, mountp, fs, cred);
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		lp = fs->fs_maxcluster;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}

loop:
	simple_lock(&mntvnode_slock);
	for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		if (vp->v_mount != mountp) {
			simple_unlock(&mntvnode_slock);
			goto loop;
		}
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		if (vrecycle(vp, &mntvnode_slock, p))
			goto loop;
		/*
		 * Step 5: invalidate all cached file data.
		 */
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
			goto loop;
		if (vinvalbuf(vp, 0, cred, p, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			vput(vp);
			return (error);
		}
		cp = (caddr_t)bp->b_data +
		    (ino_to_fsbo(fs, ip->i_number) * DINODE_SIZE);
#ifdef FFS_EI
		if (UFS_FSNEEDSWAP(fs))
			ffs_dinode_swap((struct dinode *)cp,
			    &ip->i_din.ffs_din);
		else
#endif
			memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
		ip->i_ffs_effnlink = ip->i_ffs_nlink;
		brelse(bp);
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}

/*
 * Common code for mount and mountroot
 */
int
ffs_mountfs(devvp, mp, p)
	struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	dev_t dev;
	struct partinfo dpart;
	void *space;
	int blks;
	int error, i, size, ronly;
#ifdef FFS_EI
	int needswap;
#endif
	int32_t *lp;
	struct ucred *cred;
	u_int64_t maxfilesize;		/* XXX */
	u_int32_t sbsize;

	dev = devvp->v_rdev;
	cred = p ? p->p_ucred : NOCRED;
	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)) != 0)
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0);
	if (error)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
	if (error)
		return (error);
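	/*
	 * Determine the device sector size so that SBOFF can be turned
	 * into a device block number for bread(); fall back to DEV_BSIZE
	 * if the driver cannot return its disklabel.
	 */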
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;

	bp = NULL;
	ump = NULL;
	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, cred, &bp);
	if (error)
		goto out;

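	/*
	 * Check the superblock magic number.  With FFS_EI a file system
	 * of the opposite byte order is also accepted; its metadata is
	 * byte swapped on the way in and out.
	 */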
	fs = (struct fs*)bp->b_data;
	if (fs->fs_magic == FS_MAGIC) {
		sbsize = fs->fs_sbsize;
#ifdef FFS_EI
		needswap = 0;
	} else if (fs->fs_magic == bswap32(FS_MAGIC)) {
		sbsize = bswap32(fs->fs_sbsize);
		needswap = 1;
#endif
	} else {
		error = EINVAL;
		goto out;
	}
	if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs)) {
		error = EINVAL;
		goto out;
	}

	fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
	memcpy(fs, bp->b_data, sbsize);
#ifdef FFS_EI
	if (needswap) {
		ffs_sb_swap((struct fs*)bp->b_data, fs);
		fs->fs_flags |= FS_SWAPPED;
	}
#endif
	ffs_oldfscompat(fs);

	if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < sizeof(struct fs)) {
		error = EINVAL;
		goto out2;		/* free the superblock copy as well */
	}
	/* make sure cylinder group summary area is a reasonable size. */
	if (fs->fs_cgsize == 0 || fs->fs_cpg == 0 ||
	    fs->fs_ncg > fs->fs_ncyl / fs->fs_cpg + 1 ||
	    fs->fs_cssize >
	    fragroundup(fs, fs->fs_ncg * sizeof(struct csum))) {
		error = EINVAL;		/* XXX needs translation */
		goto out2;
	}
	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
		error = EROFS;	/* XXX what should be returned? */
		goto out2;
	}

	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	memset((caddr_t)ump, 0, sizeof *ump);
	ump->um_fs = fs;
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;
	fs->fs_ronly = ronly;
	if (ronly == 0) {
		fs->fs_clean <<= 1;
		fs->fs_fmod = 1;
	}
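	/*
	 * A single allocation holds the cylinder group summary information
	 * (fs_csp), the per-cg cluster summaries (fs_maxcluster) and the
	 * per-cg directory counts (fs_contigdirs); read the summary
	 * information from disk into it.
	 */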
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    cred, &bp);
		if (error) {
			free(fs->fs_csp, M_UFSMNT);
			goto out2;
		}
#ifdef FFS_EI
		if (needswap)
			ffs_csum_swap((struct csum *)bp->b_data,
			    (struct csum *)space, size);
		else
#endif
			memcpy(space, bp->b_data, (u_int)size);

		space = (char *)space + size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
	fs->fs_contigdirs = space;
	space = (char *)space + size;
	memset(fs->fs_contigdirs, 0, size);
	/* Compatibility for old filesystems - XXX */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_FFS);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	mp->mnt_fs_bshift = fs->fs_bshift;
	mp->mnt_dev_bshift = DEV_BSHIFT;	/* XXX */
	mp->mnt_flag |= MNT_LOCAL;
#ifdef FFS_EI
	if (needswap)
		ump->um_flags |= UFS_NEEDSWAP;
#endif
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_lognindir = ffs(fs->fs_nindir) - 1;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specmountpoint = mp;
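	/*
	 * Clamp fs_maxfilesize so that block number calculations do not
	 * overflow 32 bits; the on-disk value is saved so that
	 * ffs_sbupdate() can write the original back out.  XXX
	 */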
	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
	maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;	/* XXX */
	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
		fs->fs_maxfilesize = maxfilesize;		/* XXX */
	if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
		error = softdep_mount(devvp, mp, fs, cred);
		if (error) {
			free(fs->fs_csp, M_UFSMNT);
			goto out2;	/* free the superblock copy as well */
		}
	}
	return (0);
out2:
	free(fs, M_UFSMNT);
out:
	devvp->v_specmountpoint = NULL;
	if (bp)
		brelse(bp);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
	VOP_UNLOCK(devvp, 0);
	if (ump) {
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * Sanity checks for old file systems.
 *
 * XXX - goes away some day.
 */
int
ffs_oldfscompat(fs)
	struct fs *fs;
{
	int i;

	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = 8;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		u_int64_t sizepb = fs->fs_bsize;		/* XXX */
								/* XXX */
		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
		for (i = 0; i < NIADDR; i++) {			/* XXX */
			sizepb *= NINDIR(fs);			/* XXX */
			fs->fs_maxfilesize += sizepb;		/* XXX */
		}						/* XXX */
		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
	}							/* XXX */
	return (0);
}

/*
 * unmount system call
 */
int
ffs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	struct ufsmount *ump;
	struct fs *fs;
	int error, flags;

	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if (mp->mnt_flag & MNT_SOFTDEP) {
		if ((error = softdep_flushfiles(mp, flags, p)) != 0)
			return (error);
	} else {
		if ((error = ffs_flushfiles(mp, flags, p)) != 0)
			return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_ronly == 0 &&
	    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
	    fs->fs_clean & FS_WASCLEAN) {
		if (mp->mnt_flag & MNT_SOFTDEP)
			fs->fs_flags &= ~FS_DOSOFTDEP;
		fs->fs_clean = FS_ISCLEAN;
		(void) ffs_sbupdate(ump, MNT_WAIT);
	}
	if (ump->um_devvp->v_type != VBAD)
		ump->um_devvp->v_specmountpoint = NULL;
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
	    NOCRED, p);
	vput(ump->um_devvp);
	free(fs->fs_csp, M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{
	extern int doforce;
	struct ufsmount *ump;
	int error;

	if (!doforce)
		flags &= ~FORCECLOSE;
	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	/*
	 * Flush all the files.
	 */
	error = vflush(mp, NULLVP, flags);
	if (error)
		return (error);
	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, p->p_ucred, FSYNC_WAIT, 0, 0, p);
	VOP_UNLOCK(ump->um_devvp, 0);
	return (error);
}

/*
 * Get file system statistics.
 */
int
ffs_statfs(mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	struct ufsmount *ump;
	struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ffs_statfs");
#ifdef COMPAT_09
	sbp->f_type = 1;
#else
	sbp->f_type = 0;
#endif
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree;
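	/*
	 * f_bavail is the space available to unprivileged users: the
	 * fraction of fs_dsize above the minfree reserve, less the
	 * space already allocated.
	 */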
	sbp->f_bavail = (long) (((u_int64_t) fs->fs_dsize * (u_int64_t)
	    (100 - fs->fs_minfree) / (u_int64_t) 100) -
	    (u_int64_t) (fs->fs_dsize - sbp->f_bfree));
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
	if (sbp != &mp->mnt_stat) {
		memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
		memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
	}
	strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
ffs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	struct vnode *vp, *nvp;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, allerror = 0;

	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
		printf("fs = %s\n", fs->fs_fsmnt);
		panic("update: rofs mod");
	}
	/*
	 * Write back each (modified) inode.
	 */
	simple_lock(&mntvnode_slock);
loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		simple_lock(&vp->v_interlock);
		nvp = LIST_NEXT(vp, v_mntvnodes);
		ip = VTOI(vp);
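		/*
		 * Skip vnodes with nothing to flush: no inode times or
		 * flags to update, no dirty buffers and no dirty pages.
		 */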
		if (vp->v_type == VNON ||
		    ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFIED | IN_ACCESSED)) == 0 &&
		    LIST_EMPTY(&vp->v_dirtyblkhd) &&
		    vp->v_uvm.u_obj.uo_npages == 0))
		{
			simple_unlock(&vp->v_interlock);
			continue;
		}
		simple_unlock(&mntvnode_slock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			simple_lock(&mntvnode_slock);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		if ((error = VOP_FSYNC(vp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p)) != 0)
			allerror = error;
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	/*
	 * Force stale file system control information to be flushed.
	 */
	if (waitfor != MNT_LAZY) {
		if (ump->um_mountp->mnt_flag & MNT_SOFTDEP)
			waitfor = MNT_NOWAIT;
		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
		if ((error = VOP_FSYNC(ump->um_devvp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p)) != 0)
			allerror = error;
		VOP_UNLOCK(ump->um_devvp, 0);
	}
#ifdef QUOTA
	qsync(mp);
#endif
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0) {
		fs->fs_fmod = 0;
		fs->fs_time = time.tv_sec;
		if ((error = ffs_cgupdate(ump, waitfor)))
			allerror = error;
	}
	return (allerror);
}

/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ffs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;
	caddr_t cp;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;

	if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}

	/*
	 * If someone beat us to it while sleeping in getnewvnode(),
	 * push back the freshly allocated vnode we don't need, and return.
	 */
	do {
		if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
			ungetnewvnode(vp);
			return (0);
		}
	} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));

	/*
	 * XXX MFS ends up here, too, to allocate an inode.  Should we
	 * XXX create another pool for MFS inodes?
	 */
	ip = pool_get(&ffs_inode_pool, PR_WAITOK);
	memset((caddr_t)ip, 0, sizeof(struct inode));
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
	LIST_INIT(&ip->i_pcbufhd);
#ifdef QUOTA
	{
		int i;

		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);
	lockmgr(&ufs_hashlock, LK_RELEASE, 0);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain.  With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
	cp = (caddr_t)bp->b_data + (ino_to_fsbo(fs, ino) * DINODE_SIZE);
#ifdef FFS_EI
	if (UFS_FSNEEDSWAP(fs))
		ffs_dinode_swap((struct dinode *)cp, &ip->i_din.ffs_din);
	else
#endif
		memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_ffs_effnlink = ip->i_ffs_nlink;
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		ip->i_ffs_uid = ip->i_din.ffs_din.di_ouid;	/* XXX */
		ip->i_ffs_gid = ip->i_din.ffs_din.di_ogid;	/* XXX */
	}							/* XXX */
	uvm_vnp_setsize(vp, ip->i_ffs_size);

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via. exflagsp and credanonp
 */
int
ffs_fhtovp(mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	struct ufid *ufhp;
	struct fs *fs;

	ufhp = (struct ufid *)fhp;
	fs = VFSTOUFS(mp)->um_fs;
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	return (ufs_fhtovp(mp, ufhp, vpp));
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
int
ffs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	struct inode *ip;
	struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_ffs_gen;
	return (0);
}

void
ffs_init()
{
	if (ffs_initcount++ > 0)
		return;

	softdep_initialize();
	ufs_init();

	pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_FFSNODE);
}

void
ffs_reinit()
{
	softdep_reinitialize();
	ufs_reinit();
}

void
ffs_done()
{
	if (--ffs_initcount > 0)
		return;

	/* XXX softdep cleanup ? */
	ufs_done();
	pool_destroy(&ffs_inode_pool);
}

int
ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	extern int doclusterread, doclusterwrite, doreallocblks, doasyncfree;
	extern int ffs_log_changeopt;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case FFS_CLUSTERREAD:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doclusterread));
	case FFS_CLUSTERWRITE:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doclusterwrite));
	case FFS_REALLOCBLKS:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doreallocblks));
	case FFS_ASYNCFREE:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
	case FFS_LOG_CHANGEOPT:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &ffs_log_changeopt));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	struct fs *fs = mp->um_fs;
	struct buf *bp;
	int i, error = 0;
	int32_t saved_nrpos = fs->fs_nrpos;
	int64_t saved_qbmask = fs->fs_qbmask;
	int64_t saved_qfmask = fs->fs_qfmask;
	u_int64_t saved_maxfilesize = fs->fs_maxfilesize;
	u_int8_t saveflag;

	/* Restore compatibility to old file systems.		XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = -1;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		int32_t *lp, tmp;				/* XXX */
								/* XXX */
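		/*
		 * Rotating the 32-bit words at fs_qbmask appears to
		 * restore the superblock layout that pre-FS_44INODEFMT
		 * kernels and tools expect; the in-core values are put
		 * back below.					XXX
		 */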
		lp = (int32_t *)&fs->fs_qbmask;	/* XXX nuke qfmask too */
		tmp = lp[4];					/* XXX */
		for (i = 4; i > 0; i--)				/* XXX */
			lp[i] = lp[i-1];			/* XXX */
		lp[0] = tmp;					/* XXX */
	}							/* XXX */
	fs->fs_maxfilesize = mp->um_savedmaxfilesize;		/* XXX */

	bp = getblk(mp->um_devvp, SBOFF >> (fs->fs_fshift - fs->fs_fsbtodb),
	    (int)fs->fs_sbsize, 0, 0);
	saveflag = fs->fs_flags & FS_INTERNAL;
	fs->fs_flags &= ~FS_INTERNAL;
	memcpy(bp->b_data, fs, fs->fs_sbsize);
#ifdef FFS_EI
	if (mp->um_flags & UFS_NEEDSWAP)
		ffs_sb_swap(fs, (struct fs*)bp->b_data);
#endif

	fs->fs_flags |= saveflag;
	fs->fs_nrpos = saved_nrpos;				/* XXX */
	fs->fs_qbmask = saved_qbmask;				/* XXX */
	fs->fs_qfmask = saved_qfmask;				/* XXX */
	fs->fs_maxfilesize = saved_maxfilesize;			/* XXX */

	if (waitfor == MNT_WAIT)
		error = bwrite(bp);
	else
		bawrite(bp);
	return (error);
}

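/*
 * Write back the superblock and the in-core cylinder group summary
 * information.
 */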
int
ffs_cgupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	struct fs *fs = mp->um_fs;
	struct buf *bp;
	int blks;
	void *space;
	int i, size, error = 0, allerror = 0;

	allerror = ffs_sbupdate(mp, waitfor);
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = fs->fs_csp;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
		    size, 0, 0);
#ifdef FFS_EI
		if (mp->um_flags & UFS_NEEDSWAP)
			ffs_csum_swap((struct csum*)space,
			    (struct csum*)bp->b_data, size);
		else
#endif
			memcpy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		if (waitfor == MNT_WAIT)
			error = bwrite(bp);
		else
			bawrite(bp);
	}
	if (!allerror && error)
		allerror = error;
	return (allerror);
}