/*	$NetBSD: ffs_vfsops.c,v 1.85.2.2 2001/09/26 15:28:27 fvdl Exp $	*/

/*
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#if defined(_KERNEL_OPT)
#include "opt_ffs.h"
#include "opt_quota.h"
#include "opt_compat_netbsd.h"
#include "opt_softdep.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufs_bswap.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

/* how many times ffs_init() was called */
int ffs_initcount = 0;

extern struct lock ufs_hashlock;

extern struct vnodeopv_desc ffs_vnodeop_opv_desc;
extern struct vnodeopv_desc ffs_specop_opv_desc;
extern struct vnodeopv_desc ffs_fifoop_opv_desc;

const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
	&ffs_vnodeop_opv_desc,
	&ffs_specop_opv_desc,
	&ffs_fifoop_opv_desc,
	NULL,
};

struct vfsops ffs_vfsops = {
	MOUNT_FFS,
	ffs_mount,
	ufs_start,
	ffs_unmount,
	ufs_root,
	ufs_quotactl,
	ffs_statfs,
	ffs_sync,
	ffs_vget,
	ffs_fhtovp,
	ffs_vptofh,
	ffs_init,
	ffs_done,
	ffs_sysctl,
	ffs_mountroot,
	ufs_check_export,
	ffs_vnodeopv_descs,
};

struct pool ffs_inode_pool;

/*
 * Called by main() when ffs is going to be mounted as root.
 */

int
ffs_mountroot()
{
	struct fs *fs;
	struct mount *mp;
	struct proc *p = curproc;	/* XXX */
	struct ufsmount *ump;
	int error;

	if (root_device->dv_class != DV_DISK)
		return (ENODEV);

	/*
	 * Get vnodes for rootdev.
	 */
	if (bdevvp(rootdev, &rootvp))
		panic("ffs_mountroot: can't setup bdevvp's");

	if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
		vrele(rootvp);
		return (error);
	}

	vn_lock(rootvp, LK_EXCLUSIVE | LK_RETRY);

	if ((error = ffs_mountfs(rootvp, mp, p)) != 0) {
		mp->mnt_op->vfs_refcount--;
		vfs_unbusy(mp);
		free(mp, M_MOUNT);
		vput(rootvp);
		return (error);
	}

	VOP_UNLOCK(rootvp, 0);

	simple_lock(&mountlist_slock);
	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	simple_unlock(&mountlist_slock);
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
	(void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
	(void)ffs_statfs(mp, &mp->mnt_stat, p);
	vfs_unbusy(mp);
	inittodr(fs->fs_time);
	return (0);
}

/*
 * VFS Operations.
 *
 * mount system call
 */
int
ffs_mount(mp, path, data, ndp, p)
	struct mount *mp;
	const char *path;
	void *data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	size_t size;
	int error, flags;
	mode_t accessmode;

	error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args));
	if (error)
		return (error);

#if !defined(SOFTDEP)
	mp->mnt_flag &= ~MNT_SOFTDEP;
#endif

	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mp->mnt_flag & MNT_SOFTDEP)
				error = softdep_flushfiles(mp, flags, p);
			else
				error = ffs_flushfiles(mp, flags, p);
			if (error == 0 &&
			    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
			    fs->fs_clean & FS_WASCLEAN) {
				if (mp->mnt_flag & MNT_SOFTDEP)
					fs->fs_flags &= ~FS_DOSOFTDEP;
				fs->fs_clean = FS_ISCLEAN;
				(void) ffs_sbupdate(ump, MNT_WAIT);
			}
			if (error)
				return (error);
			fs->fs_ronly = 1;
			fs->fs_fmod = 0;
		}

		/*
		 * Flush soft dependencies if disabling it via an update
		 * mount. This may leave some items to be processed,
		 * so don't do this yet XXX.
		 */
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
#ifdef notyet
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = softdep_flushfiles(mp, flags, p);
			if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
				fs->fs_flags &= ~FS_DOSOFTDEP;
			(void) ffs_sbupdate(ump, MNT_WAIT);
#elif defined(SOFTDEP)
			mp->mnt_flag |= MNT_SOFTDEP;
#endif
		}

		/*
		 * When upgrading to a softdep mount, we must first flush
		 * all vnodes. (not done yet -- see above)
		 */
		if (!(fs->fs_flags & FS_DOSOFTDEP) &&
		    (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
#ifdef notyet
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ffs_flushfiles(mp, flags, p);
#else
			mp->mnt_flag &= ~MNT_SOFTDEP;
#endif
		}

		if (mp->mnt_flag & MNT_RELOAD) {
			error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
			if (error)
				return (error);
		}
		if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			devvp = ump->um_devvp;
			if (p->p_ucred->cr_uid != 0) {
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
				error = VOP_ACCESS(devvp, VREAD | VWRITE,
				    p->p_ucred, p);
				VOP_UNLOCK(devvp, 0);
				if (error)
					return (error);
			}
			fs->fs_ronly = 0;
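			/*
			 * Shifting fs_clean left turns FS_ISCLEAN into
			 * FS_WASCLEAN, recording that the file system was
			 * clean at the time it went read/write; the
			 * downgrade and unmount paths only mark it clean
			 * again if FS_WASCLEAN is still set.
			 */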
			fs->fs_clean <<= 1;
			fs->fs_fmod = 1;
			if ((fs->fs_flags & FS_DOSOFTDEP)) {
				error = softdep_mount(devvp, mp, fs,
				    p->p_ucred);
				if (error)
					return (error);
			}
		}
		if (args.fspec == 0) {
			/*
			 * Process export requests.
			 */
			return (vfs_export(mp, &ump->um_export, &args.export));
		}
		if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
		    (MNT_SOFTDEP | MNT_ASYNC)) {
			printf("%s fs uses soft updates, ignoring async mode\n",
			    fs->fs_fsmnt);
			mp->mnt_flag &= ~MNT_ASYNC;
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	if ((error = namei(ndp)) != 0)
		return (error);
	devvp = ndp->ni_vp;

	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}

	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (p->p_ucred->cr_uid != 0) {
		accessmode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
		if (error) {
			vput(devvp);
			return (error);
		}
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		error = ffs_mountfs(devvp, mp, p);
		if (!error) {
			ump = VFSTOUFS(mp);
			fs = ump->um_fs;
			if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
			    (MNT_SOFTDEP | MNT_ASYNC)) {
				printf("%s fs uses soft updates, "
				    "ignoring async mode\n",
				    fs->fs_fsmnt);
				mp->mnt_flag &= ~MNT_ASYNC;
			}
		}
	} else {
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vput(devvp);
		return (error);
	}

	VOP_UNLOCK(devvp, 0);

	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	memset(fs->fs_fsmnt + size, 0, sizeof(fs->fs_fsmnt) - size);
	memcpy(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	memset(mp->mnt_stat.f_mntfromname + size, 0, MNAMELEN - size);
	if (mp->mnt_flag & MNT_SOFTDEP)
		fs->fs_flags |= FS_DOSOFTDEP;
	else
		fs->fs_flags &= ~FS_DOSOFTDEP;
	if (fs->fs_fmod != 0) {	/* XXX */
		fs->fs_fmod = 0;
		if (fs->fs_clean & FS_WASCLEAN)
			fs->fs_time = time.tv_sec;
		else
			printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
			    mp->mnt_stat.f_mntfromname, fs->fs_clean);
		(void) ffs_cgupdate(ump, MNT_WAIT);
	}
	return (0);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 */
int
ffs_reload(mountp, cred, p)
	struct mount *mountp;
	struct ucred *cred;
	struct proc *p;
{
	struct vnode *vp, *nvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct partinfo dpart;
	int i, blks, size, error;
	int32_t *lp;
	caddr_t cp;

	if ((mountp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mountp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, 0, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0);
	if (error)
		panic("ffs_reload: dirty1");
	/*
	 * Step 2: re-read superblock from disk.
	 */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;
	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	fs = VFSTOUFS(mountp)->um_fs;
	newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
	memcpy(newfs, bp->b_data, fs->fs_sbsize);
#ifdef FFS_EI
	if (VFSTOUFS(mountp)->um_flags & UFS_NEEDSWAP) {
		ffs_sb_swap((struct fs*)bp->b_data, newfs);
		fs->fs_flags |= FS_SWAPPED;
	}
#endif
	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		free(newfs, M_UFSMNT);
		return (EIO);		/* XXX needs translation */
	}
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	newfs->fs_csp = fs->fs_csp;
	newfs->fs_maxcluster = fs->fs_maxcluster;
	newfs->fs_contigdirs = fs->fs_contigdirs;
	newfs->fs_ronly = fs->fs_ronly;
	memcpy(fs, newfs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	free(newfs, M_UFSMNT);
	mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat(fs);
	/* An old fsck may have zeroed these fields, so recheck them. */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;

	ffs_statfs(mountp, &mountp->mnt_stat, p);
	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = fs->fs_csp;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error) {
			brelse(bp);
			return (error);
		}
#ifdef FFS_EI
		if (UFS_FSNEEDSWAP(fs))
			ffs_csum_swap((struct csum *)bp->b_data,
			    (struct csum *)space, size);
		else
#endif
			memcpy(space, bp->b_data, (size_t)size);
		space = (char *)space + size;
		brelse(bp);
	}
	if ((fs->fs_flags & FS_DOSOFTDEP))
		softdep_mount(devvp, mountp, fs, cred);
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		lp = fs->fs_maxcluster;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}

loop:
	simple_lock(&mntvnode_slock);
	for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		if (vp->v_mount != mountp) {
			simple_unlock(&mntvnode_slock);
			goto loop;
		}
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		if (vrecycle(vp, &mntvnode_slock, p))
			goto loop;
		/*
		 * Step 5: invalidate all cached file data.
		 */
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
			goto loop;
		if (vinvalbuf(vp, 0, cred, p, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			vput(vp);
			return (error);
		}
		cp = (caddr_t)bp->b_data +
		    (ino_to_fsbo(fs, ip->i_number) * DINODE_SIZE);
#ifdef FFS_EI
		if (UFS_FSNEEDSWAP(fs))
			ffs_dinode_swap((struct dinode *)cp,
			    &ip->i_din.ffs_din);
		else
#endif
			memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
		ip->i_ffs_effnlink = ip->i_ffs_nlink;
		brelse(bp);
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}

/*
 * Common code for mount and mountroot
 */
int
ffs_mountfs(devvp, mp, p)
	struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	dev_t dev;
	struct partinfo dpart;
	void *space;
	int blks;
	int error, i, size, ronly;
#ifdef FFS_EI
	int needswap;
#endif
	int32_t *lp;
	struct ucred *cred;
	u_int64_t maxfilesize;	/* XXX */
	u_int32_t sbsize;

	dev = devvp->v_rdev;
	cred = p ? p->p_ucred : NOCRED;
	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)) != 0)
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
	if (error)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p, NULL);
	if (error)
		return (error);
	VOP_UNLOCK(devvp, 0);
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);

	bp = NULL;
	ump = NULL;
	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, cred, &bp);
	if (error)
		goto out;

	fs = (struct fs*)bp->b_data;
	if (fs->fs_magic == FS_MAGIC) {
		sbsize = fs->fs_sbsize;
#ifdef FFS_EI
		needswap = 0;
	} else if (fs->fs_magic == bswap32(FS_MAGIC)) {
		sbsize = bswap32(fs->fs_sbsize);
		needswap = 1;
#endif
	} else {
		error = EINVAL;
		goto out;
	}
	if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs)) {
		error = EINVAL;
		goto out;
	}

	fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
	memcpy(fs, bp->b_data, sbsize);
#ifdef FFS_EI
	if (needswap) {
		ffs_sb_swap((struct fs*)bp->b_data, fs);
		fs->fs_flags |= FS_SWAPPED;
	}
#endif
	ffs_oldfscompat(fs);

	if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < sizeof(struct fs)) {
		error = EINVAL;
		goto out;
	}
	/* make sure cylinder group summary area is a reasonable size. */
	if (fs->fs_cgsize == 0 || fs->fs_cpg == 0 ||
	    fs->fs_ncg > fs->fs_ncyl / fs->fs_cpg + 1 ||
	    fs->fs_cssize >
	    fragroundup(fs, fs->fs_ncg * sizeof(struct csum))) {
		error = EINVAL;		/* XXX needs translation */
		goto out2;
	}
	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
		error = EROFS;		/* XXX what should be returned? */
		goto out2;
	}

	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	memset((caddr_t)ump, 0, sizeof *ump);
	ump->um_fs = fs;
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;
	fs->fs_ronly = ronly;
	if (ronly == 0) {
		fs->fs_clean <<= 1;
		fs->fs_fmod = 1;
	}
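	/*
	 * One allocation holds the cylinder group summary array (fs_csp),
	 * the per-cylinder-group cluster summary (fs_maxcluster, only when
	 * clustering is enabled) and the per-cylinder-group directory
	 * counts (fs_contigdirs); the individual pieces are carved out of
	 * `space' below.
	 */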
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    cred, &bp);
		if (error) {
			free(fs->fs_csp, M_UFSMNT);
			goto out2;
		}
#ifdef FFS_EI
		if (needswap)
			ffs_csum_swap((struct csum *)bp->b_data,
			    (struct csum *)space, size);
		else
#endif
			memcpy(space, bp->b_data, (u_int)size);

		space = (char *)space + size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
	fs->fs_contigdirs = space;
	space = (char *)space + size;
	memset(fs->fs_contigdirs, 0, size);
	/* Compatibility for old filesystems - XXX */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_FFS);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	mp->mnt_fs_bshift = fs->fs_bshift;
	mp->mnt_dev_bshift = DEV_BSHIFT;	/* XXX */
	mp->mnt_flag |= MNT_LOCAL;
#ifdef FFS_EI
	if (needswap)
		ump->um_flags |= UFS_NEEDSWAP;
#endif
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_lognindir = ffs(fs->fs_nindir) - 1;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specmountpoint = mp;
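	/*
	 * Clamp fs_maxfilesize so that a file spans at most 2^31 file
	 * system blocks, keeping logical block numbers within a signed
	 * 32-bit range; the on-disk value is remembered in
	 * um_savedmaxfilesize and written back by ffs_sbupdate().
	 */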
	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
	maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;	/* XXX */
	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
		fs->fs_maxfilesize = maxfilesize;		/* XXX */
	if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
		error = softdep_mount(devvp, mp, fs, cred);
		if (error) {
			free(fs->fs_csp, M_UFSMNT);
			goto out;
		}
	}
	return (0);
out2:
	free(fs, M_UFSMNT);
out:
	devvp->v_specmountpoint = NULL;
	if (bp)
		brelse(bp);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
	if (ump) {
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * Sanity checks for old file systems.
 *
 * XXX - goes away some day.
 */
int
ffs_oldfscompat(fs)
	struct fs *fs;
{
	int i;

	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = 8;				/* XXX */
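	/*
	 * The fs_maxfilesize field is not valid in pre-4.4 superblocks;
	 * recompute it from the block pointer geometry: NDADDR direct
	 * blocks plus one, two and three levels of indirection, each
	 * level multiplying the reach by NINDIR(fs).
	 */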
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		u_int64_t sizepb = fs->fs_bsize;		/* XXX */
								/* XXX */
		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
		for (i = 0; i < NIADDR; i++) {			/* XXX */
			sizepb *= NINDIR(fs);			/* XXX */
			fs->fs_maxfilesize += sizepb;		/* XXX */
		}						/* XXX */
		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
	}							/* XXX */
	return (0);
}

/*
 * unmount system call
 */
int
ffs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	struct ufsmount *ump;
	struct fs *fs;
	int error, flags;

	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if (mp->mnt_flag & MNT_SOFTDEP) {
		if ((error = softdep_flushfiles(mp, flags, p)) != 0)
			return (error);
	} else {
		if ((error = ffs_flushfiles(mp, flags, p)) != 0)
			return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_ronly == 0 &&
	    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
	    fs->fs_clean & FS_WASCLEAN) {
		if (mp->mnt_flag & MNT_SOFTDEP)
			fs->fs_flags &= ~FS_DOSOFTDEP;
		fs->fs_clean = FS_ISCLEAN;
		(void) ffs_sbupdate(ump, MNT_WAIT);
	}
	if (ump->um_devvp->v_type != VBAD)
		ump->um_devvp->v_specmountpoint = NULL;
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
	    NOCRED, p);
	vput(ump->um_devvp);
	free(fs->fs_csp, M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{
	extern int doforce;
	struct ufsmount *ump;
	int error;

	if (!doforce)
		flags &= ~FORCECLOSE;
	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	/*
	 * Flush all the files.
	 */
	error = vflush(mp, NULLVP, flags);
	if (error)
		return (error);
	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, p->p_ucred, FSYNC_WAIT, 0, 0, p);
	VOP_UNLOCK(ump->um_devvp, 0);
	return (error);
}

/*
 * Get file system statistics.
 */
int
ffs_statfs(mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	struct ufsmount *ump;
	struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ffs_statfs");
#ifdef COMPAT_09
	sbp->f_type = 1;
#else
	sbp->f_type = 0;
#endif
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree;
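	/*
	 * f_bavail is what remains once the minfree reserve is honoured:
	 * dsize * (100 - minfree) / 100 - (dsize - bfree), i.e. bfree
	 * minus the reserve; it goes negative once the reserve itself is
	 * being consumed.
	 */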
	sbp->f_bavail = (long) (((u_int64_t) fs->fs_dsize * (u_int64_t)
	    (100 - fs->fs_minfree) / (u_int64_t) 100) -
	    (u_int64_t) (fs->fs_dsize - sbp->f_bfree));
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
	if (sbp != &mp->mnt_stat) {
		memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
		memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
	}
	strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
ffs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	struct vnode *vp, *nvp;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, allerror = 0;

	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {	/* XXX */
		printf("fs = %s\n", fs->fs_fsmnt);
		panic("update: rofs mod");
	}
	/*
	 * Write back each (modified) inode.
	 */
	simple_lock(&mntvnode_slock);
loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		simple_lock(&vp->v_interlock);
		nvp = LIST_NEXT(vp, v_mntvnodes);
		ip = VTOI(vp);
		if (vp->v_type == VNON ||
		    ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFIED | IN_ACCESSED)) == 0 &&
		    LIST_EMPTY(&vp->v_dirtyblkhd) &&
		    vp->v_uvm.u_obj.uo_npages == 0))
		{
			simple_unlock(&vp->v_interlock);
			continue;
		}
		simple_unlock(&mntvnode_slock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			simple_lock(&mntvnode_slock);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		if ((error = VOP_FSYNC(vp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p)) != 0)
			allerror = error;
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	/*
	 * Force stale file system control information to be flushed.
	 */
	if (waitfor != MNT_LAZY) {
		if (ump->um_mountp->mnt_flag & MNT_SOFTDEP)
			waitfor = MNT_NOWAIT;
		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
		if ((error = VOP_FSYNC(ump->um_devvp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p)) != 0)
			allerror = error;
		VOP_UNLOCK(ump->um_devvp, 0);
	}
#ifdef QUOTA
	qsync(mp);
#endif
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0) {
		fs->fs_fmod = 0;
		fs->fs_time = time.tv_sec;
		if ((error = ffs_cgupdate(ump, waitfor)))
			allerror = error;
	}
	return (allerror);
}

/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk. If it is in core, wait for the lock bit to clear, then
 * return the inode locked. Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ffs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;
	caddr_t cp;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;

	if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}

	/*
	 * If someone beat us to it while sleeping in getnewvnode(),
	 * push back the freshly allocated vnode we don't need, and return.
	 */
	do {
		if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
			ungetnewvnode(vp);
			return (0);
		}
	} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));
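	/*
	 * LK_SLEEPFAIL makes lockmgr() return nonzero whenever it had to
	 * sleep for ufs_hashlock, so the hash chain is re-checked after
	 * every sleep; once the lock is obtained without sleeping, nobody
	 * else can be initializing the same inode.
	 */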

	/*
	 * XXX MFS ends up here, too, to allocate an inode. Should we
	 * XXX create another pool for MFS inodes?
	 */
	ip = pool_get(&ffs_inode_pool, PR_WAITOK);
	memset((caddr_t)ip, 0, sizeof(struct inode));
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
	LIST_INIT(&ip->i_pcbufhd);
#ifdef QUOTA
	{
		int i;

		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);
	lockmgr(&ufs_hashlock, LK_RELEASE, 0);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
	cp = (caddr_t)bp->b_data + (ino_to_fsbo(fs, ino) * DINODE_SIZE);
#ifdef FFS_EI
	if (UFS_FSNEEDSWAP(fs))
		ffs_dinode_swap((struct dinode *)cp, &ip->i_din.ffs_din);
	else
#endif
		memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_ffs_effnlink = ip->i_ffs_nlink;
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		ip->i_ffs_uid = ip->i_din.ffs_din.di_ouid;	/* XXX */
		ip->i_ffs_gid = ip->i_din.ffs_din.di_ogid;	/* XXX */
	}							/* XXX */
	uvm_vnp_setsize(vp, ip->i_ffs_size);

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via. exflagsp and credanonp
 */
int
ffs_fhtovp(mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	struct ufid *ufhp;
	struct fs *fs;

	ufhp = (struct ufid *)fhp;
	fs = VFSTOUFS(mp)->um_fs;
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	return (ufs_fhtovp(mp, ufhp, vpp));
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
int
ffs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	struct inode *ip;
	struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_ffs_gen;
	return (0);
}

void
ffs_init()
{
	if (ffs_initcount++ > 0)
		return;

	softdep_initialize();
	ufs_init();

	pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_FFSNODE);
}

void
ffs_done()
{
	if (--ffs_initcount > 0)
		return;

	/* XXX softdep cleanup ? */
	ufs_done();
	pool_destroy(&ffs_inode_pool);
}

int
ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	extern int doclusterread, doclusterwrite, doreallocblks, doasyncfree;
	extern int ffs_log_changeopt;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case FFS_CLUSTERREAD:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doclusterread));
	case FFS_CLUSTERWRITE:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doclusterwrite));
	case FFS_REALLOCBLKS:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doreallocblks));
	case FFS_ASYNCFREE:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
	case FFS_LOG_CHANGEOPT:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &ffs_log_changeopt));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	struct fs *fs = mp->um_fs;
	struct buf *bp;
	int i, error = 0;
	int32_t saved_nrpos = fs->fs_nrpos;
	int64_t saved_qbmask = fs->fs_qbmask;
	int64_t saved_qfmask = fs->fs_qfmask;
	u_int64_t saved_maxfilesize = fs->fs_maxfilesize;
	u_int8_t saveflag;

	/* Restore compatibility to old file systems.		   XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = -1;				/* XXX */
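	/*
	 * In pre-4.4 superblocks the words now holding fs_qbmask,
	 * fs_qfmask and fs_maxfilesize sit at different offsets on disk,
	 * so rotate the five 32-bit words starting at fs_qbmask right by
	 * one before the copy below; the live values are put back from
	 * the saved_* copies once the in-core superblock has been copied
	 * into the buffer.
	 */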
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		int32_t *lp, tmp;				/* XXX */
								/* XXX */
		lp = (int32_t *)&fs->fs_qbmask;	/* XXX nuke qfmask too */
		tmp = lp[4];					/* XXX */
		for (i = 4; i > 0; i--)				/* XXX */
			lp[i] = lp[i-1];			/* XXX */
		lp[0] = tmp;					/* XXX */
	}							/* XXX */
	fs->fs_maxfilesize = mp->um_savedmaxfilesize;		/* XXX */

	bp = getblk(mp->um_devvp, SBOFF >> (fs->fs_fshift - fs->fs_fsbtodb),
	    (int)fs->fs_sbsize, 0, 0);
	saveflag = fs->fs_flags & FS_INTERNAL;
	fs->fs_flags &= ~FS_INTERNAL;
	memcpy(bp->b_data, fs, fs->fs_sbsize);
#ifdef FFS_EI
	if (mp->um_flags & UFS_NEEDSWAP)
		ffs_sb_swap(fs, (struct fs*)bp->b_data);
#endif

	fs->fs_flags |= saveflag;
	fs->fs_nrpos = saved_nrpos;	/* XXX */
	fs->fs_qbmask = saved_qbmask;	/* XXX */
	fs->fs_qfmask = saved_qfmask;	/* XXX */
	fs->fs_maxfilesize = saved_maxfilesize;	/* XXX */

	if (waitfor == MNT_WAIT)
		error = bwrite(bp);
	else
		bawrite(bp);
	return (error);
}

int
ffs_cgupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	struct fs *fs = mp->um_fs;
	struct buf *bp;
	int blks;
	void *space;
	int i, size, error = 0, allerror = 0;

	allerror = ffs_sbupdate(mp, waitfor);
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = fs->fs_csp;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
		    size, 0, 0);
#ifdef FFS_EI
		if (mp->um_flags & UFS_NEEDSWAP)
			ffs_csum_swap((struct csum*)space,
			    (struct csum*)bp->b_data, size);
		else
#endif
			memcpy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		if (waitfor == MNT_WAIT)
			error = bwrite(bp);
		else
			bawrite(bp);
	}
	if (!allerror && error)
		allerror = error;
	return (allerror);
}