/*	$NetBSD: ffs_vfsops.c,v 1.65 2000/06/15 22:35:37 fvdl Exp $	*/
2
3 /*
4 * Copyright (c) 1989, 1991, 1993, 1994
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
36 */
37
38 #if defined(_KERNEL) && !defined(_LKM)
39 #include "opt_ffs.h"
40 #include "opt_quota.h"
41 #include "opt_compat_netbsd.h"
42 #endif
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/namei.h>
47 #include <sys/proc.h>
48 #include <sys/kernel.h>
49 #include <sys/vnode.h>
50 #include <sys/socket.h>
51 #include <sys/mount.h>
52 #include <sys/buf.h>
53 #include <sys/device.h>
54 #include <sys/mbuf.h>
55 #include <sys/file.h>
56 #include <sys/disklabel.h>
57 #include <sys/ioctl.h>
58 #include <sys/errno.h>
59 #include <sys/malloc.h>
60 #include <sys/pool.h>
61 #include <sys/lock.h>
62 #include <vm/vm.h>
63 #include <sys/sysctl.h>
64
65 #include <miscfs/specfs/specdev.h>
66
67 #include <ufs/ufs/quota.h>
68 #include <ufs/ufs/ufsmount.h>
69 #include <ufs/ufs/inode.h>
70 #include <ufs/ufs/dir.h>
71 #include <ufs/ufs/ufs_extern.h>
72 #include <ufs/ufs/ufs_bswap.h>
73
74 #include <ufs/ffs/fs.h>
75 #include <ufs/ffs/ffs_extern.h>
76
77 /* how many times ffs_init() was called */
78 int ffs_initcount = 0;
79
80 extern struct lock ufs_hashlock;
81
82 extern struct vnodeopv_desc ffs_vnodeop_opv_desc;
83 extern struct vnodeopv_desc ffs_specop_opv_desc;
84 extern struct vnodeopv_desc ffs_fifoop_opv_desc;
85
/*
 * Table of vnode operation vectors exported by FFS: one for regular
 * vnodes, one for special devices, one for FIFOs.  NULL-terminated so
 * the VFS attach code can iterate it.
 */
struct vnodeopv_desc *ffs_vnodeopv_descs[] = {
	&ffs_vnodeop_opv_desc,
	&ffs_specop_opv_desc,
	&ffs_fifoop_opv_desc,
	NULL,
};
92
/*
 * FFS filesystem-type switch entry.  Slots are positional; several
 * operations are shared with the generic UFS layer (ufs_*).
 */
struct vfsops ffs_vfsops = {
	MOUNT_FFS,		/* vfs_name */
	ffs_mount,		/* vfs_mount */
	ufs_start,		/* vfs_start (generic UFS) */
	ffs_unmount,		/* vfs_unmount */
	ufs_root,		/* vfs_root (generic UFS) */
	ufs_quotactl,		/* vfs_quotactl (generic UFS) */
	ffs_statfs,		/* vfs_statfs */
	ffs_sync,		/* vfs_sync */
	ffs_vget,		/* vfs_vget */
	ffs_fhtovp,		/* vfs_fhtovp */
	ffs_vptofh,		/* vfs_vptofh */
	ffs_init,		/* vfs_init */
	ffs_done,		/* vfs_done */
	ffs_sysctl,		/* vfs_sysctl */
	ffs_mountroot,		/* vfs_mountroot */
	ufs_check_export,	/* vfs_checkexp (generic UFS) */
	ffs_vnodeopv_descs,	/* vfs_opv_descs */
};
112
113 struct pool ffs_inode_pool;
114
/*
 * Called by main() when ffs is going to be mounted as root.
 *
 * Builds a block-device vnode for rootdev, allocates a struct mount,
 * runs the common mount path (ffs_mountfs), and links the new mount
 * onto the global mountlist.  Returns 0 on success or an errno;
 * on failure all resources acquired here are released again.
 */

int
ffs_mountroot()
{
	struct fs *fs;
	struct mount *mp;
	struct proc *p = curproc;	/* XXX */
	struct ufsmount *ump;
	int error;

	/* Root must be a disk-class device. */
	if (root_device->dv_class != DV_DISK)
		return (ENODEV);

	/*
	 * Get vnodes for rootdev.
	 */
	if (bdevvp(rootdev, &rootvp))
		panic("ffs_mountroot: can't setup bdevvp's");

	if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
		vrele(rootvp);
		return (error);
	}
	if ((error = ffs_mountfs(rootvp, mp, p)) != 0) {
		/* Undo vfs_rootmountalloc: drop ref, unbusy, free mount. */
		mp->mnt_op->vfs_refcount--;
		vfs_unbusy(mp);
		free(mp, M_MOUNT);
		vrele(rootvp);
		return (error);
	}
	/* Publish the new mount on the global mount list. */
	simple_lock(&mountlist_slock);
	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	simple_unlock(&mountlist_slock);
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	/* Record the mount point name in the on-disk superblock copy. */
	memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
	(void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
	(void)ffs_statfs(mp, &mp->mnt_stat, p);
	vfs_unbusy(mp);
	/* Seed the system clock from the filesystem timestamp. */
	inittodr(fs->fs_time);
	return (0);
}
160
/*
 * VFS Operations.
 *
 * mount system call
 *
 * Handles both fresh mounts and MNT_UPDATE remounts (read-only
 * downgrade, read-write upgrade, reload, softdep enable/disable,
 * export list changes).  `data' points to a user-space ufs_args.
 * Returns 0 or an errno.
 */
int
ffs_mount(mp, path, data, ndp, p)
	struct mount *mp;
	const char *path;
	void *data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	size_t size;
	int error, flags;
	mode_t accessmode;

	/* Copy the mount arguments in from user space. */
	error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args));
	if (error)
		return (error);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		/*
		 * Downgrade read/write -> read-only: flush all files,
		 * then mark the superblock clean if the flush succeeded
		 * and the fs was clean when mounted.
		 */
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mp->mnt_flag & MNT_SOFTDEP)
				error = softdep_flushfiles(mp, flags, p);
			else
				error = ffs_flushfiles(mp, flags, p);
			if (error == 0 &&
			    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
			    fs->fs_clean & FS_WASCLEAN) {
				if (mp->mnt_flag & MNT_SOFTDEP)
					fs->fs_flags &= ~FS_DOSOFTDEP;
				fs->fs_clean = FS_ISCLEAN;
				(void) ffs_sbupdate(ump, MNT_WAIT);
			}
			if (error)
				return (error);
			fs->fs_ronly = 1;
		}

		/*
		 * Flush soft dependencies if disabling it via an update
		 * mount. This may leave some items to be processed,
		 * so don't do this yet XXX.
		 */
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
#ifdef notyet
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = softdep_flushfiles(mp, flags, p);
			if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
				fs->fs_flags &= ~FS_DOSOFTDEP;
			(void) ffs_sbupdate(ump, MNT_WAIT);
#else
			/* Disabling softdep is not supported yet: keep it on. */
			mp->mnt_flag |= MNT_SOFTDEP;
#endif
		}

		/*
		 * When upgrading to a softdep mount, we must first flush
		 * all vnodes. (not done yet -- see above)
		 */
		if (!(fs->fs_flags & FS_DOSOFTDEP) &&
		    (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
#ifdef notyet
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ffs_flushfiles(mp, flags, p);
#else
			/* Enabling softdep on a live mount not supported yet. */
			mp->mnt_flag &= ~MNT_SOFTDEP;
#endif
		}

		/* MNT_RELOAD: re-read all on-disk metadata (see ffs_reload). */
		if (mp->mnt_flag & MNT_RELOAD) {
			error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
			if (error)
				return (error);
		}
		if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			devvp = ump->um_devvp;
			if (p->p_ucred->cr_uid != 0) {
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
				error = VOP_ACCESS(devvp, VREAD | VWRITE,
						   p->p_ucred, p);
				VOP_UNLOCK(devvp, 0);
				if (error)
					return (error);
			}
			fs->fs_ronly = 0;
			/* Shift clean flag left: remembers FS_WASCLEAN state. */
			fs->fs_clean <<= 1;
			fs->fs_fmod = 1;
			if ((fs->fs_flags & FS_DOSOFTDEP)) {
				error = softdep_mount(devvp, mp, fs,
				    p->p_ucred);
				if (error)
					return (error);
			}
		}
		if (args.fspec == 0) {
			/*
			 * Process export requests.
			 */
			return (vfs_export(mp, &ump->um_export, &args.export));
		}
		/* Soft updates and async mode are mutually exclusive. */
		if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
		    (MNT_SOFTDEP | MNT_ASYNC)) {
			printf("%s fs uses soft updates, ignoring async mode\n",
			    fs->fs_fsmnt);
			mp->mnt_flag &= ~MNT_ASYNC;
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	if ((error = namei(ndp)) != 0)
		return (error);
	devvp = ndp->ni_vp;

	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}
	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (p->p_ucred->cr_uid != 0) {
		accessmode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
		VOP_UNLOCK(devvp, 0);
		if (error) {
			vrele(devvp);
			return (error);
		}
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		/* Fresh mount: do the heavy lifting in ffs_mountfs. */
		error = ffs_mountfs(devvp, mp, p);
		if (!error) {
			ump = VFSTOUFS(mp);
			fs = ump->um_fs;
			if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
			    (MNT_SOFTDEP | MNT_ASYNC)) {
				printf("%s fs uses soft updates, "
				    "ignoring async mode\n",
				    fs->fs_fsmnt);
				mp->mnt_flag &= ~MNT_ASYNC;
			}
		}
	}
	else {
		/* Update with a device name: it must match the mounted one. */
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	/* Record mount point / device names in superblock and statfs. */
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	memset(fs->fs_fsmnt + size, 0, sizeof(fs->fs_fsmnt) - size);
	memcpy(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	memset(mp->mnt_stat.f_mntfromname + size, 0, MNAMELEN - size);
	if (mp->mnt_flag & MNT_SOFTDEP)
		fs->fs_flags |= FS_DOSOFTDEP;
	if (fs->fs_fmod != 0) {	/* XXX */
		fs->fs_fmod = 0;
		if (fs->fs_clean & FS_WASCLEAN)
			fs->fs_time = time.tv_sec;
		else
			printf("%s: file system not clean (fs_flags=%x); please fsck(8)\n",
			    mp->mnt_stat.f_mntfromname, fs->fs_clean);
		(void) ffs_cgupdate(ump, MNT_WAIT);
	}
	return (0);
}
367
/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 *
 * Returns 0 or an errno; panics if dirty buffers are found on a
 * supposedly read-only filesystem.
 */
int
ffs_reload(mountp, cred, p)
	struct mount *mountp;
	struct ucred *cred;
	struct proc *p;
{
	struct vnode *vp, *nvp, *devvp;
	struct inode *ip;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct partinfo dpart;
	int i, blks, size, error;
	int32_t *lp;
	caddr_t cp;

	/* Only a read-only mount may be reloaded. */
	if ((mountp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mountp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, 0, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0);
	if (error)
		panic("ffs_reload: dirty1");
	/*
	 * Step 2: re-read superblock from disk.
	 */
	/* Use the device sector size if we can get it, else DEV_BSIZE. */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;
	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	fs = VFSTOUFS(mountp)->um_fs;
	/* Stage the new superblock in a temporary buffer for validation. */
	newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
	memcpy(newfs, bp->b_data, fs->fs_sbsize);
#ifdef FFS_EI
	if (VFSTOUFS(mountp)->um_flags & UFS_NEEDSWAP) {
		/* Byte-swap the other-endian superblock into newfs. */
		ffs_sb_swap((struct fs*)bp->b_data, newfs, 0);
		fs->fs_flags |= FS_SWAPPED;
	}
#endif
	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		free(newfs, M_UFSMNT);
		return (EIO);		/* XXX needs translation */
	}
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	memcpy(&newfs->fs_csp[0], &fs->fs_csp[0], sizeof(fs->fs_csp));
	newfs->fs_maxcluster = fs->fs_maxcluster;
	memcpy(fs, newfs, (u_int)fs->fs_sbsize);
	/* Invalidate an undersized superblock buffer so it isn't reused. */
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	free(newfs, M_UFSMNT);
	mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat(fs);
	ffs_statfs(mountp, &mountp->mnt_stat, p);
	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
			      NOCRED, &bp);
		if (error) {
			brelse(bp);
			return (error);
		}
#ifdef FFS_EI
		if (UFS_FSNEEDSWAP(fs))
			ffs_csum_swap((struct csum*)bp->b_data,
			    (struct csum*)fs->fs_csp[fragstoblks(fs, i)], size);
		else
#endif
			memcpy(fs->fs_csp[fragstoblks(fs, i)], bp->b_data,
			    (size_t)size);
		brelse(bp);
	}
	if ((fs->fs_flags & FS_DOSOFTDEP))
		softdep_mount(devvp, mountp, fs, cred);
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		lp = fs->fs_maxcluster;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}

loop:
	/*
	 * Walk the per-mount vnode list.  If the list changes under us
	 * (vnode moved to another mount, vget/vrecycle raced), restart
	 * from the top -- hence the goto loop pattern.
	 */
	simple_lock(&mntvnode_slock);
	for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		if (vp->v_mount != mountp) {
			simple_unlock(&mntvnode_slock);
			goto loop;
		}
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		if (vrecycle(vp, &mntvnode_slock, p))
			goto loop;
		/*
		 * Step 5: invalidate all cached file data.
		 */
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
			goto loop;
		if (vinvalbuf(vp, 0, cred, p, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
			      (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			vput(vp);
			return (error);
		}
		cp = (caddr_t)bp->b_data +
		    (ino_to_fsbo(fs, ip->i_number) * DINODE_SIZE);
#ifdef FFS_EI
		if (UFS_FSNEEDSWAP(fs))
			ffs_dinode_swap((struct dinode *)cp,
			    &ip->i_din.ffs_din);
		else
#endif
			memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
		ip->i_ffs_effnlink = ip->i_ffs_nlink;
		brelse(bp);
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}
534
535 /*
536 * Common code for mount and mountroot
537 */
538 int
539 ffs_mountfs(devvp, mp, p)
540 struct vnode *devvp;
541 struct mount *mp;
542 struct proc *p;
543 {
544 struct ufsmount *ump;
545 struct buf *bp;
546 struct fs *fs;
547 dev_t dev;
548 struct partinfo dpart;
549 caddr_t base, space;
550 int blks;
551 int error, i, size, ronly;
552 #ifdef FFS_EI
553 int needswap;
554 #endif
555 int32_t *lp;
556 struct ucred *cred;
557 u_int64_t maxfilesize; /* XXX */
558 u_int32_t sbsize;
559
560 dev = devvp->v_rdev;
561 cred = p ? p->p_ucred : NOCRED;
562 /*
563 * Disallow multiple mounts of the same device.
564 * Disallow mounting of a device that is currently in use
565 * (except for root, which might share swap device for miniroot).
566 * Flush out any old buffers remaining from a previous use.
567 */
568 if ((error = vfs_mountedon(devvp)) != 0)
569 return (error);
570 if (vcount(devvp) > 1 && devvp != rootvp)
571 return (EBUSY);
572 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
573 error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
574 VOP_UNLOCK(devvp, 0);
575 if (error)
576 return (error);
577
578 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
579 error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
580 if (error)
581 return (error);
582 if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
583 size = DEV_BSIZE;
584 else
585 size = dpart.disklab->d_secsize;
586
587 bp = NULL;
588 ump = NULL;
589 error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, cred, &bp);
590 if (error)
591 goto out;
592
593 fs = (struct fs*)bp->b_data;
594 if (fs->fs_magic == FS_MAGIC) {
595 sbsize = fs->fs_sbsize;
596 #ifdef FFS_EI
597 needswap = 0;
598 } else if (fs->fs_magic == bswap32(FS_MAGIC)) {
599 sbsize = bswap32(fs->fs_sbsize);
600 needswap = 1;
601 #endif
602 } else {
603 error = EINVAL;
604 goto out;
605 }
606 if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs)) {
607 error = EINVAL;
608 goto out;
609 }
610
611 fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
612 memcpy(fs, bp->b_data, sbsize);
613 #ifdef FFS_EI
614 if (needswap) {
615 ffs_sb_swap((struct fs*)bp->b_data, fs, 0);
616 fs->fs_flags |= FS_SWAPPED;
617 }
618 #endif
619 ffs_oldfscompat(fs);
620
621 if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < sizeof(struct fs)) {
622 error = EINVAL;
623 goto out;
624 }
625 /* make sure cylinder group summary area is a reasonable size. */
626 if (fs->fs_cgsize == 0 || fs->fs_cpg == 0 ||
627 fs->fs_ncg > fs->fs_ncyl / fs->fs_cpg + 1 ||
628 fs->fs_cssize >
629 fragroundup(fs, fs->fs_ncg * sizeof(struct csum))) {
630 error = EINVAL; /* XXX needs translation */
631 goto out2;
632 }
633 /* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
634 if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
635 error = EROFS; /* XXX what should be returned? */
636 goto out2;
637 }
638
639 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
640 memset((caddr_t)ump, 0, sizeof *ump);
641 ump->um_fs = fs;
642 if (fs->fs_sbsize < SBSIZE)
643 bp->b_flags |= B_INVAL;
644 brelse(bp);
645 bp = NULL;
646 fs->fs_ronly = ronly;
647 if (ronly == 0) {
648 fs->fs_clean <<= 1;
649 fs->fs_fmod = 1;
650 }
651 size = fs->fs_cssize;
652 blks = howmany(size, fs->fs_fsize);
653 if (fs->fs_contigsumsize > 0)
654 size += fs->fs_ncg * sizeof(int32_t);
655 base = space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
656 for (i = 0; i < blks; i += fs->fs_frag) {
657 size = fs->fs_bsize;
658 if (i + fs->fs_frag > blks)
659 size = (blks - i) * fs->fs_fsize;
660 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
661 cred, &bp);
662 if (error) {
663 free(base, M_UFSMNT);
664 goto out2;
665 }
666 #ifdef FFS_EI
667 if (needswap)
668 ffs_csum_swap((struct csum*)bp->b_data,
669 (struct csum*)space, size);
670 else
671 #endif
672 memcpy(space, bp->b_data, (u_int)size);
673
674 fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space;
675 space += size;
676 brelse(bp);
677 bp = NULL;
678 }
679 if (fs->fs_contigsumsize > 0) {
680 fs->fs_maxcluster = lp = (int32_t *)space;
681 for (i = 0; i < fs->fs_ncg; i++)
682 *lp++ = fs->fs_contigsumsize;
683 }
684 mp->mnt_data = (qaddr_t)ump;
685 mp->mnt_stat.f_fsid.val[0] = (long)dev;
686 mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_FFS);
687 mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
688 mp->mnt_flag |= MNT_LOCAL;
689 #ifdef FFS_EI
690 if (needswap)
691 ump->um_flags |= UFS_NEEDSWAP;
692 #endif
693 ump->um_mountp = mp;
694 ump->um_dev = dev;
695 ump->um_devvp = devvp;
696 ump->um_nindir = fs->fs_nindir;
697 ump->um_bptrtodb = fs->fs_fsbtodb;
698 ump->um_seqinc = fs->fs_frag;
699 for (i = 0; i < MAXQUOTAS; i++)
700 ump->um_quotas[i] = NULLVP;
701 devvp->v_specmountpoint = mp;
702 ump->um_savedmaxfilesize = fs->fs_maxfilesize; /* XXX */
703 maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1; /* XXX */
704 if (fs->fs_maxfilesize > maxfilesize) /* XXX */
705 fs->fs_maxfilesize = maxfilesize; /* XXX */
706 if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
707 error = softdep_mount(devvp, mp, fs, cred);
708 if (error) {
709 free(base, M_UFSMNT);
710 goto out;
711 }
712 }
713 return (0);
714 out2:
715 free(fs, M_UFSMNT);
716 out:
717 devvp->v_specmountpoint = NULL;
718 if (bp)
719 brelse(bp);
720 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
721 (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
722 VOP_UNLOCK(devvp, 0);
723 if (ump) {
724 free(ump, M_UFSMNT);
725 mp->mnt_data = (qaddr_t)0;
726 }
727 return (error);
728 }
729
730 /*
731 * Sanity checks for old file systems.
732 *
733 * XXX - goes away some day.
734 */
735 int
736 ffs_oldfscompat(fs)
737 struct fs *fs;
738 {
739 int i;
740
741 fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect); /* XXX */
742 fs->fs_interleave = max(fs->fs_interleave, 1); /* XXX */
743 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
744 fs->fs_nrpos = 8; /* XXX */
745 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
746 u_int64_t sizepb = fs->fs_bsize; /* XXX */
747 /* XXX */
748 fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1; /* XXX */
749 for (i = 0; i < NIADDR; i++) { /* XXX */
750 sizepb *= NINDIR(fs); /* XXX */
751 fs->fs_maxfilesize += sizepb; /* XXX */
752 } /* XXX */
753 fs->fs_qbmask = ~fs->fs_bmask; /* XXX */
754 fs->fs_qfmask = ~fs->fs_fmask; /* XXX */
755 } /* XXX */
756 return (0);
757 }
758
/*
 * unmount system call
 *
 * Flushes all files (via softdep or plain flush depending on the
 * mount), marks the superblock clean if possible, closes the device
 * and frees all in-core state.  Returns 0 or an errno.
 */
int
ffs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	struct ufsmount *ump;
	struct fs *fs;
	int error, flags;

	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if (mp->mnt_flag & MNT_SOFTDEP) {
		if ((error = softdep_flushfiles(mp, flags, p)) != 0)
			return (error);
	} else {
		if ((error = ffs_flushfiles(mp, flags, p)) != 0)
			return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	/*
	 * If read/write, the cylinder groups flushed cleanly and the fs
	 * was clean when mounted, mark the superblock clean on disk.
	 */
	if (fs->fs_ronly == 0 &&
	    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
	    fs->fs_clean & FS_WASCLEAN) {
		if (mp->mnt_flag & MNT_SOFTDEP)
			fs->fs_flags &= ~FS_DOSOFTDEP;
		fs->fs_clean = FS_ISCLEAN;
		(void) ffs_sbupdate(ump, MNT_WAIT);
	}
	/* Detach from the device vnode and close it. */
	if (ump->um_devvp->v_type != VBAD)
		ump->um_devvp->v_specmountpoint = NULL;
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
		NOCRED, p);
	vput(ump->um_devvp);
	/* fs_csp[0] is the base of the summary-area allocation. */
	free(fs->fs_csp[0], M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}
805
/*
 * Flush out all the files in a filesystem.
 *
 * Turns quotas off first (if enabled), flushes every vnode on the
 * mount, then fsyncs the device vnode to push filesystem metadata.
 * `flags' may contain FORCECLOSE; it is stripped when the global
 * `doforce' knob disallows forced unmounts.  Returns 0 or an errno.
 */
int
ffs_flushfiles(mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{
	extern int doforce;
	struct ufsmount *ump;
	int error;

	if (!doforce)
		flags &= ~FORCECLOSE;
	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		/* First pass skips the quota (system) vnodes themselves. */
		if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	/*
	 * Flush all the files.
	 */
	error = vflush(mp, NULLVP, flags);
	if (error)
		return (error);
	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, p->p_ucred, FSYNC_WAIT, p);
	VOP_UNLOCK(ump->um_devvp, 0);
	return (error);
}
852
/*
 * Get file system statistics.
 *
 * Fills `sbp' from the in-core superblock.  Always returns 0; panics
 * if the superblock magic is wrong (internal consistency check).
 */
int
ffs_statfs(mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	struct ufsmount *ump;
	struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ffs_statfs");
#ifdef COMPAT_09
	sbp->f_type = 1;
#else
	sbp->f_type = 0;
#endif
	/* Block counts are reported in fragment-sized units. */
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
		fs->fs_cstotal.cs_nffree;
	/* Space available to non-root: total minus the minfree reserve. */
	sbp->f_bavail = (long) (((u_int64_t) fs->fs_dsize * (u_int64_t)
	    (100 - fs->fs_minfree) / (u_int64_t) 100) -
	    (u_int64_t) (fs->fs_dsize - sbp->f_bfree));
	sbp->f_files =  fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
	/* Copy names only when caller isn't reading mnt_stat in place. */
	if (sbp != &mp->mnt_stat) {
		memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
		memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
	}
	strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
	return (0);
}
891
/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 *
 * Returns 0, or the last error seen while syncing (it keeps going
 * after individual failures and accumulates into `allerror').
 */
int
ffs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	struct vnode *vp, *nvp;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, allerror = 0;

	fs = ump->um_fs;
	/* A modified superblock on a read-only fs is a bug. */
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
		printf("fs = %s\n", fs->fs_fsmnt);
		panic("update: rofs mod");
	}
	/*
	 * Write back each (modified) inode.
	 */
	simple_lock(&mntvnode_slock);
loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		simple_lock(&vp->v_interlock);
		nvp = LIST_NEXT(vp, v_mntvnodes);
		ip = VTOI(vp);
		/* Skip vnodes with nothing dirty: no flagged inode
		 * changes and no dirty buffers. */
		if (vp->v_type == VNON ||
		    ((ip->i_flag &
		      (IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFIED | IN_ACCESSED)) == 0 &&
		     LIST_EMPTY(&vp->v_dirtyblkhd)))
		{
			simple_unlock(&vp->v_interlock);
			continue;
		}
		simple_unlock(&mntvnode_slock);
		/* LK_NOWAIT: don't stall the whole sync on one vnode. */
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			simple_lock(&mntvnode_slock);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		if ((error = VOP_FSYNC(vp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, p)) != 0)
			allerror = error;
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	/*
	 * Force stale file system control information to be flushed.
	 */
	if (waitfor != MNT_LAZY) {
		/* Softdep flushes on its own schedule; don't wait here. */
		if (ump->um_mountp->mnt_flag & MNT_SOFTDEP)
			waitfor = MNT_NOWAIT;
		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
		if ((error = VOP_FSYNC(ump->um_devvp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, p)) != 0)
			allerror = error;
		VOP_UNLOCK(ump->um_devvp, 0);
	}
#ifdef QUOTA
	qsync(mp);
#endif
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0) {
		fs->fs_fmod = 0;
		fs->fs_time = time.tv_sec;
		if ((error = ffs_cgupdate(ump, waitfor)))
			allerror = error;
	}
	return (allerror);
}
981
/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 *
 * On success *vpp holds a locked, referenced vnode; on failure *vpp is
 * NULL and an errno is returned.
 */
int
ffs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;
	caddr_t cp;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	/*
	 * Retry the hash lookup until we either find the inode or win
	 * the hashlock; LK_SLEEPFAIL makes lockmgr return nonzero if we
	 * slept, in which case the hash chain may have changed.
	 */
	do {
		if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
			return (0);
	} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
		*vpp = NULL;
		lockmgr(&ufs_hashlock, LK_RELEASE, 0);
		return (error);
	}
	/*
	 * XXX MFS ends up here, too, to allocate an inode.  Should we
	 * XXX create another pool for MFS inodes?
	 */
	ip = pool_get(&ffs_inode_pool, PR_WAITOK);
	memset((caddr_t)ip, 0, sizeof(struct inode));
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
#ifdef QUOTA
	{
		int i;

		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);
	lockmgr(&ufs_hashlock, LK_RELEASE, 0);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
		      (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
	/* Locate this inode within the block of dinodes just read. */
	cp = (caddr_t)bp->b_data + (ino_to_fsbo(fs, ino) * DINODE_SIZE);
#ifdef FFS_EI
	if (UFS_FSNEEDSWAP(fs))
		ffs_dinode_swap((struct dinode *)cp, &ip->i_din.ffs_din);
	else
#endif
		memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_ffs_effnlink = ip->i_ffs_nlink;
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_ffs_uid = ip->i_din.ffs_din.di_ouid;	/* XXX */
		ip->i_ffs_gid = ip->i_din.ffs_din.di_ogid;	/* XXX */
	}						/* XXX */

	*vpp = vp;
	return (0);
}
1099
1100 /*
1101 * File handle to vnode
1102 *
1103 * Have to be really careful about stale file handles:
1104 * - check that the inode number is valid
1105 * - call ffs_vget() to get the locked inode
1106 * - check for an unallocated inode (i_mode == 0)
1107 * - check that the given client host has export rights and return
1108 * those rights via. exflagsp and credanonp
1109 */
1110 int
1111 ffs_fhtovp(mp, fhp, vpp)
1112 struct mount *mp;
1113 struct fid *fhp;
1114 struct vnode **vpp;
1115 {
1116 struct ufid *ufhp;
1117 struct fs *fs;
1118
1119 ufhp = (struct ufid *)fhp;
1120 fs = VFSTOUFS(mp)->um_fs;
1121 if (ufhp->ufid_ino < ROOTINO ||
1122 ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1123 return (ESTALE);
1124 return (ufs_fhtovp(mp, ufhp, vpp));
1125 }
1126
1127 /*
1128 * Vnode pointer to File handle
1129 */
1130 /* ARGSUSED */
1131 int
1132 ffs_vptofh(vp, fhp)
1133 struct vnode *vp;
1134 struct fid *fhp;
1135 {
1136 struct inode *ip;
1137 struct ufid *ufhp;
1138
1139 ip = VTOI(vp);
1140 ufhp = (struct ufid *)fhp;
1141 ufhp->ufid_len = sizeof(struct ufid);
1142 ufhp->ufid_ino = ip->i_number;
1143 ufhp->ufid_gen = ip->i_ffs_gen;
1144 return (0);
1145 }
1146
1147 void
1148 ffs_init()
1149 {
1150 if (ffs_initcount++ > 0)
1151 return;
1152
1153 softdep_initialize();
1154 ufs_init();
1155
1156 pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
1157 0, pool_page_alloc_nointr, pool_page_free_nointr, M_FFSNODE);
1158 }
1159
1160 void
1161 ffs_done()
1162 {
1163 if (--ffs_initcount > 0)
1164 return;
1165
1166 /* XXX softdep cleanup ? */
1167 ufs_done();
1168 pool_destroy(&ffs_inode_pool);
1169 }
1170
1171 int
1172 ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
1173 int *name;
1174 u_int namelen;
1175 void *oldp;
1176 size_t *oldlenp;
1177 void *newp;
1178 size_t newlen;
1179 struct proc *p;
1180 {
1181 extern int doclusterread, doclusterwrite, doreallocblks, doasyncfree;
1182 extern int ffs_log_changeopt;
1183
1184 /* all sysctl names at this level are terminal */
1185 if (namelen != 1)
1186 return (ENOTDIR); /* overloaded */
1187
1188 switch (name[0]) {
1189 case FFS_CLUSTERREAD:
1190 return (sysctl_int(oldp, oldlenp, newp, newlen,
1191 &doclusterread));
1192 case FFS_CLUSTERWRITE:
1193 return (sysctl_int(oldp, oldlenp, newp, newlen,
1194 &doclusterwrite));
1195 case FFS_REALLOCBLKS:
1196 return (sysctl_int(oldp, oldlenp, newp, newlen,
1197 &doreallocblks));
1198 case FFS_ASYNCFREE:
1199 return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
1200 case FFS_LOG_CHANGEOPT:
1201 return (sysctl_int(oldp, oldlenp, newp, newlen,
1202 &ffs_log_changeopt));
1203 default:
1204 return (EOPNOTSUPP);
1205 }
1206 /* NOTREACHED */
1207 }
1208
1209 /*
1210 * Write a superblock and associated information back to disk.
1211 */
1212 int
1213 ffs_sbupdate(mp, waitfor)
1214 struct ufsmount *mp;
1215 int waitfor;
1216 {
1217 struct fs *fs = mp->um_fs;
1218 struct buf *bp;
1219 int i, error = 0;
1220 int32_t saved_nrpos = fs->fs_nrpos;
1221 int64_t saved_qbmask = fs->fs_qbmask;
1222 int64_t saved_qfmask = fs->fs_qfmask;
1223 u_int64_t saved_maxfilesize = fs->fs_maxfilesize;
1224 u_int8_t saveflag;
1225
1226 /* Restore compatibility to old file systems. XXX */
1227 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
1228 fs->fs_nrpos = -1; /* XXX */
1229 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
1230 int32_t *lp, tmp; /* XXX */
1231 /* XXX */
1232 lp = (int32_t *)&fs->fs_qbmask; /* XXX nuke qfmask too */
1233 tmp = lp[4]; /* XXX */
1234 for (i = 4; i > 0; i--) /* XXX */
1235 lp[i] = lp[i-1]; /* XXX */
1236 lp[0] = tmp; /* XXX */
1237 } /* XXX */
1238 fs->fs_maxfilesize = mp->um_savedmaxfilesize; /* XXX */
1239
1240 bp = getblk(mp->um_devvp, SBOFF >> (fs->fs_fshift - fs->fs_fsbtodb),
1241 (int)fs->fs_sbsize, 0, 0);
1242 saveflag = fs->fs_flags & FS_INTERNAL;
1243 fs->fs_flags &= ~FS_INTERNAL;
1244 memcpy(bp->b_data, fs, fs->fs_sbsize);
1245 #ifdef FFS_EI
1246 if (mp->um_flags & UFS_NEEDSWAP)
1247 ffs_sb_swap(fs, (struct fs*)bp->b_data, 1);
1248 #endif
1249
1250 fs->fs_flags |= saveflag;
1251 fs->fs_nrpos = saved_nrpos; /* XXX */
1252 fs->fs_qbmask = saved_qbmask; /* XXX */
1253 fs->fs_qfmask = saved_qfmask; /* XXX */
1254 fs->fs_maxfilesize = saved_maxfilesize; /* XXX */
1255
1256 if (waitfor == MNT_WAIT)
1257 error = bwrite(bp);
1258 else
1259 bawrite(bp);
1260 return (error);
1261 }
1262
1263 int
1264 ffs_cgupdate(mp, waitfor)
1265 struct ufsmount *mp;
1266 int waitfor;
1267 {
1268 struct fs *fs = mp->um_fs;
1269 struct buf *bp;
1270 int blks;
1271 caddr_t space;
1272 int i, size, error = 0, allerror = 0;
1273
1274 allerror = ffs_sbupdate(mp, waitfor);
1275 blks = howmany(fs->fs_cssize, fs->fs_fsize);
1276 space = (caddr_t)fs->fs_csp[0];
1277 for (i = 0; i < blks; i += fs->fs_frag) {
1278 size = fs->fs_bsize;
1279 if (i + fs->fs_frag > blks)
1280 size = (blks - i) * fs->fs_fsize;
1281 bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1282 size, 0, 0);
1283 #ifdef FFS_EI
1284 if (mp->um_flags & UFS_NEEDSWAP)
1285 ffs_csum_swap((struct csum*)space,
1286 (struct csum*)bp->b_data, size);
1287 else
1288 #endif
1289 memcpy(bp->b_data, space, (u_int)size);
1290 space += size;
1291 if (waitfor == MNT_WAIT)
1292 error = bwrite(bp);
1293 else
1294 bawrite(bp);
1295 }
1296 if (!allerror && error)
1297 allerror = error;
1298 return (allerror);
1299 }
1300