1 /* $NetBSD: ffs_vfsops.c,v 1.94 2002/03/17 00:02:34 chs Exp $ */
2
3 /*
4 * Copyright (c) 1989, 1991, 1993, 1994
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
36 */
37
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.94 2002/03/17 00:02:34 chs Exp $");
40
41 #if defined(_KERNEL_OPT)
42 #include "opt_ffs.h"
43 #include "opt_quota.h"
44 #include "opt_compat_netbsd.h"
45 #include "opt_softdep.h"
46 #endif
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/namei.h>
51 #include <sys/proc.h>
52 #include <sys/kernel.h>
53 #include <sys/vnode.h>
54 #include <sys/socket.h>
55 #include <sys/mount.h>
56 #include <sys/buf.h>
57 #include <sys/device.h>
58 #include <sys/mbuf.h>
59 #include <sys/file.h>
60 #include <sys/disklabel.h>
61 #include <sys/ioctl.h>
62 #include <sys/errno.h>
63 #include <sys/malloc.h>
64 #include <sys/pool.h>
65 #include <sys/lock.h>
66 #include <sys/sysctl.h>
67
68 #include <miscfs/specfs/specdev.h>
69
70 #include <ufs/ufs/quota.h>
71 #include <ufs/ufs/ufsmount.h>
72 #include <ufs/ufs/inode.h>
73 #include <ufs/ufs/dir.h>
74 #include <ufs/ufs/ufs_extern.h>
75 #include <ufs/ufs/ufs_bswap.h>
76
77 #include <ufs/ffs/fs.h>
78 #include <ufs/ffs/ffs_extern.h>
79
80 /* how many times ffs_init() was called */
81 int ffs_initcount = 0;
82
83 extern struct lock ufs_hashlock;
84
85 extern struct vnodeopv_desc ffs_vnodeop_opv_desc;
86 extern struct vnodeopv_desc ffs_specop_opv_desc;
87 extern struct vnodeopv_desc ffs_fifoop_opv_desc;
88
89 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
90 &ffs_vnodeop_opv_desc,
91 &ffs_specop_opv_desc,
92 &ffs_fifoop_opv_desc,
93 NULL,
94 };
95
96 struct vfsops ffs_vfsops = {
97 MOUNT_FFS,
98 ffs_mount,
99 ufs_start,
100 ffs_unmount,
101 ufs_root,
102 ufs_quotactl,
103 ffs_statfs,
104 ffs_sync,
105 ffs_vget,
106 ffs_fhtovp,
107 ffs_vptofh,
108 ffs_init,
109 ffs_reinit,
110 ffs_done,
111 ffs_sysctl,
112 ffs_mountroot,
113 ufs_check_export,
114 ffs_vnodeopv_descs,
115 };
116
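/*
 * Per-file-system hooks used by the generic page cache (UBC) I/O code.
 */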
117 struct genfs_ops ffs_genfsops = {
118 ffs_gop_size,
119 ffs_gop_alloc,
120 genfs_gop_write,
121 };
122
123 struct pool ffs_inode_pool;
124
125 /*
126 * Called by main() when ffs is going to be mounted as root.
127 */
128
129 int
130 ffs_mountroot()
131 {
132 struct fs *fs;
133 struct mount *mp;
134 struct proc *p = curproc; /* XXX */
135 struct ufsmount *ump;
136 int error;
137
138 if (root_device->dv_class != DV_DISK)
139 return (ENODEV);
140
141 /*
142 * Get vnodes for rootdev.
143 */
144 if (bdevvp(rootdev, &rootvp))
145 panic("ffs_mountroot: can't setup bdevvp's");
146
147 if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
148 vrele(rootvp);
149 return (error);
150 }
151 if ((error = ffs_mountfs(rootvp, mp, p)) != 0) {
152 mp->mnt_op->vfs_refcount--;
153 vfs_unbusy(mp);
154 free(mp, M_MOUNT);
155 vrele(rootvp);
156 return (error);
157 }
158 simple_lock(&mountlist_slock);
159 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
160 simple_unlock(&mountlist_slock);
161 ump = VFSTOUFS(mp);
162 fs = ump->um_fs;
163 memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
164 (void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
165 (void)ffs_statfs(mp, &mp->mnt_stat, p);
166 vfs_unbusy(mp);
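/*
 * Set the time-of-day clock, using the superblock's last write time
 * as the base in case the hardware clock is missing or bad.
 */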
167 inittodr(fs->fs_time);
168 return (0);
169 }
170
171 /*
172 * VFS Operations.
173 *
174 * mount system call
175 */
176 int
177 ffs_mount(mp, path, data, ndp, p)
178 struct mount *mp;
179 const char *path;
180 void *data;
181 struct nameidata *ndp;
182 struct proc *p;
183 {
184 struct vnode *devvp;
185 struct ufs_args args;
186 struct ufsmount *ump = NULL;
187 struct fs *fs;
188 size_t size;
189 int error, flags;
190 mode_t accessmode;
191
192 error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args));
193 if (error)
194 return (error);
195
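/*
 * If soft dependency support (options SOFTDEP) is not compiled in,
 * silently clear any request for it so the mount proceeds without it.
 */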
196 #if !defined(SOFTDEP)
197 mp->mnt_flag &= ~MNT_SOFTDEP;
198 #endif
199
200 /*
201 * If updating, check whether changing from read-only to
202 * read/write; if there is no device name, that's all we do.
203 */
204 if (mp->mnt_flag & MNT_UPDATE) {
205 ump = VFSTOUFS(mp);
206 fs = ump->um_fs;
207 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
208 flags = WRITECLOSE;
209 if (mp->mnt_flag & MNT_FORCE)
210 flags |= FORCECLOSE;
211 if (mp->mnt_flag & MNT_SOFTDEP)
212 error = softdep_flushfiles(mp, flags, p);
213 else
214 error = ffs_flushfiles(mp, flags, p);
215 if (fs->fs_pendingblocks != 0 ||
216 fs->fs_pendinginodes != 0) {
217 printf("%s: update error: blocks %d files %d\n",
218 fs->fs_fsmnt, fs->fs_pendingblocks,
219 fs->fs_pendinginodes);
220 fs->fs_pendingblocks = 0;
221 fs->fs_pendinginodes = 0;
222 }
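/*
 * If the flush succeeded and the file system was clean when it
 * was mounted read/write, mark it clean on disk again before it
 * becomes read-only.
 */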
223 if (error == 0 &&
224 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
225 fs->fs_clean & FS_WASCLEAN) {
226 if (mp->mnt_flag & MNT_SOFTDEP)
227 fs->fs_flags &= ~FS_DOSOFTDEP;
228 fs->fs_clean = FS_ISCLEAN;
229 (void) ffs_sbupdate(ump, MNT_WAIT);
230 }
231 if (error)
232 return (error);
233 fs->fs_ronly = 1;
234 fs->fs_fmod = 0;
235 }
236
237 /*
238 * Flush soft dependencies if they are being disabled via an
239 * update mount. This may leave some items to be processed,
240 * so don't do this yet. XXX
241 */
242 if ((fs->fs_flags & FS_DOSOFTDEP) &&
243 !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
244 #ifdef notyet
245 flags = WRITECLOSE;
246 if (mp->mnt_flag & MNT_FORCE)
247 flags |= FORCECLOSE;
248 error = softdep_flushfiles(mp, flags, p);
249 if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
250 fs->fs_flags &= ~FS_DOSOFTDEP;
251 (void) ffs_sbupdate(ump, MNT_WAIT);
252 #elif defined(SOFTDEP)
253 mp->mnt_flag |= MNT_SOFTDEP;
254 #endif
255 }
256
257 /*
258 * When upgrading to a softdep mount, we must first flush
259 * all vnodes. (not done yet -- see above)
260 */
261 if (!(fs->fs_flags & FS_DOSOFTDEP) &&
262 (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
263 #ifdef notyet
264 flags = WRITECLOSE;
265 if (mp->mnt_flag & MNT_FORCE)
266 flags |= FORCECLOSE;
267 error = ffs_flushfiles(mp, flags, p);
268 #else
269 mp->mnt_flag &= ~MNT_SOFTDEP;
270 #endif
271 }
272
273 if (mp->mnt_flag & MNT_RELOAD) {
274 error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
275 if (error)
276 return (error);
277 }
278 if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
279 /*
280 * If upgrading to read-write as non-root, verify that
281 * the user has the necessary permissions on the device.
282 */
283 devvp = ump->um_devvp;
284 if (p->p_ucred->cr_uid != 0) {
285 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
286 error = VOP_ACCESS(devvp, VREAD | VWRITE,
287 p->p_ucred, p);
288 VOP_UNLOCK(devvp, 0);
289 if (error)
290 return (error);
291 }
292 fs->fs_ronly = 0;
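/*
 * Shift the clean flag up (FS_ISCLEAN becomes FS_WASCLEAN) so we
 * remember that the file system was clean when writing began.
 */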
293 fs->fs_clean <<= 1;
294 fs->fs_fmod = 1;
295 if ((fs->fs_flags & FS_DOSOFTDEP)) {
296 error = softdep_mount(devvp, mp, fs,
297 p->p_ucred);
298 if (error)
299 return (error);
300 }
301 }
302 if (args.fspec == 0) {
303 /*
304 * Process export requests.
305 */
306 return (vfs_export(mp, &ump->um_export, &args.export));
307 }
308 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
309 (MNT_SOFTDEP | MNT_ASYNC)) {
310 printf("%s fs uses soft updates, ignoring async mode\n",
311 fs->fs_fsmnt);
312 mp->mnt_flag &= ~MNT_ASYNC;
313 }
314 }
315 /*
316 * Not an update, or updating the name: look up the name
317 * and verify that it refers to a sensible block device.
318 */
319 NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
320 if ((error = namei(ndp)) != 0)
321 return (error);
322 devvp = ndp->ni_vp;
323
324 if (devvp->v_type != VBLK) {
325 vrele(devvp);
326 return (ENOTBLK);
327 }
328 if (major(devvp->v_rdev) >= nblkdev) {
329 vrele(devvp);
330 return (ENXIO);
331 }
332 /*
333 * If mounting as non-root, verify that the user has the
334 * necessary permissions on the device.
335 */
336 if (p->p_ucred->cr_uid != 0) {
337 accessmode = VREAD;
338 if ((mp->mnt_flag & MNT_RDONLY) == 0)
339 accessmode |= VWRITE;
340 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
341 error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
342 VOP_UNLOCK(devvp, 0);
343 if (error) {
344 vrele(devvp);
345 return (error);
346 }
347 }
348 if ((mp->mnt_flag & MNT_UPDATE) == 0) {
349 error = ffs_mountfs(devvp, mp, p);
350 if (!error) {
351 ump = VFSTOUFS(mp);
352 fs = ump->um_fs;
353 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
354 (MNT_SOFTDEP | MNT_ASYNC)) {
355 printf("%s fs uses soft updates, "
356 "ignoring async mode\n",
357 fs->fs_fsmnt);
358 mp->mnt_flag &= ~MNT_ASYNC;
359 }
360 }
361 }
362 else {
363 if (devvp != ump->um_devvp)
364 error = EINVAL; /* needs translation */
365 else
366 vrele(devvp);
367 }
368 if (error) {
369 vrele(devvp);
370 return (error);
371 }
372 (void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
373 memset(fs->fs_fsmnt + size, 0, sizeof(fs->fs_fsmnt) - size);
374 memcpy(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN);
375 (void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
376 &size);
377 memset(mp->mnt_stat.f_mntfromname + size, 0, MNAMELEN - size);
378 if (mp->mnt_flag & MNT_SOFTDEP)
379 fs->fs_flags |= FS_DOSOFTDEP;
380 else
381 fs->fs_flags &= ~FS_DOSOFTDEP;
382 if (fs->fs_fmod != 0) { /* XXX */
383 fs->fs_fmod = 0;
384 if (fs->fs_clean & FS_WASCLEAN)
385 fs->fs_time = time.tv_sec;
386 else {
387 printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
388 mp->mnt_stat.f_mntfromname, fs->fs_clean);
389 printf("%s: lost blocks %d files %d\n",
390 mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
391 fs->fs_pendinginodes);
392 }
393 (void) ffs_cgupdate(ump, MNT_WAIT);
394 }
395 return (0);
396 }
397
398 /*
399 * Reload all incore data for a filesystem (used after running fsck on
400 * the root filesystem and finding things to fix). The filesystem must
401 * be mounted read-only.
402 *
403 * Things to do to update the mount:
404 * 1) invalidate all cached meta-data.
405 * 2) re-read superblock from disk.
406 * 3) re-read summary information from disk.
407 * 4) invalidate all inactive vnodes.
408 * 5) invalidate all cached file data.
409 * 6) re-read inode data for all active vnodes.
410 */
411 int
412 ffs_reload(mountp, cred, p)
413 struct mount *mountp;
414 struct ucred *cred;
415 struct proc *p;
416 {
417 struct vnode *vp, *nvp, *devvp;
418 struct inode *ip;
419 void *space;
420 struct buf *bp;
421 struct fs *fs, *newfs;
422 struct partinfo dpart;
423 int i, blks, size, error;
424 int32_t *lp;
425 caddr_t cp;
426
427 if ((mountp->mnt_flag & MNT_RDONLY) == 0)
428 return (EINVAL);
429 /*
430 * Step 1: invalidate all cached meta-data.
431 */
432 devvp = VFSTOUFS(mountp)->um_devvp;
433 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
434 error = vinvalbuf(devvp, 0, cred, p, 0, 0);
435 VOP_UNLOCK(devvp, 0);
436 if (error)
437 panic("ffs_reload: dirty1");
438 /*
439 * Step 2: re-read superblock from disk.
440 */
441 if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
442 size = DEV_BSIZE;
443 else
444 size = dpart.disklab->d_secsize;
445 error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, NOCRED, &bp);
446 if (error) {
447 brelse(bp);
448 return (error);
449 }
450 fs = VFSTOUFS(mountp)->um_fs;
451 newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
452 memcpy(newfs, bp->b_data, fs->fs_sbsize);
453 #ifdef FFS_EI
454 if (VFSTOUFS(mountp)->um_flags & UFS_NEEDSWAP) {
455 ffs_sb_swap((struct fs*)bp->b_data, newfs);
456 fs->fs_flags |= FS_SWAPPED;
457 }
458 #endif
459 if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
460 newfs->fs_bsize < sizeof(struct fs)) {
461 brelse(bp);
462 free(newfs, M_UFSMNT);
463 return (EIO); /* XXX needs translation */
464 }
465 /*
466 * Copy pointer fields back into superblock before copying in XXX
467 * new superblock. These should really be in the ufsmount. XXX
468 * Note that important parameters (eg fs_ncg) are unchanged.
469 */
470 newfs->fs_csp = fs->fs_csp;
471 newfs->fs_maxcluster = fs->fs_maxcluster;
472 newfs->fs_contigdirs = fs->fs_contigdirs;
473 newfs->fs_ronly = fs->fs_ronly;
474 memcpy(fs, newfs, (u_int)fs->fs_sbsize);
475 if (fs->fs_sbsize < SBSIZE)
476 bp->b_flags |= B_INVAL;
477 brelse(bp);
478 free(newfs, M_UFSMNT);
479 mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
480 ffs_oldfscompat(fs);
481 /* An old fsck may have zeroed these fields, so recheck them. */
482 if (fs->fs_avgfilesize <= 0)
483 fs->fs_avgfilesize = AVFILESIZ;
484 if (fs->fs_avgfpdir <= 0)
485 fs->fs_avgfpdir = AFPDIR;
486 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
487 fs->fs_pendingblocks = 0;
488 fs->fs_pendinginodes = 0;
489 }
490
491 ffs_statfs(mountp, &mountp->mnt_stat, p);
492 /*
493 * Step 3: re-read summary information from disk.
494 */
495 blks = howmany(fs->fs_cssize, fs->fs_fsize);
496 space = fs->fs_csp;
497 for (i = 0; i < blks; i += fs->fs_frag) {
498 size = fs->fs_bsize;
499 if (i + fs->fs_frag > blks)
500 size = (blks - i) * fs->fs_fsize;
501 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
502 NOCRED, &bp);
503 if (error) {
504 brelse(bp);
505 return (error);
506 }
507 #ifdef FFS_EI
508 if (UFS_FSNEEDSWAP(fs))
509 ffs_csum_swap((struct csum *)bp->b_data,
510 (struct csum *)space, size);
511 else
512 #endif
513 memcpy(space, bp->b_data, (size_t)size);
514 space = (char *)space + size;
515 brelse(bp);
516 }
517 if ((fs->fs_flags & FS_DOSOFTDEP))
518 softdep_mount(devvp, mountp, fs, cred);
519 /*
520 * We no longer know anything about clusters per cylinder group.
521 */
522 if (fs->fs_contigsumsize > 0) {
523 lp = fs->fs_maxcluster;
524 for (i = 0; i < fs->fs_ncg; i++)
525 *lp++ = fs->fs_contigsumsize;
526 }
527
528 loop:
529 simple_lock(&mntvnode_slock);
530 for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
531 if (vp->v_mount != mountp) {
532 simple_unlock(&mntvnode_slock);
533 goto loop;
534 }
535 nvp = vp->v_mntvnodes.le_next;
536 /*
537 * Step 4: invalidate all inactive vnodes.
538 */
539 if (vrecycle(vp, &mntvnode_slock, p))
540 goto loop;
541 /*
542 * Step 5: invalidate all cached file data.
543 */
544 simple_lock(&vp->v_interlock);
545 simple_unlock(&mntvnode_slock);
546 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
547 goto loop;
548 if (vinvalbuf(vp, 0, cred, p, 0, 0))
549 panic("ffs_reload: dirty2");
550 /*
551 * Step 6: re-read inode data for all active vnodes.
552 */
553 ip = VTOI(vp);
554 error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
555 (int)fs->fs_bsize, NOCRED, &bp);
556 if (error) {
557 brelse(bp);
558 vput(vp);
559 return (error);
560 }
561 cp = (caddr_t)bp->b_data +
562 (ino_to_fsbo(fs, ip->i_number) * DINODE_SIZE);
563 #ifdef FFS_EI
564 if (UFS_FSNEEDSWAP(fs))
565 ffs_dinode_swap((struct dinode *)cp,
566 &ip->i_din.ffs_din);
567 else
568 #endif
569 memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
570 ip->i_ffs_effnlink = ip->i_ffs_nlink;
571 brelse(bp);
572 vput(vp);
573 simple_lock(&mntvnode_slock);
574 }
575 simple_unlock(&mntvnode_slock);
576 return (0);
577 }
578
579 /*
580 * Common code for mount and mountroot
581 */
582 int
583 ffs_mountfs(devvp, mp, p)
584 struct vnode *devvp;
585 struct mount *mp;
586 struct proc *p;
587 {
588 struct ufsmount *ump;
589 struct buf *bp;
590 struct fs *fs;
591 dev_t dev;
592 struct partinfo dpart;
593 void *space;
594 int blks;
595 int error, i, size, ronly;
596 #ifdef FFS_EI
597 int needswap;
598 #endif
599 int32_t *lp;
600 struct ucred *cred;
601 u_int64_t maxfilesize; /* XXX */
602 u_int32_t sbsize;
603
604 dev = devvp->v_rdev;
605 cred = p ? p->p_ucred : NOCRED;
606 /*
607 * Disallow multiple mounts of the same device.
608 * Disallow mounting of a device that is currently in use
609 * (except for root, which might share swap device for miniroot).
610 * Flush out any old buffers remaining from a previous use.
611 */
612 if ((error = vfs_mountedon(devvp)) != 0)
613 return (error);
614 if (vcount(devvp) > 1 && devvp != rootvp)
615 return (EBUSY);
616 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
617 error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
618 VOP_UNLOCK(devvp, 0);
619 if (error)
620 return (error);
621
622 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
623 error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
624 if (error)
625 return (error);
626 if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
627 size = DEV_BSIZE;
628 else
629 size = dpart.disklab->d_secsize;
630
631 bp = NULL;
632 ump = NULL;
633 error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, cred, &bp);
634 if (error)
635 goto out;
636
637 fs = (struct fs*)bp->b_data;
638 if (fs->fs_magic == FS_MAGIC) {
639 sbsize = fs->fs_sbsize;
640 #ifdef FFS_EI
641 needswap = 0;
642 } else if (fs->fs_magic == bswap32(FS_MAGIC)) {
643 sbsize = bswap32(fs->fs_sbsize);
644 needswap = 1;
645 #endif
646 } else {
647 error = EINVAL;
648 goto out;
649 }
650 if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs)) {
651 error = EINVAL;
652 goto out;
653 }
654
655 fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
656 memcpy(fs, bp->b_data, sbsize);
657 #ifdef FFS_EI
658 if (needswap) {
659 ffs_sb_swap((struct fs*)bp->b_data, fs);
660 fs->fs_flags |= FS_SWAPPED;
661 }
662 #endif
663 ffs_oldfscompat(fs);
664
665 if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < sizeof(struct fs)) {
666 error = EINVAL;
667 goto out;
668 }
669 /* make sure cylinder group summary area is a reasonable size. */
670 if (fs->fs_cgsize == 0 || fs->fs_cpg == 0 ||
671 fs->fs_ncg > fs->fs_ncyl / fs->fs_cpg + 1 ||
672 fs->fs_cssize >
673 fragroundup(fs, fs->fs_ncg * sizeof(struct csum))) {
674 error = EINVAL; /* XXX needs translation */
675 goto out2;
676 }
677 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
678 fs->fs_pendingblocks = 0;
679 fs->fs_pendinginodes = 0;
680 }
681 /* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
682 if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
683 error = EROFS; /* XXX what should be returned? */
684 goto out2;
685 }
686
687 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
688 memset((caddr_t)ump, 0, sizeof *ump);
689 ump->um_fs = fs;
690 if (fs->fs_sbsize < SBSIZE)
691 bp->b_flags |= B_INVAL;
692 brelse(bp);
693 bp = NULL;
694
695 /*
696 * verify that we can access the last block in the fs.
697 */
698
699 error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize, cred,
700 &bp);
701 if (bp->b_bcount != fs->fs_fsize)
702 error = EINVAL;
703 bp->b_flags |= B_INVAL;
704 if (error)
705 goto out;
706 brelse(bp);
707 bp = NULL;
708
709 fs->fs_ronly = ronly;
710 if (ronly == 0) {
711 fs->fs_clean <<= 1;
712 fs->fs_fmod = 1;
713 }
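/*
 * Allocate one contiguous chunk for the in-core cylinder group
 * summary information, the optional per-cg cluster summary and
 * the per-cg contigdirs counts, then read the summary blocks in.
 */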
714 size = fs->fs_cssize;
715 blks = howmany(size, fs->fs_fsize);
716 if (fs->fs_contigsumsize > 0)
717 size += fs->fs_ncg * sizeof(int32_t);
718 size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
719 space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
720 fs->fs_csp = space;
721 for (i = 0; i < blks; i += fs->fs_frag) {
722 size = fs->fs_bsize;
723 if (i + fs->fs_frag > blks)
724 size = (blks - i) * fs->fs_fsize;
725 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
726 cred, &bp);
727 if (error) {
728 free(fs->fs_csp, M_UFSMNT);
729 goto out2;
730 }
731 #ifdef FFS_EI
732 if (needswap)
733 ffs_csum_swap((struct csum *)bp->b_data,
734 (struct csum *)space, size);
735 else
736 #endif
737 memcpy(space, bp->b_data, (u_int)size);
738
739 space = (char *)space + size;
740 brelse(bp);
741 bp = NULL;
742 }
743 if (fs->fs_contigsumsize > 0) {
744 fs->fs_maxcluster = lp = space;
745 for (i = 0; i < fs->fs_ncg; i++)
746 *lp++ = fs->fs_contigsumsize;
747 space = lp;
748 }
749 size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
750 fs->fs_contigdirs = space;
751 space = (char *)space + size;
752 memset(fs->fs_contigdirs, 0, size);
753 /* Compatibility for old filesystems - XXX */
754 if (fs->fs_avgfilesize <= 0)
755 fs->fs_avgfilesize = AVFILESIZ;
756 if (fs->fs_avgfpdir <= 0)
757 fs->fs_avgfpdir = AFPDIR;
758 mp->mnt_data = (qaddr_t)ump;
759 mp->mnt_stat.f_fsid.val[0] = (long)dev;
760 mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_FFS);
761 mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
762 mp->mnt_fs_bshift = fs->fs_bshift;
763 mp->mnt_dev_bshift = DEV_BSHIFT; /* XXX */
764 mp->mnt_flag |= MNT_LOCAL;
765 #ifdef FFS_EI
766 if (needswap)
767 ump->um_flags |= UFS_NEEDSWAP;
768 #endif
769 ump->um_mountp = mp;
770 ump->um_dev = dev;
771 ump->um_devvp = devvp;
772 ump->um_nindir = fs->fs_nindir;
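/*
 * ffs() here is the libkern find-first-set-bit function, not the
 * file system; fs_nindir is a power of two, so this is log2 of it.
 */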
773 ump->um_lognindir = ffs(fs->fs_nindir) - 1;
774 ump->um_bptrtodb = fs->fs_fsbtodb;
775 ump->um_seqinc = fs->fs_frag;
776 for (i = 0; i < MAXQUOTAS; i++)
777 ump->um_quotas[i] = NULLVP;
778 devvp->v_specmountpoint = mp;
779 ump->um_savedmaxfilesize = fs->fs_maxfilesize; /* XXX */
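/*
 * Clamp fs_maxfilesize, presumably because file block numbers are
 * signed 32-bit: at most 2^31 blocks can be addressed. The original
 * value was saved above and is restored by ffs_sbupdate().
 */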
780 maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1; /* XXX */
781 if (fs->fs_maxfilesize > maxfilesize) /* XXX */
782 fs->fs_maxfilesize = maxfilesize; /* XXX */
783 if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
784 error = softdep_mount(devvp, mp, fs, cred);
785 if (error) {
786 free(fs->fs_csp, M_UFSMNT);
787 goto out;
788 }
789 }
790 return (0);
791 out2:
792 free(fs, M_UFSMNT);
793 out:
794 devvp->v_specmountpoint = NULL;
795 if (bp)
796 brelse(bp);
797 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
798 (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
799 VOP_UNLOCK(devvp, 0);
800 if (ump) {
801 free(ump, M_UFSMNT);
802 mp->mnt_data = (qaddr_t)0;
803 }
804 return (error);
805 }
806
807 /*
808 * Sanity checks for old file systems.
809 *
810 * XXX - goes away some day.
811 */
812 int
813 ffs_oldfscompat(fs)
814 struct fs *fs;
815 {
816 int i;
817
818 fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect); /* XXX */
819 fs->fs_interleave = max(fs->fs_interleave, 1); /* XXX */
820 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
821 fs->fs_nrpos = 8; /* XXX */
822 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
823 u_int64_t sizepb = fs->fs_bsize; /* XXX */
824 /* XXX */
825 fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1; /* XXX */
826 for (i = 0; i < NIADDR; i++) { /* XXX */
827 sizepb *= NINDIR(fs); /* XXX */
828 fs->fs_maxfilesize += sizepb; /* XXX */
829 } /* XXX */
830 fs->fs_qbmask = ~fs->fs_bmask; /* XXX */
831 fs->fs_qfmask = ~fs->fs_fmask; /* XXX */
832 } /* XXX */
833 return (0);
834 }
835
836 /*
837 * unmount system call
838 */
839 int
840 ffs_unmount(mp, mntflags, p)
841 struct mount *mp;
842 int mntflags;
843 struct proc *p;
844 {
845 struct ufsmount *ump;
846 struct fs *fs;
847 int error, flags, penderr;
848
849 penderr = 0;
850 flags = 0;
851 if (mntflags & MNT_FORCE)
852 flags |= FORCECLOSE;
853 if (mp->mnt_flag & MNT_SOFTDEP) {
854 if ((error = softdep_flushfiles(mp, flags, p)) != 0)
855 return (error);
856 } else {
857 if ((error = ffs_flushfiles(mp, flags, p)) != 0)
858 return (error);
859 }
860 ump = VFSTOUFS(mp);
861 fs = ump->um_fs;
862 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
863 printf("%s: unmount pending error: blocks %d files %d\n",
864 fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
865 fs->fs_pendingblocks = 0;
866 fs->fs_pendinginodes = 0;
867 penderr = 1;
868 }
869 if (fs->fs_ronly == 0 &&
870 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
871 fs->fs_clean & FS_WASCLEAN) {
872 /*
873 * XXXX don't mark fs clean in the case of softdep
874 * pending block errors, until they are fixed.
875 */
876 if (penderr == 0) {
877 if (mp->mnt_flag & MNT_SOFTDEP)
878 fs->fs_flags &= ~FS_DOSOFTDEP;
879 fs->fs_clean = FS_ISCLEAN;
880 }
881 (void) ffs_sbupdate(ump, MNT_WAIT);
882 }
883 if (ump->um_devvp->v_type != VBAD)
884 ump->um_devvp->v_specmountpoint = NULL;
885 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
886 error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
887 NOCRED, p);
888 vput(ump->um_devvp);
889 free(fs->fs_csp, M_UFSMNT);
890 free(fs, M_UFSMNT);
891 free(ump, M_UFSMNT);
892 mp->mnt_data = (qaddr_t)0;
893 mp->mnt_flag &= ~MNT_LOCAL;
894 return (error);
895 }
896
897 /*
898 * Flush out all the files in a filesystem.
899 */
900 int
901 ffs_flushfiles(mp, flags, p)
902 struct mount *mp;
903 int flags;
904 struct proc *p;
905 {
906 extern int doforce;
907 struct ufsmount *ump;
908 int error;
909
910 if (!doforce)
911 flags &= ~FORCECLOSE;
912 ump = VFSTOUFS(mp);
913 #ifdef QUOTA
914 if (mp->mnt_flag & MNT_QUOTA) {
915 int i;
916 if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
917 return (error);
918 for (i = 0; i < MAXQUOTAS; i++) {
919 if (ump->um_quotas[i] == NULLVP)
920 continue;
921 quotaoff(p, mp, i);
922 }
923 /*
924 * Here we fall through to vflush again to ensure
925 * that we have gotten rid of all the system vnodes.
926 */
927 }
928 #endif
929 /*
930 * Flush all the files.
931 */
932 error = vflush(mp, NULLVP, flags);
933 if (error)
934 return (error);
935 /*
936 * Flush filesystem metadata.
937 */
938 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
939 error = VOP_FSYNC(ump->um_devvp, p->p_ucred, FSYNC_WAIT, 0, 0, p);
940 VOP_UNLOCK(ump->um_devvp, 0);
941 return (error);
942 }
943
944 /*
945 * Get file system statistics.
946 */
947 int
948 ffs_statfs(mp, sbp, p)
949 struct mount *mp;
950 struct statfs *sbp;
951 struct proc *p;
952 {
953 struct ufsmount *ump;
954 struct fs *fs;
955
956 ump = VFSTOUFS(mp);
957 fs = ump->um_fs;
958 if (fs->fs_magic != FS_MAGIC)
959 panic("ffs_statfs");
960 #ifdef COMPAT_09
961 sbp->f_type = 1;
962 #else
963 sbp->f_type = 0;
964 #endif
965 sbp->f_bsize = fs->fs_fsize;
966 sbp->f_iosize = fs->fs_bsize;
967 sbp->f_blocks = fs->fs_dsize;
968 sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
969 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
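/*
 * f_bavail is what an unprivileged user may still allocate: the
 * data area less the minfree reserve, minus what is already used.
 */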
970 sbp->f_bavail = (long) (((u_int64_t) fs->fs_dsize * (u_int64_t)
971 (100 - fs->fs_minfree) / (u_int64_t) 100) -
972 (u_int64_t) (fs->fs_dsize - sbp->f_bfree));
973 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
974 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
975 if (sbp != &mp->mnt_stat) {
976 memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
977 memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
978 }
979 strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
980 return (0);
981 }
982
983 /*
984 * Go through the disk queues to initiate sandbagged IO;
985 * go through the inodes to write those that have been modified;
986 * initiate the writing of the super block if it has been modified.
987 *
988 * Note: we are always called with the filesystem marked `MPBUSY'.
989 */
990 int
991 ffs_sync(mp, waitfor, cred, p)
992 struct mount *mp;
993 int waitfor;
994 struct ucred *cred;
995 struct proc *p;
996 {
997 struct vnode *vp, *nvp;
998 struct inode *ip;
999 struct ufsmount *ump = VFSTOUFS(mp);
1000 struct fs *fs;
1001 int error, allerror = 0;
1002
1003 fs = ump->um_fs;
1004 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
1005 printf("fs = %s\n", fs->fs_fsmnt);
1006 panic("update: rofs mod");
1007 }
1008 /*
1009 * Write back each (modified) inode.
1010 */
1011 simple_lock(&mntvnode_slock);
1012 loop:
1013 for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
1014 /*
1015 * If the vnode that we are about to sync is no longer
1016 * associated with this mount point, start over.
1017 */
1018 if (vp->v_mount != mp)
1019 goto loop;
1020 simple_lock(&vp->v_interlock);
1021 nvp = LIST_NEXT(vp, v_mntvnodes);
1022 ip = VTOI(vp);
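/*
 * Skip vnodes with nothing to write back: no pending inode
 * updates, no dirty buffers and no dirty pages.
 */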
1023 if (vp->v_type == VNON ||
1024 ((ip->i_flag &
1025 (IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFIED | IN_ACCESSED)) == 0 &&
1026 LIST_EMPTY(&vp->v_dirtyblkhd) &&
1027 vp->v_uobj.uo_npages == 0))
1028 {
1029 simple_unlock(&vp->v_interlock);
1030 continue;
1031 }
1032 simple_unlock(&mntvnode_slock);
1033 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
1034 if (error) {
1035 simple_lock(&mntvnode_slock);
1036 if (error == ENOENT)
1037 goto loop;
1038 continue;
1039 }
1040 if ((error = VOP_FSYNC(vp, cred,
1041 waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p)) != 0)
1042 allerror = error;
1043 vput(vp);
1044 simple_lock(&mntvnode_slock);
1045 }
1046 simple_unlock(&mntvnode_slock);
1047 /*
1048 * Force stale file system control information to be flushed.
1049 */
1050 if (waitfor != MNT_LAZY) {
1051 if (ump->um_mountp->mnt_flag & MNT_SOFTDEP)
1052 waitfor = MNT_NOWAIT;
1053 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1054 if ((error = VOP_FSYNC(ump->um_devvp, cred,
1055 waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p)) != 0)
1056 allerror = error;
1057 VOP_UNLOCK(ump->um_devvp, 0);
1058 }
1059 #ifdef QUOTA
1060 qsync(mp);
1061 #endif
1062 /*
1063 * Write back modified superblock.
1064 */
1065 if (fs->fs_fmod != 0) {
1066 fs->fs_fmod = 0;
1067 fs->fs_time = time.tv_sec;
1068 if ((error = ffs_cgupdate(ump, waitfor)))
1069 allerror = error;
1070 }
1071 return (allerror);
1072 }
1073
1074 /*
1075 * Look up a FFS dinode number to find its incore vnode, otherwise read it
1076 * in from disk. If it is in core, wait for the lock bit to clear, then
1077 * return the inode locked. Detection and handling of mount points must be
1078 * done by the calling routine.
1079 */
1080 int
1081 ffs_vget(mp, ino, vpp)
1082 struct mount *mp;
1083 ino_t ino;
1084 struct vnode **vpp;
1085 {
1086 struct fs *fs;
1087 struct inode *ip;
1088 struct ufsmount *ump;
1089 struct buf *bp;
1090 struct vnode *vp;
1091 dev_t dev;
1092 int error;
1093 caddr_t cp;
1094
1095 ump = VFSTOUFS(mp);
1096 dev = ump->um_dev;
1097
1098 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
1099 return (0);
1100
1101 /* Allocate a new vnode/inode. */
1102 if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
1103 *vpp = NULL;
1104 return (error);
1105 }
1106
1107 /*
1108 * If someone beat us to it while sleeping in getnewvnode(),
1109 * push back the freshly allocated vnode we don't need, and return.
1110 */
1111
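/*
 * LK_SLEEPFAIL makes lockmgr() return nonzero whenever it had to
 * sleep, so the hash is re-checked after every sleep in case the
 * inode was installed while we waited for ufs_hashlock.
 */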
1112 do {
1113 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
1114 ungetnewvnode(vp);
1115 return (0);
1116 }
1117 } while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));
1118
1119 /*
1120 * XXX MFS ends up here, too, to allocate an inode. Should we
1121 * XXX create another pool for MFS inodes?
1122 */
1123
1124 ip = pool_get(&ffs_inode_pool, PR_WAITOK);
1125 memset(ip, 0, sizeof(struct inode));
1126 vp->v_data = ip;
1127 ip->i_vnode = vp;
1128 ip->i_fs = fs = ump->um_fs;
1129 ip->i_dev = dev;
1130 ip->i_number = ino;
1131 LIST_INIT(&ip->i_pcbufhd);
1132 #ifdef QUOTA
1133 {
1134 int i;
1135
1136 for (i = 0; i < MAXQUOTAS; i++)
1137 ip->i_dquot[i] = NODQUOT;
1138 }
1139 #endif
1140
1141 /*
1142 * Put it onto its hash chain and lock it so that other requests for
1143 * this inode will block if they arrive while we are sleeping waiting
1144 * for old data structures to be purged or for the contents of the
1145 * disk portion of this inode to be read.
1146 */
1147
1148 ufs_ihashins(ip);
1149 lockmgr(&ufs_hashlock, LK_RELEASE, 0);
1150
1151 /* Read in the disk contents for the inode, copy into the inode. */
1152 error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1153 (int)fs->fs_bsize, NOCRED, &bp);
1154 if (error) {
1155
1156 /*
1157 * The inode does not contain anything useful, so it would
1158 * be misleading to leave it on its hash chain. With mode
1159 * still zero, it will be unlinked and returned to the free
1160 * list by vput().
1161 */
1162
1163 vput(vp);
1164 brelse(bp);
1165 *vpp = NULL;
1166 return (error);
1167 }
1168 cp = (caddr_t)bp->b_data + (ino_to_fsbo(fs, ino) * DINODE_SIZE);
1169 #ifdef FFS_EI
1170 if (UFS_FSNEEDSWAP(fs))
1171 ffs_dinode_swap((struct dinode *)cp, &ip->i_din.ffs_din);
1172 else
1173 #endif
1174 memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
1175 if (DOINGSOFTDEP(vp))
1176 softdep_load_inodeblock(ip);
1177 else
1178 ip->i_ffs_effnlink = ip->i_ffs_nlink;
1179 brelse(bp);
1180
1181 /*
1182 * Initialize the vnode from the inode, check for aliases.
1183 * Note that the underlying vnode may have changed.
1184 */
1185
1186 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
1187
1188 /*
1189 * Finish inode initialization now that aliasing has been resolved.
1190 */
1191
1192 genfs_node_init(vp, &ffs_genfsops);
1193 ip->i_devvp = ump->um_devvp;
1194 VREF(ip->i_devvp);
1195
1196 /*
1197 * Ensure that uid and gid are correct. This is a temporary
1198 * fix until fsck has been changed to do the update.
1199 */
1200
1201 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
1202 ip->i_ffs_uid = ip->i_din.ffs_din.di_ouid; /* XXX */
1203 ip->i_ffs_gid = ip->i_din.ffs_din.di_ogid; /* XXX */
1204 } /* XXX */
1205 uvm_vnp_setsize(vp, ip->i_ffs_size);
1206 *vpp = vp;
1207 return (0);
1208 }
1209
1210 /*
1211 * File handle to vnode
1212 *
1213 * Have to be really careful about stale file handles:
1214 * - check that the inode number is valid
1215 * - call ffs_vget() to get the locked inode
1216 * - check for an unallocated inode (i_mode == 0)
1217 * - check that the given client host has export rights and return
1218 * those rights via exflagsp and credanonp
1219 */
1220 int
1221 ffs_fhtovp(mp, fhp, vpp)
1222 struct mount *mp;
1223 struct fid *fhp;
1224 struct vnode **vpp;
1225 {
1226 struct ufid *ufhp;
1227 struct fs *fs;
1228
1229 ufhp = (struct ufid *)fhp;
1230 fs = VFSTOUFS(mp)->um_fs;
1231 if (ufhp->ufid_ino < ROOTINO ||
1232 ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1233 return (ESTALE);
1234 return (ufs_fhtovp(mp, ufhp, vpp));
1235 }
1236
1237 /*
1238 * Vnode pointer to File handle
1239 */
1240 /* ARGSUSED */
1241 int
1242 ffs_vptofh(vp, fhp)
1243 struct vnode *vp;
1244 struct fid *fhp;
1245 {
1246 struct inode *ip;
1247 struct ufid *ufhp;
1248
1249 ip = VTOI(vp);
1250 ufhp = (struct ufid *)fhp;
1251 ufhp->ufid_len = sizeof(struct ufid);
1252 ufhp->ufid_ino = ip->i_number;
1253 ufhp->ufid_gen = ip->i_ffs_gen;
1254 return (0);
1255 }
1256
1257 void
1258 ffs_init()
1259 {
1260 if (ffs_initcount++ > 0)
1261 return;
1262
1263 softdep_initialize();
1264 ufs_init();
1265
1266 pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
1267 &pool_allocator_nointr);
1268 }
1269
1270 void
1271 ffs_reinit()
1272 {
1273 softdep_reinitialize();
1274 ufs_reinit();
1275 }
1276
1277 void
1278 ffs_done()
1279 {
1280 if (--ffs_initcount > 0)
1281 return;
1282
1283 /* XXX softdep cleanup ? */
1284 ufs_done();
1285 pool_destroy(&ffs_inode_pool);
1286 }
1287
1288 int
1289 ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
1290 int *name;
1291 u_int namelen;
1292 void *oldp;
1293 size_t *oldlenp;
1294 void *newp;
1295 size_t newlen;
1296 struct proc *p;
1297 {
1298 extern int doasyncfree;
1299 extern int ffs_log_changeopt;
1300
1301 /* all sysctl names at this level are terminal */
1302 if (namelen != 1)
1303 return (ENOTDIR); /* overloaded */
1304
1305 switch (name[0]) {
1306 case FFS_ASYNCFREE:
1307 return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
1308 case FFS_LOG_CHANGEOPT:
1309 return (sysctl_int(oldp, oldlenp, newp, newlen,
1310 &ffs_log_changeopt));
1311 default:
1312 return (EOPNOTSUPP);
1313 }
1314 /* NOTREACHED */
1315 }
1316
1317 /*
1318 * Write a superblock and associated information back to disk.
1319 */
1320 int
1321 ffs_sbupdate(mp, waitfor)
1322 struct ufsmount *mp;
1323 int waitfor;
1324 {
1325 struct fs *fs = mp->um_fs;
1326 struct buf *bp;
1327 int i, error = 0;
1328 int32_t saved_nrpos = fs->fs_nrpos;
1329 int64_t saved_qbmask = fs->fs_qbmask;
1330 int64_t saved_qfmask = fs->fs_qfmask;
1331 u_int64_t saved_maxfilesize = fs->fs_maxfilesize;
1332 u_int8_t saveflag;
1333
1334 /* Restore compatibility to old file systems. XXX */
1335 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
1336 fs->fs_nrpos = -1; /* XXX */
1337 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
1338 int32_t *lp, tmp; /* XXX */
1339 /* XXX */
1340 lp = (int32_t *)&fs->fs_qbmask; /* XXX nuke qfmask too */
1341 tmp = lp[4]; /* XXX */
1342 for (i = 4; i > 0; i--) /* XXX */
1343 lp[i] = lp[i-1]; /* XXX */
1344 lp[0] = tmp; /* XXX */
1345 } /* XXX */
1346 fs->fs_maxfilesize = mp->um_savedmaxfilesize; /* XXX */
1347
1348 bp = getblk(mp->um_devvp, SBOFF >> (fs->fs_fshift - fs->fs_fsbtodb),
1349 (int)fs->fs_sbsize, 0, 0);
1350 saveflag = fs->fs_flags & FS_INTERNAL;
1351 fs->fs_flags &= ~FS_INTERNAL;
1352 memcpy(bp->b_data, fs, fs->fs_sbsize);
1353 #ifdef FFS_EI
1354 if (mp->um_flags & UFS_NEEDSWAP)
1355 ffs_sb_swap(fs, (struct fs*)bp->b_data);
1356 #endif
1357
1358 fs->fs_flags |= saveflag;
1359 fs->fs_nrpos = saved_nrpos; /* XXX */
1360 fs->fs_qbmask = saved_qbmask; /* XXX */
1361 fs->fs_qfmask = saved_qfmask; /* XXX */
1362 fs->fs_maxfilesize = saved_maxfilesize; /* XXX */
1363
1364 if (waitfor == MNT_WAIT)
1365 error = bwrite(bp);
1366 else
1367 bawrite(bp);
1368 return (error);
1369 }
1370
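/*
 * Write the superblock and the in-core cylinder group summary
 * information back to disk.
 */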
1371 int
1372 ffs_cgupdate(mp, waitfor)
1373 struct ufsmount *mp;
1374 int waitfor;
1375 {
1376 struct fs *fs = mp->um_fs;
1377 struct buf *bp;
1378 int blks;
1379 void *space;
1380 int i, size, error = 0, allerror = 0;
1381
1382 allerror = ffs_sbupdate(mp, waitfor);
1383 blks = howmany(fs->fs_cssize, fs->fs_fsize);
1384 space = fs->fs_csp;
1385 for (i = 0; i < blks; i += fs->fs_frag) {
1386 size = fs->fs_bsize;
1387 if (i + fs->fs_frag > blks)
1388 size = (blks - i) * fs->fs_fsize;
1389 bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1390 size, 0, 0);
1391 #ifdef FFS_EI
1392 if (mp->um_flags & UFS_NEEDSWAP)
1393 ffs_csum_swap((struct csum*)space,
1394 (struct csum*)bp->b_data, size);
1395 else
1396 #endif
1397 memcpy(bp->b_data, space, (u_int)size);
1398 space = (char *)space + size;
1399 if (waitfor == MNT_WAIT)
1400 error = bwrite(bp);
1401 else
1402 bawrite(bp);
1403 }
1404 if (!allerror && error)
1405 allerror = error;
1406 return (allerror);
1407 }
1408