1 /*	$NetBSD: ffs_vfsops.c,v 1.101 2002/09/06 13:18:43 gehenna Exp $	*/
2
3 /*
4 * Copyright (c) 1989, 1991, 1993, 1994
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
36 */
37
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.101 2002/09/06 13:18:43 gehenna Exp $");
40
41 #if defined(_KERNEL_OPT)
42 #include "opt_ffs.h"
43 #include "opt_quota.h"
44 #include "opt_compat_netbsd.h"
45 #include "opt_softdep.h"
46 #endif
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/namei.h>
51 #include <sys/proc.h>
52 #include <sys/kernel.h>
53 #include <sys/vnode.h>
54 #include <sys/socket.h>
55 #include <sys/mount.h>
56 #include <sys/buf.h>
57 #include <sys/device.h>
58 #include <sys/mbuf.h>
59 #include <sys/file.h>
60 #include <sys/disklabel.h>
61 #include <sys/ioctl.h>
62 #include <sys/errno.h>
63 #include <sys/malloc.h>
64 #include <sys/pool.h>
65 #include <sys/lock.h>
66 #include <sys/sysctl.h>
67 #include <sys/conf.h>
68
69 #include <miscfs/specfs/specdev.h>
70
71 #include <ufs/ufs/quota.h>
72 #include <ufs/ufs/ufsmount.h>
73 #include <ufs/ufs/inode.h>
74 #include <ufs/ufs/dir.h>
75 #include <ufs/ufs/ufs_extern.h>
76 #include <ufs/ufs/ufs_bswap.h>
77
78 #include <ufs/ffs/fs.h>
79 #include <ufs/ffs/ffs_extern.h>
80
81 /* how many times ffs_init() was called */
82 int ffs_initcount = 0;
83
84 extern struct lock ufs_hashlock;
85
86 extern struct vnodeopv_desc ffs_vnodeop_opv_desc;
87 extern struct vnodeopv_desc ffs_specop_opv_desc;
88 extern struct vnodeopv_desc ffs_fifoop_opv_desc;
89
90 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
91 &ffs_vnodeop_opv_desc,
92 &ffs_specop_opv_desc,
93 &ffs_fifoop_opv_desc,
94 NULL,
95 };
96
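/*
 * VFS operations vector for FFS; the initializers are positional and
 * must stay in struct vfsops order.
 */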
97 struct vfsops ffs_vfsops = {
98 MOUNT_FFS,
99 ffs_mount,
100 ufs_start,
101 ffs_unmount,
102 ufs_root,
103 ufs_quotactl,
104 ffs_statfs,
105 ffs_sync,
106 ffs_vget,
107 ffs_fhtovp,
108 ffs_vptofh,
109 ffs_init,
110 ffs_reinit,
111 ffs_done,
112 ffs_sysctl,
113 ffs_mountroot,
114 ufs_check_export,
115 ffs_vnodeopv_descs,
116 };
117
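/* genfs hooks for FFS: file size, block allocation and page writeback. */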
118 struct genfs_ops ffs_genfsops = {
119 ffs_gop_size,
120 ffs_gop_alloc,
121 genfs_gop_write,
122 };
123
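/* Pool backing in-core FFS inodes; set up in ffs_init(), torn down in ffs_done(). */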
124 struct pool ffs_inode_pool;
125
126 /*
127 * Called by main() when ffs is going to be mounted as root.
128 */
129
130 int
131 ffs_mountroot()
132 {
133 struct fs *fs;
134 struct mount *mp;
135 struct proc *p = curproc; /* XXX */
136 struct ufsmount *ump;
137 int error;
138
139 if (root_device->dv_class != DV_DISK)
140 return (ENODEV);
141
142 /*
143 	 * Get a vnode for rootdev.
144 */
145 if (bdevvp(rootdev, &rootvp))
146 panic("ffs_mountroot: can't setup bdevvp's");
147
148 if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
149 vrele(rootvp);
150 return (error);
151 }
152 if ((error = ffs_mountfs(rootvp, mp, p)) != 0) {
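		/* Drop the vfsops reference that vfs_rootmountalloc() took. */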
153 mp->mnt_op->vfs_refcount--;
154 vfs_unbusy(mp);
155 free(mp, M_MOUNT);
156 vrele(rootvp);
157 return (error);
158 }
159 simple_lock(&mountlist_slock);
160 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
161 simple_unlock(&mountlist_slock);
162 ump = VFSTOUFS(mp);
163 fs = ump->um_fs;
164 memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
165 (void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
166 (void)ffs_statfs(mp, &mp->mnt_stat, p);
167 vfs_unbusy(mp);
168 inittodr(fs->fs_time);
169 return (0);
170 }
171
172 /*
173 * VFS Operations.
174 *
175 * mount system call
176 */
177 int
178 ffs_mount(mp, path, data, ndp, p)
179 struct mount *mp;
180 const char *path;
181 void *data;
182 struct nameidata *ndp;
183 struct proc *p;
184 {
185 struct vnode *devvp;
186 struct ufs_args args;
187 struct ufsmount *ump = NULL;
188 struct fs *fs;
189 size_t size;
190 int error, flags, update;
191 mode_t accessmode;
192
193 error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args));
194 if (error)
195 return (error);
196
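/* Kernels built without SOFTDEP quietly ignore requests for soft updates. */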
197 #if !defined(SOFTDEP)
198 mp->mnt_flag &= ~MNT_SOFTDEP;
199 #endif
200
201 update = mp->mnt_flag & MNT_UPDATE;
202
203 /* Check arguments */
204 if (update) {
205 /* Use the extant mount */
206 ump = VFSTOUFS(mp);
207 devvp = ump->um_devvp;
208 if (args.fspec == NULL)
209 vref(devvp);
210 } else {
211 /* New mounts must have a filename for the device */
212 if (args.fspec == NULL)
213 return (EINVAL);
214 }
215
216 if (args.fspec != NULL) {
217 /*
218 * Look up the name and verify that it's sane.
219 */
220 NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
221 if ((error = namei(ndp)) != 0)
222 return (error);
223 devvp = ndp->ni_vp;
224
225 if (!update) {
226 /*
227 * Be sure this is a valid block device
228 */
229 if (devvp->v_type != VBLK)
230 error = ENOTBLK;
231 else if (bdevsw_lookup(devvp->v_rdev) == NULL)
232 error = ENXIO;
233 } else {
234 /*
235 * Be sure we're still naming the same device
236 * used for our initial mount
237 */
238 if (devvp != ump->um_devvp)
239 error = EINVAL;
240 }
241 }
242
243 /*
244 * If mount by non-root, then verify that user has necessary
245 * permissions on the device.
246 */
247 if (error == 0 && p->p_ucred->cr_uid != 0) {
248 accessmode = VREAD;
249 if (update ?
250 (mp->mnt_flag & MNT_WANTRDWR) != 0 :
251 (mp->mnt_flag & MNT_RDONLY) == 0)
252 accessmode |= VWRITE;
253 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
254 error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
255 VOP_UNLOCK(devvp, 0);
256 }
257
258 if (error) {
259 vrele(devvp);
260 return (error);
261 }
262
263 if (!update) {
264 error = ffs_mountfs(devvp, mp, p);
265 if (error) {
266 vrele(devvp);
267 return (error);
268 }
269
270 ump = VFSTOUFS(mp);
271 fs = ump->um_fs;
272 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
273 (MNT_SOFTDEP | MNT_ASYNC)) {
274 printf("%s fs uses soft updates, "
275 "ignoring async mode\n",
276 fs->fs_fsmnt);
277 mp->mnt_flag &= ~MNT_ASYNC;
278 }
279 } else {
280 /*
281 * Update the mount.
282 */
283
284 /*
285 * The initial mount got a reference on this
286 * device, so drop the one obtained via
287 * namei(), above.
288 */
289 vrele(devvp);
290
291 fs = ump->um_fs;
292 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
293 /*
294 * Changing from r/w to r/o
295 */
296 flags = WRITECLOSE;
297 if (mp->mnt_flag & MNT_FORCE)
298 flags |= FORCECLOSE;
299 if (mp->mnt_flag & MNT_SOFTDEP)
300 error = softdep_flushfiles(mp, flags, p);
301 else
302 error = ffs_flushfiles(mp, flags, p);
303 if (fs->fs_pendingblocks != 0 ||
304 fs->fs_pendinginodes != 0) {
305 printf("%s: update error: blocks %d files %d\n",
306 fs->fs_fsmnt, fs->fs_pendingblocks,
307 fs->fs_pendinginodes);
308 fs->fs_pendingblocks = 0;
309 fs->fs_pendinginodes = 0;
310 }
311 if (error == 0 &&
312 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
313 fs->fs_clean & FS_WASCLEAN) {
314 if (mp->mnt_flag & MNT_SOFTDEP)
315 fs->fs_flags &= ~FS_DOSOFTDEP;
316 fs->fs_clean = FS_ISCLEAN;
317 (void) ffs_sbupdate(ump, MNT_WAIT);
318 }
319 if (error)
320 return (error);
321 fs->fs_ronly = 1;
322 fs->fs_fmod = 0;
323 }
324
325 /*
326 		 * Flush soft dependencies if they are being disabled by this
327 		 * update mount. This may leave some items to be processed,
328 		 * so don't do this yet XXX.
329 */
330 if ((fs->fs_flags & FS_DOSOFTDEP) &&
331 !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
332 #ifdef notyet
333 flags = WRITECLOSE;
334 if (mp->mnt_flag & MNT_FORCE)
335 flags |= FORCECLOSE;
336 error = softdep_flushfiles(mp, flags, p);
337 if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
338 fs->fs_flags &= ~FS_DOSOFTDEP;
339 (void) ffs_sbupdate(ump, MNT_WAIT);
340 #elif defined(SOFTDEP)
341 mp->mnt_flag |= MNT_SOFTDEP;
342 #endif
343 }
344
345 /*
346 * When upgrading to a softdep mount, we must first flush
347 * all vnodes. (not done yet -- see above)
348 */
349 if (!(fs->fs_flags & FS_DOSOFTDEP) &&
350 (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
351 #ifdef notyet
352 flags = WRITECLOSE;
353 if (mp->mnt_flag & MNT_FORCE)
354 flags |= FORCECLOSE;
355 error = ffs_flushfiles(mp, flags, p);
356 #else
357 mp->mnt_flag &= ~MNT_SOFTDEP;
358 #endif
359 }
360
361 if (mp->mnt_flag & MNT_RELOAD) {
362 error = ffs_reload(mp, p->p_ucred, p);
363 if (error)
364 return (error);
365 }
366
367 if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
368 /*
369 * Changing from read-only to read/write
370 */
371 fs->fs_ronly = 0;
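			/* FS_ISCLEAN shifts up to FS_WASCLEAN, recording that the volume was clean before this r/w upgrade. */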
372 fs->fs_clean <<= 1;
373 fs->fs_fmod = 1;
374 if ((fs->fs_flags & FS_DOSOFTDEP)) {
375 error = softdep_mount(devvp, mp, fs,
376 p->p_ucred);
377 if (error)
378 return (error);
379 }
380 }
381 if (args.fspec == 0) {
382 /*
383 * Process export requests.
384 */
385 return (vfs_export(mp, &ump->um_export, &args.export));
386 }
387 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
388 (MNT_SOFTDEP | MNT_ASYNC)) {
389 printf("%s fs uses soft updates, ignoring async mode\n",
390 fs->fs_fsmnt);
391 mp->mnt_flag &= ~MNT_ASYNC;
392 }
393 }
394
395 (void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
396 memset(fs->fs_fsmnt + size, 0, sizeof(fs->fs_fsmnt) - size);
397 memcpy(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN);
398 (void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
399 &size);
400 memset(mp->mnt_stat.f_mntfromname + size, 0, MNAMELEN - size);
401 if (mp->mnt_flag & MNT_SOFTDEP)
402 fs->fs_flags |= FS_DOSOFTDEP;
403 else
404 fs->fs_flags &= ~FS_DOSOFTDEP;
405 if (fs->fs_fmod != 0) { /* XXX */
406 fs->fs_fmod = 0;
407 if (fs->fs_clean & FS_WASCLEAN)
408 fs->fs_time = time.tv_sec;
409 else {
410 printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
411 mp->mnt_stat.f_mntfromname, fs->fs_clean);
412 printf("%s: lost blocks %d files %d\n",
413 mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
414 fs->fs_pendinginodes);
415 }
416 (void) ffs_cgupdate(ump, MNT_WAIT);
417 }
418 return (0);
419 }
420
421 /*
422 * Reload all incore data for a filesystem (used after running fsck on
423 * the root filesystem and finding things to fix). The filesystem must
424 * be mounted read-only.
425 *
426 * Things to do to update the mount:
427 * 1) invalidate all cached meta-data.
428 * 2) re-read superblock from disk.
429 * 3) re-read summary information from disk.
430 * 4) invalidate all inactive vnodes.
431 * 5) invalidate all cached file data.
432 * 6) re-read inode data for all active vnodes.
433 */
434 int
435 ffs_reload(mountp, cred, p)
436 struct mount *mountp;
437 struct ucred *cred;
438 struct proc *p;
439 {
440 struct vnode *vp, *nvp, *devvp;
441 struct inode *ip;
442 void *space;
443 struct buf *bp;
444 struct fs *fs, *newfs;
445 struct partinfo dpart;
446 int i, blks, size, error;
447 int32_t *lp;
448 caddr_t cp;
449
450 if ((mountp->mnt_flag & MNT_RDONLY) == 0)
451 return (EINVAL);
452 /*
453 * Step 1: invalidate all cached meta-data.
454 */
455 devvp = VFSTOUFS(mountp)->um_devvp;
456 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
457 error = vinvalbuf(devvp, 0, cred, p, 0, 0);
458 VOP_UNLOCK(devvp, 0);
459 if (error)
460 panic("ffs_reload: dirty1");
461 /*
462 * Step 2: re-read superblock from disk.
463 */
464 if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
465 size = DEV_BSIZE;
466 else
467 size = dpart.disklab->d_secsize;
468 error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, NOCRED, &bp);
469 if (error) {
470 brelse(bp);
471 return (error);
472 }
473 fs = VFSTOUFS(mountp)->um_fs;
474 newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
475 memcpy(newfs, bp->b_data, fs->fs_sbsize);
476 #ifdef FFS_EI
477 if (VFSTOUFS(mountp)->um_flags & UFS_NEEDSWAP) {
478 ffs_sb_swap((struct fs*)bp->b_data, newfs);
479 fs->fs_flags |= FS_SWAPPED;
480 }
481 #endif
482 if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
483 newfs->fs_bsize < sizeof(struct fs)) {
484 brelse(bp);
485 free(newfs, M_UFSMNT);
486 return (EIO); /* XXX needs translation */
487 }
488 /*
489 * Copy pointer fields back into superblock before copying in XXX
490 * new superblock. These should really be in the ufsmount. XXX
491 * Note that important parameters (eg fs_ncg) are unchanged.
492 */
493 newfs->fs_csp = fs->fs_csp;
494 newfs->fs_maxcluster = fs->fs_maxcluster;
495 newfs->fs_contigdirs = fs->fs_contigdirs;
496 newfs->fs_ronly = fs->fs_ronly;
497 memcpy(fs, newfs, (u_int)fs->fs_sbsize);
498 if (fs->fs_sbsize < SBSIZE)
499 bp->b_flags |= B_INVAL;
500 brelse(bp);
501 free(newfs, M_UFSMNT);
502 mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
503 ffs_oldfscompat(fs);
504 /* An old fsck may have zeroed these fields, so recheck them. */
505 if (fs->fs_avgfilesize <= 0)
506 fs->fs_avgfilesize = AVFILESIZ;
507 if (fs->fs_avgfpdir <= 0)
508 fs->fs_avgfpdir = AFPDIR;
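	/* Pending block/inode counts left over in the reread superblock are presumed stale; clear them. */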
509 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
510 fs->fs_pendingblocks = 0;
511 fs->fs_pendinginodes = 0;
512 }
513
514 ffs_statfs(mountp, &mountp->mnt_stat, p);
515 /*
516 * Step 3: re-read summary information from disk.
517 */
518 blks = howmany(fs->fs_cssize, fs->fs_fsize);
519 space = fs->fs_csp;
520 for (i = 0; i < blks; i += fs->fs_frag) {
521 size = fs->fs_bsize;
522 if (i + fs->fs_frag > blks)
523 size = (blks - i) * fs->fs_fsize;
524 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
525 NOCRED, &bp);
526 if (error) {
527 brelse(bp);
528 return (error);
529 }
530 #ifdef FFS_EI
531 if (UFS_FSNEEDSWAP(fs))
532 ffs_csum_swap((struct csum *)bp->b_data,
533 (struct csum *)space, size);
534 else
535 #endif
536 memcpy(space, bp->b_data, (size_t)size);
537 space = (char *)space + size;
538 brelse(bp);
539 }
540 if ((fs->fs_flags & FS_DOSOFTDEP))
541 softdep_mount(devvp, mountp, fs, cred);
542 /*
543 * We no longer know anything about clusters per cylinder group.
544 */
545 if (fs->fs_contigsumsize > 0) {
546 lp = fs->fs_maxcluster;
547 for (i = 0; i < fs->fs_ncg; i++)
548 *lp++ = fs->fs_contigsumsize;
549 }
550
551 loop:
552 simple_lock(&mntvnode_slock);
553 for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
554 if (vp->v_mount != mountp) {
555 simple_unlock(&mntvnode_slock);
556 goto loop;
557 }
558 nvp = vp->v_mntvnodes.le_next;
559 /*
560 * Step 4: invalidate all inactive vnodes.
561 */
562 if (vrecycle(vp, &mntvnode_slock, p))
563 goto loop;
564 /*
565 * Step 5: invalidate all cached file data.
566 */
567 simple_lock(&vp->v_interlock);
568 simple_unlock(&mntvnode_slock);
569 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
570 goto loop;
571 if (vinvalbuf(vp, 0, cred, p, 0, 0))
572 panic("ffs_reload: dirty2");
573 /*
574 * Step 6: re-read inode data for all active vnodes.
575 */
576 ip = VTOI(vp);
577 error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
578 (int)fs->fs_bsize, NOCRED, &bp);
579 if (error) {
580 brelse(bp);
581 vput(vp);
582 return (error);
583 }
584 cp = (caddr_t)bp->b_data +
585 (ino_to_fsbo(fs, ip->i_number) * DINODE_SIZE);
586 #ifdef FFS_EI
587 if (UFS_FSNEEDSWAP(fs))
588 ffs_dinode_swap((struct dinode *)cp,
589 &ip->i_din.ffs_din);
590 else
591 #endif
592 memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
593 ip->i_ffs_effnlink = ip->i_ffs_nlink;
594 brelse(bp);
595 vput(vp);
596 simple_lock(&mntvnode_slock);
597 }
598 simple_unlock(&mntvnode_slock);
599 return (0);
600 }
601
602 /*
603 * Common code for mount and mountroot
604 */
605 int
606 ffs_mountfs(devvp, mp, p)
607 struct vnode *devvp;
608 struct mount *mp;
609 struct proc *p;
610 {
611 struct ufsmount *ump;
612 struct buf *bp;
613 struct fs *fs;
614 dev_t dev;
615 struct partinfo dpart;
616 void *space;
617 int blks;
618 int error, i, size, ronly;
619 #ifdef FFS_EI
620 int needswap;
621 #endif
622 int32_t *lp;
623 struct ucred *cred;
624 u_int64_t maxfilesize; /* XXX */
625 u_int32_t sbsize;
626
627 dev = devvp->v_rdev;
628 cred = p ? p->p_ucred : NOCRED;
629 /*
630 * Disallow multiple mounts of the same device.
631 * Disallow mounting of a device that is currently in use
632 * (except for root, which might share swap device for miniroot).
633 * Flush out any old buffers remaining from a previous use.
634 */
635 if ((error = vfs_mountedon(devvp)) != 0)
636 return (error);
637 if (vcount(devvp) > 1 && devvp != rootvp)
638 return (EBUSY);
639 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
640 error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
641 VOP_UNLOCK(devvp, 0);
642 if (error)
643 return (error);
644
645 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
646 error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
647 if (error)
648 return (error);
649 if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
650 size = DEV_BSIZE;
651 else
652 size = dpart.disklab->d_secsize;
653
654 bp = NULL;
655 ump = NULL;
656 error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, cred, &bp);
657 if (error)
658 goto out;
659
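	/*
	 * The superblock may be stored in either byte order; with FFS_EI
	 * configured both are accepted and the need for swapping is noted.
	 */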
660 fs = (struct fs*)bp->b_data;
661 if (fs->fs_magic == FS_MAGIC) {
662 sbsize = fs->fs_sbsize;
663 #ifdef FFS_EI
664 needswap = 0;
665 } else if (fs->fs_magic == bswap32(FS_MAGIC)) {
666 sbsize = bswap32(fs->fs_sbsize);
667 needswap = 1;
668 #endif
669 } else {
670 error = EINVAL;
671 goto out;
672 }
673 if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs)) {
674 error = EINVAL;
675 goto out;
676 }
677
678 fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
679 memcpy(fs, bp->b_data, sbsize);
680 #ifdef FFS_EI
681 if (needswap) {
682 ffs_sb_swap((struct fs*)bp->b_data, fs);
683 fs->fs_flags |= FS_SWAPPED;
684 }
685 #endif
686 ffs_oldfscompat(fs);
687
688 if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < sizeof(struct fs)) {
689 error = EINVAL;
690 goto out;
691 }
692 /* make sure cylinder group summary area is a reasonable size. */
693 if (fs->fs_cgsize == 0 || fs->fs_cpg == 0 ||
694 fs->fs_ncg > fs->fs_ncyl / fs->fs_cpg + 1 ||
695 fs->fs_cssize >
696 fragroundup(fs, fs->fs_ncg * sizeof(struct csum))) {
697 error = EINVAL; /* XXX needs translation */
698 goto out2;
699 }
700 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
701 fs->fs_pendingblocks = 0;
702 fs->fs_pendinginodes = 0;
703 }
704 /* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
705 if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
706 error = EROFS; /* XXX what should be returned? */
707 goto out2;
708 }
709
710 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
711 memset((caddr_t)ump, 0, sizeof *ump);
712 ump->um_fs = fs;
713 if (fs->fs_sbsize < SBSIZE)
714 bp->b_flags |= B_INVAL;
715 brelse(bp);
716 bp = NULL;
717
718 /*
719 * verify that we can access the last block in the fs
720 * if we're mounting read/write.
721 */
722
723 if (!ronly) {
724 error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize,
725 cred, &bp);
726 if (bp->b_bcount != fs->fs_fsize)
727 error = EINVAL;
728 bp->b_flags |= B_INVAL;
729 if (error)
730 goto out;
731 brelse(bp);
732 bp = NULL;
733 }
734
735 fs->fs_ronly = ronly;
736 if (ronly == 0) {
737 fs->fs_clean <<= 1;
738 fs->fs_fmod = 1;
739 }
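	/*
	 * The cylinder group summaries, the per-cg cluster summary array
	 * and the per-cg directory counts are carved out of one allocation.
	 */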
740 size = fs->fs_cssize;
741 blks = howmany(size, fs->fs_fsize);
742 if (fs->fs_contigsumsize > 0)
743 size += fs->fs_ncg * sizeof(int32_t);
744 size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
745 space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
746 fs->fs_csp = space;
747 for (i = 0; i < blks; i += fs->fs_frag) {
748 size = fs->fs_bsize;
749 if (i + fs->fs_frag > blks)
750 size = (blks - i) * fs->fs_fsize;
751 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
752 cred, &bp);
753 if (error) {
754 free(fs->fs_csp, M_UFSMNT);
755 goto out2;
756 }
757 #ifdef FFS_EI
758 if (needswap)
759 ffs_csum_swap((struct csum *)bp->b_data,
760 (struct csum *)space, size);
761 else
762 #endif
763 memcpy(space, bp->b_data, (u_int)size);
764
765 space = (char *)space + size;
766 brelse(bp);
767 bp = NULL;
768 }
769 if (fs->fs_contigsumsize > 0) {
770 fs->fs_maxcluster = lp = space;
771 for (i = 0; i < fs->fs_ncg; i++)
772 *lp++ = fs->fs_contigsumsize;
773 space = lp;
774 }
775 size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
776 fs->fs_contigdirs = space;
777 space = (char *)space + size;
778 memset(fs->fs_contigdirs, 0, size);
779 /* Compatibility for old filesystems - XXX */
780 if (fs->fs_avgfilesize <= 0)
781 fs->fs_avgfilesize = AVFILESIZ;
782 if (fs->fs_avgfpdir <= 0)
783 fs->fs_avgfpdir = AFPDIR;
784 mp->mnt_data = ump;
785 mp->mnt_stat.f_fsid.val[0] = (long)dev;
786 mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_FFS);
787 mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
788 mp->mnt_fs_bshift = fs->fs_bshift;
789 mp->mnt_dev_bshift = DEV_BSHIFT; /* XXX */
790 mp->mnt_flag |= MNT_LOCAL;
791 #ifdef FFS_EI
792 if (needswap)
793 ump->um_flags |= UFS_NEEDSWAP;
794 #endif
795 ump->um_mountp = mp;
796 ump->um_dev = dev;
797 ump->um_devvp = devvp;
798 ump->um_nindir = fs->fs_nindir;
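	/* fs_nindir is a power of two, so ffs() (find first set) returns log2(fs_nindir) + 1. */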
799 ump->um_lognindir = ffs(fs->fs_nindir) - 1;
800 ump->um_bptrtodb = fs->fs_fsbtodb;
801 ump->um_seqinc = fs->fs_frag;
802 for (i = 0; i < MAXQUOTAS; i++)
803 ump->um_quotas[i] = NULLVP;
804 devvp->v_specmountpoint = mp;
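	/*
	 * Clamp the in-core fs_maxfilesize so logical block numbers still
	 * fit in 32 bits; the on-disk value is kept in um_savedmaxfilesize
	 * and written back by ffs_sbupdate().
	 */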
805 ump->um_savedmaxfilesize = fs->fs_maxfilesize; /* XXX */
806 maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1; /* XXX */
807 if (fs->fs_maxfilesize > maxfilesize) /* XXX */
808 fs->fs_maxfilesize = maxfilesize; /* XXX */
809 if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
810 error = softdep_mount(devvp, mp, fs, cred);
811 if (error) {
812 free(fs->fs_csp, M_UFSMNT);
813 goto out;
814 }
815 }
816 return (0);
817 out2:
818 free(fs, M_UFSMNT);
819 out:
820 devvp->v_specmountpoint = NULL;
821 if (bp)
822 brelse(bp);
823 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
824 (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
825 VOP_UNLOCK(devvp, 0);
826 if (ump) {
827 free(ump, M_UFSMNT);
828 mp->mnt_data = NULL;
829 }
830 return (error);
831 }
832
833 /*
834 * Sanity checks for old file systems.
835 *
836 * XXX - goes away some day.
837 */
838 int
839 ffs_oldfscompat(fs)
840 struct fs *fs;
841 {
842 int i;
843
844 fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect); /* XXX */
845 fs->fs_interleave = max(fs->fs_interleave, 1); /* XXX */
846 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
847 fs->fs_nrpos = 8; /* XXX */
848 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
849 u_int64_t sizepb = fs->fs_bsize; /* XXX */
850 /* XXX */
851 fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1; /* XXX */
852 for (i = 0; i < NIADDR; i++) { /* XXX */
853 sizepb *= NINDIR(fs); /* XXX */
854 fs->fs_maxfilesize += sizepb; /* XXX */
855 } /* XXX */
856 fs->fs_qbmask = ~fs->fs_bmask; /* XXX */
857 fs->fs_qfmask = ~fs->fs_fmask; /* XXX */
858 } /* XXX */
859 return (0);
860 }
861
862 /*
863 * unmount system call
864 */
865 int
866 ffs_unmount(mp, mntflags, p)
867 struct mount *mp;
868 int mntflags;
869 struct proc *p;
870 {
871 struct ufsmount *ump;
872 struct fs *fs;
873 int error, flags, penderr;
874
875 penderr = 0;
876 flags = 0;
877 if (mntflags & MNT_FORCE)
878 flags |= FORCECLOSE;
879 if (mp->mnt_flag & MNT_SOFTDEP) {
880 if ((error = softdep_flushfiles(mp, flags, p)) != 0)
881 return (error);
882 } else {
883 if ((error = ffs_flushfiles(mp, flags, p)) != 0)
884 return (error);
885 }
886 ump = VFSTOUFS(mp);
887 fs = ump->um_fs;
888 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
889 printf("%s: unmount pending error: blocks %d files %d\n",
890 fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
891 fs->fs_pendingblocks = 0;
892 fs->fs_pendinginodes = 0;
893 penderr = 1;
894 }
895 if (fs->fs_ronly == 0 &&
896 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
897 fs->fs_clean & FS_WASCLEAN) {
898 /*
899 * XXXX don't mark fs clean in the case of softdep
900 * pending block errors, until they are fixed.
901 */
902 if (penderr == 0) {
903 if (mp->mnt_flag & MNT_SOFTDEP)
904 fs->fs_flags &= ~FS_DOSOFTDEP;
905 fs->fs_clean = FS_ISCLEAN;
906 }
907 (void) ffs_sbupdate(ump, MNT_WAIT);
908 }
909 if (ump->um_devvp->v_type != VBAD)
910 ump->um_devvp->v_specmountpoint = NULL;
911 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
912 error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
913 NOCRED, p);
914 vput(ump->um_devvp);
915 free(fs->fs_csp, M_UFSMNT);
916 free(fs, M_UFSMNT);
917 free(ump, M_UFSMNT);
918 mp->mnt_data = NULL;
919 mp->mnt_flag &= ~MNT_LOCAL;
920 return (error);
921 }
922
923 /*
924 * Flush out all the files in a filesystem.
925 */
926 int
927 ffs_flushfiles(mp, flags, p)
928 struct mount *mp;
929 int flags;
930 struct proc *p;
931 {
932 extern int doforce;
933 struct ufsmount *ump;
934 int error;
935
936 if (!doforce)
937 flags &= ~FORCECLOSE;
938 ump = VFSTOUFS(mp);
939 #ifdef QUOTA
940 if (mp->mnt_flag & MNT_QUOTA) {
941 int i;
942 if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
943 return (error);
944 for (i = 0; i < MAXQUOTAS; i++) {
945 if (ump->um_quotas[i] == NULLVP)
946 continue;
947 quotaoff(p, mp, i);
948 }
949 /*
950 * Here we fall through to vflush again to ensure
951 * that we have gotten rid of all the system vnodes.
952 */
953 }
954 #endif
955 /*
956 * Flush all the files.
957 */
958 error = vflush(mp, NULLVP, flags);
959 if (error)
960 return (error);
961 /*
962 * Flush filesystem metadata.
963 */
964 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
965 error = VOP_FSYNC(ump->um_devvp, p->p_ucred, FSYNC_WAIT, 0, 0, p);
966 VOP_UNLOCK(ump->um_devvp, 0);
967 return (error);
968 }
969
970 /*
971 * Get file system statistics.
972 */
973 int
974 ffs_statfs(mp, sbp, p)
975 struct mount *mp;
976 struct statfs *sbp;
977 struct proc *p;
978 {
979 struct ufsmount *ump;
980 struct fs *fs;
981
982 ump = VFSTOUFS(mp);
983 fs = ump->um_fs;
984 if (fs->fs_magic != FS_MAGIC)
985 panic("ffs_statfs");
986 #ifdef COMPAT_09
987 sbp->f_type = 1;
988 #else
989 sbp->f_type = 0;
990 #endif
991 sbp->f_bsize = fs->fs_fsize;
992 sbp->f_iosize = fs->fs_bsize;
993 sbp->f_blocks = fs->fs_dsize;
994 sbp->f_bfree = blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
995 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
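	/* f_bavail: free space left to ordinary users once the fs_minfree reserve of the data area is set aside. */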
996 sbp->f_bavail = (long) (((u_int64_t) fs->fs_dsize * (u_int64_t)
997 (100 - fs->fs_minfree) / (u_int64_t) 100) -
998 (u_int64_t) (fs->fs_dsize - sbp->f_bfree));
999 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
1000 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1001 if (sbp != &mp->mnt_stat) {
1002 memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
1003 memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
1004 }
1005 strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
1006 return (0);
1007 }
1008
1009 /*
1010 * Go through the disk queues to initiate sandbagged IO;
1011 * go through the inodes to write those that have been modified;
1012 * initiate the writing of the super block if it has been modified.
1013 *
1014 * Note: we are always called with the filesystem marked `MPBUSY'.
1015 */
1016 int
1017 ffs_sync(mp, waitfor, cred, p)
1018 struct mount *mp;
1019 int waitfor;
1020 struct ucred *cred;
1021 struct proc *p;
1022 {
1023 struct vnode *vp, *nvp;
1024 struct inode *ip;
1025 struct ufsmount *ump = VFSTOUFS(mp);
1026 struct fs *fs;
1027 int error, allerror = 0;
1028
1029 fs = ump->um_fs;
1030 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
1031 printf("fs = %s\n", fs->fs_fsmnt);
1032 panic("update: rofs mod");
1033 }
1034 /*
1035 * Write back each (modified) inode.
1036 */
1037 simple_lock(&mntvnode_slock);
1038 loop:
1039 for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
1040 /*
1041 * If the vnode that we are about to sync is no longer
1042 * associated with this mount point, start over.
1043 */
1044 if (vp->v_mount != mp)
1045 goto loop;
1046 simple_lock(&vp->v_interlock);
1047 nvp = LIST_NEXT(vp, v_mntvnodes);
1048 ip = VTOI(vp);
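		/* Skip vnodes that are VNON or have no pending inode updates, dirty buffers or cached pages. */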
1049 if (vp->v_type == VNON ||
1050 ((ip->i_flag &
1051 (IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFIED | IN_ACCESSED)) == 0 &&
1052 LIST_EMPTY(&vp->v_dirtyblkhd) &&
1053 vp->v_uobj.uo_npages == 0))
1054 {
1055 simple_unlock(&vp->v_interlock);
1056 continue;
1057 }
1058 simple_unlock(&mntvnode_slock);
1059 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
1060 if (error) {
1061 simple_lock(&mntvnode_slock);
1062 if (error == ENOENT)
1063 goto loop;
1064 continue;
1065 }
1066 if ((error = VOP_FSYNC(vp, cred,
1067 waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p)) != 0)
1068 allerror = error;
1069 vput(vp);
1070 simple_lock(&mntvnode_slock);
1071 }
1072 simple_unlock(&mntvnode_slock);
1073 /*
1074 * Force stale file system control information to be flushed.
1075 */
1076 if (waitfor != MNT_LAZY) {
1077 if (ump->um_mountp->mnt_flag & MNT_SOFTDEP)
1078 waitfor = MNT_NOWAIT;
1079 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1080 if ((error = VOP_FSYNC(ump->um_devvp, cred,
1081 waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p)) != 0)
1082 allerror = error;
1083 VOP_UNLOCK(ump->um_devvp, 0);
1084 }
1085 #ifdef QUOTA
1086 qsync(mp);
1087 #endif
1088 /*
1089 * Write back modified superblock.
1090 */
1091 if (fs->fs_fmod != 0) {
1092 fs->fs_fmod = 0;
1093 fs->fs_time = time.tv_sec;
1094 if ((error = ffs_cgupdate(ump, waitfor)))
1095 allerror = error;
1096 }
1097 return (allerror);
1098 }
1099
1100 /*
1101 * Look up a FFS dinode number to find its incore vnode, otherwise read it
1102 * in from disk. If it is in core, wait for the lock bit to clear, then
1103 * return the inode locked. Detection and handling of mount points must be
1104 * done by the calling routine.
1105 */
1106 int
1107 ffs_vget(mp, ino, vpp)
1108 struct mount *mp;
1109 ino_t ino;
1110 struct vnode **vpp;
1111 {
1112 struct fs *fs;
1113 struct inode *ip;
1114 struct ufsmount *ump;
1115 struct buf *bp;
1116 struct vnode *vp;
1117 dev_t dev;
1118 int error;
1119 caddr_t cp;
1120
1121 ump = VFSTOUFS(mp);
1122 dev = ump->um_dev;
1123
1124 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
1125 return (0);
1126
1127 /* Allocate a new vnode/inode. */
1128 if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
1129 *vpp = NULL;
1130 return (error);
1131 }
1132
1133 /*
1134 * If someone beat us to it while sleeping in getnewvnode(),
1135 * push back the freshly allocated vnode we don't need, and return.
1136 */
1137
1138 do {
1139 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
1140 ungetnewvnode(vp);
1141 return (0);
1142 }
1143 } while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));
1144
1145 /*
1146 * XXX MFS ends up here, too, to allocate an inode. Should we
1147 * XXX create another pool for MFS inodes?
1148 */
1149
1150 ip = pool_get(&ffs_inode_pool, PR_WAITOK);
1151 memset(ip, 0, sizeof(struct inode));
1152 vp->v_data = ip;
1153 ip->i_vnode = vp;
1154 ip->i_fs = fs = ump->um_fs;
1155 ip->i_dev = dev;
1156 ip->i_number = ino;
1157 LIST_INIT(&ip->i_pcbufhd);
1158 #ifdef QUOTA
1159 {
1160 int i;
1161
1162 for (i = 0; i < MAXQUOTAS; i++)
1163 ip->i_dquot[i] = NODQUOT;
1164 }
1165 #endif
1166
1167 /*
1168 * Put it onto its hash chain and lock it so that other requests for
1169 * this inode will block if they arrive while we are sleeping waiting
1170 * for old data structures to be purged or for the contents of the
1171 * disk portion of this inode to be read.
1172 */
1173
1174 ufs_ihashins(ip);
1175 lockmgr(&ufs_hashlock, LK_RELEASE, 0);
1176
1177 /* Read in the disk contents for the inode, copy into the inode. */
1178 error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1179 (int)fs->fs_bsize, NOCRED, &bp);
1180 if (error) {
1181
1182 /*
1183 * The inode does not contain anything useful, so it would
1184 * be misleading to leave it on its hash chain. With mode
1185 * still zero, it will be unlinked and returned to the free
1186 * list by vput().
1187 */
1188
1189 vput(vp);
1190 brelse(bp);
1191 *vpp = NULL;
1192 return (error);
1193 }
1194 cp = (caddr_t)bp->b_data + (ino_to_fsbo(fs, ino) * DINODE_SIZE);
1195 #ifdef FFS_EI
1196 if (UFS_FSNEEDSWAP(fs))
1197 ffs_dinode_swap((struct dinode *)cp, &ip->i_din.ffs_din);
1198 else
1199 #endif
1200 memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
1201 if (DOINGSOFTDEP(vp))
1202 softdep_load_inodeblock(ip);
1203 else
1204 ip->i_ffs_effnlink = ip->i_ffs_nlink;
1205 brelse(bp);
1206
1207 /*
1208 * Initialize the vnode from the inode, check for aliases.
1209 * Note that the underlying vnode may have changed.
1210 */
1211
1212 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
1213
1214 /*
1215 * Finish inode initialization now that aliasing has been resolved.
1216 */
1217
1218 genfs_node_init(vp, &ffs_genfsops);
1219 ip->i_devvp = ump->um_devvp;
1220 VREF(ip->i_devvp);
1221
1222 /*
1223 * Ensure that uid and gid are correct. This is a temporary
1224 * fix until fsck has been changed to do the update.
1225 */
1226
1227 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
1228 ip->i_ffs_uid = ip->i_din.ffs_din.di_ouid; /* XXX */
1229 ip->i_ffs_gid = ip->i_din.ffs_din.di_ogid; /* XXX */
1230 } /* XXX */
1231 uvm_vnp_setsize(vp, ip->i_ffs_size);
1232 *vpp = vp;
1233 return (0);
1234 }
1235
1236 /*
1237 * File handle to vnode
1238 *
1239 * Have to be really careful about stale file handles:
1240 * - check that the inode number is valid
1241 * - call ffs_vget() to get the locked inode
1242 * - check for an unallocated inode (i_mode == 0)
1243 * - check that the given client host has export rights and return
1244  *   those rights via exflagsp and credanonp
1245 */
1246 int
1247 ffs_fhtovp(mp, fhp, vpp)
1248 struct mount *mp;
1249 struct fid *fhp;
1250 struct vnode **vpp;
1251 {
1252 struct ufid *ufhp;
1253 struct fs *fs;
1254
1255 ufhp = (struct ufid *)fhp;
1256 fs = VFSTOUFS(mp)->um_fs;
1257 if (ufhp->ufid_ino < ROOTINO ||
1258 ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1259 return (ESTALE);
1260 return (ufs_fhtovp(mp, ufhp, vpp));
1261 }
1262
1263 /*
1264 * Vnode pointer to File handle
1265 */
1266 /* ARGSUSED */
1267 int
1268 ffs_vptofh(vp, fhp)
1269 struct vnode *vp;
1270 struct fid *fhp;
1271 {
1272 struct inode *ip;
1273 struct ufid *ufhp;
1274
1275 ip = VTOI(vp);
1276 ufhp = (struct ufid *)fhp;
1277 ufhp->ufid_len = sizeof(struct ufid);
1278 ufhp->ufid_ino = ip->i_number;
1279 ufhp->ufid_gen = ip->i_ffs_gen;
1280 return (0);
1281 }
1282
1283 void
1284 ffs_init()
1285 {
1286 if (ffs_initcount++ > 0)
1287 return;
1288
1289 softdep_initialize();
1290 ufs_init();
1291
1292 pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
1293 &pool_allocator_nointr);
1294 }
1295
1296 void
1297 ffs_reinit()
1298 {
1299 softdep_reinitialize();
1300 ufs_reinit();
1301 }
1302
1303 void
1304 ffs_done()
1305 {
1306 if (--ffs_initcount > 0)
1307 return;
1308
1309 /* XXX softdep cleanup ? */
1310 ufs_done();
1311 pool_destroy(&ffs_inode_pool);
1312 }
1313
1314 int
1315 ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
1316 int *name;
1317 u_int namelen;
1318 void *oldp;
1319 size_t *oldlenp;
1320 void *newp;
1321 size_t newlen;
1322 struct proc *p;
1323 {
1324 extern int doasyncfree;
1325 extern int ffs_log_changeopt;
1326
1327 /* all sysctl names at this level are terminal */
1328 if (namelen != 1)
1329 return (ENOTDIR); /* overloaded */
1330
1331 switch (name[0]) {
1332 case FFS_ASYNCFREE:
1333 return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
1334 case FFS_LOG_CHANGEOPT:
1335 return (sysctl_int(oldp, oldlenp, newp, newlen,
1336 &ffs_log_changeopt));
1337 default:
1338 return (EOPNOTSUPP);
1339 }
1340 /* NOTREACHED */
1341 }
1342
1343 /*
1344 * Write a superblock and associated information back to disk.
1345 */
1346 int
1347 ffs_sbupdate(mp, waitfor)
1348 struct ufsmount *mp;
1349 int waitfor;
1350 {
1351 struct fs *fs = mp->um_fs;
1352 struct buf *bp;
1353 int i, error = 0;
1354 int32_t saved_nrpos = fs->fs_nrpos;
1355 int64_t saved_qbmask = fs->fs_qbmask;
1356 int64_t saved_qfmask = fs->fs_qfmask;
1357 u_int64_t saved_maxfilesize = fs->fs_maxfilesize;
1358 u_int8_t saveflag;
1359
1360 /* Restore compatibility to old file systems. XXX */
1361 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
1362 fs->fs_nrpos = -1; /* XXX */
1363 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
1364 int32_t *lp, tmp; /* XXX */
1365 /* XXX */
1366 lp = (int32_t *)&fs->fs_qbmask; /* XXX nuke qfmask too */
1367 tmp = lp[4]; /* XXX */
1368 for (i = 4; i > 0; i--) /* XXX */
1369 lp[i] = lp[i-1]; /* XXX */
1370 lp[0] = tmp; /* XXX */
1371 } /* XXX */
1372 fs->fs_maxfilesize = mp->um_savedmaxfilesize; /* XXX */
1373
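	/* fs_fshift - fs_fsbtodb == DEV_BSHIFT, so this maps the superblock byte offset to a device block number. */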
1374 bp = getblk(mp->um_devvp, SBOFF >> (fs->fs_fshift - fs->fs_fsbtodb),
1375 (int)fs->fs_sbsize, 0, 0);
1376 saveflag = fs->fs_flags & FS_INTERNAL;
1377 fs->fs_flags &= ~FS_INTERNAL;
1378 memcpy(bp->b_data, fs, fs->fs_sbsize);
1379 #ifdef FFS_EI
1380 if (mp->um_flags & UFS_NEEDSWAP)
1381 ffs_sb_swap(fs, (struct fs*)bp->b_data);
1382 #endif
1383
1384 fs->fs_flags |= saveflag;
1385 fs->fs_nrpos = saved_nrpos; /* XXX */
1386 fs->fs_qbmask = saved_qbmask; /* XXX */
1387 fs->fs_qfmask = saved_qfmask; /* XXX */
1388 fs->fs_maxfilesize = saved_maxfilesize; /* XXX */
1389
1390 if (waitfor == MNT_WAIT)
1391 error = bwrite(bp);
1392 else
1393 bawrite(bp);
1394 return (error);
1395 }
1396
1397 int
1398 ffs_cgupdate(mp, waitfor)
1399 struct ufsmount *mp;
1400 int waitfor;
1401 {
1402 struct fs *fs = mp->um_fs;
1403 struct buf *bp;
1404 int blks;
1405 void *space;
1406 int i, size, error = 0, allerror = 0;
1407
1408 allerror = ffs_sbupdate(mp, waitfor);
1409 blks = howmany(fs->fs_cssize, fs->fs_fsize);
1410 space = fs->fs_csp;
1411 for (i = 0; i < blks; i += fs->fs_frag) {
1412 size = fs->fs_bsize;
1413 if (i + fs->fs_frag > blks)
1414 size = (blks - i) * fs->fs_fsize;
1415 bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1416 size, 0, 0);
1417 #ifdef FFS_EI
1418 if (mp->um_flags & UFS_NEEDSWAP)
1419 ffs_csum_swap((struct csum*)space,
1420 (struct csum*)bp->b_data, size);
1421 else
1422 #endif
1423 memcpy(bp->b_data, space, (u_int)size);
1424 space = (char *)space + size;
1425 if (waitfor == MNT_WAIT)
1426 error = bwrite(bp);
1427 else
1428 bawrite(bp);
1429 }
1430 if (!allerror && error)
1431 allerror = error;
1432 return (allerror);
1433 }
1434