1 /*	$NetBSD: ffs_vfsops.c,v 1.95 2002/03/31 20:53:25 christos Exp $	*/
2
3 /*
4 * Copyright (c) 1989, 1991, 1993, 1994
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
17 * This product includes software developed by the University of
18 * California, Berkeley and its contributors.
19 * 4. Neither the name of the University nor the names of its contributors
20 * may be used to endorse or promote products derived from this software
21 * without specific prior written permission.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
24 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
27 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 * SUCH DAMAGE.
34 *
35 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
36 */
37
38 #include <sys/cdefs.h>
39 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.95 2002/03/31 20:53:25 christos Exp $");
40
41 #if defined(_KERNEL_OPT)
42 #include "opt_ffs.h"
43 #include "opt_quota.h"
44 #include "opt_compat_netbsd.h"
45 #include "opt_softdep.h"
46 #endif
47
48 #include <sys/param.h>
49 #include <sys/systm.h>
50 #include <sys/namei.h>
51 #include <sys/proc.h>
52 #include <sys/kernel.h>
53 #include <sys/vnode.h>
54 #include <sys/socket.h>
55 #include <sys/mount.h>
56 #include <sys/buf.h>
57 #include <sys/device.h>
58 #include <sys/mbuf.h>
59 #include <sys/file.h>
60 #include <sys/disklabel.h>
61 #include <sys/ioctl.h>
62 #include <sys/errno.h>
63 #include <sys/malloc.h>
64 #include <sys/pool.h>
65 #include <sys/lock.h>
66 #include <sys/sysctl.h>
67
68 #include <miscfs/specfs/specdev.h>
69
70 #include <ufs/ufs/quota.h>
71 #include <ufs/ufs/ufsmount.h>
72 #include <ufs/ufs/inode.h>
73 #include <ufs/ufs/dir.h>
74 #include <ufs/ufs/ufs_extern.h>
75 #include <ufs/ufs/ufs_bswap.h>
76
77 #include <ufs/ffs/fs.h>
78 #include <ufs/ffs/ffs_extern.h>
79
80 /* how many times ffs_init() was called */
81 int ffs_initcount = 0;
82
83 extern struct lock ufs_hashlock;
84
85 extern struct vnodeopv_desc ffs_vnodeop_opv_desc;
86 extern struct vnodeopv_desc ffs_specop_opv_desc;
87 extern struct vnodeopv_desc ffs_fifoop_opv_desc;
88
89 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
90 &ffs_vnodeop_opv_desc,
91 &ffs_specop_opv_desc,
92 &ffs_fifoop_opv_desc,
93 NULL,
94 };
95
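/*
 * Operations vector registered for MOUNT_FFS.  FFS supplies its own
 * mount, unmount, statfs, sync, vget, file handle and init/done
 * routines, and shares the generic UFS code for start, root, quotactl
 * and export checking.
 */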
96 struct vfsops ffs_vfsops = {
97 MOUNT_FFS,
98 ffs_mount,
99 ufs_start,
100 ffs_unmount,
101 ufs_root,
102 ufs_quotactl,
103 ffs_statfs,
104 ffs_sync,
105 ffs_vget,
106 ffs_fhtovp,
107 ffs_vptofh,
108 ffs_init,
109 ffs_reinit,
110 ffs_done,
111 ffs_sysctl,
112 ffs_mountroot,
113 ufs_check_export,
114 ffs_vnodeopv_descs,
115 };
116
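/*
 * Hooks used by the generic genfs page I/O code: FFS provides its own
 * routines for sizing and allocating backing store and uses the common
 * genfs_gop_write() to push pages out.
 */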
117 struct genfs_ops ffs_genfsops = {
118 ffs_gop_size,
119 ffs_gop_alloc,
120 genfs_gop_write,
121 };
122
123 struct pool ffs_inode_pool;
124
125 /*
126 * Called by main() when ffs is going to be mounted as root.
127 */
128
129 int
130 ffs_mountroot()
131 {
132 struct fs *fs;
133 struct mount *mp;
134 struct proc *p = curproc; /* XXX */
135 struct ufsmount *ump;
136 int error;
137
138 if (root_device->dv_class != DV_DISK)
139 return (ENODEV);
140
141 /*
142 * Get vnodes for rootdev.
143 */
144 if (bdevvp(rootdev, &rootvp))
145 panic("ffs_mountroot: can't setup bdevvp's");
146
147 if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
148 vrele(rootvp);
149 return (error);
150 }
151 if ((error = ffs_mountfs(rootvp, mp, p)) != 0) {
152 mp->mnt_op->vfs_refcount--;
153 vfs_unbusy(mp);
154 free(mp, M_MOUNT);
155 vrele(rootvp);
156 return (error);
157 }
158 simple_lock(&mountlist_slock);
159 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
160 simple_unlock(&mountlist_slock);
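	/*
	 * Record the mount point name in the superblock, prime the
	 * statfs cache, and set the system clock from the file system
	 * timestamp.
	 */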
161 ump = VFSTOUFS(mp);
162 fs = ump->um_fs;
163 memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
164 (void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
165 (void)ffs_statfs(mp, &mp->mnt_stat, p);
166 vfs_unbusy(mp);
167 inittodr(fs->fs_time);
168 return (0);
169 }
170
171 /*
172 * VFS Operations.
173 *
174 * mount system call
175 */
176 int
177 ffs_mount(mp, path, data, ndp, p)
178 struct mount *mp;
179 const char *path;
180 void *data;
181 struct nameidata *ndp;
182 struct proc *p;
183 {
184 struct vnode *devvp;
185 struct ufs_args args;
186 struct ufsmount *ump = NULL;
187 struct fs *fs;
188 size_t size;
189 int error, flags, update;
190 mode_t accessmode;
191
192 error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args));
193 if (error)
194 return (error);
195
196 #if !defined(SOFTDEP)
197 mp->mnt_flag &= ~MNT_SOFTDEP;
198 #endif
199
200 update = mp->mnt_flag & MNT_UPDATE;
201
202 /* Check arguments */
203 if (update) {
204 /* Use the extant mount */
205 ump = VFSTOUFS(mp);
206 devvp = ump->um_devvp;
207 } else {
208 /* New mounts must have a filename for the device */
209 if (args.fspec == NULL)
210 return EINVAL;
211
212 /* Check for update-only flags */
213 if (mp->mnt_flag &
214 (MNT_WANTRDWR | /*
215 * Upgrading from read-only to
216 * read-write can only occur after
217 * the initial mount
218 */
219 MNT_EXRDONLY |
220 MNT_DEFEXPORTED |
221 MNT_EXPORTANON |
222 MNT_EXKERB | /* Only update mounts are allowed */
223 MNT_EXNORESPORT | /* to affect exporting */
224 MNT_EXPUBLIC |
225 MNT_DELEXPORT))
226 return EINVAL;
227 }
228
229 error = 0;
230
231 if (args.fspec) {
232 /*
233 * Look up the name and verify that it's sane.
234 */
235 NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
236 if ((error = namei(ndp)) != 0)
237 return (error);
238 devvp = ndp->ni_vp;
239
240 if (!update) {
241 /*
242 * Be sure this is a valid block device
243 */
244 if (devvp->v_type != VBLK)
245 error = ENOTBLK;
246 else if (major(devvp->v_rdev) >= nblkdev)
247 error = ENXIO;
248 } else {
249 /*
250 * Be sure we're still naming the same device
251 * used for our initial mount
252 */
253 if (devvp != ump->um_devvp)
254 error = EINVAL;
255 else
256 /*
257 * The initial mount got a reference on this
258 * device, so drop the one obtained via
259 * namei(), above
260 */
261 vrele(devvp);
262 }
263 }
264
265 /*
266 * If mount by non-root, then verify that user has necessary
267 * permissions on the device.
268 */
269 if (error == 0 && p->p_ucred->cr_uid != 0) {
270 accessmode = VREAD;
271 if ((!update && (mp->mnt_flag & MNT_RDONLY) == 0) ||
272 (update && (mp->mnt_flag & MNT_WANTRDWR)))
273 accessmode |= VWRITE;
274 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
275 error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
276 VOP_UNLOCK(devvp, 0);
277 }
278
279 if (error) {
280 vrele(devvp);
281 return (error);
282 }
283
284 if (!update) {
285 error = ffs_mountfs(devvp, mp, p);
286 if (error) {
287 vrele(devvp);
288 return (error);
289 }
290
291 ump = VFSTOUFS(mp);
292 fs = ump->um_fs;
293 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
294 (MNT_SOFTDEP | MNT_ASYNC)) {
295 printf("%s fs uses soft updates, "
296 "ignoring async mode\n",
297 fs->fs_fsmnt);
298 mp->mnt_flag &= ~MNT_ASYNC;
299 }
300 } else {
301 /*
302 * Update the mount
303 */
304 fs = ump->um_fs;
305 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
306 /*
307 * Changing from r/w to r/o
308 */
309 flags = WRITECLOSE;
310 if (mp->mnt_flag & MNT_FORCE)
311 flags |= FORCECLOSE;
312 if (mp->mnt_flag & MNT_SOFTDEP)
313 error = softdep_flushfiles(mp, flags, p);
314 else
315 error = ffs_flushfiles(mp, flags, p);
316 if (fs->fs_pendingblocks != 0 ||
317 fs->fs_pendinginodes != 0) {
318 printf("%s: update error: blocks %d files %d\n",
319 fs->fs_fsmnt, fs->fs_pendingblocks,
320 fs->fs_pendinginodes);
321 fs->fs_pendingblocks = 0;
322 fs->fs_pendinginodes = 0;
323 }
324 if (error == 0 &&
325 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
326 fs->fs_clean & FS_WASCLEAN) {
327 if (mp->mnt_flag & MNT_SOFTDEP)
328 fs->fs_flags &= ~FS_DOSOFTDEP;
329 fs->fs_clean = FS_ISCLEAN;
330 (void) ffs_sbupdate(ump, MNT_WAIT);
331 }
332 if (error)
333 return (error);
334 fs->fs_ronly = 1;
335 fs->fs_fmod = 0;
336 }
337
338 /*
339		 * Flush soft dependencies if they are being disabled via an
340		 * update mount.  This may leave some items to be processed,
341		 * so don't do this yet XXX.
342 */
343 if ((fs->fs_flags & FS_DOSOFTDEP) &&
344 !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
345 #ifdef notyet
346 flags = WRITECLOSE;
347 if (mp->mnt_flag & MNT_FORCE)
348 flags |= FORCECLOSE;
349 error = softdep_flushfiles(mp, flags, p);
350 if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
351 fs->fs_flags &= ~FS_DOSOFTDEP;
352 (void) ffs_sbupdate(ump, MNT_WAIT);
353 #elif defined(SOFTDEP)
354 mp->mnt_flag |= MNT_SOFTDEP;
355 #endif
356 }
357
358 /*
359 * When upgrading to a softdep mount, we must first flush
360 * all vnodes. (not done yet -- see above)
361 */
362 if (!(fs->fs_flags & FS_DOSOFTDEP) &&
363 (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
364 #ifdef notyet
365 flags = WRITECLOSE;
366 if (mp->mnt_flag & MNT_FORCE)
367 flags |= FORCECLOSE;
368 error = ffs_flushfiles(mp, flags, p);
369 #else
370 mp->mnt_flag &= ~MNT_SOFTDEP;
371 #endif
372 }
373
374 if (mp->mnt_flag & MNT_RELOAD) {
375 error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
376 if (error)
377 return (error);
378 }
379
380 if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
381 /*
382 * Changing from read-only to read/write
383 */
384 fs->fs_ronly = 0;
385 fs->fs_clean <<= 1;
386 fs->fs_fmod = 1;
387 if ((fs->fs_flags & FS_DOSOFTDEP)) {
388 error = softdep_mount(devvp, mp, fs,
389 p->p_ucred);
390 if (error)
391 return (error);
392 }
393 }
394 if (args.fspec == 0) {
395 /*
396 * Process export requests.
397 */
398 return (vfs_export(mp, &ump->um_export, &args.export));
399 }
400 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
401 (MNT_SOFTDEP | MNT_ASYNC)) {
402 printf("%s fs uses soft updates, ignoring async mode\n",
403 fs->fs_fsmnt);
404 mp->mnt_flag &= ~MNT_ASYNC;
405 }
406 }
407
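	/*
	 * Record the mount point and device names, propagate the soft
	 * dependency setting into the superblock flags, and if the
	 * superblock was modified either timestamp it (when the file
	 * system was clean) or complain that fsck(8) is needed, then
	 * write the cylinder group summaries back out.
	 */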
408 (void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
409 memset(fs->fs_fsmnt + size, 0, sizeof(fs->fs_fsmnt) - size);
410 memcpy(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN);
411 (void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
412 &size);
413 memset(mp->mnt_stat.f_mntfromname + size, 0, MNAMELEN - size);
414 if (mp->mnt_flag & MNT_SOFTDEP)
415 fs->fs_flags |= FS_DOSOFTDEP;
416 else
417 fs->fs_flags &= ~FS_DOSOFTDEP;
418 if (fs->fs_fmod != 0) { /* XXX */
419 fs->fs_fmod = 0;
420 if (fs->fs_clean & FS_WASCLEAN)
421 fs->fs_time = time.tv_sec;
422 else {
423 printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
424 mp->mnt_stat.f_mntfromname, fs->fs_clean);
425 printf("%s: lost blocks %d files %d\n",
426 mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
427 fs->fs_pendinginodes);
428 }
429 (void) ffs_cgupdate(ump, MNT_WAIT);
430 }
431 return (0);
432 }
433
434 /*
435 * Reload all incore data for a filesystem (used after running fsck on
436 * the root filesystem and finding things to fix). The filesystem must
437 * be mounted read-only.
438 *
439 * Things to do to update the mount:
440 * 1) invalidate all cached meta-data.
441 * 2) re-read superblock from disk.
442 * 3) re-read summary information from disk.
443 * 4) invalidate all inactive vnodes.
444 * 5) invalidate all cached file data.
445 * 6) re-read inode data for all active vnodes.
446 */
447 int
448 ffs_reload(mountp, cred, p)
449 struct mount *mountp;
450 struct ucred *cred;
451 struct proc *p;
452 {
453 struct vnode *vp, *nvp, *devvp;
454 struct inode *ip;
455 void *space;
456 struct buf *bp;
457 struct fs *fs, *newfs;
458 struct partinfo dpart;
459 int i, blks, size, error;
460 int32_t *lp;
461 caddr_t cp;
462
463 if ((mountp->mnt_flag & MNT_RDONLY) == 0)
464 return (EINVAL);
465 /*
466 * Step 1: invalidate all cached meta-data.
467 */
468 devvp = VFSTOUFS(mountp)->um_devvp;
469 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
470 error = vinvalbuf(devvp, 0, cred, p, 0, 0);
471 VOP_UNLOCK(devvp, 0);
472 if (error)
473 panic("ffs_reload: dirty1");
474 /*
475 * Step 2: re-read superblock from disk.
476 */
477 if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
478 size = DEV_BSIZE;
479 else
480 size = dpart.disklab->d_secsize;
481 error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, NOCRED, &bp);
482 if (error) {
483 brelse(bp);
484 return (error);
485 }
486 fs = VFSTOUFS(mountp)->um_fs;
487 newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
488 memcpy(newfs, bp->b_data, fs->fs_sbsize);
489 #ifdef FFS_EI
490 if (VFSTOUFS(mountp)->um_flags & UFS_NEEDSWAP) {
491 ffs_sb_swap((struct fs*)bp->b_data, newfs);
492 fs->fs_flags |= FS_SWAPPED;
493 }
494 #endif
495 if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
496 newfs->fs_bsize < sizeof(struct fs)) {
497 brelse(bp);
498 free(newfs, M_UFSMNT);
499 return (EIO); /* XXX needs translation */
500 }
501 /*
502 * Copy pointer fields back into superblock before copying in XXX
503 * new superblock. These should really be in the ufsmount. XXX
504 * Note that important parameters (eg fs_ncg) are unchanged.
505 */
506 newfs->fs_csp = fs->fs_csp;
507 newfs->fs_maxcluster = fs->fs_maxcluster;
508 newfs->fs_contigdirs = fs->fs_contigdirs;
509 newfs->fs_ronly = fs->fs_ronly;
510 memcpy(fs, newfs, (u_int)fs->fs_sbsize);
511 if (fs->fs_sbsize < SBSIZE)
512 bp->b_flags |= B_INVAL;
513 brelse(bp);
514 free(newfs, M_UFSMNT);
515 mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
516 ffs_oldfscompat(fs);
517 /* An old fsck may have zeroed these fields, so recheck them. */
518 if (fs->fs_avgfilesize <= 0)
519 fs->fs_avgfilesize = AVFILESIZ;
520 if (fs->fs_avgfpdir <= 0)
521 fs->fs_avgfpdir = AFPDIR;
522 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
523 fs->fs_pendingblocks = 0;
524 fs->fs_pendinginodes = 0;
525 }
526
527 ffs_statfs(mountp, &mountp->mnt_stat, p);
528 /*
529 * Step 3: re-read summary information from disk.
530 */
531 blks = howmany(fs->fs_cssize, fs->fs_fsize);
532 space = fs->fs_csp;
533 for (i = 0; i < blks; i += fs->fs_frag) {
534 size = fs->fs_bsize;
535 if (i + fs->fs_frag > blks)
536 size = (blks - i) * fs->fs_fsize;
537 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
538 NOCRED, &bp);
539 if (error) {
540 brelse(bp);
541 return (error);
542 }
543 #ifdef FFS_EI
544 if (UFS_FSNEEDSWAP(fs))
545 ffs_csum_swap((struct csum *)bp->b_data,
546 (struct csum *)space, size);
547 else
548 #endif
549 memcpy(space, bp->b_data, (size_t)size);
550 space = (char *)space + size;
551 brelse(bp);
552 }
553 if ((fs->fs_flags & FS_DOSOFTDEP))
554 softdep_mount(devvp, mountp, fs, cred);
555 /*
556 * We no longer know anything about clusters per cylinder group.
557 */
558 if (fs->fs_contigsumsize > 0) {
559 lp = fs->fs_maxcluster;
560 for (i = 0; i < fs->fs_ncg; i++)
561 *lp++ = fs->fs_contigsumsize;
562 }
563
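	/*
	 * Steps 4-6 are done in one pass over the mount's vnode list.
	 * The list lock is dropped while each vnode is worked on, so
	 * restart from the beginning whenever the list may have changed
	 * underneath us.
	 */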
564 loop:
565 simple_lock(&mntvnode_slock);
566 for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
567 if (vp->v_mount != mountp) {
568 simple_unlock(&mntvnode_slock);
569 goto loop;
570 }
571 nvp = vp->v_mntvnodes.le_next;
572 /*
573 * Step 4: invalidate all inactive vnodes.
574 */
575 if (vrecycle(vp, &mntvnode_slock, p))
576 goto loop;
577 /*
578 * Step 5: invalidate all cached file data.
579 */
580 simple_lock(&vp->v_interlock);
581 simple_unlock(&mntvnode_slock);
582 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
583 goto loop;
584 if (vinvalbuf(vp, 0, cred, p, 0, 0))
585 panic("ffs_reload: dirty2");
586 /*
587 * Step 6: re-read inode data for all active vnodes.
588 */
589 ip = VTOI(vp);
590 error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
591 (int)fs->fs_bsize, NOCRED, &bp);
592 if (error) {
593 brelse(bp);
594 vput(vp);
595 return (error);
596 }
597 cp = (caddr_t)bp->b_data +
598 (ino_to_fsbo(fs, ip->i_number) * DINODE_SIZE);
599 #ifdef FFS_EI
600 if (UFS_FSNEEDSWAP(fs))
601 ffs_dinode_swap((struct dinode *)cp,
602 &ip->i_din.ffs_din);
603 else
604 #endif
605 memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
606 ip->i_ffs_effnlink = ip->i_ffs_nlink;
607 brelse(bp);
608 vput(vp);
609 simple_lock(&mntvnode_slock);
610 }
611 simple_unlock(&mntvnode_slock);
612 return (0);
613 }
614
615 /*
616 * Common code for mount and mountroot
617 */
618 int
619 ffs_mountfs(devvp, mp, p)
620 struct vnode *devvp;
621 struct mount *mp;
622 struct proc *p;
623 {
624 struct ufsmount *ump;
625 struct buf *bp;
626 struct fs *fs;
627 dev_t dev;
628 struct partinfo dpart;
629 void *space;
630 int blks;
631 int error, i, size, ronly;
632 #ifdef FFS_EI
633 int needswap;
634 #endif
635 int32_t *lp;
636 struct ucred *cred;
637 u_int64_t maxfilesize; /* XXX */
638 u_int32_t sbsize;
639
640 dev = devvp->v_rdev;
641 cred = p ? p->p_ucred : NOCRED;
642 /*
643 * Disallow multiple mounts of the same device.
644 * Disallow mounting of a device that is currently in use
645 * (except for root, which might share swap device for miniroot).
646 * Flush out any old buffers remaining from a previous use.
647 */
648 if ((error = vfs_mountedon(devvp)) != 0)
649 return (error);
650 if (vcount(devvp) > 1 && devvp != rootvp)
651 return (EBUSY);
652 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
653 error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
654 VOP_UNLOCK(devvp, 0);
655 if (error)
656 return (error);
657
658 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
659 error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
660 if (error)
661 return (error);
662 if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
663 size = DEV_BSIZE;
664 else
665 size = dpart.disklab->d_secsize;
666
667 bp = NULL;
668 ump = NULL;
669 error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, cred, &bp);
670 if (error)
671 goto out;
672
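	/*
	 * Sanity check the superblock before trusting any of its fields:
	 * the magic number must match (in either byte order when FFS_EI
	 * is configured), and the superblock size, block size and
	 * cylinder group summary geometry must be plausible.
	 */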
673 fs = (struct fs*)bp->b_data;
674 if (fs->fs_magic == FS_MAGIC) {
675 sbsize = fs->fs_sbsize;
676 #ifdef FFS_EI
677 needswap = 0;
678 } else if (fs->fs_magic == bswap32(FS_MAGIC)) {
679 sbsize = bswap32(fs->fs_sbsize);
680 needswap = 1;
681 #endif
682 } else {
683 error = EINVAL;
684 goto out;
685 }
686 if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs)) {
687 error = EINVAL;
688 goto out;
689 }
690
691 fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
692 memcpy(fs, bp->b_data, sbsize);
693 #ifdef FFS_EI
694 if (needswap) {
695 ffs_sb_swap((struct fs*)bp->b_data, fs);
696 fs->fs_flags |= FS_SWAPPED;
697 }
698 #endif
699 ffs_oldfscompat(fs);
700
701 if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < sizeof(struct fs)) {
702 error = EINVAL;
703 goto out;
704 }
705 /* make sure cylinder group summary area is a reasonable size. */
706 if (fs->fs_cgsize == 0 || fs->fs_cpg == 0 ||
707 fs->fs_ncg > fs->fs_ncyl / fs->fs_cpg + 1 ||
708 fs->fs_cssize >
709 fragroundup(fs, fs->fs_ncg * sizeof(struct csum))) {
710 error = EINVAL; /* XXX needs translation */
711 goto out2;
712 }
713 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
714 fs->fs_pendingblocks = 0;
715 fs->fs_pendinginodes = 0;
716 }
717 /* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
718 if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
719 error = EROFS; /* XXX what should be returned? */
720 goto out2;
721 }
722
723 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
724 memset((caddr_t)ump, 0, sizeof *ump);
725 ump->um_fs = fs;
726 if (fs->fs_sbsize < SBSIZE)
727 bp->b_flags |= B_INVAL;
728 brelse(bp);
729 bp = NULL;
730
731 /*
732 * verify that we can access the last block in the fs.
733 */
734
735 error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize, cred,
736 &bp);
737 if (bp->b_bcount != fs->fs_fsize)
738 error = EINVAL;
739 bp->b_flags |= B_INVAL;
740 if (error)
741 goto out;
742 brelse(bp);
743 bp = NULL;
744
745 fs->fs_ronly = ronly;
746 if (ronly == 0) {
747 fs->fs_clean <<= 1;
748 fs->fs_fmod = 1;
749 }
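	/*
	 * Read the cylinder group summary area.  A single allocation
	 * holds the summary information plus the per-cylinder-group
	 * cluster summary and directory usage arrays, reached through
	 * fs_csp, fs_maxcluster and fs_contigdirs.
	 */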
750 size = fs->fs_cssize;
751 blks = howmany(size, fs->fs_fsize);
752 if (fs->fs_contigsumsize > 0)
753 size += fs->fs_ncg * sizeof(int32_t);
754 size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
755 space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
756 fs->fs_csp = space;
757 for (i = 0; i < blks; i += fs->fs_frag) {
758 size = fs->fs_bsize;
759 if (i + fs->fs_frag > blks)
760 size = (blks - i) * fs->fs_fsize;
761 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
762 cred, &bp);
763 if (error) {
764 free(fs->fs_csp, M_UFSMNT);
765 goto out2;
766 }
767 #ifdef FFS_EI
768 if (needswap)
769 ffs_csum_swap((struct csum *)bp->b_data,
770 (struct csum *)space, size);
771 else
772 #endif
773 memcpy(space, bp->b_data, (u_int)size);
774
775 space = (char *)space + size;
776 brelse(bp);
777 bp = NULL;
778 }
779 if (fs->fs_contigsumsize > 0) {
780 fs->fs_maxcluster = lp = space;
781 for (i = 0; i < fs->fs_ncg; i++)
782 *lp++ = fs->fs_contigsumsize;
783 space = lp;
784 }
785 size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
786 fs->fs_contigdirs = space;
787 space = (char *)space + size;
788 memset(fs->fs_contigdirs, 0, size);
789 /* Compatibility for old filesystems - XXX */
790 if (fs->fs_avgfilesize <= 0)
791 fs->fs_avgfilesize = AVFILESIZ;
792 if (fs->fs_avgfpdir <= 0)
793 fs->fs_avgfpdir = AFPDIR;
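	/*
	 * Hook the ufsmount into the struct mount and fill in the
	 * geometry-derived constants used by the block mapping code.
	 * fs_maxfilesize is clamped to what 32-bit block numbers can
	 * address; the value read from disk is remembered in
	 * um_savedmaxfilesize so ffs_sbupdate() can write the original
	 * back out.
	 */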
794 mp->mnt_data = (qaddr_t)ump;
795 mp->mnt_stat.f_fsid.val[0] = (long)dev;
796 mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_FFS);
797 mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
798 mp->mnt_fs_bshift = fs->fs_bshift;
799 mp->mnt_dev_bshift = DEV_BSHIFT; /* XXX */
800 mp->mnt_flag |= MNT_LOCAL;
801 #ifdef FFS_EI
802 if (needswap)
803 ump->um_flags |= UFS_NEEDSWAP;
804 #endif
805 ump->um_mountp = mp;
806 ump->um_dev = dev;
807 ump->um_devvp = devvp;
808 ump->um_nindir = fs->fs_nindir;
809 ump->um_lognindir = ffs(fs->fs_nindir) - 1;
810 ump->um_bptrtodb = fs->fs_fsbtodb;
811 ump->um_seqinc = fs->fs_frag;
812 for (i = 0; i < MAXQUOTAS; i++)
813 ump->um_quotas[i] = NULLVP;
814 devvp->v_specmountpoint = mp;
815 ump->um_savedmaxfilesize = fs->fs_maxfilesize; /* XXX */
816 maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1; /* XXX */
817 if (fs->fs_maxfilesize > maxfilesize) /* XXX */
818 fs->fs_maxfilesize = maxfilesize; /* XXX */
819 if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
820 error = softdep_mount(devvp, mp, fs, cred);
821 if (error) {
822 free(fs->fs_csp, M_UFSMNT);
823 goto out;
824 }
825 }
826 return (0);
827 out2:
828 free(fs, M_UFSMNT);
829 out:
830 devvp->v_specmountpoint = NULL;
831 if (bp)
832 brelse(bp);
833 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
834 (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
835 VOP_UNLOCK(devvp, 0);
836 if (ump) {
837 free(ump, M_UFSMNT);
838 mp->mnt_data = (qaddr_t)0;
839 }
840 return (error);
841 }
842
843 /*
844 * Sanity checks for old file systems.
845 *
846 * XXX - goes away some day.
847 */
848 int
849 ffs_oldfscompat(fs)
850 struct fs *fs;
851 {
852 int i;
853
854 fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect); /* XXX */
855 fs->fs_interleave = max(fs->fs_interleave, 1); /* XXX */
856 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
857 fs->fs_nrpos = 8; /* XXX */
858 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
859 u_int64_t sizepb = fs->fs_bsize; /* XXX */
860 /* XXX */
861 fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1; /* XXX */
862 for (i = 0; i < NIADDR; i++) { /* XXX */
863 sizepb *= NINDIR(fs); /* XXX */
864 fs->fs_maxfilesize += sizepb; /* XXX */
865 } /* XXX */
866 fs->fs_qbmask = ~fs->fs_bmask; /* XXX */
867 fs->fs_qfmask = ~fs->fs_fmask; /* XXX */
868 } /* XXX */
869 return (0);
870 }
871
872 /*
873 * unmount system call
874 */
875 int
876 ffs_unmount(mp, mntflags, p)
877 struct mount *mp;
878 int mntflags;
879 struct proc *p;
880 {
881 struct ufsmount *ump;
882 struct fs *fs;
883 int error, flags, penderr;
884
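	/*
	 * Flush every file on the mount (through the soft dependency
	 * code when it is active), then try to mark the file system
	 * clean on disk before closing the underlying device.
	 */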
885 penderr = 0;
886 flags = 0;
887 if (mntflags & MNT_FORCE)
888 flags |= FORCECLOSE;
889 if (mp->mnt_flag & MNT_SOFTDEP) {
890 if ((error = softdep_flushfiles(mp, flags, p)) != 0)
891 return (error);
892 } else {
893 if ((error = ffs_flushfiles(mp, flags, p)) != 0)
894 return (error);
895 }
896 ump = VFSTOUFS(mp);
897 fs = ump->um_fs;
898 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
899 printf("%s: unmount pending error: blocks %d files %d\n",
900 fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
901 fs->fs_pendingblocks = 0;
902 fs->fs_pendinginodes = 0;
903 penderr = 1;
904 }
905 if (fs->fs_ronly == 0 &&
906 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
907 fs->fs_clean & FS_WASCLEAN) {
908 /*
909 * XXXX don't mark fs clean in the case of softdep
910 * pending block errors, until they are fixed.
911 */
912 if (penderr == 0) {
913 if (mp->mnt_flag & MNT_SOFTDEP)
914 fs->fs_flags &= ~FS_DOSOFTDEP;
915 fs->fs_clean = FS_ISCLEAN;
916 }
917 (void) ffs_sbupdate(ump, MNT_WAIT);
918 }
919 if (ump->um_devvp->v_type != VBAD)
920 ump->um_devvp->v_specmountpoint = NULL;
921 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
922 error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
923 NOCRED, p);
924 vput(ump->um_devvp);
925 free(fs->fs_csp, M_UFSMNT);
926 free(fs, M_UFSMNT);
927 free(ump, M_UFSMNT);
928 mp->mnt_data = (qaddr_t)0;
929 mp->mnt_flag &= ~MNT_LOCAL;
930 return (error);
931 }
932
933 /*
934 * Flush out all the files in a filesystem.
935 */
936 int
937 ffs_flushfiles(mp, flags, p)
938 struct mount *mp;
939 int flags;
940 struct proc *p;
941 {
942 extern int doforce;
943 struct ufsmount *ump;
944 int error;
945
946 if (!doforce)
947 flags &= ~FORCECLOSE;
948 ump = VFSTOUFS(mp);
949 #ifdef QUOTA
950 if (mp->mnt_flag & MNT_QUOTA) {
951 int i;
952 if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
953 return (error);
954 for (i = 0; i < MAXQUOTAS; i++) {
955 if (ump->um_quotas[i] == NULLVP)
956 continue;
957 quotaoff(p, mp, i);
958 }
959 /*
960 * Here we fall through to vflush again to ensure
961 * that we have gotten rid of all the system vnodes.
962 */
963 }
964 #endif
965 /*
966 * Flush all the files.
967 */
968 error = vflush(mp, NULLVP, flags);
969 if (error)
970 return (error);
971 /*
972 * Flush filesystem metadata.
973 */
974 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
975 error = VOP_FSYNC(ump->um_devvp, p->p_ucred, FSYNC_WAIT, 0, 0, p);
976 VOP_UNLOCK(ump->um_devvp, 0);
977 return (error);
978 }
979
980 /*
981 * Get file system statistics.
982 */
983 int
984 ffs_statfs(mp, sbp, p)
985 struct mount *mp;
986 struct statfs *sbp;
987 struct proc *p;
988 {
989 struct ufsmount *ump;
990 struct fs *fs;
991
992 ump = VFSTOUFS(mp);
993 fs = ump->um_fs;
994 if (fs->fs_magic != FS_MAGIC)
995 panic("ffs_statfs");
996 #ifdef COMPAT_09
997 sbp->f_type = 1;
998 #else
999 sbp->f_type = 0;
1000 #endif
1001 sbp->f_bsize = fs->fs_fsize;
1002 sbp->f_iosize = fs->fs_bsize;
1003 sbp->f_blocks = fs->fs_dsize;
1004 sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
1005 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
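	/*
	 * f_bavail is the space available to unprivileged users: the
	 * minfree percentage is held back from the data area before the
	 * space already in use is subtracted.
	 */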
1006 sbp->f_bavail = (long) (((u_int64_t) fs->fs_dsize * (u_int64_t)
1007 (100 - fs->fs_minfree) / (u_int64_t) 100) -
1008 (u_int64_t) (fs->fs_dsize - sbp->f_bfree));
1009 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
1010 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1011 if (sbp != &mp->mnt_stat) {
1012 memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
1013 memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
1014 }
1015 strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
1016 return (0);
1017 }
1018
1019 /*
1020 * Go through the disk queues to initiate sandbagged IO;
1021 * go through the inodes to write those that have been modified;
1022 * initiate the writing of the super block if it has been modified.
1023 *
1024 * Note: we are always called with the filesystem marked `MPBUSY'.
1025 */
1026 int
1027 ffs_sync(mp, waitfor, cred, p)
1028 struct mount *mp;
1029 int waitfor;
1030 struct ucred *cred;
1031 struct proc *p;
1032 {
1033 struct vnode *vp, *nvp;
1034 struct inode *ip;
1035 struct ufsmount *ump = VFSTOUFS(mp);
1036 struct fs *fs;
1037 int error, allerror = 0;
1038
1039 fs = ump->um_fs;
1040 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
1041 printf("fs = %s\n", fs->fs_fsmnt);
1042 panic("update: rofs mod");
1043 }
1044 /*
1045 * Write back each (modified) inode.
1046 */
1047 simple_lock(&mntvnode_slock);
1048 loop:
1049 for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
1050 /*
1051 * If the vnode that we are about to sync is no longer
1052 * associated with this mount point, start over.
1053 */
1054 if (vp->v_mount != mp)
1055 goto loop;
1056 simple_lock(&vp->v_interlock);
1057 nvp = LIST_NEXT(vp, v_mntvnodes);
1058 ip = VTOI(vp);
1059 if (vp->v_type == VNON ||
1060 ((ip->i_flag &
1061 (IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFIED | IN_ACCESSED)) == 0 &&
1062 LIST_EMPTY(&vp->v_dirtyblkhd) &&
1063 vp->v_uobj.uo_npages == 0))
1064 {
1065 simple_unlock(&vp->v_interlock);
1066 continue;
1067 }
1068 simple_unlock(&mntvnode_slock);
1069 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
1070 if (error) {
1071 simple_lock(&mntvnode_slock);
1072 if (error == ENOENT)
1073 goto loop;
1074 continue;
1075 }
1076 if ((error = VOP_FSYNC(vp, cred,
1077 waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p)) != 0)
1078 allerror = error;
1079 vput(vp);
1080 simple_lock(&mntvnode_slock);
1081 }
1082 simple_unlock(&mntvnode_slock);
1083 /*
1084 * Force stale file system control information to be flushed.
1085 */
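	/*
	 * With soft updates in use the device fsync is issued without
	 * waiting; the dependency code orders the remaining metadata
	 * writes itself.
	 */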
1086 if (waitfor != MNT_LAZY) {
1087 if (ump->um_mountp->mnt_flag & MNT_SOFTDEP)
1088 waitfor = MNT_NOWAIT;
1089 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1090 if ((error = VOP_FSYNC(ump->um_devvp, cred,
1091 waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p)) != 0)
1092 allerror = error;
1093 VOP_UNLOCK(ump->um_devvp, 0);
1094 }
1095 #ifdef QUOTA
1096 qsync(mp);
1097 #endif
1098 /*
1099 * Write back modified superblock.
1100 */
1101 if (fs->fs_fmod != 0) {
1102 fs->fs_fmod = 0;
1103 fs->fs_time = time.tv_sec;
1104 if ((error = ffs_cgupdate(ump, waitfor)))
1105 allerror = error;
1106 }
1107 return (allerror);
1108 }
1109
1110 /*
1111 * Look up a FFS dinode number to find its incore vnode, otherwise read it
1112 * in from disk. If it is in core, wait for the lock bit to clear, then
1113 * return the inode locked. Detection and handling of mount points must be
1114 * done by the calling routine.
1115 */
1116 int
1117 ffs_vget(mp, ino, vpp)
1118 struct mount *mp;
1119 ino_t ino;
1120 struct vnode **vpp;
1121 {
1122 struct fs *fs;
1123 struct inode *ip;
1124 struct ufsmount *ump;
1125 struct buf *bp;
1126 struct vnode *vp;
1127 dev_t dev;
1128 int error;
1129 caddr_t cp;
1130
1131 ump = VFSTOUFS(mp);
1132 dev = ump->um_dev;
1133
1134 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
1135 return (0);
1136
1137 /* Allocate a new vnode/inode. */
1138 if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
1139 *vpp = NULL;
1140 return (error);
1141 }
1142
1143 /*
1144 * If someone beat us to it while sleeping in getnewvnode(),
1145 * push back the freshly allocated vnode we don't need, and return.
1146 */
1147
1148 do {
1149 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
1150 ungetnewvnode(vp);
1151 return (0);
1152 }
1153 } while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));
1154
1155 /*
1156 * XXX MFS ends up here, too, to allocate an inode. Should we
1157 * XXX create another pool for MFS inodes?
1158 */
1159
1160 ip = pool_get(&ffs_inode_pool, PR_WAITOK);
1161 memset(ip, 0, sizeof(struct inode));
1162 vp->v_data = ip;
1163 ip->i_vnode = vp;
1164 ip->i_fs = fs = ump->um_fs;
1165 ip->i_dev = dev;
1166 ip->i_number = ino;
1167 LIST_INIT(&ip->i_pcbufhd);
1168 #ifdef QUOTA
1169 {
1170 int i;
1171
1172 for (i = 0; i < MAXQUOTAS; i++)
1173 ip->i_dquot[i] = NODQUOT;
1174 }
1175 #endif
1176
1177 /*
1178 * Put it onto its hash chain and lock it so that other requests for
1179 * this inode will block if they arrive while we are sleeping waiting
1180 * for old data structures to be purged or for the contents of the
1181 * disk portion of this inode to be read.
1182 */
1183
1184 ufs_ihashins(ip);
1185 lockmgr(&ufs_hashlock, LK_RELEASE, 0);
1186
1187 /* Read in the disk contents for the inode, copy into the inode. */
1188 error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1189 (int)fs->fs_bsize, NOCRED, &bp);
1190 if (error) {
1191
1192 /*
1193 * The inode does not contain anything useful, so it would
1194 * be misleading to leave it on its hash chain. With mode
1195 * still zero, it will be unlinked and returned to the free
1196 * list by vput().
1197 */
1198
1199 vput(vp);
1200 brelse(bp);
1201 *vpp = NULL;
1202 return (error);
1203 }
1204 cp = (caddr_t)bp->b_data + (ino_to_fsbo(fs, ino) * DINODE_SIZE);
1205 #ifdef FFS_EI
1206 if (UFS_FSNEEDSWAP(fs))
1207 ffs_dinode_swap((struct dinode *)cp, &ip->i_din.ffs_din);
1208 else
1209 #endif
1210 memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
1211 if (DOINGSOFTDEP(vp))
1212 softdep_load_inodeblock(ip);
1213 else
1214 ip->i_ffs_effnlink = ip->i_ffs_nlink;
1215 brelse(bp);
1216
1217 /*
1218 * Initialize the vnode from the inode, check for aliases.
1219 * Note that the underlying vnode may have changed.
1220 */
1221
1222 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
1223
1224 /*
1225 * Finish inode initialization now that aliasing has been resolved.
1226 */
1227
1228 genfs_node_init(vp, &ffs_genfsops);
1229 ip->i_devvp = ump->um_devvp;
1230 VREF(ip->i_devvp);
1231
1232 /*
1233 * Ensure that uid and gid are correct. This is a temporary
1234 * fix until fsck has been changed to do the update.
1235 */
1236
1237 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
1238 ip->i_ffs_uid = ip->i_din.ffs_din.di_ouid; /* XXX */
1239 ip->i_ffs_gid = ip->i_din.ffs_din.di_ogid; /* XXX */
1240 } /* XXX */
1241 uvm_vnp_setsize(vp, ip->i_ffs_size);
1242 *vpp = vp;
1243 return (0);
1244 }
1245
1246 /*
1247 * File handle to vnode
1248 *
1249 * Have to be really careful about stale file handles:
1250 * - check that the inode number is valid
1251 * - call ffs_vget() to get the locked inode
1252 * - check for an unallocated inode (i_mode == 0)
1253 * - check that the given client host has export rights and return
1254  *	  those rights via exflagsp and credanonp
1255 */
1256 int
1257 ffs_fhtovp(mp, fhp, vpp)
1258 struct mount *mp;
1259 struct fid *fhp;
1260 struct vnode **vpp;
1261 {
1262 struct ufid *ufhp;
1263 struct fs *fs;
1264
1265 ufhp = (struct ufid *)fhp;
1266 fs = VFSTOUFS(mp)->um_fs;
1267 if (ufhp->ufid_ino < ROOTINO ||
1268 ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1269 return (ESTALE);
1270 return (ufs_fhtovp(mp, ufhp, vpp));
1271 }
1272
1273 /*
1274 * Vnode pointer to File handle
1275 */
1276 /* ARGSUSED */
1277 int
1278 ffs_vptofh(vp, fhp)
1279 struct vnode *vp;
1280 struct fid *fhp;
1281 {
1282 struct inode *ip;
1283 struct ufid *ufhp;
1284
1285 ip = VTOI(vp);
1286 ufhp = (struct ufid *)fhp;
1287 ufhp->ufid_len = sizeof(struct ufid);
1288 ufhp->ufid_ino = ip->i_number;
1289 ufhp->ufid_gen = ip->i_ffs_gen;
1290 return (0);
1291 }
1292
1293 void
1294 ffs_init()
1295 {
1296 if (ffs_initcount++ > 0)
1297 return;
1298
1299 softdep_initialize();
1300 ufs_init();
1301
1302 pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
1303 &pool_allocator_nointr);
1304 }
1305
1306 void
1307 ffs_reinit()
1308 {
1309 softdep_reinitialize();
1310 ufs_reinit();
1311 }
1312
1313 void
1314 ffs_done()
1315 {
1316 if (--ffs_initcount > 0)
1317 return;
1318
1319 /* XXX softdep cleanup ? */
1320 ufs_done();
1321 pool_destroy(&ffs_inode_pool);
1322 }
1323
1324 int
1325 ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
1326 int *name;
1327 u_int namelen;
1328 void *oldp;
1329 size_t *oldlenp;
1330 void *newp;
1331 size_t newlen;
1332 struct proc *p;
1333 {
1334 extern int doasyncfree;
1335 extern int ffs_log_changeopt;
1336
1337 /* all sysctl names at this level are terminal */
1338 if (namelen != 1)
1339 return (ENOTDIR); /* overloaded */
1340
1341 switch (name[0]) {
1342 case FFS_ASYNCFREE:
1343 return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
1344 case FFS_LOG_CHANGEOPT:
1345 return (sysctl_int(oldp, oldlenp, newp, newlen,
1346 &ffs_log_changeopt));
1347 default:
1348 return (EOPNOTSUPP);
1349 }
1350 /* NOTREACHED */
1351 }
1352
1353 /*
1354 * Write a superblock and associated information back to disk.
1355 */
1356 int
1357 ffs_sbupdate(mp, waitfor)
1358 struct ufsmount *mp;
1359 int waitfor;
1360 {
1361 struct fs *fs = mp->um_fs;
1362 struct buf *bp;
1363 int i, error = 0;
1364 int32_t saved_nrpos = fs->fs_nrpos;
1365 int64_t saved_qbmask = fs->fs_qbmask;
1366 int64_t saved_qfmask = fs->fs_qfmask;
1367 u_int64_t saved_maxfilesize = fs->fs_maxfilesize;
1368 u_int8_t saveflag;
1369
1370 /* Restore compatibility to old file systems. XXX */
1371 if (fs->fs_postblformat == FS_42POSTBLFMT) /* XXX */
1372 fs->fs_nrpos = -1; /* XXX */
1373 if (fs->fs_inodefmt < FS_44INODEFMT) { /* XXX */
1374 int32_t *lp, tmp; /* XXX */
1375 /* XXX */
1376 lp = (int32_t *)&fs->fs_qbmask; /* XXX nuke qfmask too */
1377 tmp = lp[4]; /* XXX */
1378 for (i = 4; i > 0; i--) /* XXX */
1379 lp[i] = lp[i-1]; /* XXX */
1380 lp[0] = tmp; /* XXX */
1381 } /* XXX */
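	/*
	 * The rotation above puts the fields starting at fs_qbmask back
	 * into their pre-4.4BSD positions so the image written to disk
	 * matches the old superblock layout; the in-core values are
	 * restored from the saved copies after the copy into the buffer
	 * below.
	 */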
1382 fs->fs_maxfilesize = mp->um_savedmaxfilesize; /* XXX */
1383
1384 bp = getblk(mp->um_devvp, SBOFF >> (fs->fs_fshift - fs->fs_fsbtodb),
1385 (int)fs->fs_sbsize, 0, 0);
1386 saveflag = fs->fs_flags & FS_INTERNAL;
1387 fs->fs_flags &= ~FS_INTERNAL;
1388 memcpy(bp->b_data, fs, fs->fs_sbsize);
1389 #ifdef FFS_EI
1390 if (mp->um_flags & UFS_NEEDSWAP)
1391 ffs_sb_swap(fs, (struct fs*)bp->b_data);
1392 #endif
1393
1394 fs->fs_flags |= saveflag;
1395 fs->fs_nrpos = saved_nrpos; /* XXX */
1396 fs->fs_qbmask = saved_qbmask; /* XXX */
1397 fs->fs_qfmask = saved_qfmask; /* XXX */
1398 fs->fs_maxfilesize = saved_maxfilesize; /* XXX */
1399
1400 if (waitfor == MNT_WAIT)
1401 error = bwrite(bp);
1402 else
1403 bawrite(bp);
1404 return (error);
1405 }
1406
1407 int
1408 ffs_cgupdate(mp, waitfor)
1409 struct ufsmount *mp;
1410 int waitfor;
1411 {
1412 struct fs *fs = mp->um_fs;
1413 struct buf *bp;
1414 int blks;
1415 void *space;
1416 int i, size, error = 0, allerror = 0;
1417
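	/*
	 * Write the superblock first, then copy each block of the
	 * in-core cylinder group summary back to disk, byte swapping on
	 * the way out if the file system is in the opposite byte order.
	 */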
1418 allerror = ffs_sbupdate(mp, waitfor);
1419 blks = howmany(fs->fs_cssize, fs->fs_fsize);
1420 space = fs->fs_csp;
1421 for (i = 0; i < blks; i += fs->fs_frag) {
1422 size = fs->fs_bsize;
1423 if (i + fs->fs_frag > blks)
1424 size = (blks - i) * fs->fs_fsize;
1425 bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1426 size, 0, 0);
1427 #ifdef FFS_EI
1428 if (mp->um_flags & UFS_NEEDSWAP)
1429 ffs_csum_swap((struct csum*)space,
1430 (struct csum*)bp->b_data, size);
1431 else
1432 #endif
1433 memcpy(bp->b_data, space, (u_int)size);
1434 space = (char *)space + size;
1435 if (waitfor == MNT_WAIT)
1436 error = bwrite(bp);
1437 else
1438 bawrite(bp);
1439 }
1440 if (!allerror && error)
1441 allerror = error;
1442 return (allerror);
1443 }
1444