ffs_vfsops.c revision 1.196.6.22 1 /* $NetBSD: ffs_vfsops.c,v 1.196.6.22 2007/11/11 14:48:06 hannken Exp $ */
2
3 /*
4 * Copyright (c) 1989, 1991, 1993, 1994
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
32 */
33
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.196.6.22 2007/11/11 14:48:06 hannken Exp $");
36
37 #if defined(_KERNEL_OPT)
38 #include "opt_ffs.h"
39 #include "opt_quota.h"
40 #include "opt_softdep.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/namei.h>
46 #include <sys/proc.h>
47 #include <sys/kernel.h>
48 #include <sys/vnode.h>
49 #include <sys/socket.h>
50 #include <sys/mount.h>
51 #include <sys/buf.h>
52 #include <sys/device.h>
53 #include <sys/mbuf.h>
54 #include <sys/file.h>
55 #include <sys/disklabel.h>
56 #include <sys/ioctl.h>
57 #include <sys/errno.h>
58 #include <sys/malloc.h>
59 #include <sys/pool.h>
60 #include <sys/lock.h>
61 #include <sys/sysctl.h>
62 #include <sys/conf.h>
63 #include <sys/kauth.h>
64 #include <sys/fstrans.h>
65
66 #include <miscfs/specfs/specdev.h>
67
68 #include <ufs/ufs/quota.h>
69 #include <ufs/ufs/ufsmount.h>
70 #include <ufs/ufs/inode.h>
71 #include <ufs/ufs/dir.h>
72 #include <ufs/ufs/ufs_extern.h>
73 #include <ufs/ufs/ufs_bswap.h>
74
75 #include <ufs/ffs/fs.h>
76 #include <ufs/ffs/ffs_extern.h>
77
78 /* how many times ffs_init() was called */
79 int ffs_initcount = 0;
80
81 extern kmutex_t ufs_hashlock;
82
83 extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
84 extern const struct vnodeopv_desc ffs_specop_opv_desc;
85 extern const struct vnodeopv_desc ffs_fifoop_opv_desc;
86
/*
 * Vnode operation vectors exported by FFS: regular files/directories,
 * special devices, and FIFOs.  NULL-terminated for vfs_attach().
 */
const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
	&ffs_vnodeop_opv_desc,
	&ffs_specop_opv_desc,
	&ffs_fifoop_opv_desc,
	NULL,
};
93
/*
 * VFS operations vector for FFS.  The initializer is positional, so the
 * entry order must match struct vfsops exactly: FFS supplies its own
 * mount/unmount, statvfs, sync, vget, file-handle conversion, init/
 * reinit/done, mountroot, snapshot, extattrctl and suspendctl hooks,
 * and reuses the generic UFS implementations for start, root and
 * quotactl.
 */
struct vfsops ffs_vfsops = {
	MOUNT_FFS,			/* filesystem name */
	sizeof (struct ufs_args),	/* minimum mount argument size */
	ffs_mount,
	ufs_start,
	ffs_unmount,
	ufs_root,
	ufs_quotactl,
	ffs_statvfs,
	ffs_sync,
	ffs_vget,
	ffs_fhtovp,
	ffs_vptofh,
	ffs_init,
	ffs_reinit,
	ffs_done,
	ffs_mountroot,
	ffs_snapshot,
	ffs_extattrctl,
	ffs_suspendctl,
	ffs_vnodeopv_descs,
	0,				/* reference count */
	{ NULL, NULL },			/* list entry */
};
VFS_ATTACH(ffs_vfsops);
119
/* genfs (page/UBC) hooks: FFS-specific size logic, generic UFS
 * allocation/update marking, and the stock genfs page writer. */
static const struct genfs_ops ffs_genfsops = {
	.gop_size = ffs_gop_size,
	.gop_alloc = ufs_gop_alloc,
	.gop_write = genfs_gop_write,
	.gop_markupdate = ufs_gop_markupdate,
};
126
/* UFS-layer dispatch: routes generic UFS operations to their FFS
 * implementations (installed as ump->um_ops in ffs_mountfs()). */
static const struct ufs_ops ffs_ufsops = {
	.uo_itimes = ffs_itimes,
	.uo_update = ffs_update,
	.uo_truncate = ffs_truncate,
	.uo_valloc = ffs_valloc,
	.uo_vfree = ffs_vfree,
	.uo_balloc = ffs_balloc,
};
135
/* Allocation pools for in-core inodes and the two on-disk dinode
 * formats (UFS1/UFS2); presumably set up in ffs_init(), which is not
 * visible in this chunk. */
struct pool ffs_inode_pool;
struct pool ffs_dinode1_pool;
struct pool ffs_dinode2_pool;

/* Old-superblock compatibility shims, defined below. */
static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, daddr_t);
static void ffs_oldfscompat_write(struct fs *, struct ufsmount *);
142
/*
 * Called by main() when ffs is going to be mounted as root.
 *
 * Mounts the filesystem on rootvp, links the new mount onto the global
 * mountlist, records the mount point name in the superblock, and seeds
 * the system time from the filesystem timestamp.
 */

int
ffs_mountroot(void)
{
	struct fs *fs;
	struct mount *mp;
	struct lwp *l = curlwp;		/* XXX */
	struct ufsmount *ump;
	int error;

	/* Root must be a disk-class device. */
	if (device_class(root_device) != DV_DISK)
		return (ENODEV);

	if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
		vrele(rootvp);
		return (error);
	}
	if ((error = ffs_mountfs(rootvp, mp, l)) != 0) {
		/* Undo the reference vfs_rootmountalloc() took on the
		 * filesystem's vfsops before destroying the mount. */
		mp->mnt_op->vfs_refcount--;
		vfs_unbusy(mp);
		vfs_destroy(mp);
		return (error);
	}
	mutex_enter(&mountlist_lock);
	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	mutex_exit(&mountlist_lock);
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	/* Record "/" as the mounted-on name in the superblock copy. */
	memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
	(void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
	(void)ffs_statvfs(mp, &mp->mnt_stat, l);
	vfs_unbusy(mp);
	/* Seed the clock from the fs timestamp (no RTC yet at this point). */
	setrootfstime((time_t)fs->fs_time);
	return (0);
}
181
/*
 * VFS Operations.
 *
 * mount system call
 *
 * Handles three request shapes, distinguished by mp->mnt_flag:
 *  - MNT_GETARGS: report the current mount arguments and return.
 *  - fresh mount: look up and open the named block device, then hand
 *    off to ffs_mountfs().
 *  - MNT_UPDATE: r/w <-> r/o transitions, softdep enable/disable
 *    (mostly deferred, see the "notyet" blocks), and MNT_RELOAD.
 */
int
ffs_mount(struct mount *mp, const char *path, void *data, size_t *data_len,
    struct lwp *l)
{
	struct nameidata nd;
	struct vnode *devvp = NULL;
	struct ufs_args *args = data;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	int error = 0, flags, update;
	mode_t accessmode;

	if (*data_len < sizeof *args)
		return EINVAL;

	if (mp->mnt_flag & MNT_GETARGS) {
		ump = VFSTOUFS(mp);
		if (ump == NULL)
			return EIO;
		args->fspec = NULL;
		*data_len = sizeof *args;
		return 0;
	}

#if !defined(SOFTDEP)
	/* Soft updates not compiled into this kernel; ignore the option. */
	mp->mnt_flag &= ~MNT_SOFTDEP;
#endif

	update = mp->mnt_flag & MNT_UPDATE;

	/* Check arguments */
	if (args->fspec != NULL) {
		/*
		 * Look up the name and verify that it's sane.
		 */
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, args->fspec, l);
		if ((error = namei(&nd)) != 0)
			return (error);
		devvp = nd.ni_vp;

		if (!update) {
			/*
			 * Be sure this is a valid block device
			 */
			if (devvp->v_type != VBLK)
				error = ENOTBLK;
			else if (bdevsw_lookup(devvp->v_rdev) == NULL)
				error = ENXIO;
		} else {
			/*
			 * Be sure we're still naming the same device
			 * used for our initial mount
			 */
			ump = VFSTOUFS(mp);
			if (devvp != ump->um_devvp) {
				if (devvp->v_rdev != ump->um_devvp->v_rdev)
					error = EINVAL;
				else {
					/* Same device, different vnode:
					 * swap to the one we mounted with. */
					vrele(devvp);
					devvp = ump->um_devvp;
					vref(devvp);
				}
			}
		}
	} else {
		if (!update) {
			/* New mounts must have a filename for the device */
			return (EINVAL);
		} else {
			/* Use the extant mount */
			ump = VFSTOUFS(mp);
			devvp = ump->um_devvp;
			vref(devvp);
		}
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 *
	 * Note: runs even when a lookup error is already pending
	 * (error != 0 is checked first), so a prior error survives.
	 */
	if (error == 0 && kauth_authorize_generic(l->l_cred,
	    KAUTH_GENERIC_ISSUSER, NULL) != 0) {
		accessmode = VREAD;
		if (update ?
		    (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
		    (mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_ACCESS(devvp, accessmode, l->l_cred, l);
		VOP_UNLOCK(devvp, 0);
	}

	if (error) {
		vrele(devvp);
		return (error);
	}

	if (!update) {
		int xflags;

		/*
		 * Disallow multiple mounts of the same device.
		 * Disallow mounting of a device that is currently in use
		 * (except for root, which might share swap device for
		 * miniroot).
		 */
		error = vfs_mountedon(devvp);
		if (error)
			goto fail;
		if (vcount(devvp) > 1 && devvp != rootvp) {
			error = EBUSY;
			goto fail;
		}
		if (mp->mnt_flag & MNT_RDONLY)
			xflags = FREAD;
		else
			xflags = FREAD|FWRITE;
		error = VOP_OPEN(devvp, xflags, FSCRED, l);
		if (error)
			goto fail;
		error = ffs_mountfs(devvp, mp, l);
		if (error) {
			/* Close the device we just opened before failing. */
			vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
			(void)VOP_CLOSE(devvp, xflags, NOCRED, l);
			VOP_UNLOCK(devvp, 0);
			goto fail;
		}

		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		/* Soft updates and async are mutually exclusive. */
		if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
		    (MNT_SOFTDEP | MNT_ASYNC)) {
			printf("%s fs uses soft updates, "
			    "ignoring async mode\n",
			    fs->fs_fsmnt);
			mp->mnt_flag &= ~MNT_ASYNC;
		}
	} else {
		/*
		 * Update the mount.
		 */

		/*
		 * The initial mount got a reference on this
		 * device, so drop the one obtained via
		 * namei(), above.
		 */
		vrele(devvp);

		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			/*
			 * Changing from r/w to r/o
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mp->mnt_flag & MNT_SOFTDEP)
				error = softdep_flushfiles(mp, flags, l);
			else
				error = ffs_flushfiles(mp, flags, l);
			/* Pending counts should be zero after a full
			 * flush; report and clear any leftovers. */
			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("%s: update error: blocks %" PRId64
				    " files %d\n",
				    fs->fs_fsmnt, fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			/* Mark the fs clean on disk if everything flushed. */
			if (error == 0 &&
			    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
			    fs->fs_clean & FS_WASCLEAN) {
				if (mp->mnt_flag & MNT_SOFTDEP)
					fs->fs_flags &= ~FS_DOSOFTDEP;
				fs->fs_clean = FS_ISCLEAN;
				(void) ffs_sbupdate(ump, MNT_WAIT);
			}
			if (error)
				return (error);
			fs->fs_ronly = 1;
			fs->fs_fmod = 0;
		}

		/*
		 * Flush soft dependencies if disabling it via an update
		 * mount. This may leave some items to be processed,
		 * so don't do this yet XXX.
		 */
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
#ifdef notyet
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = softdep_flushfiles(mp, flags, l);
			if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
				fs->fs_flags &= ~FS_DOSOFTDEP;
			(void) ffs_sbupdate(ump, MNT_WAIT);
#elif defined(SOFTDEP)
			/* Keep softdep on: disabling isn't supported yet. */
			mp->mnt_flag |= MNT_SOFTDEP;
#endif
		}

		/*
		 * When upgrading to a softdep mount, we must first flush
		 * all vnodes. (not done yet -- see above)
		 */
		if (!(fs->fs_flags & FS_DOSOFTDEP) &&
		    (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
#ifdef notyet
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ffs_flushfiles(mp, flags, l);
#else
			/* Enabling softdep on a live fs isn't supported;
			 * drop the request. */
			mp->mnt_flag &= ~MNT_SOFTDEP;
#endif
		}

		if (mp->mnt_flag & MNT_RELOAD) {
			error = ffs_reload(mp, l->l_cred, l);
			if (error)
				return (error);
		}

		if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
			/*
			 * Changing from read-only to read/write
			 */
			fs->fs_ronly = 0;
			/* Shift the clean flag: FS_ISCLEAN becomes
			 * FS_WASCLEAN while the fs is writable. */
			fs->fs_clean <<= 1;
			fs->fs_fmod = 1;
			if ((fs->fs_flags & FS_DOSOFTDEP)) {
				error = softdep_mount(devvp, mp, fs,
				    l->l_cred);
				if (error)
					return (error);
			}
			if (fs->fs_snapinum[0] != 0)
				ffs_snapshot_mount(mp);
		}
		if (args->fspec == NULL)
			return EINVAL;
		if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
		    (MNT_SOFTDEP | MNT_ASYNC)) {
			printf("%s fs uses soft updates, ignoring async mode\n",
			    fs->fs_fsmnt);
			mp->mnt_flag &= ~MNT_ASYNC;
		}
	}

	error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
	    UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
	if (error == 0)
		(void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
		    sizeof(fs->fs_fsmnt));
	/* Mirror the mount-level softdep flag into the superblock. */
	if (mp->mnt_flag & MNT_SOFTDEP)
		fs->fs_flags |= FS_DOSOFTDEP;
	else
		fs->fs_flags &= ~FS_DOSOFTDEP;
	if (fs->fs_fmod != 0) {	/* XXX */
		fs->fs_fmod = 0;
		if (fs->fs_clean & FS_WASCLEAN)
			fs->fs_time = time_second;
		else {
			printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
			    mp->mnt_stat.f_mntfromname, fs->fs_clean);
			printf("%s: lost blocks %" PRId64 " files %d\n",
			    mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
			    fs->fs_pendinginodes);
		}
		(void) ffs_cgupdate(ump, MNT_WAIT);
	}
	return (error);

fail:
	vrele(devvp);
	return (error);
}
468
/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 * 1) invalidate all cached meta-data.
 * 2) re-read superblock from disk.
 * 3) re-read summary information from disk.
 * 4) invalidate all inactive vnodes.
 * 5) invalidate all cached file data.
 * 6) re-read inode data for all active vnodes.
 */
int
ffs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct partinfo dpart;
	int i, blks, size, error;
	int32_t *lp;
	struct ufsmount *ump;
	daddr_t sblockloc;

	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);

	ump = VFSTOUFS(mp);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = ump->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, 0, cred, l, 0, 0);
	VOP_UNLOCK(devvp, 0);
	if (error)
		panic("ffs_reload: dirty1");
	/*
	 * Step 2: re-read superblock from disk.
	 */
	fs = ump->um_fs;
	/* Device sector size, falling back to DEV_BSIZE on ioctl failure. */
	if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, NOCRED, l) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;
	/* XXX we don't handle possibility that superblock moved. */
	error = bread(devvp, fs->fs_sblockloc / size, fs->fs_sbsize,
		      NOCRED, &bp);
	if (error) {
		brelse(bp, 0);
		return (error);
	}
	newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
	memcpy(newfs, bp->b_data, fs->fs_sbsize);
#ifdef FFS_EI
	/* Byte-swap the fresh superblock if the fs is opposite-endian. */
	if (ump->um_flags & UFS_NEEDSWAP) {
		ffs_sb_swap((struct fs*)bp->b_data, newfs);
		fs->fs_flags |= FS_SWAPPED;
	} else
#endif
		fs->fs_flags &= ~FS_SWAPPED;
	/* Sanity-check the re-read superblock before adopting it. */
	if ((newfs->fs_magic != FS_UFS1_MAGIC &&
	     newfs->fs_magic != FS_UFS2_MAGIC)||
	     newfs->fs_bsize > MAXBSIZE ||
	     newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp, 0);
		free(newfs, M_UFSMNT);
		return (EIO);		/* XXX needs translation */
	}
	/* Store off old fs_sblockloc for fs_oldfscompat_read. */
	sblockloc = fs->fs_sblockloc;
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	newfs->fs_csp = fs->fs_csp;
	newfs->fs_maxcluster = fs->fs_maxcluster;
	newfs->fs_contigdirs = fs->fs_contigdirs;
	newfs->fs_ronly = fs->fs_ronly;
	newfs->fs_active = fs->fs_active;
	memcpy(fs, newfs, (u_int)fs->fs_sbsize);
	brelse(bp, 0);
	free(newfs, M_UFSMNT);

	/* Recheck for apple UFS filesystem */
	ump->um_flags &= ~UFS_ISAPPLEUFS;
	/* First check to see if this is tagged as an Apple UFS filesystem
	 * in the disklabel
	 */
	if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred, l) == 0) &&
		(dpart.part->p_fstype == FS_APPLEUFS)) {
		ump->um_flags |= UFS_ISAPPLEUFS;
	}
#ifdef APPLE_UFS
	else {
		/* Manually look for an apple ufs label, and if a valid one
		 * is found, then treat it like an Apple UFS filesystem anyway
		 */
		error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
			APPLEUFS_LABEL_SIZE, cred, &bp);
		if (error) {
			brelse(bp, 0);
			return (error);
		}
		error = ffs_appleufs_validate(fs->fs_fsmnt,
			(struct appleufslabel *)bp->b_data,NULL);
		if (error == 0)
			ump->um_flags |= UFS_ISAPPLEUFS;
		brelse(bp, 0);
		bp = NULL;
	}
#else
	if (ump->um_flags & UFS_ISAPPLEUFS)
		return (EIO);
#endif

	if (UFS_MPISAPPLEUFS(ump)) {
		/* see comment about NeXT below */
		ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
		ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
		mp->mnt_iflag |= IMNT_DTYPE;
	} else {
		ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
		ump->um_dirblksiz = DIRBLKSIZ;
		if (ump->um_maxsymlinklen > 0)
			mp->mnt_iflag |= IMNT_DTYPE;
		else
			mp->mnt_iflag &= ~IMNT_DTYPE;
	}
	ffs_oldfscompat_read(fs, ump, sblockloc);
	mutex_enter(&ump->um_lock);
	ump->um_maxfilesize = fs->fs_maxfilesize;
	/* Pending counts are stale after a reload; discard them. */
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	mutex_exit(&ump->um_lock);

	ffs_statvfs(mp, &mp->mnt_stat, l);
	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = fs->fs_csp;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
			      NOCRED, &bp);
		if (error) {
			brelse(bp, 0);
			return (error);
		}
#ifdef FFS_EI
		if (UFS_FSNEEDSWAP(fs))
			ffs_csum_swap((struct csum *)bp->b_data,
			    (struct csum *)space, size);
		else
#endif
			memcpy(space, bp->b_data, (size_t)size);
		space = (char *)space + size;
		brelse(bp, 0);
	}
	if ((fs->fs_flags & FS_DOSOFTDEP))
		softdep_mount(devvp, mp, fs, cred);
	if (fs->fs_snapinum[0] != 0)
		ffs_snapshot_mount(mp);
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		lp = fs->fs_maxcluster;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}

	/* Allocate a marker vnode. */
	if ((mvp = valloc(mp)) == NULL)
		return ENOMEM;
	/*
	 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
	 * and vclean() can be called indirectly
	 */
	mutex_enter(&mntvnode_lock);
 loop:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		/* Skip vnodes that moved off this mount and other markers. */
		if (vp->v_mount != mp || vismarker(vp))
			continue;
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		if (vrecycle(vp, &mntvnode_lock, l)) {
			/* vrecycle() released mntvnode_lock; restart. */
			mutex_enter(&mntvnode_lock);
			(void)vunmark(mvp);
			goto loop;
		}
		/*
		 * Step 5: invalidate all cached file data.
		 */
		mutex_enter(&vp->v_interlock);
		mutex_exit(&mntvnode_lock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			(void)vunmark(mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, cred, l, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
			      (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp, 0);
			vput(vp);
			(void)vunmark(mvp);
			break;
		}
		ffs_load_inode(bp, ip, fs, ip->i_number);
		ip->i_ffs_effnlink = ip->i_nlink;
		brelse(bp, 0);
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
	vfree(mvp);
	return (error);
}
704
/*
 * Possible superblock locations ordered from most to least likely,
 * terminated by -1 (probed in order by ffs_mountfs()).
 */
static const int sblock_try[] = SBLOCKSEARCH;
709
/*
 * Common code for mount and mountroot
 *
 * Probes the superblock locations in sblock_try[], validates and
 * byte-swaps the superblock as needed, reads the cylinder-group
 * summary area, and wires up the ufsmount / struct mount state.
 * On failure everything allocated here is unwound at "out:".
 */
int
ffs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
{
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	dev_t dev;
	struct partinfo dpart;
	void *space;
	daddr_t sblockloc, fsblockloc;
	int blks, fstype;
	int error, i, size, ronly, bset = 0;
#ifdef FFS_EI
	int needswap = 0;		/* keep gcc happy */
#endif
	int32_t *lp;
	kauth_cred_t cred;
	u_int32_t sbsize = 8192;	/* keep gcc happy*/

	dev = devvp->v_rdev;
	cred = l ? l->l_cred : NOCRED;

	/* Flush out any old buffers remaining from a previous use. */
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
	VOP_UNLOCK(devvp, 0);
	if (error)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	/* Device sector size, falling back to DEV_BSIZE on ioctl failure. */
	if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred, l) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;

	bp = NULL;
	ump = NULL;
	fs = NULL;
	sblockloc = 0;
	fstype = 0;

	error = fstrans_mount(mp);
	if (error)
		return error;

	/*
	 * Try reading the superblock in each of its possible locations.
	 */
	for (i = 0; ; i++) {
		if (bp != NULL) {
			/* Don't cache rejected candidate superblocks. */
			brelse(bp, BC_NOCACHE);
			bp = NULL;
		}
		if (sblock_try[i] == -1) {
			/* Exhausted all candidate locations. */
			error = EINVAL;
			fs = NULL;
			goto out;
		}
		error = bread(devvp, sblock_try[i] / size, SBLOCKSIZE, cred,
			      &bp);
		if (error) {
			fs = NULL;
			goto out;
		}
		fs = (struct fs*)bp->b_data;
		fsblockloc = sblockloc = sblock_try[i];
		/* Identify format and endianness from the magic number. */
		if (fs->fs_magic == FS_UFS1_MAGIC) {
			sbsize = fs->fs_sbsize;
			fstype = UFS1;
#ifdef FFS_EI
			needswap = 0;
		} else if (fs->fs_magic == bswap32(FS_UFS1_MAGIC)) {
			sbsize = bswap32(fs->fs_sbsize);
			fstype = UFS1;
			needswap = 1;
#endif
		} else if (fs->fs_magic == FS_UFS2_MAGIC) {
			sbsize = fs->fs_sbsize;
			fstype = UFS2;
#ifdef FFS_EI
			needswap = 0;
		} else if (fs->fs_magic == bswap32(FS_UFS2_MAGIC)) {
			sbsize = bswap32(fs->fs_sbsize);
			fstype = UFS2;
			needswap = 1;
#endif
		} else
			continue;


		/* fs->fs_sblockloc isn't defined for old filesystems */
		if (fstype == UFS1 && !(fs->fs_old_flags & FS_FLAGS_UPDATED)) {
			if (sblockloc == SBLOCK_UFS2)
				/*
				 * This is likely to be the first alternate
				 * in a filesystem with 64k blocks.
				 * Don't use it.
				 */
				continue;
			fsblockloc = sblockloc;
		} else {
			fsblockloc = fs->fs_sblockloc;
#ifdef FFS_EI
			if (needswap)
				fsblockloc = bswap64(fsblockloc);
#endif
		}

		/* Check we haven't found an alternate superblock */
		if (fsblockloc != sblockloc)
			continue;

		/* Validate size of superblock */
		if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs))
			continue;

		/* Ok seems to be a good superblock */
		break;
	}

	/* Keep a private, native-endian copy of the superblock. */
	fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
	memcpy(fs, bp->b_data, sbsize);

	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	memset(ump, 0, sizeof *ump);
	mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);
	ump->um_fs = fs;
	ump->um_ops = &ffs_ufsops;

#ifdef FFS_EI
	if (needswap) {
		ffs_sb_swap((struct fs*)bp->b_data, fs);
		fs->fs_flags |= FS_SWAPPED;
	} else
#endif
		fs->fs_flags &= ~FS_SWAPPED;

	ffs_oldfscompat_read(fs, ump, sblockloc);
	ump->um_maxfilesize = fs->fs_maxfilesize;

	/* Stale pending counts from an unclean shutdown; discard. */
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}

	ump->um_fstype = fstype;
	/* Drop the probe buffer; invalidate it if it holds a short sb. */
	if (fs->fs_sbsize < SBLOCKSIZE)
		brelse(bp, BC_INVAL);
	else
		brelse(bp, 0);
	bp = NULL;

	/* First check to see if this is tagged as an Apple UFS filesystem
	 * in the disklabel
	 */
	if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred, l) == 0) &&
		(dpart.part->p_fstype == FS_APPLEUFS)) {
		ump->um_flags |= UFS_ISAPPLEUFS;
	}
#ifdef APPLE_UFS
	else {
		/* Manually look for an apple ufs label, and if a valid one
		 * is found, then treat it like an Apple UFS filesystem anyway
		 */
		error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
			APPLEUFS_LABEL_SIZE, cred, &bp);
		if (error)
			goto out;
		error = ffs_appleufs_validate(fs->fs_fsmnt,
			(struct appleufslabel *)bp->b_data,NULL);
		if (error == 0) {
			ump->um_flags |= UFS_ISAPPLEUFS;
		}
		brelse(bp, 0);
		bp = NULL;
	}
#else
	if (ump->um_flags & UFS_ISAPPLEUFS) {
		error = EINVAL;
		goto out;
	}
#endif

	/*
	 * verify that we can access the last block in the fs
	 * if we're mounting read/write.
	 */

	if (!ronly) {
		error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize,
		    cred, &bp);
		if (bp->b_bcount != fs->fs_fsize)
			error = EINVAL;
		if (error) {
			/* Make the out: path invalidate this buffer. */
			bset = BC_INVAL;
			goto out;
		}
		brelse(bp, BC_INVAL);
		bp = NULL;
	}

	fs->fs_ronly = ronly;
	if (ronly == 0) {
		/* Shift the clean flag: FS_ISCLEAN becomes FS_WASCLEAN
		 * while the fs is writable. */
		fs->fs_clean <<= 1;
		fs->fs_fmod = 1;
	}
	/* One allocation holds the cs summary area, the per-cg cluster
	 * table (if clustering) and the contigdirs array. */
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
			      cred, &bp);
		if (error) {
			free(fs->fs_csp, M_UFSMNT);
			goto out;
		}
#ifdef FFS_EI
		if (needswap)
			ffs_csum_swap((struct csum *)bp->b_data,
			    (struct csum *)space, size);
		else
#endif
			memcpy(space, bp->b_data, (u_int)size);

		space = (char *)space + size;
		brelse(bp, 0);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
	fs->fs_contigdirs = space;
	space = (char *)space + size;
	memset(fs->fs_contigdirs, 0, size);
	/* Compatibility for old filesystems - XXX */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;
	fs->fs_active = NULL;
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
	mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_FFS);
	mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
	mp->mnt_stat.f_namemax = FFS_MAXNAMLEN;
	if (UFS_MPISAPPLEUFS(ump)) {
		/* NeXT used to keep short symlinks in the inode even
		 * when using FS_42INODEFMT. In that case fs->fs_maxsymlinklen
		 * is probably -1, but we still need to be able to identify
		 * short symlinks.
		 */
		ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
		ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
		mp->mnt_iflag |= IMNT_DTYPE;
	} else {
		ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
		ump->um_dirblksiz = DIRBLKSIZ;
		if (ump->um_maxsymlinklen > 0)
			mp->mnt_iflag |= IMNT_DTYPE;
		else
			mp->mnt_iflag &= ~IMNT_DTYPE;
	}
	mp->mnt_fs_bshift = fs->fs_bshift;
	mp->mnt_dev_bshift = DEV_BSHIFT;	/* XXX */
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_iflag |= IMNT_MPSAFE;
#ifdef FFS_EI
	if (needswap)
		ump->um_flags |= UFS_NEEDSWAP;
#endif
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	/* fs_nindir is a power of two; ffs() gives its log2. */
	ump->um_lognindir = ffs(fs->fs_nindir) - 1;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specmountpoint = mp;
	if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
		error = softdep_mount(devvp, mp, fs, cred);
		if (error) {
			free(fs->fs_csp, M_UFSMNT);
			goto out;
		}
	}
	if (ronly == 0 && fs->fs_snapinum[0] != 0)
		ffs_snapshot_mount(mp);
#ifdef UFS_EXTATTR
	/*
	 * Initialize file-backed extended attributes on UFS1 file
	 * systems.
	 */
	if (ump->um_fstype == UFS1) {
		ufs_extattr_uepm_init(&ump->um_extattr);
#ifdef UFS_EXTATTR_AUTOSTART
		/*
		 * XXX Just ignore errors. Not clear that we should
		 * XXX fail the mount in this case.
		 */
		(void) ufs_extattr_autostart(mp, l);
#endif
	}
#endif /* UFS_EXTATTR */
	return (0);
out:
	/* Unwind everything allocated above, in reverse order. */
	fstrans_unmount(mp);
	if (fs)
		free(fs, M_UFSMNT);
	devvp->v_specmountpoint = NULL;
	if (bp)
		brelse(bp, bset);
	if (ump) {
		if (ump->um_oldfscompat)
			free(ump->um_oldfscompat, M_UFSMNT);
		mutex_destroy(&ump->um_lock);
		free(ump, M_UFSMNT);
		mp->mnt_data = NULL;
	}
	return (error);
}
1046
1047 /*
1048 * Sanity checks for loading old filesystem superblocks.
1049 * See ffs_oldfscompat_write below for unwound actions.
1050 *
1051 * XXX - Parts get retired eventually.
1052 * Unfortunately new bits get added.
1053 */
1054 static void
1055 ffs_oldfscompat_read(struct fs *fs, struct ufsmount *ump, daddr_t sblockloc)
1056 {
1057 off_t maxfilesize;
1058 int32_t *extrasave;
1059
1060 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1061 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1062 return;
1063
1064 if (!ump->um_oldfscompat)
1065 ump->um_oldfscompat = malloc(512 + 3*sizeof(int32_t),
1066 M_UFSMNT, M_WAITOK);
1067
1068 memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
1069 extrasave = ump->um_oldfscompat;
1070 extrasave += 512/sizeof(int32_t);
1071 extrasave[0] = fs->fs_old_npsect;
1072 extrasave[1] = fs->fs_old_interleave;
1073 extrasave[2] = fs->fs_old_trackskew;
1074
1075 /* These fields will be overwritten by their
1076 * original values in fs_oldfscompat_write, so it is harmless
1077 * to modify them here.
1078 */
1079 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
1080 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
1081 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
1082 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
1083
1084 fs->fs_maxbsize = fs->fs_bsize;
1085 fs->fs_time = fs->fs_old_time;
1086 fs->fs_size = fs->fs_old_size;
1087 fs->fs_dsize = fs->fs_old_dsize;
1088 fs->fs_csaddr = fs->fs_old_csaddr;
1089 fs->fs_sblockloc = sblockloc;
1090
1091 fs->fs_flags = fs->fs_old_flags | (fs->fs_flags & FS_INTERNAL);
1092
1093 if (fs->fs_old_postblformat == FS_42POSTBLFMT) {
1094 fs->fs_old_nrpos = 8;
1095 fs->fs_old_npsect = fs->fs_old_nsect;
1096 fs->fs_old_interleave = 1;
1097 fs->fs_old_trackskew = 0;
1098 }
1099
1100 if (fs->fs_old_inodefmt < FS_44INODEFMT) {
1101 fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
1102 fs->fs_qbmask = ~fs->fs_bmask;
1103 fs->fs_qfmask = ~fs->fs_fmask;
1104 }
1105
1106 maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;
1107 if (fs->fs_maxfilesize > maxfilesize)
1108 fs->fs_maxfilesize = maxfilesize;
1109
1110 /* Compatibility for old filesystems */
1111 if (fs->fs_avgfilesize <= 0)
1112 fs->fs_avgfilesize = AVFILESIZ;
1113 if (fs->fs_avgfpdir <= 0)
1114 fs->fs_avgfpdir = AFPDIR;
1115
1116 #if 0
1117 if (bigcgs) {
1118 fs->fs_save_cgsize = fs->fs_cgsize;
1119 fs->fs_cgsize = fs->fs_bsize;
1120 }
1121 #endif
1122 }
1123
1124 /*
1125 * Unwinding superblock updates for old filesystems.
1126 * See ffs_oldfscompat_read above for details.
1127 *
1128 * XXX - Parts get retired eventually.
1129 * Unfortunately new bits get added.
1130 */
1131 static void
1132 ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
1133 {
1134 int32_t *extrasave;
1135
1136 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1137 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1138 return;
1139
1140 fs->fs_old_time = fs->fs_time;
1141 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
1142 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
1143 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
1144 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
1145 fs->fs_old_flags = fs->fs_flags;
1146
1147 #if 0
1148 if (bigcgs) {
1149 fs->fs_cgsize = fs->fs_save_cgsize;
1150 }
1151 #endif
1152
1153 memcpy(&fs->fs_old_postbl_start, ump->um_oldfscompat, 512);
1154 extrasave = ump->um_oldfscompat;
1155 extrasave += 512/sizeof(int32_t);
1156 fs->fs_old_npsect = extrasave[0];
1157 fs->fs_old_interleave = extrasave[1];
1158 fs->fs_old_trackskew = extrasave[2];
1159
1160 }
1161
1162 /*
1163 * unmount system call
1164 */
1165 int
1166 ffs_unmount(struct mount *mp, int mntflags, struct lwp *l)
1167 {
1168 struct ufsmount *ump = VFSTOUFS(mp);
1169 struct fs *fs = ump->um_fs;
1170 int error, flags, penderr;
1171
1172 penderr = 0;
1173 flags = 0;
1174 if (mntflags & MNT_FORCE)
1175 flags |= FORCECLOSE;
1176 #ifdef UFS_EXTATTR
1177 if (ump->um_fstype == UFS1) {
1178 error = ufs_extattr_stop(mp, l);
1179 if (error) {
1180 if (error != EOPNOTSUPP)
1181 printf("%s: ufs_extattr_stop returned %d\n",
1182 fs->fs_fsmnt, error);
1183 } else
1184 ufs_extattr_uepm_destroy(&ump->um_extattr);
1185 }
1186 #endif /* UFS_EXTATTR */
1187 if (mp->mnt_flag & MNT_SOFTDEP) {
1188 if ((error = softdep_flushfiles(mp, flags, l)) != 0)
1189 return (error);
1190 } else {
1191 if ((error = ffs_flushfiles(mp, flags, l)) != 0)
1192 return (error);
1193 }
1194 mutex_enter(&ump->um_lock);
1195 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1196 printf("%s: unmount pending error: blocks %" PRId64
1197 " files %d\n",
1198 fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
1199 fs->fs_pendingblocks = 0;
1200 fs->fs_pendinginodes = 0;
1201 penderr = 1;
1202 }
1203 mutex_exit(&ump->um_lock);
1204 if (fs->fs_ronly == 0 &&
1205 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
1206 fs->fs_clean & FS_WASCLEAN) {
1207 /*
1208 * XXXX don't mark fs clean in the case of softdep
1209 * pending block errors, until they are fixed.
1210 */
1211 if (penderr == 0) {
1212 if (mp->mnt_flag & MNT_SOFTDEP)
1213 fs->fs_flags &= ~FS_DOSOFTDEP;
1214 fs->fs_clean = FS_ISCLEAN;
1215 }
1216 fs->fs_fmod = 0;
1217 (void) ffs_sbupdate(ump, MNT_WAIT);
1218 }
1219 if (ump->um_devvp->v_type != VBAD)
1220 ump->um_devvp->v_specmountpoint = NULL;
1221 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1222 (void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
1223 NOCRED, l);
1224 vput(ump->um_devvp);
1225 free(fs->fs_csp, M_UFSMNT);
1226 free(fs, M_UFSMNT);
1227 if (ump->um_oldfscompat != NULL)
1228 free(ump->um_oldfscompat, M_UFSMNT);
1229 softdep_unmount(mp);
1230 mutex_destroy(&ump->um_lock);
1231 free(ump, M_UFSMNT);
1232 mp->mnt_data = NULL;
1233 mp->mnt_flag &= ~MNT_LOCAL;
1234 fstrans_unmount(mp);
1235 return (0);
1236 }
1237
1238 /*
1239 * Flush out all the files in a filesystem.
1240 */
1241 int
1242 ffs_flushfiles(struct mount *mp, int flags, struct lwp *l)
1243 {
1244 extern int doforce;
1245 struct ufsmount *ump;
1246 int error;
1247
1248 if (!doforce)
1249 flags &= ~FORCECLOSE;
1250 ump = VFSTOUFS(mp);
1251 #ifdef QUOTA
1252 if (mp->mnt_flag & MNT_QUOTA) {
1253 int i;
1254 if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
1255 return (error);
1256 for (i = 0; i < MAXQUOTAS; i++) {
1257 if (ump->um_quotas[i] == NULLVP)
1258 continue;
1259 quotaoff(l, mp, i);
1260 }
1261 /*
1262 * Here we fall through to vflush again to ensure
1263 * that we have gotten rid of all the system vnodes.
1264 */
1265 }
1266 #endif
1267 if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
1268 return (error);
1269 ffs_snapshot_unmount(mp);
1270 /*
1271 * Flush all the files.
1272 */
1273 error = vflush(mp, NULLVP, flags);
1274 if (error)
1275 return (error);
1276 /*
1277 * Flush filesystem metadata.
1278 */
1279 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1280 error = VOP_FSYNC(ump->um_devvp, l->l_cred, FSYNC_WAIT, 0, 0, l);
1281 VOP_UNLOCK(ump->um_devvp, 0);
1282 return (error);
1283 }
1284
1285 /*
1286 * Get file system statistics.
1287 */
1288 int
1289 ffs_statvfs(struct mount *mp, struct statvfs *sbp, struct lwp *l)
1290 {
1291 struct ufsmount *ump;
1292 struct fs *fs;
1293
1294 ump = VFSTOUFS(mp);
1295 fs = ump->um_fs;
1296 mutex_enter(&ump->um_lock);
1297 sbp->f_bsize = fs->fs_bsize;
1298 sbp->f_frsize = fs->fs_fsize;
1299 sbp->f_iosize = fs->fs_bsize;
1300 sbp->f_blocks = fs->fs_dsize;
1301 sbp->f_bfree = blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
1302 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
1303 sbp->f_bresvd = ((u_int64_t) fs->fs_dsize * (u_int64_t)
1304 fs->fs_minfree) / (u_int64_t) 100;
1305 if (sbp->f_bfree > sbp->f_bresvd)
1306 sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
1307 else
1308 sbp->f_bavail = 0;
1309 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
1310 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1311 sbp->f_favail = sbp->f_ffree;
1312 sbp->f_fresvd = 0;
1313 mutex_exit(&ump->um_lock);
1314 copy_statvfs_info(sbp, mp);
1315
1316 return (0);
1317 }
1318
1319 /*
1320 * Go through the disk queues to initiate sandbagged IO;
1321 * go through the inodes to write those that have been modified;
1322 * initiate the writing of the super block if it has been modified.
1323 *
1324 * Note: we are always called with the filesystem marked `MPBUSY'.
1325 */
1326 int
1327 ffs_sync(struct mount *mp, int waitfor, kauth_cred_t cred, struct lwp *l)
1328 {
1329 struct vnode *vp, *mvp;
1330 struct inode *ip;
1331 struct ufsmount *ump = VFSTOUFS(mp);
1332 struct fs *fs;
1333 int error, count, allerror = 0;
1334
1335 fs = ump->um_fs;
1336 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
1337 printf("fs = %s\n", fs->fs_fsmnt);
1338 panic("update: rofs mod");
1339 }
1340
1341 /* Allocate a marker vnode. */
1342 if ((mvp = valloc(mp)) == NULL)
1343 return (ENOMEM);
1344
1345 fstrans_start(mp, FSTRANS_SHARED);
1346 /*
1347 * Write back each (modified) inode.
1348 */
1349 mutex_enter(&mntvnode_lock);
1350 loop:
1351 /*
1352 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
1353 * and vclean() can be called indirectly
1354 */
1355 for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
1356 vmark(mvp, vp);
1357 /*
1358 * If the vnode that we are about to sync is no longer
1359 * associated with this mount point, start over.
1360 */
1361 if (vp->v_mount != mp || vismarker(vp))
1362 continue;
1363 mutex_enter(&vp->v_interlock);
1364 ip = VTOI(vp);
1365 if (ip == NULL || (vp->v_iflag & (VI_XLOCK|VI_CLEAN)) != 0 ||
1366 vp->v_type == VNON || ((ip->i_flag &
1367 (IN_CHANGE | IN_UPDATE | IN_MODIFIED)) == 0 &&
1368 LIST_EMPTY(&vp->v_dirtyblkhd) &&
1369 UVM_OBJ_IS_CLEAN(&vp->v_uobj)))
1370 {
1371 mutex_exit(&vp->v_interlock);
1372 continue;
1373 }
1374 if (vp->v_type == VBLK &&
1375 fstrans_getstate(mp) == FSTRANS_SUSPENDING) {
1376 mutex_exit(&vp->v_interlock);
1377 continue;
1378 }
1379 mutex_exit(&mntvnode_lock);
1380 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
1381 if (error) {
1382 mutex_enter(&mntvnode_lock);
1383 if (error == ENOENT) {
1384 (void)vunmark(mvp);
1385 goto loop;
1386 }
1387 continue;
1388 }
1389 if (vp->v_type == VREG && waitfor == MNT_LAZY)
1390 error = ffs_update(vp, NULL, NULL, 0);
1391 else
1392 error = VOP_FSYNC(vp, cred,
1393 waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, l);
1394 if (error)
1395 allerror = error;
1396 vput(vp);
1397 mutex_enter(&mntvnode_lock);
1398 }
1399 mutex_exit(&mntvnode_lock);
1400 /*
1401 * Force stale file system control information to be flushed.
1402 */
1403 if (waitfor == MNT_WAIT && (ump->um_mountp->mnt_flag & MNT_SOFTDEP)) {
1404 if ((error = softdep_flushworklist(ump->um_mountp, &count, l)))
1405 allerror = error;
1406 /* Flushed work items may create new vnodes to clean */
1407 if (allerror == 0 && count) {
1408 mutex_enter(&mntvnode_lock);
1409 goto loop;
1410 }
1411 }
1412 if (waitfor != MNT_LAZY && (ump->um_devvp->v_numoutput > 0 ||
1413 !LIST_EMPTY(&ump->um_devvp->v_dirtyblkhd))) {
1414 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1415 if ((error = VOP_FSYNC(ump->um_devvp, cred,
1416 waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, l)) != 0)
1417 allerror = error;
1418 VOP_UNLOCK(ump->um_devvp, 0);
1419 if (allerror == 0 && waitfor == MNT_WAIT) {
1420 mutex_enter(&mntvnode_lock);
1421 goto loop;
1422 }
1423 }
1424 #ifdef QUOTA
1425 qsync(mp);
1426 #endif
1427 /*
1428 * Write back modified superblock.
1429 */
1430 if (fs->fs_fmod != 0) {
1431 fs->fs_fmod = 0;
1432 fs->fs_time = time_second;
1433 if ((error = ffs_cgupdate(ump, waitfor)))
1434 allerror = error;
1435 }
1436 fstrans_done(mp);
1437 vfree(mvp);
1438 return (allerror);
1439 }
1440
1441 /*
1442 * Look up a FFS dinode number to find its incore vnode, otherwise read it
1443 * in from disk. If it is in core, wait for the lock bit to clear, then
1444 * return the inode locked. Detection and handling of mount points must be
1445 * done by the calling routine.
1446 */
1447 int
1448 ffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
1449 {
1450 struct fs *fs;
1451 struct inode *ip;
1452 struct ufsmount *ump;
1453 struct buf *bp;
1454 struct vnode *vp;
1455 dev_t dev;
1456 int error;
1457
1458 ump = VFSTOUFS(mp);
1459 dev = ump->um_dev;
1460
1461 retry:
1462 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
1463 return (0);
1464
1465 /* Allocate a new vnode/inode. */
1466 if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
1467 *vpp = NULL;
1468 return (error);
1469 }
1470 ip = pool_get(&ffs_inode_pool, PR_WAITOK);
1471
1472 /*
1473 * If someone beat us to it, put back the freshly allocated
1474 * vnode/inode pair and retry.
1475 */
1476 mutex_enter(&ufs_hashlock);
1477 if (ufs_ihashget(dev, ino, 0) != NULL) {
1478 mutex_exit(&ufs_hashlock);
1479 ungetnewvnode(vp);
1480 pool_put(&ffs_inode_pool, ip);
1481 goto retry;
1482 }
1483
1484 vp->v_vflag |= VV_LOCKSWORK;
1485
1486 /*
1487 * XXX MFS ends up here, too, to allocate an inode. Should we
1488 * XXX create another pool for MFS inodes?
1489 */
1490
1491 memset(ip, 0, sizeof(struct inode));
1492 vp->v_data = ip;
1493 ip->i_vnode = vp;
1494 ip->i_ump = ump;
1495 ip->i_fs = fs = ump->um_fs;
1496 ip->i_dev = dev;
1497 ip->i_number = ino;
1498 LIST_INIT(&ip->i_pcbufhd);
1499 #ifdef QUOTA
1500 ufsquota_init(ip);
1501 #endif
1502
1503 /*
1504 * Put it onto its hash chain and lock it so that other requests for
1505 * this inode will block if they arrive while we are sleeping waiting
1506 * for old data structures to be purged or for the contents of the
1507 * disk portion of this inode to be read.
1508 */
1509
1510 ufs_ihashins(ip);
1511 mutex_exit(&ufs_hashlock);
1512
1513 /* Read in the disk contents for the inode, copy into the inode. */
1514 error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1515 (int)fs->fs_bsize, NOCRED, &bp);
1516 if (error) {
1517
1518 /*
1519 * The inode does not contain anything useful, so it would
1520 * be misleading to leave it on its hash chain. With mode
1521 * still zero, it will be unlinked and returned to the free
1522 * list by vput().
1523 */
1524
1525 vput(vp);
1526 brelse(bp, 0);
1527 *vpp = NULL;
1528 return (error);
1529 }
1530 if (ip->i_ump->um_fstype == UFS1)
1531 ip->i_din.ffs1_din = pool_get(&ffs_dinode1_pool, PR_WAITOK);
1532 else
1533 ip->i_din.ffs2_din = pool_get(&ffs_dinode2_pool, PR_WAITOK);
1534 ffs_load_inode(bp, ip, fs, ino);
1535 if (DOINGSOFTDEP(vp))
1536 softdep_load_inodeblock(ip);
1537 else
1538 ip->i_ffs_effnlink = ip->i_nlink;
1539 brelse(bp, 0);
1540
1541 /*
1542 * Initialize the vnode from the inode, check for aliases.
1543 * Note that the underlying vnode may have changed.
1544 */
1545
1546 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
1547
1548 /*
1549 * Finish inode initialization now that aliasing has been resolved.
1550 */
1551
1552 genfs_node_init(vp, &ffs_genfsops);
1553 ip->i_devvp = ump->um_devvp;
1554 VREF(ip->i_devvp);
1555
1556 /*
1557 * Ensure that uid and gid are correct. This is a temporary
1558 * fix until fsck has been changed to do the update.
1559 */
1560
1561 if (fs->fs_old_inodefmt < FS_44INODEFMT) { /* XXX */
1562 ip->i_uid = ip->i_ffs1_ouid; /* XXX */
1563 ip->i_gid = ip->i_ffs1_ogid; /* XXX */
1564 } /* XXX */
1565 uvm_vnp_setsize(vp, ip->i_size);
1566 *vpp = vp;
1567 return (0);
1568 }
1569
1570 /*
1571 * File handle to vnode
1572 *
1573 * Have to be really careful about stale file handles:
1574 * - check that the inode number is valid
1575 * - call ffs_vget() to get the locked inode
1576 * - check for an unallocated inode (i_mode == 0)
1577 * - check that the given client host has export rights and return
1578 * those rights via. exflagsp and credanonp
1579 */
1580 int
1581 ffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
1582 {
1583 struct ufid ufh;
1584 struct fs *fs;
1585
1586 if (fhp->fid_len != sizeof(struct ufid))
1587 return EINVAL;
1588
1589 memcpy(&ufh, fhp, sizeof(ufh));
1590 fs = VFSTOUFS(mp)->um_fs;
1591 if (ufh.ufid_ino < ROOTINO ||
1592 ufh.ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1593 return (ESTALE);
1594 return (ufs_fhtovp(mp, &ufh, vpp));
1595 }
1596
1597 /*
1598 * Vnode pointer to File handle
1599 */
1600 /* ARGSUSED */
1601 int
1602 ffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
1603 {
1604 struct inode *ip;
1605 struct ufid ufh;
1606
1607 if (*fh_size < sizeof(struct ufid)) {
1608 *fh_size = sizeof(struct ufid);
1609 return E2BIG;
1610 }
1611 ip = VTOI(vp);
1612 *fh_size = sizeof(struct ufid);
1613 memset(&ufh, 0, sizeof(ufh));
1614 ufh.ufid_len = sizeof(struct ufid);
1615 ufh.ufid_ino = ip->i_number;
1616 ufh.ufid_gen = ip->i_gen;
1617 memcpy(fhp, &ufh, sizeof(ufh));
1618 return (0);
1619 }
1620
/*
 * One-time FFS initialization: create the inode and dinode pools and
 * initialize the softdep, snapshot and generic UFS layers. Reference
 * counted, so only the first call does the work.
 */
void
ffs_init(void)
{
	if (ffs_initcount++ > 0)
		return;

	pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0,
	    "ffsinopl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&ffs_dinode1_pool, sizeof(struct ufs1_dinode), 0, 0, 0,
	    "dino1pl", &pool_allocator_nointr, IPL_NONE);
	pool_init(&ffs_dinode2_pool, sizeof(struct ufs2_dinode), 0, 0, 0,
	    "dino2pl", &pool_allocator_nointr, IPL_NONE);
	softdep_initialize();
	ffs_snapshot_init();
	ufs_init();
}
1637
/*
 * Propagate re-initialization to the softdep and generic UFS layers.
 */
void
ffs_reinit(void)
{
	softdep_reinitialize();
	ufs_reinit();
}
1644
/*
 * Tear down FFS state when the last reference goes away; pools are
 * destroyed in the reverse order of their creation in ffs_init().
 */
void
ffs_done(void)
{
	if (--ffs_initcount > 0)
		return;

	/* XXX softdep cleanup ? */
	ffs_snapshot_fini();
	ufs_done();
	pool_destroy(&ffs_dinode2_pool);
	pool_destroy(&ffs_dinode1_pool);
	pool_destroy(&ffs_inode_pool);
}
1658
/* Create the vfs.ffs sysctl subtree and its tunables. */
SYSCTL_SETUP(sysctl_vfs_ffs_setup, "sysctl vfs.ffs subtree setup")
{
#if 0
	extern int doasyncfree;
#endif
	extern int ffs_log_changeopt;

	/* Ensure the parent "vfs" and "vfs.ffs" nodes exist. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "vfs", NULL,
		       NULL, 0, NULL, 0,
		       CTL_VFS, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT,
		       CTLTYPE_NODE, "ffs",
		       SYSCTL_DESCR("Berkeley Fast File System"),
		       NULL, 0, NULL, 0,
		       CTL_VFS, 1, CTL_EOL);

	/*
	 * @@@ should we even bother with these first three?
	 */
	/* These three are stubs: sysctl_notavail rejects reads and writes. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "doclusterread", NULL,
		       sysctl_notavail, 0, NULL, 0,
		       CTL_VFS, 1, FFS_CLUSTERREAD, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "doclusterwrite", NULL,
		       sysctl_notavail, 0, NULL, 0,
		       CTL_VFS, 1, FFS_CLUSTERWRITE, CTL_EOL);
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "doreallocblks", NULL,
		       sysctl_notavail, 0, NULL, 0,
		       CTL_VFS, 1, FFS_REALLOCBLKS, CTL_EOL);
#if 0
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "doasyncfree",
		       SYSCTL_DESCR("Release dirty blocks asynchronously"),
		       NULL, 0, &doasyncfree, 0,
		       CTL_VFS, 1, FFS_ASYNCFREE, CTL_EOL);
#endif
	/* Live tunable backed by ffs_log_changeopt. */
	sysctl_createv(clog, 0, NULL, NULL,
		       CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		       CTLTYPE_INT, "log_changeopt",
		       SYSCTL_DESCR("Log changes in optimization strategy"),
		       NULL, 0, &ffs_log_changeopt, 0,
		       CTL_VFS, 1, FFS_LOG_CHANGEOPT, CTL_EOL);
}
1711
1712 /*
1713 * Write a superblock and associated information back to disk.
1714 */
1715 int
1716 ffs_sbupdate(struct ufsmount *mp, int waitfor)
1717 {
1718 struct fs *fs = mp->um_fs;
1719 struct buf *bp;
1720 int error = 0;
1721 u_int32_t saveflag;
1722
1723 bp = getblk(mp->um_devvp,
1724 fs->fs_sblockloc >> (fs->fs_fshift - fs->fs_fsbtodb),
1725 (int)fs->fs_sbsize, 0, 0);
1726 saveflag = fs->fs_flags & FS_INTERNAL;
1727 fs->fs_flags &= ~FS_INTERNAL;
1728
1729 memcpy(bp->b_data, fs, fs->fs_sbsize);
1730
1731 ffs_oldfscompat_write((struct fs *)bp->b_data, mp);
1732 #ifdef FFS_EI
1733 if (mp->um_flags & UFS_NEEDSWAP)
1734 ffs_sb_swap((struct fs *)bp->b_data, (struct fs *)bp->b_data);
1735 #endif
1736 fs->fs_flags |= saveflag;
1737
1738 if (waitfor == MNT_WAIT)
1739 error = bwrite(bp);
1740 else
1741 bawrite(bp);
1742 return (error);
1743 }
1744
1745 int
1746 ffs_cgupdate(struct ufsmount *mp, int waitfor)
1747 {
1748 struct fs *fs = mp->um_fs;
1749 struct buf *bp;
1750 int blks;
1751 void *space;
1752 int i, size, error = 0, allerror = 0;
1753
1754 allerror = ffs_sbupdate(mp, waitfor);
1755 blks = howmany(fs->fs_cssize, fs->fs_fsize);
1756 space = fs->fs_csp;
1757 for (i = 0; i < blks; i += fs->fs_frag) {
1758 size = fs->fs_bsize;
1759 if (i + fs->fs_frag > blks)
1760 size = (blks - i) * fs->fs_fsize;
1761 bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1762 size, 0, 0);
1763 #ifdef FFS_EI
1764 if (mp->um_flags & UFS_NEEDSWAP)
1765 ffs_csum_swap((struct csum*)space,
1766 (struct csum*)bp->b_data, size);
1767 else
1768 #endif
1769 memcpy(bp->b_data, space, (u_int)size);
1770 space = (char *)space + size;
1771 if (waitfor == MNT_WAIT)
1772 error = bwrite(bp);
1773 else
1774 bawrite(bp);
1775 }
1776 if (!allerror && error)
1777 allerror = error;
1778 return (allerror);
1779 }
1780
/*
 * Extended attribute control: dispatch to the UFS1 file-backed
 * implementation where available, otherwise to the generic stub.
 */
int
ffs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
    int attrnamespace, const char *attrname, struct lwp *l)
{
#ifdef UFS_EXTATTR
	/*
	 * File-backed extended attributes are only supported on UFS1.
	 * UFS2 has native extended attributes.
	 */
	if (VFSTOUFS(mp)->um_fstype == UFS1) {
		return ufs_extattrctl(mp, cmd, vp, attrnamespace,
		    attrname, l);
	}
#endif
	return vfs_stdextattrctl(mp, cmd, vp, attrnamespace, attrname, l);
}
1796
1797 int
1798 ffs_suspendctl(struct mount *mp, int cmd)
1799 {
1800 int error;
1801 struct lwp *l = curlwp;
1802
1803 switch (cmd) {
1804 case SUSPEND_SUSPEND:
1805 if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
1806 return error;
1807 error = ffs_sync(mp, MNT_WAIT, l->l_proc->p_cred, l);
1808 if (error == 0)
1809 error = fstrans_setstate(mp, FSTRANS_SUSPENDED);
1810 if (error != 0) {
1811 (void) fstrans_setstate(mp, FSTRANS_NORMAL);
1812 return error;
1813 }
1814 return 0;
1815
1816 case SUSPEND_RESUME:
1817 return fstrans_setstate(mp, FSTRANS_NORMAL);
1818
1819 default:
1820 return EINVAL;
1821 }
1822 }
1823