1 /*	$NetBSD: ffs_vfsops.c,v 1.230 2008/06/28 01:34:05 rumble Exp $	*/
2
3 /*
4 * Copyright (c) 1989, 1991, 1993, 1994
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
32 */
33
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.230 2008/06/28 01:34:05 rumble Exp $");
36
37 #if defined(_KERNEL_OPT)
38 #include "opt_ffs.h"
39 #include "opt_quota.h"
40 #include "opt_softdep.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/namei.h>
46 #include <sys/proc.h>
47 #include <sys/kernel.h>
48 #include <sys/vnode.h>
49 #include <sys/socket.h>
50 #include <sys/mount.h>
51 #include <sys/buf.h>
52 #include <sys/device.h>
53 #include <sys/mbuf.h>
54 #include <sys/file.h>
55 #include <sys/disklabel.h>
56 #include <sys/ioctl.h>
57 #include <sys/errno.h>
58 #include <sys/malloc.h>
59 #include <sys/pool.h>
60 #include <sys/lock.h>
61 #include <sys/sysctl.h>
62 #include <sys/conf.h>
63 #include <sys/kauth.h>
64 #include <sys/fstrans.h>
65 #include <sys/module.h>
66
67 #include <miscfs/genfs/genfs.h>
68 #include <miscfs/specfs/specdev.h>
69
70 #include <ufs/ufs/quota.h>
71 #include <ufs/ufs/ufsmount.h>
72 #include <ufs/ufs/inode.h>
73 #include <ufs/ufs/dir.h>
74 #include <ufs/ufs/ufs_extern.h>
75 #include <ufs/ufs/ufs_bswap.h>
76
77 #include <ufs/ffs/fs.h>
78 #include <ufs/ffs/ffs_extern.h>
79
80 MODULE(MODULE_CLASS_VFS, ffs, NULL);
81
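/* sysctl nodes created at module load; torn down again at detach */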
82 static struct sysctllog *ffs_sysctl_log;
83
84 /* how many times ffs_init() was called */
85 int ffs_initcount = 0;
86
87 extern kmutex_t ufs_hashlock;
88
89 extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
90 extern const struct vnodeopv_desc ffs_specop_opv_desc;
91 extern const struct vnodeopv_desc ffs_fifoop_opv_desc;
92
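/*
 * Vnode operation vector descriptions for FFS regular files, special
 * devices and fifos.
 */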
93 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
94 &ffs_vnodeop_opv_desc,
95 &ffs_specop_opv_desc,
96 &ffs_fifoop_opv_desc,
97 NULL,
98 };
99
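/*
 * VFS operations vector for FFS; attached to the VFS layer with
 * vfs_attach() when the module is loaded.
 */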
100 struct vfsops ffs_vfsops = {
101 MOUNT_FFS,
102 sizeof (struct ufs_args),
103 ffs_mount,
104 ufs_start,
105 ffs_unmount,
106 ufs_root,
107 ufs_quotactl,
108 ffs_statvfs,
109 ffs_sync,
110 ffs_vget,
111 ffs_fhtovp,
112 ffs_vptofh,
113 ffs_init,
114 ffs_reinit,
115 ffs_done,
116 ffs_mountroot,
117 ffs_snapshot,
118 ffs_extattrctl,
119 ffs_suspendctl,
120 genfs_renamelock_enter,
121 genfs_renamelock_exit,
122 ffs_full_fsync,
123 ffs_vnodeopv_descs,
124 0,
125 { NULL, NULL },
126 };
127
128 static const struct genfs_ops ffs_genfsops = {
129 .gop_size = ffs_gop_size,
130 .gop_alloc = ufs_gop_alloc,
131 .gop_write = genfs_gop_write,
132 .gop_markupdate = ufs_gop_markupdate,
133 };
134
135 static const struct ufs_ops ffs_ufsops = {
136 .uo_itimes = ffs_itimes,
137 .uo_update = ffs_update,
138 .uo_truncate = ffs_truncate,
139 .uo_valloc = ffs_valloc,
140 .uo_vfree = ffs_vfree,
141 .uo_balloc = ffs_balloc,
142 };
143
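/*
 * Module control: on MODULE_CMD_INIT attach the vfsops and create the
 * vfs.ffs sysctl subtree, on MODULE_CMD_FINI detach and tear the sysctl
 * nodes down again.
 */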
144 static int
145 ffs_modcmd(modcmd_t cmd, void *arg)
146 {
147 int error;
148
149 #if 0
150 extern int doasyncfree;
151 #endif
152 extern int ffs_log_changeopt;
153
154 switch (cmd) {
155 case MODULE_CMD_INIT:
156 error = vfs_attach(&ffs_vfsops);
157 if (error != 0)
158 break;
159
160 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
161 CTLFLAG_PERMANENT,
162 CTLTYPE_NODE, "vfs", NULL,
163 NULL, 0, NULL, 0,
164 CTL_VFS, CTL_EOL);
165 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
166 CTLFLAG_PERMANENT,
167 CTLTYPE_NODE, "ffs",
168 SYSCTL_DESCR("Berkeley Fast File System"),
169 NULL, 0, NULL, 0,
170 CTL_VFS, 1, CTL_EOL);
171
172 /*
173 * @@@ should we even bother with these first three?
174 */
175 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
176 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
177 CTLTYPE_INT, "doclusterread", NULL,
178 sysctl_notavail, 0, NULL, 0,
179 CTL_VFS, 1, FFS_CLUSTERREAD, CTL_EOL);
180 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
181 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
182 CTLTYPE_INT, "doclusterwrite", NULL,
183 sysctl_notavail, 0, NULL, 0,
184 CTL_VFS, 1, FFS_CLUSTERWRITE, CTL_EOL);
185 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
186 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
187 CTLTYPE_INT, "doreallocblks", NULL,
188 sysctl_notavail, 0, NULL, 0,
189 CTL_VFS, 1, FFS_REALLOCBLKS, CTL_EOL);
190 #if 0
191 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
192 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
193 CTLTYPE_INT, "doasyncfree",
194 SYSCTL_DESCR("Release dirty blocks asynchronously"),
195 NULL, 0, &doasyncfree, 0,
196 CTL_VFS, 1, FFS_ASYNCFREE, CTL_EOL);
197 #endif
198 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
199 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
200 CTLTYPE_INT, "log_changeopt",
201 SYSCTL_DESCR("Log changes in optimization strategy"),
202 NULL, 0, &ffs_log_changeopt, 0,
203 CTL_VFS, 1, FFS_LOG_CHANGEOPT, CTL_EOL);
204 break;
205 case MODULE_CMD_FINI:
206 error = vfs_detach(&ffs_vfsops);
207 if (error != 0)
208 break;
209 sysctl_teardown(&ffs_sysctl_log);
210 break;
211 default:
212 error = ENOTTY;
213 break;
214 }
215
216 return (error);
217 }
218
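/* Pool caches for in-core inodes and for UFS1/UFS2 on-disk dinodes. */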
219 pool_cache_t ffs_inode_cache;
220 pool_cache_t ffs_dinode1_cache;
221 pool_cache_t ffs_dinode2_cache;
222
223 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, daddr_t);
224 static void ffs_oldfscompat_write(struct fs *, struct ufsmount *);
225
226 /*
227 * Called by main() when ffs is going to be mounted as root.
228 */
229
230 int
231 ffs_mountroot(void)
232 {
233 struct fs *fs;
234 struct mount *mp;
235 struct lwp *l = curlwp; /* XXX */
236 struct ufsmount *ump;
237 int error;
238
239 if (device_class(root_device) != DV_DISK)
240 return (ENODEV);
241
242 if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
243 vrele(rootvp);
244 return (error);
245 }
246 if ((error = ffs_mountfs(rootvp, mp, l)) != 0) {
247 vfs_unbusy(mp, false, NULL);
248 vfs_destroy(mp);
249 return (error);
250 }
251 mutex_enter(&mountlist_lock);
252 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
253 mutex_exit(&mountlist_lock);
254 ump = VFSTOUFS(mp);
255 fs = ump->um_fs;
256 memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
257 (void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
258 (void)ffs_statvfs(mp, &mp->mnt_stat);
259 vfs_unbusy(mp, false, NULL);
260 setrootfstime((time_t)fs->fs_time);
261 return (0);
262 }
263
264 /*
265 * VFS Operations.
266 *
267 * mount system call
268 */
269 int
270 ffs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
271 {
272 struct lwp *l = curlwp;
273 struct nameidata nd;
274 struct vnode *vp, *devvp = NULL;
275 struct ufs_args *args = data;
276 struct ufsmount *ump = NULL;
277 struct fs *fs;
278 int error = 0, flags, update;
279 mode_t accessmode;
280
281 if (*data_len < sizeof *args)
282 return EINVAL;
283
284 if (mp->mnt_flag & MNT_GETARGS) {
285 ump = VFSTOUFS(mp);
286 if (ump == NULL)
287 return EIO;
288 args->fspec = NULL;
289 *data_len = sizeof *args;
290 return 0;
291 }
292
293 #if !defined(SOFTDEP)
294 mp->mnt_flag &= ~MNT_SOFTDEP;
295 #endif
296
297 update = mp->mnt_flag & MNT_UPDATE;
298
299 /* Check arguments */
300 if (args->fspec != NULL) {
301 /*
302 * Look up the name and verify that it's sane.
303 */
304 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, args->fspec);
305 if ((error = namei(&nd)) != 0)
306 return (error);
307 devvp = nd.ni_vp;
308
309 if (!update) {
310 /*
311 * Be sure this is a valid block device
312 */
313 if (devvp->v_type != VBLK)
314 error = ENOTBLK;
315 else if (bdevsw_lookup(devvp->v_rdev) == NULL)
316 error = ENXIO;
317 } else {
318 /*
319 * Be sure we're still naming the same device
320 * used for our initial mount
321 */
322 ump = VFSTOUFS(mp);
323 if (devvp != ump->um_devvp) {
324 if (devvp->v_rdev != ump->um_devvp->v_rdev)
325 error = EINVAL;
326 else {
327 vrele(devvp);
328 devvp = ump->um_devvp;
329 vref(devvp);
330 }
331 }
332 }
333 } else {
334 if (!update) {
335 /* New mounts must have a filename for the device */
336 return (EINVAL);
337 } else {
338 /* Use the extant mount */
339 ump = VFSTOUFS(mp);
340 devvp = ump->um_devvp;
341 vref(devvp);
342 }
343 }
344
345 /*
346 * Mark the device and any existing vnodes as involved in
347 * softdep processing.
348 */
349 if ((mp->mnt_flag & MNT_SOFTDEP) != 0) {
350 devvp->v_uflag |= VU_SOFTDEP;
351 mutex_enter(&mntvnode_lock);
352 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
353 if (vp->v_mount != mp || vismarker(vp))
354 continue;
355 vp->v_uflag |= VU_SOFTDEP;
356 }
357 mutex_exit(&mntvnode_lock);
358 }
359
360 /*
361 	 * If mount by non-root, then verify that the user has the
362 	 * necessary permissions on the device.
363 */
364 if (error == 0 && kauth_authorize_generic(l->l_cred,
365 KAUTH_GENERIC_ISSUSER, NULL) != 0) {
366 accessmode = VREAD;
367 if (update ?
368 (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
369 (mp->mnt_flag & MNT_RDONLY) == 0)
370 accessmode |= VWRITE;
371 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
372 error = VOP_ACCESS(devvp, accessmode, l->l_cred);
373 VOP_UNLOCK(devvp, 0);
374 }
375
376 if (error) {
377 vrele(devvp);
378 return (error);
379 }
380
381 if (!update) {
382 int xflags;
383
384 if (mp->mnt_flag & MNT_RDONLY)
385 xflags = FREAD;
386 else
387 xflags = FREAD|FWRITE;
388 error = VOP_OPEN(devvp, xflags, FSCRED);
389 if (error)
390 goto fail;
391 error = ffs_mountfs(devvp, mp, l);
392 if (error) {
393 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
394 (void)VOP_CLOSE(devvp, xflags, NOCRED);
395 VOP_UNLOCK(devvp, 0);
396 goto fail;
397 }
398
399 ump = VFSTOUFS(mp);
400 fs = ump->um_fs;
401 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
402 (MNT_SOFTDEP | MNT_ASYNC)) {
403 printf("%s fs uses soft updates, "
404 "ignoring async mode\n",
405 fs->fs_fsmnt);
406 mp->mnt_flag &= ~MNT_ASYNC;
407 }
408 } else {
409 /*
410 * Update the mount.
411 */
412
413 /*
414 * The initial mount got a reference on this
415 * device, so drop the one obtained via
416 * namei(), above.
417 */
418 vrele(devvp);
419
420 ump = VFSTOUFS(mp);
421 fs = ump->um_fs;
422 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
423 /*
424 * Changing from r/w to r/o
425 */
426 flags = WRITECLOSE;
427 if (mp->mnt_flag & MNT_FORCE)
428 flags |= FORCECLOSE;
429 if (mp->mnt_flag & MNT_SOFTDEP)
430 error = softdep_flushfiles(mp, flags, l);
431 else
432 error = ffs_flushfiles(mp, flags, l);
433 if (fs->fs_pendingblocks != 0 ||
434 fs->fs_pendinginodes != 0) {
435 printf("%s: update error: blocks %" PRId64
436 " files %d\n",
437 fs->fs_fsmnt, fs->fs_pendingblocks,
438 fs->fs_pendinginodes);
439 fs->fs_pendingblocks = 0;
440 fs->fs_pendinginodes = 0;
441 }
442 if (error == 0 &&
443 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
444 fs->fs_clean & FS_WASCLEAN) {
445 if (mp->mnt_flag & MNT_SOFTDEP)
446 fs->fs_flags &= ~FS_DOSOFTDEP;
447 fs->fs_clean = FS_ISCLEAN;
448 (void) ffs_sbupdate(ump, MNT_WAIT);
449 }
450 if (error)
451 return (error);
452 fs->fs_ronly = 1;
453 fs->fs_fmod = 0;
454 }
455
456 /*
457 * Flush soft dependencies if disabling it via an update
458 * mount. This may leave some items to be processed,
459 * so don't do this yet XXX.
460 */
461 if ((fs->fs_flags & FS_DOSOFTDEP) &&
462 !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
463 #ifdef notyet
464 flags = WRITECLOSE;
465 if (mp->mnt_flag & MNT_FORCE)
466 flags |= FORCECLOSE;
467 error = softdep_flushfiles(mp, flags, l);
468 if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
469 fs->fs_flags &= ~FS_DOSOFTDEP;
470 (void) ffs_sbupdate(ump, MNT_WAIT);
471 #elif defined(SOFTDEP)
472 mp->mnt_flag |= MNT_SOFTDEP;
473 #endif
474 }
475
476 /*
477 * When upgrading to a softdep mount, we must first flush
478 * all vnodes. (not done yet -- see above)
479 */
480 if (!(fs->fs_flags & FS_DOSOFTDEP) &&
481 (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
482 #ifdef notyet
483 flags = WRITECLOSE;
484 if (mp->mnt_flag & MNT_FORCE)
485 flags |= FORCECLOSE;
486 error = ffs_flushfiles(mp, flags, l);
487 #else
488 mp->mnt_flag &= ~MNT_SOFTDEP;
489 #endif
490 }
491
492 if (mp->mnt_flag & MNT_RELOAD) {
493 error = ffs_reload(mp, l->l_cred, l);
494 if (error)
495 return (error);
496 }
497
498 if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
499 /*
500 * Changing from read-only to read/write
501 */
502 fs->fs_ronly = 0;
503 fs->fs_clean <<= 1;
504 fs->fs_fmod = 1;
505 if ((fs->fs_flags & FS_DOSOFTDEP)) {
506 error = softdep_mount(devvp, mp, fs,
507 l->l_cred);
508 if (error)
509 return (error);
510 }
511 if (fs->fs_snapinum[0] != 0)
512 ffs_snapshot_mount(mp);
513 }
514 if (args->fspec == NULL)
515 return EINVAL;
516 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
517 (MNT_SOFTDEP | MNT_ASYNC)) {
518 printf("%s fs uses soft updates, ignoring async mode\n",
519 fs->fs_fsmnt);
520 mp->mnt_flag &= ~MNT_ASYNC;
521 }
522 }
523
524 error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
525 UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
526 if (error == 0)
527 (void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
528 sizeof(fs->fs_fsmnt));
529 if (mp->mnt_flag & MNT_SOFTDEP)
530 fs->fs_flags |= FS_DOSOFTDEP;
531 else
532 fs->fs_flags &= ~FS_DOSOFTDEP;
533 if (fs->fs_fmod != 0) { /* XXX */
534 fs->fs_fmod = 0;
535 if (fs->fs_clean & FS_WASCLEAN)
536 fs->fs_time = time_second;
537 else {
538 printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
539 mp->mnt_stat.f_mntfromname, fs->fs_clean);
540 printf("%s: lost blocks %" PRId64 " files %d\n",
541 mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
542 fs->fs_pendinginodes);
543 }
544 (void) ffs_cgupdate(ump, MNT_WAIT);
545 }
546 return (error);
547
548 fail:
549 vrele(devvp);
550 return (error);
551 }
552
553 /*
554 * Reload all incore data for a filesystem (used after running fsck on
555 * the root filesystem and finding things to fix). The filesystem must
556 * be mounted read-only.
557 *
558 * Things to do to update the mount:
559 * 1) invalidate all cached meta-data.
560 * 2) re-read superblock from disk.
561 * 3) re-read summary information from disk.
562 * 4) invalidate all inactive vnodes.
563 * 5) invalidate all cached file data.
564 * 6) re-read inode data for all active vnodes.
565 */
566 int
567 ffs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
568 {
569 struct vnode *vp, *mvp, *devvp;
570 struct inode *ip;
571 void *space;
572 struct buf *bp;
573 struct fs *fs, *newfs;
574 struct partinfo dpart;
575 int i, blks, size, error;
576 int32_t *lp;
577 struct ufsmount *ump;
578 daddr_t sblockloc;
579
580 if ((mp->mnt_flag & MNT_RDONLY) == 0)
581 return (EINVAL);
582
583 ump = VFSTOUFS(mp);
584 /*
585 * Step 1: invalidate all cached meta-data.
586 */
587 devvp = ump->um_devvp;
588 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
589 error = vinvalbuf(devvp, 0, cred, l, 0, 0);
590 VOP_UNLOCK(devvp, 0);
591 if (error)
592 panic("ffs_reload: dirty1");
593 /*
594 * Step 2: re-read superblock from disk.
595 */
596 fs = ump->um_fs;
597 if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, NOCRED) != 0)
598 size = DEV_BSIZE;
599 else
600 size = dpart.disklab->d_secsize;
601 	/* XXX we don't handle the possibility that the superblock moved. */
602 error = bread(devvp, fs->fs_sblockloc / size, fs->fs_sbsize,
603 NOCRED, 0, &bp);
604 if (error) {
605 brelse(bp, 0);
606 return (error);
607 }
608 newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
609 memcpy(newfs, bp->b_data, fs->fs_sbsize);
610 #ifdef FFS_EI
611 if (ump->um_flags & UFS_NEEDSWAP) {
612 ffs_sb_swap((struct fs*)bp->b_data, newfs);
613 fs->fs_flags |= FS_SWAPPED;
614 } else
615 #endif
616 fs->fs_flags &= ~FS_SWAPPED;
617 if ((newfs->fs_magic != FS_UFS1_MAGIC &&
618 newfs->fs_magic != FS_UFS2_MAGIC)||
619 newfs->fs_bsize > MAXBSIZE ||
620 newfs->fs_bsize < sizeof(struct fs)) {
621 brelse(bp, 0);
622 free(newfs, M_UFSMNT);
623 return (EIO); /* XXX needs translation */
624 }
625 	/* Store off old fs_sblockloc for ffs_oldfscompat_read. */
626 sblockloc = fs->fs_sblockloc;
627 /*
628 * Copy pointer fields back into superblock before copying in XXX
629 * new superblock. These should really be in the ufsmount. XXX
630 * Note that important parameters (eg fs_ncg) are unchanged.
631 */
632 newfs->fs_csp = fs->fs_csp;
633 newfs->fs_maxcluster = fs->fs_maxcluster;
634 newfs->fs_contigdirs = fs->fs_contigdirs;
635 newfs->fs_ronly = fs->fs_ronly;
636 newfs->fs_active = fs->fs_active;
637 memcpy(fs, newfs, (u_int)fs->fs_sbsize);
638 brelse(bp, 0);
639 free(newfs, M_UFSMNT);
640
641 /* Recheck for apple UFS filesystem */
642 ump->um_flags &= ~UFS_ISAPPLEUFS;
643 /* First check to see if this is tagged as an Apple UFS filesystem
644 * in the disklabel
645 */
646 if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) == 0) &&
647 (dpart.part->p_fstype == FS_APPLEUFS)) {
648 ump->um_flags |= UFS_ISAPPLEUFS;
649 }
650 #ifdef APPLE_UFS
651 else {
652 /* Manually look for an apple ufs label, and if a valid one
653 * is found, then treat it like an Apple UFS filesystem anyway
654 */
655 error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
656 APPLEUFS_LABEL_SIZE, cred, 0, &bp);
657 if (error) {
658 brelse(bp, 0);
659 return (error);
660 }
661 error = ffs_appleufs_validate(fs->fs_fsmnt,
662 (struct appleufslabel *)bp->b_data,NULL);
663 if (error == 0)
664 ump->um_flags |= UFS_ISAPPLEUFS;
665 brelse(bp, 0);
666 bp = NULL;
667 }
668 #else
669 if (ump->um_flags & UFS_ISAPPLEUFS)
670 return (EIO);
671 #endif
672
673 if (UFS_MPISAPPLEUFS(ump)) {
674 /* see comment about NeXT below */
675 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
676 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
677 mp->mnt_iflag |= IMNT_DTYPE;
678 } else {
679 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
680 ump->um_dirblksiz = DIRBLKSIZ;
681 if (ump->um_maxsymlinklen > 0)
682 mp->mnt_iflag |= IMNT_DTYPE;
683 else
684 mp->mnt_iflag &= ~IMNT_DTYPE;
685 }
686 ffs_oldfscompat_read(fs, ump, sblockloc);
687 mutex_enter(&ump->um_lock);
688 ump->um_maxfilesize = fs->fs_maxfilesize;
689 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
690 fs->fs_pendingblocks = 0;
691 fs->fs_pendinginodes = 0;
692 }
693 mutex_exit(&ump->um_lock);
694
695 ffs_statvfs(mp, &mp->mnt_stat);
696 /*
697 * Step 3: re-read summary information from disk.
698 */
699 blks = howmany(fs->fs_cssize, fs->fs_fsize);
700 space = fs->fs_csp;
701 for (i = 0; i < blks; i += fs->fs_frag) {
702 size = fs->fs_bsize;
703 if (i + fs->fs_frag > blks)
704 size = (blks - i) * fs->fs_fsize;
705 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
706 NOCRED, 0, &bp);
707 if (error) {
708 brelse(bp, 0);
709 return (error);
710 }
711 #ifdef FFS_EI
712 if (UFS_FSNEEDSWAP(fs))
713 ffs_csum_swap((struct csum *)bp->b_data,
714 (struct csum *)space, size);
715 else
716 #endif
717 memcpy(space, bp->b_data, (size_t)size);
718 space = (char *)space + size;
719 brelse(bp, 0);
720 }
721 if ((fs->fs_flags & FS_DOSOFTDEP))
722 softdep_mount(devvp, mp, fs, cred);
723 if (fs->fs_snapinum[0] != 0)
724 ffs_snapshot_mount(mp);
725 /*
726 * We no longer know anything about clusters per cylinder group.
727 */
728 if (fs->fs_contigsumsize > 0) {
729 lp = fs->fs_maxcluster;
730 for (i = 0; i < fs->fs_ncg; i++)
731 *lp++ = fs->fs_contigsumsize;
732 }
733
734 /* Allocate a marker vnode. */
735 if ((mvp = vnalloc(mp)) == NULL)
736 return ENOMEM;
737 /*
738 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
739 * and vclean() can be called indirectly
740 */
741 mutex_enter(&mntvnode_lock);
742 loop:
743 for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
744 vmark(mvp, vp);
745 if (vp->v_mount != mp || vismarker(vp))
746 continue;
747 /*
748 * Step 4: invalidate all inactive vnodes.
749 */
750 if (vrecycle(vp, &mntvnode_lock, l)) {
751 mutex_enter(&mntvnode_lock);
752 (void)vunmark(mvp);
753 goto loop;
754 }
755 /*
756 * Step 5: invalidate all cached file data.
757 */
758 mutex_enter(&vp->v_interlock);
759 mutex_exit(&mntvnode_lock);
760 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
761 (void)vunmark(mvp);
762 goto loop;
763 }
764 if (vinvalbuf(vp, 0, cred, l, 0, 0))
765 panic("ffs_reload: dirty2");
766 /*
767 * Step 6: re-read inode data for all active vnodes.
768 */
769 ip = VTOI(vp);
770 error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
771 (int)fs->fs_bsize, NOCRED, 0, &bp);
772 if (error) {
773 brelse(bp, 0);
774 vput(vp);
775 (void)vunmark(mvp);
776 break;
777 }
778 ffs_load_inode(bp, ip, fs, ip->i_number);
779 ip->i_ffs_effnlink = ip->i_nlink;
780 brelse(bp, 0);
781 vput(vp);
782 mutex_enter(&mntvnode_lock);
783 }
784 mutex_exit(&mntvnode_lock);
785 vnfree(mvp);
786 return (error);
787 }
788
789 /*
790 * Possible superblock locations ordered from most to least likely.
791 */
792 static const int sblock_try[] = SBLOCKSEARCH;
793
794 /*
795 * Common code for mount and mountroot
796 */
797 int
798 ffs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
799 {
800 struct ufsmount *ump;
801 struct buf *bp;
802 struct fs *fs;
803 dev_t dev;
804 struct partinfo dpart;
805 void *space;
806 daddr_t sblockloc, fsblockloc;
807 int blks, fstype;
808 int error, i, size, ronly, bset = 0;
809 #ifdef FFS_EI
810 int needswap = 0; /* keep gcc happy */
811 #endif
812 int32_t *lp;
813 kauth_cred_t cred;
814 	u_int32_t sbsize = 8192;	/* keep gcc happy */
815
816 dev = devvp->v_rdev;
817 cred = l ? l->l_cred : NOCRED;
818
819 /* Flush out any old buffers remaining from a previous use. */
820 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
821 error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
822 VOP_UNLOCK(devvp, 0);
823 if (error)
824 return (error);
825
826 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
827 if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) != 0)
828 size = DEV_BSIZE;
829 else
830 size = dpart.disklab->d_secsize;
831
832 bp = NULL;
833 ump = NULL;
834 fs = NULL;
835 sblockloc = 0;
836 fstype = 0;
837
838 error = fstrans_mount(mp);
839 if (error)
840 return error;
841
842 /*
843 * Try reading the superblock in each of its possible locations.
844 */
845 for (i = 0; ; i++) {
846 if (bp != NULL) {
847 brelse(bp, BC_NOCACHE);
848 bp = NULL;
849 }
850 if (sblock_try[i] == -1) {
851 error = EINVAL;
852 fs = NULL;
853 goto out;
854 }
855 error = bread(devvp, sblock_try[i] / size, SBLOCKSIZE, cred,
856 0, &bp);
857 if (error) {
858 fs = NULL;
859 goto out;
860 }
861 fs = (struct fs*)bp->b_data;
862 fsblockloc = sblockloc = sblock_try[i];
863 if (fs->fs_magic == FS_UFS1_MAGIC) {
864 sbsize = fs->fs_sbsize;
865 fstype = UFS1;
866 #ifdef FFS_EI
867 needswap = 0;
868 } else if (fs->fs_magic == bswap32(FS_UFS1_MAGIC)) {
869 sbsize = bswap32(fs->fs_sbsize);
870 fstype = UFS1;
871 needswap = 1;
872 #endif
873 } else if (fs->fs_magic == FS_UFS2_MAGIC) {
874 sbsize = fs->fs_sbsize;
875 fstype = UFS2;
876 #ifdef FFS_EI
877 needswap = 0;
878 } else if (fs->fs_magic == bswap32(FS_UFS2_MAGIC)) {
879 sbsize = bswap32(fs->fs_sbsize);
880 fstype = UFS2;
881 needswap = 1;
882 #endif
883 } else
884 continue;
885
886
887 /* fs->fs_sblockloc isn't defined for old filesystems */
888 if (fstype == UFS1 && !(fs->fs_old_flags & FS_FLAGS_UPDATED)) {
889 if (sblockloc == SBLOCK_UFS2)
890 /*
891 * This is likely to be the first alternate
892 * in a filesystem with 64k blocks.
893 * Don't use it.
894 */
895 continue;
896 fsblockloc = sblockloc;
897 } else {
898 fsblockloc = fs->fs_sblockloc;
899 #ifdef FFS_EI
900 if (needswap)
901 fsblockloc = bswap64(fsblockloc);
902 #endif
903 }
904
905 /* Check we haven't found an alternate superblock */
906 if (fsblockloc != sblockloc)
907 continue;
908
909 /* Validate size of superblock */
910 if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs))
911 continue;
912
913 		/* OK, seems to be a good superblock */
914 break;
915 }
916
917 fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
918 memcpy(fs, bp->b_data, sbsize);
919
920 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
921 memset(ump, 0, sizeof *ump);
922 mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);
923 error = ffs_snapshot_init(ump);
924 if (error)
925 goto out;
926 ump->um_fs = fs;
927 ump->um_ops = &ffs_ufsops;
928
929 #ifdef FFS_EI
930 if (needswap) {
931 ffs_sb_swap((struct fs*)bp->b_data, fs);
932 fs->fs_flags |= FS_SWAPPED;
933 } else
934 #endif
935 fs->fs_flags &= ~FS_SWAPPED;
936
937 ffs_oldfscompat_read(fs, ump, sblockloc);
938 ump->um_maxfilesize = fs->fs_maxfilesize;
939
940 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
941 fs->fs_pendingblocks = 0;
942 fs->fs_pendinginodes = 0;
943 }
944
945 ump->um_fstype = fstype;
946 if (fs->fs_sbsize < SBLOCKSIZE)
947 brelse(bp, BC_INVAL);
948 else
949 brelse(bp, 0);
950 bp = NULL;
951
952 /* First check to see if this is tagged as an Apple UFS filesystem
953 * in the disklabel
954 */
955 if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) == 0) &&
956 (dpart.part->p_fstype == FS_APPLEUFS)) {
957 ump->um_flags |= UFS_ISAPPLEUFS;
958 }
959 #ifdef APPLE_UFS
960 else {
961 /* Manually look for an apple ufs label, and if a valid one
962 * is found, then treat it like an Apple UFS filesystem anyway
963 */
964 error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
965 APPLEUFS_LABEL_SIZE, cred, 0, &bp);
966 if (error)
967 goto out;
968 error = ffs_appleufs_validate(fs->fs_fsmnt,
969 (struct appleufslabel *)bp->b_data,NULL);
970 if (error == 0) {
971 ump->um_flags |= UFS_ISAPPLEUFS;
972 }
973 brelse(bp, 0);
974 bp = NULL;
975 }
976 #else
977 if (ump->um_flags & UFS_ISAPPLEUFS) {
978 error = EINVAL;
979 goto out;
980 }
981 #endif
982
983 /*
984 * verify that we can access the last block in the fs
985 * if we're mounting read/write.
986 */
987
988 if (!ronly) {
989 error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize,
990 cred, 0, &bp);
991 if (bp->b_bcount != fs->fs_fsize)
992 error = EINVAL;
993 if (error) {
994 bset = BC_INVAL;
995 goto out;
996 }
997 brelse(bp, BC_INVAL);
998 bp = NULL;
999 }
1000
1001 fs->fs_ronly = ronly;
1002 if (ronly == 0) {
1003 fs->fs_clean <<= 1;
1004 fs->fs_fmod = 1;
1005 }
1006 size = fs->fs_cssize;
1007 blks = howmany(size, fs->fs_fsize);
1008 if (fs->fs_contigsumsize > 0)
1009 size += fs->fs_ncg * sizeof(int32_t);
1010 size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1011 space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
1012 fs->fs_csp = space;
1013 for (i = 0; i < blks; i += fs->fs_frag) {
1014 size = fs->fs_bsize;
1015 if (i + fs->fs_frag > blks)
1016 size = (blks - i) * fs->fs_fsize;
1017 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
1018 cred, 0, &bp);
1019 if (error) {
1020 free(fs->fs_csp, M_UFSMNT);
1021 goto out;
1022 }
1023 #ifdef FFS_EI
1024 if (needswap)
1025 ffs_csum_swap((struct csum *)bp->b_data,
1026 (struct csum *)space, size);
1027 else
1028 #endif
1029 memcpy(space, bp->b_data, (u_int)size);
1030
1031 space = (char *)space + size;
1032 brelse(bp, 0);
1033 bp = NULL;
1034 }
1035 if (fs->fs_contigsumsize > 0) {
1036 fs->fs_maxcluster = lp = space;
1037 for (i = 0; i < fs->fs_ncg; i++)
1038 *lp++ = fs->fs_contigsumsize;
1039 space = lp;
1040 }
1041 size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1042 fs->fs_contigdirs = space;
1043 space = (char *)space + size;
1044 memset(fs->fs_contigdirs, 0, size);
1045 /* Compatibility for old filesystems - XXX */
1046 if (fs->fs_avgfilesize <= 0)
1047 fs->fs_avgfilesize = AVFILESIZ;
1048 if (fs->fs_avgfpdir <= 0)
1049 fs->fs_avgfpdir = AFPDIR;
1050 fs->fs_active = NULL;
1051 mp->mnt_data = ump;
1052 mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
1053 mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_FFS);
1054 mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
1055 mp->mnt_stat.f_namemax = FFS_MAXNAMLEN;
1056 if (UFS_MPISAPPLEUFS(ump)) {
1057 /* NeXT used to keep short symlinks in the inode even
1058 * when using FS_42INODEFMT. In that case fs->fs_maxsymlinklen
1059 * is probably -1, but we still need to be able to identify
1060 * short symlinks.
1061 */
1062 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
1063 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
1064 mp->mnt_iflag |= IMNT_DTYPE;
1065 } else {
1066 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
1067 ump->um_dirblksiz = DIRBLKSIZ;
1068 if (ump->um_maxsymlinklen > 0)
1069 mp->mnt_iflag |= IMNT_DTYPE;
1070 else
1071 mp->mnt_iflag &= ~IMNT_DTYPE;
1072 }
1073 mp->mnt_fs_bshift = fs->fs_bshift;
1074 mp->mnt_dev_bshift = DEV_BSHIFT; /* XXX */
1075 mp->mnt_flag |= MNT_LOCAL;
1076 mp->mnt_iflag |= IMNT_MPSAFE;
1077 #ifdef FFS_EI
1078 if (needswap)
1079 ump->um_flags |= UFS_NEEDSWAP;
1080 #endif
1081 ump->um_mountp = mp;
1082 ump->um_dev = dev;
1083 ump->um_devvp = devvp;
1084 ump->um_nindir = fs->fs_nindir;
1085 ump->um_lognindir = ffs(fs->fs_nindir) - 1;
1086 ump->um_bptrtodb = fs->fs_fsbtodb;
1087 ump->um_seqinc = fs->fs_frag;
1088 for (i = 0; i < MAXQUOTAS; i++)
1089 ump->um_quotas[i] = NULLVP;
1090 devvp->v_specmountpoint = mp;
1091 if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
1092 error = softdep_mount(devvp, mp, fs, cred);
1093 if (error) {
1094 free(fs->fs_csp, M_UFSMNT);
1095 goto out;
1096 }
1097 }
1098 if (ronly == 0 && fs->fs_snapinum[0] != 0)
1099 ffs_snapshot_mount(mp);
1100 #ifdef UFS_EXTATTR
1101 /*
1102 * Initialize file-backed extended attributes on UFS1 file
1103 * systems.
1104 */
1105 if (ump->um_fstype == UFS1) {
1106 ufs_extattr_uepm_init(&ump->um_extattr);
1107 #ifdef UFS_EXTATTR_AUTOSTART
1108 /*
1109 * XXX Just ignore errors. Not clear that we should
1110 * XXX fail the mount in this case.
1111 */
1112 (void) ufs_extattr_autostart(mp, l);
1113 #endif
1114 }
1115 #endif /* UFS_EXTATTR */
1116 return (0);
1117 out:
1118 fstrans_unmount(mp);
1119 if (fs)
1120 free(fs, M_UFSMNT);
1121 devvp->v_specmountpoint = NULL;
1122 if (bp)
1123 brelse(bp, bset);
1124 if (ump) {
1125 if (ump->um_oldfscompat)
1126 free(ump->um_oldfscompat, M_UFSMNT);
1127 mutex_destroy(&ump->um_lock);
1128 free(ump, M_UFSMNT);
1129 mp->mnt_data = NULL;
1130 }
1131 return (error);
1132 }
1133
1134 /*
1135 * Sanity checks for loading old filesystem superblocks.
1136 * See ffs_oldfscompat_write below for unwound actions.
1137 *
1138 * XXX - Parts get retired eventually.
1139 * Unfortunately new bits get added.
1140 */
1141 static void
1142 ffs_oldfscompat_read(struct fs *fs, struct ufsmount *ump, daddr_t sblockloc)
1143 {
1144 off_t maxfilesize;
1145 int32_t *extrasave;
1146
1147 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1148 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1149 return;
1150
1151 if (!ump->um_oldfscompat)
1152 ump->um_oldfscompat = malloc(512 + 3*sizeof(int32_t),
1153 M_UFSMNT, M_WAITOK);
1154
1155 memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
1156 extrasave = ump->um_oldfscompat;
1157 extrasave += 512/sizeof(int32_t);
1158 extrasave[0] = fs->fs_old_npsect;
1159 extrasave[1] = fs->fs_old_interleave;
1160 extrasave[2] = fs->fs_old_trackskew;
1161
1162 /* These fields will be overwritten by their
1163 	 * original values in ffs_oldfscompat_write, so it is harmless
1164 * to modify them here.
1165 */
1166 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
1167 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
1168 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
1169 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
1170
1171 fs->fs_maxbsize = fs->fs_bsize;
1172 fs->fs_time = fs->fs_old_time;
1173 fs->fs_size = fs->fs_old_size;
1174 fs->fs_dsize = fs->fs_old_dsize;
1175 fs->fs_csaddr = fs->fs_old_csaddr;
1176 fs->fs_sblockloc = sblockloc;
1177
1178 fs->fs_flags = fs->fs_old_flags | (fs->fs_flags & FS_INTERNAL);
1179
1180 if (fs->fs_old_postblformat == FS_42POSTBLFMT) {
1181 fs->fs_old_nrpos = 8;
1182 fs->fs_old_npsect = fs->fs_old_nsect;
1183 fs->fs_old_interleave = 1;
1184 fs->fs_old_trackskew = 0;
1185 }
1186
1187 if (fs->fs_old_inodefmt < FS_44INODEFMT) {
1188 fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
1189 fs->fs_qbmask = ~fs->fs_bmask;
1190 fs->fs_qfmask = ~fs->fs_fmask;
1191 }
1192
1193 maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;
1194 if (fs->fs_maxfilesize > maxfilesize)
1195 fs->fs_maxfilesize = maxfilesize;
1196
1197 /* Compatibility for old filesystems */
1198 if (fs->fs_avgfilesize <= 0)
1199 fs->fs_avgfilesize = AVFILESIZ;
1200 if (fs->fs_avgfpdir <= 0)
1201 fs->fs_avgfpdir = AFPDIR;
1202
1203 #if 0
1204 if (bigcgs) {
1205 fs->fs_save_cgsize = fs->fs_cgsize;
1206 fs->fs_cgsize = fs->fs_bsize;
1207 }
1208 #endif
1209 }
1210
1211 /*
1212 * Unwinding superblock updates for old filesystems.
1213 * See ffs_oldfscompat_read above for details.
1214 *
1215 * XXX - Parts get retired eventually.
1216 * Unfortunately new bits get added.
1217 */
1218 static void
1219 ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
1220 {
1221 int32_t *extrasave;
1222
1223 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1224 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1225 return;
1226
1227 fs->fs_old_time = fs->fs_time;
1228 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
1229 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
1230 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
1231 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
1232 fs->fs_old_flags = fs->fs_flags;
1233
1234 #if 0
1235 if (bigcgs) {
1236 fs->fs_cgsize = fs->fs_save_cgsize;
1237 }
1238 #endif
1239
1240 memcpy(&fs->fs_old_postbl_start, ump->um_oldfscompat, 512);
1241 extrasave = ump->um_oldfscompat;
1242 extrasave += 512/sizeof(int32_t);
1243 fs->fs_old_npsect = extrasave[0];
1244 fs->fs_old_interleave = extrasave[1];
1245 fs->fs_old_trackskew = extrasave[2];
1246
1247 }
1248
1249 /*
1250 * unmount system call
1251 */
1252 int
1253 ffs_unmount(struct mount *mp, int mntflags)
1254 {
1255 struct lwp *l = curlwp;
1256 struct ufsmount *ump = VFSTOUFS(mp);
1257 struct fs *fs = ump->um_fs;
1258 int error, flags, penderr;
1259
1260 penderr = 0;
1261 flags = 0;
1262 if (mntflags & MNT_FORCE)
1263 flags |= FORCECLOSE;
1264 #ifdef UFS_EXTATTR
1265 if (ump->um_fstype == UFS1) {
1266 ufs_extattr_stop(mp, l);
1267 ufs_extattr_uepm_destroy(&ump->um_extattr);
1268 }
1269 #endif /* UFS_EXTATTR */
1270 if (mp->mnt_flag & MNT_SOFTDEP) {
1271 if ((error = softdep_flushfiles(mp, flags, l)) != 0)
1272 return (error);
1273 } else {
1274 if ((error = ffs_flushfiles(mp, flags, l)) != 0)
1275 return (error);
1276 }
1277 mutex_enter(&ump->um_lock);
1278 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1279 printf("%s: unmount pending error: blocks %" PRId64
1280 " files %d\n",
1281 fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
1282 fs->fs_pendingblocks = 0;
1283 fs->fs_pendinginodes = 0;
1284 penderr = 1;
1285 }
1286 mutex_exit(&ump->um_lock);
1287 if (fs->fs_ronly == 0 &&
1288 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
1289 fs->fs_clean & FS_WASCLEAN) {
1290 /*
1291 * XXXX don't mark fs clean in the case of softdep
1292 * pending block errors, until they are fixed.
1293 */
1294 if (penderr == 0) {
1295 if (mp->mnt_flag & MNT_SOFTDEP)
1296 fs->fs_flags &= ~FS_DOSOFTDEP;
1297 fs->fs_clean = FS_ISCLEAN;
1298 }
1299 fs->fs_fmod = 0;
1300 (void) ffs_sbupdate(ump, MNT_WAIT);
1301 }
1302 if (ump->um_devvp->v_type != VBAD)
1303 ump->um_devvp->v_specmountpoint = NULL;
1304 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1305 (void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
1306 NOCRED);
1307 vput(ump->um_devvp);
1308 free(fs->fs_csp, M_UFSMNT);
1309 free(fs, M_UFSMNT);
1310 if (ump->um_oldfscompat != NULL)
1311 free(ump->um_oldfscompat, M_UFSMNT);
1312 softdep_unmount(mp);
1313 mutex_destroy(&ump->um_lock);
1314 ffs_snapshot_fini(ump);
1315 free(ump, M_UFSMNT);
1316 mp->mnt_data = NULL;
1317 mp->mnt_flag &= ~MNT_LOCAL;
1318 fstrans_unmount(mp);
1319 return (0);
1320 }
1321
1322 /*
1323 * Flush out all the files in a filesystem.
1324 */
1325 int
1326 ffs_flushfiles(struct mount *mp, int flags, struct lwp *l)
1327 {
1328 extern int doforce;
1329 struct ufsmount *ump;
1330 int error;
1331
1332 if (!doforce)
1333 flags &= ~FORCECLOSE;
1334 ump = VFSTOUFS(mp);
1335 #ifdef QUOTA
1336 if (mp->mnt_flag & MNT_QUOTA) {
1337 int i;
1338 if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
1339 return (error);
1340 for (i = 0; i < MAXQUOTAS; i++) {
1341 if (ump->um_quotas[i] == NULLVP)
1342 continue;
1343 quotaoff(l, mp, i);
1344 }
1345 /*
1346 * Here we fall through to vflush again to ensure
1347 * that we have gotten rid of all the system vnodes.
1348 */
1349 }
1350 #endif
1351 if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
1352 return (error);
1353 ffs_snapshot_unmount(mp);
1354 /*
1355 * Flush all the files.
1356 */
1357 error = vflush(mp, NULLVP, flags);
1358 if (error)
1359 return (error);
1360 /*
1361 * Flush filesystem metadata.
1362 */
1363 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1364 error = VOP_FSYNC(ump->um_devvp, l->l_cred, FSYNC_WAIT, 0, 0);
1365 VOP_UNLOCK(ump->um_devvp, 0);
1366 return (error);
1367 }
1368
1369 /*
1370 * Get file system statistics.
1371 */
1372 int
1373 ffs_statvfs(struct mount *mp, struct statvfs *sbp)
1374 {
1375 struct ufsmount *ump;
1376 struct fs *fs;
1377
1378 ump = VFSTOUFS(mp);
1379 fs = ump->um_fs;
1380 mutex_enter(&ump->um_lock);
1381 sbp->f_bsize = fs->fs_bsize;
1382 sbp->f_frsize = fs->fs_fsize;
1383 sbp->f_iosize = fs->fs_bsize;
1384 sbp->f_blocks = fs->fs_dsize;
1385 sbp->f_bfree = blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
1386 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
1387 sbp->f_bresvd = ((u_int64_t) fs->fs_dsize * (u_int64_t)
1388 fs->fs_minfree) / (u_int64_t) 100;
1389 if (sbp->f_bfree > sbp->f_bresvd)
1390 sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
1391 else
1392 sbp->f_bavail = 0;
1393 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
1394 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1395 sbp->f_favail = sbp->f_ffree;
1396 sbp->f_fresvd = 0;
1397 mutex_exit(&ump->um_lock);
1398 copy_statvfs_info(sbp, mp);
1399
1400 return (0);
1401 }
1402
1403 /*
1404 * Go through the disk queues to initiate sandbagged IO;
1405 * go through the inodes to write those that have been modified;
1406 * initiate the writing of the super block if it has been modified.
1407 *
1408 * Note: we are always called with the filesystem marked `MPBUSY'.
1409 */
1410 int
1411 ffs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
1412 {
1413 struct lwp *l = curlwp;
1414 struct vnode *vp, *mvp;
1415 struct inode *ip;
1416 struct ufsmount *ump = VFSTOUFS(mp);
1417 struct fs *fs;
1418 int error, count, allerror = 0;
1419
1420 fs = ump->um_fs;
1421 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
1422 printf("fs = %s\n", fs->fs_fsmnt);
1423 panic("update: rofs mod");
1424 }
1425
1426 /* Allocate a marker vnode. */
1427 if ((mvp = vnalloc(mp)) == NULL)
1428 return (ENOMEM);
1429
1430 fstrans_start(mp, FSTRANS_SHARED);
1431 /*
1432 * Write back each (modified) inode.
1433 */
1434 mutex_enter(&mntvnode_lock);
1435 loop:
1436 /*
1437 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
1438 * and vclean() can be called indirectly
1439 */
1440 for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
1441 vmark(mvp, vp);
1442 /*
1443 * If the vnode that we are about to sync is no longer
1444 * associated with this mount point, start over.
1445 */
1446 if (vp->v_mount != mp || vismarker(vp))
1447 continue;
1448 mutex_enter(&vp->v_interlock);
1449 ip = VTOI(vp);
1450 if (ip == NULL || (vp->v_iflag & (VI_XLOCK|VI_CLEAN)) != 0 ||
1451 vp->v_type == VNON || ((ip->i_flag &
1452 (IN_CHANGE | IN_UPDATE | IN_MODIFIED)) == 0 &&
1453 LIST_EMPTY(&vp->v_dirtyblkhd) &&
1454 UVM_OBJ_IS_CLEAN(&vp->v_uobj)))
1455 {
1456 mutex_exit(&vp->v_interlock);
1457 continue;
1458 }
1459 if (vp->v_type == VBLK &&
1460 fstrans_getstate(mp) == FSTRANS_SUSPENDING) {
1461 mutex_exit(&vp->v_interlock);
1462 continue;
1463 }
1464 mutex_exit(&mntvnode_lock);
1465 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
1466 if (error) {
1467 mutex_enter(&mntvnode_lock);
1468 if (error == ENOENT) {
1469 (void)vunmark(mvp);
1470 goto loop;
1471 }
1472 continue;
1473 }
1474 if (vp->v_type == VREG && waitfor == MNT_LAZY)
1475 error = ffs_update(vp, NULL, NULL, 0);
1476 else
1477 error = VOP_FSYNC(vp, cred,
1478 waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0);
1479 if (error)
1480 allerror = error;
1481 vput(vp);
1482 mutex_enter(&mntvnode_lock);
1483 }
1484 mutex_exit(&mntvnode_lock);
1485 /*
1486 * Force stale file system control information to be flushed.
1487 */
1488 if (waitfor == MNT_WAIT && (ump->um_mountp->mnt_flag & MNT_SOFTDEP)) {
1489 if ((error = softdep_flushworklist(ump->um_mountp, &count, l)))
1490 allerror = error;
1491 /* Flushed work items may create new vnodes to clean */
1492 if (allerror == 0 && count) {
1493 mutex_enter(&mntvnode_lock);
1494 goto loop;
1495 }
1496 }
1497 if (waitfor != MNT_LAZY && (ump->um_devvp->v_numoutput > 0 ||
1498 !LIST_EMPTY(&ump->um_devvp->v_dirtyblkhd))) {
1499 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1500 if ((error = VOP_FSYNC(ump->um_devvp, cred,
1501 waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0)) != 0)
1502 allerror = error;
1503 VOP_UNLOCK(ump->um_devvp, 0);
1504 if (allerror == 0 && waitfor == MNT_WAIT) {
1505 mutex_enter(&mntvnode_lock);
1506 goto loop;
1507 }
1508 }
1509 #ifdef QUOTA
1510 qsync(mp);
1511 #endif
1512 /*
1513 * Write back modified superblock.
1514 */
1515 if (fs->fs_fmod != 0) {
1516 fs->fs_fmod = 0;
1517 fs->fs_time = time_second;
1518 if ((error = ffs_cgupdate(ump, waitfor)))
1519 allerror = error;
1520 }
1521 fstrans_done(mp);
1522 vnfree(mvp);
1523 return (allerror);
1524 }
1525
1526 /*
1527 * Look up a FFS dinode number to find its incore vnode, otherwise read it
1528 * in from disk. If it is in core, wait for the lock bit to clear, then
1529 * return the inode locked. Detection and handling of mount points must be
1530 * done by the calling routine.
1531 */
1532 int
1533 ffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
1534 {
1535 struct fs *fs;
1536 struct inode *ip;
1537 struct ufsmount *ump;
1538 struct buf *bp;
1539 struct vnode *vp;
1540 dev_t dev;
1541 int error;
1542
1543 ump = VFSTOUFS(mp);
1544 dev = ump->um_dev;
1545
1546 retry:
1547 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
1548 return (0);
1549
1550 /* Allocate a new vnode/inode. */
1551 if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
1552 *vpp = NULL;
1553 return (error);
1554 }
1555 ip = pool_cache_get(ffs_inode_cache, PR_WAITOK);
1556
1557 /*
1558 * If someone beat us to it, put back the freshly allocated
1559 * vnode/inode pair and retry.
1560 */
1561 mutex_enter(&ufs_hashlock);
1562 if (ufs_ihashget(dev, ino, 0) != NULL) {
1563 mutex_exit(&ufs_hashlock);
1564 ungetnewvnode(vp);
1565 pool_cache_put(ffs_inode_cache, ip);
1566 goto retry;
1567 }
1568
1569 vp->v_vflag |= VV_LOCKSWORK;
1570 if ((mp->mnt_flag & MNT_SOFTDEP) != 0)
1571 vp->v_uflag |= VU_SOFTDEP;
1572
1573 /*
1574 * XXX MFS ends up here, too, to allocate an inode. Should we
1575 * XXX create another pool for MFS inodes?
1576 */
1577
1578 memset(ip, 0, sizeof(struct inode));
1579 vp->v_data = ip;
1580 ip->i_vnode = vp;
1581 ip->i_ump = ump;
1582 ip->i_fs = fs = ump->um_fs;
1583 ip->i_dev = dev;
1584 ip->i_number = ino;
1585 LIST_INIT(&ip->i_pcbufhd);
1586 #ifdef QUOTA
1587 ufsquota_init(ip);
1588 #endif
1589
1590 /*
1591 * Initialize genfs node, we might proceed to destroy it in
1592 * error branches.
1593 */
1594 genfs_node_init(vp, &ffs_genfsops);
1595
1596 /*
1597 * Put it onto its hash chain and lock it so that other requests for
1598 * this inode will block if they arrive while we are sleeping waiting
1599 * for old data structures to be purged or for the contents of the
1600 * disk portion of this inode to be read.
1601 */
1602
1603 ufs_ihashins(ip);
1604 mutex_exit(&ufs_hashlock);
1605
1606 /* Read in the disk contents for the inode, copy into the inode. */
1607 error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1608 (int)fs->fs_bsize, NOCRED, 0, &bp);
1609 if (error) {
1610
1611 /*
1612 * The inode does not contain anything useful, so it would
1613 * be misleading to leave it on its hash chain. With mode
1614 * still zero, it will be unlinked and returned to the free
1615 * list by vput().
1616 */
1617
1618 vput(vp);
1619 brelse(bp, 0);
1620 *vpp = NULL;
1621 return (error);
1622 }
1623 if (ip->i_ump->um_fstype == UFS1)
1624 ip->i_din.ffs1_din = pool_cache_get(ffs_dinode1_cache,
1625 PR_WAITOK);
1626 else
1627 ip->i_din.ffs2_din = pool_cache_get(ffs_dinode2_cache,
1628 PR_WAITOK);
1629 ffs_load_inode(bp, ip, fs, ino);
1630 if (DOINGSOFTDEP(vp))
1631 softdep_load_inodeblock(ip);
1632 else
1633 ip->i_ffs_effnlink = ip->i_nlink;
1634 brelse(bp, 0);
1635
1636 /*
1637 * Initialize the vnode from the inode, check for aliases.
1638 * Note that the underlying vnode may have changed.
1639 */
1640
1641 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
1642
1643 /*
1644 * Finish inode initialization now that aliasing has been resolved.
1645 */
1646
1647 ip->i_devvp = ump->um_devvp;
1648 VREF(ip->i_devvp);
1649
1650 /*
1651 * Ensure that uid and gid are correct. This is a temporary
1652 * fix until fsck has been changed to do the update.
1653 */
1654
1655 if (fs->fs_old_inodefmt < FS_44INODEFMT) { /* XXX */
1656 ip->i_uid = ip->i_ffs1_ouid; /* XXX */
1657 ip->i_gid = ip->i_ffs1_ogid; /* XXX */
1658 } /* XXX */
1659 uvm_vnp_setsize(vp, ip->i_size);
1660 *vpp = vp;
1661 return (0);
1662 }
1663
1664 /*
1665 * File handle to vnode
1666 *
1667 * Have to be really careful about stale file handles:
1668 * - check that the inode number is valid
1669 * - call ffs_vget() to get the locked inode
1670 * - check for an unallocated inode (i_mode == 0)
1671 * - check that the given client host has export rights and return
1672  * those rights via exflagsp and credanonp
1673 */
1674 int
1675 ffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
1676 {
1677 struct ufid ufh;
1678 struct fs *fs;
1679
1680 if (fhp->fid_len != sizeof(struct ufid))
1681 return EINVAL;
1682
1683 memcpy(&ufh, fhp, sizeof(ufh));
1684 fs = VFSTOUFS(mp)->um_fs;
1685 if (ufh.ufid_ino < ROOTINO ||
1686 ufh.ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1687 return (ESTALE);
1688 return (ufs_fhtovp(mp, &ufh, vpp));
1689 }
1690
1691 /*
1692 * Vnode pointer to File handle
1693 */
1694 /* ARGSUSED */
1695 int
1696 ffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
1697 {
1698 struct inode *ip;
1699 struct ufid ufh;
1700
1701 if (*fh_size < sizeof(struct ufid)) {
1702 *fh_size = sizeof(struct ufid);
1703 return E2BIG;
1704 }
1705 ip = VTOI(vp);
1706 *fh_size = sizeof(struct ufid);
1707 memset(&ufh, 0, sizeof(ufh));
1708 ufh.ufid_len = sizeof(struct ufid);
1709 ufh.ufid_ino = ip->i_number;
1710 ufh.ufid_gen = ip->i_gen;
1711 memcpy(fhp, &ufh, sizeof(ufh));
1712 return (0);
1713 }
1714
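/*
 * Filesystem initialization: create the inode and dinode pool caches and
 * initialize the soft dependency and generic UFS code.  Only the first
 * call does the work; later calls just bump ffs_initcount.
 */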
1715 void
1716 ffs_init(void)
1717 {
1718 if (ffs_initcount++ > 0)
1719 return;
1720
1721 ffs_inode_cache = pool_cache_init(sizeof(struct inode), 0, 0, 0,
1722 "ffsino", NULL, IPL_NONE, NULL, NULL, NULL);
1723 ffs_dinode1_cache = pool_cache_init(sizeof(struct ufs1_dinode), 0, 0, 0,
1724 "ffsdino1", NULL, IPL_NONE, NULL, NULL, NULL);
1725 ffs_dinode2_cache = pool_cache_init(sizeof(struct ufs2_dinode), 0, 0, 0,
1726 "ffsdino2", NULL, IPL_NONE, NULL, NULL, NULL);
1727 softdep_initialize();
1728 ufs_init();
1729 }
1730
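/*
 * Reinitialize the soft dependency code and the generic UFS data
 * structures.
 */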
1731 void
1732 ffs_reinit(void)
1733 {
1734 softdep_reinitialize();
1735 ufs_reinit();
1736 }
1737
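/*
 * Undo ffs_init(): once the last reference is dropped, let the generic
 * UFS code clean up and destroy the pool caches.
 */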
1738 void
1739 ffs_done(void)
1740 {
1741 if (--ffs_initcount > 0)
1742 return;
1743
1744 /* XXX softdep cleanup ? */
1745 ufs_done();
1746 pool_cache_destroy(ffs_dinode2_cache);
1747 pool_cache_destroy(ffs_dinode1_cache);
1748 pool_cache_destroy(ffs_inode_cache);
1749 }
1750
1751 /*
1752 * Write a superblock and associated information back to disk.
1753 */
1754 int
1755 ffs_sbupdate(struct ufsmount *mp, int waitfor)
1756 {
1757 struct fs *fs = mp->um_fs;
1758 struct buf *bp;
1759 int error = 0;
1760 u_int32_t saveflag;
1761
1762 error = ffs_getblk(mp->um_devvp,
1763 fs->fs_sblockloc >> (fs->fs_fshift - fs->fs_fsbtodb), FFS_NOBLK,
1764 fs->fs_sbsize, false, &bp);
1765 if (error)
1766 return error;
1767 saveflag = fs->fs_flags & FS_INTERNAL;
1768 fs->fs_flags &= ~FS_INTERNAL;
1769
1770 memcpy(bp->b_data, fs, fs->fs_sbsize);
1771
1772 ffs_oldfscompat_write((struct fs *)bp->b_data, mp);
1773 #ifdef FFS_EI
1774 if (mp->um_flags & UFS_NEEDSWAP)
1775 ffs_sb_swap((struct fs *)bp->b_data, (struct fs *)bp->b_data);
1776 #endif
1777 fs->fs_flags |= saveflag;
1778
1779 if (waitfor == MNT_WAIT)
1780 error = bwrite(bp);
1781 else
1782 bawrite(bp);
1783 return (error);
1784 }
1785
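/*
 * Write back the superblock and the in-core cylinder group summary
 * information (fs_csp) to disk.
 */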
1786 int
1787 ffs_cgupdate(struct ufsmount *mp, int waitfor)
1788 {
1789 struct fs *fs = mp->um_fs;
1790 struct buf *bp;
1791 int blks;
1792 void *space;
1793 int i, size, error = 0, allerror = 0;
1794
1795 allerror = ffs_sbupdate(mp, waitfor);
1796 blks = howmany(fs->fs_cssize, fs->fs_fsize);
1797 space = fs->fs_csp;
1798 for (i = 0; i < blks; i += fs->fs_frag) {
1799 size = fs->fs_bsize;
1800 if (i + fs->fs_frag > blks)
1801 size = (blks - i) * fs->fs_fsize;
1802 error = ffs_getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1803 FFS_NOBLK, size, false, &bp);
1804 if (error)
1805 break;
1806 #ifdef FFS_EI
1807 if (mp->um_flags & UFS_NEEDSWAP)
1808 ffs_csum_swap((struct csum*)space,
1809 (struct csum*)bp->b_data, size);
1810 else
1811 #endif
1812 memcpy(bp->b_data, space, (u_int)size);
1813 space = (char *)space + size;
1814 if (waitfor == MNT_WAIT)
1815 error = bwrite(bp);
1816 else
1817 bawrite(bp);
1818 }
1819 if (!allerror && error)
1820 allerror = error;
1821 return (allerror);
1822 }
1823
1824 int
1825 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
1826 int attrnamespace, const char *attrname)
1827 {
1828 #ifdef UFS_EXTATTR
1829 /*
1830 * File-backed extended attributes are only supported on UFS1.
1831 * UFS2 has native extended attributes.
1832 */
1833 if (VFSTOUFS(mp)->um_fstype == UFS1)
1834 return (ufs_extattrctl(mp, cmd, vp, attrnamespace, attrname));
1835 #endif
1836 return (vfs_stdextattrctl(mp, cmd, vp, attrnamespace, attrname));
1837 }
1838
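/*
 * Suspend or resume file system operations: on SUSPEND_SUSPEND move the
 * file system to FSTRANS_SUSPENDING, sync it out and mark it
 * FSTRANS_SUSPENDED; on SUSPEND_RESUME return it to FSTRANS_NORMAL.
 */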
1839 int
1840 ffs_suspendctl(struct mount *mp, int cmd)
1841 {
1842 int error;
1843 struct lwp *l = curlwp;
1844
1845 switch (cmd) {
1846 case SUSPEND_SUSPEND:
1847 if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
1848 return error;
1849 error = ffs_sync(mp, MNT_WAIT, l->l_proc->p_cred);
1850 if (error == 0)
1851 error = fstrans_setstate(mp, FSTRANS_SUSPENDED);
1852 if (error != 0) {
1853 (void) fstrans_setstate(mp, FSTRANS_NORMAL);
1854 return error;
1855 }
1856 return 0;
1857
1858 case SUSPEND_RESUME:
1859 return fstrans_setstate(mp, FSTRANS_NORMAL);
1860
1861 default:
1862 return EINVAL;
1863 }
1864 }
1865