ffs_vfsops.c revision 1.367

1 /* $NetBSD: ffs_vfsops.c,v 1.367 2020/04/04 20:49:31 ad Exp $ */
2
3 /*-
4 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Wasabi Systems, Inc, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1989, 1991, 1993, 1994
34 * The Regents of the University of California. All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
61 */
62
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.367 2020/04/04 20:49:31 ad Exp $");
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_ffs.h"
68 #include "opt_quota.h"
69 #include "opt_wapbl.h"
70 #endif
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/namei.h>
75 #include <sys/proc.h>
76 #include <sys/kernel.h>
77 #include <sys/vnode.h>
78 #include <sys/socket.h>
79 #include <sys/mount.h>
80 #include <sys/buf.h>
81 #include <sys/device.h>
82 #include <sys/disk.h>
83 #include <sys/file.h>
84 #include <sys/disklabel.h>
85 #include <sys/ioctl.h>
86 #include <sys/errno.h>
87 #include <sys/kmem.h>
88 #include <sys/pool.h>
89 #include <sys/lock.h>
90 #include <sys/sysctl.h>
91 #include <sys/conf.h>
92 #include <sys/kauth.h>
93 #include <sys/wapbl.h>
94 #include <sys/module.h>
95
96 #include <miscfs/genfs/genfs.h>
97 #include <miscfs/specfs/specdev.h>
98
99 #include <ufs/ufs/quota.h>
100 #include <ufs/ufs/ufsmount.h>
101 #include <ufs/ufs/inode.h>
102 #include <ufs/ufs/dir.h>
103 #include <ufs/ufs/ufs_extern.h>
104 #include <ufs/ufs/ufs_bswap.h>
105 #include <ufs/ufs/ufs_wapbl.h>
106
107 #include <ufs/ffs/fs.h>
108 #include <ufs/ffs/ffs_extern.h>
109
110 #ifdef WAPBL
111 MODULE(MODULE_CLASS_VFS, ffs, "ufs,wapbl");
112 #else
113 MODULE(MODULE_CLASS_VFS, ffs, "ufs");
114 #endif
115
116 static int ffs_vfs_fsync(vnode_t *, int);
117 static int ffs_superblock_validate(struct fs *);
118 static int ffs_is_appleufs(struct vnode *, struct fs *);
119
120 static int ffs_init_vnode(struct ufsmount *, struct vnode *, ino_t);
121 static void ffs_deinit_vnode(struct ufsmount *, struct vnode *);
122
123 static kauth_listener_t ffs_snapshot_listener;
124
125 /* how many times ffs_init() was called */
126 int ffs_initcount = 0;
127
128 #ifdef DEBUG_FFS_MOUNT
129 #define DPRINTF(_fmt, args...) printf("%s: " _fmt "\n", __func__, ##args)
130 #else
131 #define DPRINTF(_fmt, args...) do {} while (/*CONSTCOND*/0)
132 #endif
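/* Note: DPRINTF() above prefixes the calling function's name and appends a newline itself. */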
133
134 extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
135 extern const struct vnodeopv_desc ffs_specop_opv_desc;
136 extern const struct vnodeopv_desc ffs_fifoop_opv_desc;
137
138 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
139 &ffs_vnodeop_opv_desc,
140 &ffs_specop_opv_desc,
141 &ffs_fifoop_opv_desc,
142 NULL,
143 };
144
145 struct vfsops ffs_vfsops = {
146 .vfs_name = MOUNT_FFS,
147 .vfs_min_mount_data = sizeof (struct ufs_args),
148 .vfs_mount = ffs_mount,
149 .vfs_start = ufs_start,
150 .vfs_unmount = ffs_unmount,
151 .vfs_root = ufs_root,
152 .vfs_quotactl = ufs_quotactl,
153 .vfs_statvfs = ffs_statvfs,
154 .vfs_sync = ffs_sync,
155 .vfs_vget = ufs_vget,
156 .vfs_loadvnode = ffs_loadvnode,
157 .vfs_newvnode = ffs_newvnode,
158 .vfs_fhtovp = ffs_fhtovp,
159 .vfs_vptofh = ffs_vptofh,
160 .vfs_init = ffs_init,
161 .vfs_reinit = ffs_reinit,
162 .vfs_done = ffs_done,
163 .vfs_mountroot = ffs_mountroot,
164 .vfs_snapshot = ffs_snapshot,
165 .vfs_extattrctl = ffs_extattrctl,
166 .vfs_suspendctl = genfs_suspendctl,
167 .vfs_renamelock_enter = genfs_renamelock_enter,
168 .vfs_renamelock_exit = genfs_renamelock_exit,
169 .vfs_fsync = ffs_vfs_fsync,
170 .vfs_opv_descs = ffs_vnodeopv_descs
171 };
172
173 static const struct genfs_ops ffs_genfsops = {
174 .gop_size = ffs_gop_size,
175 .gop_alloc = ufs_gop_alloc,
176 .gop_write = genfs_gop_write,
177 .gop_markupdate = ufs_gop_markupdate,
178 .gop_putrange = genfs_gop_putrange,
179 };
180
181 static const struct ufs_ops ffs_ufsops = {
182 .uo_itimes = ffs_itimes,
183 .uo_update = ffs_update,
184 .uo_truncate = ffs_truncate,
185 .uo_balloc = ffs_balloc,
186 .uo_snapgone = ffs_snapgone,
187 .uo_bufrd = ffs_bufrd,
188 .uo_bufwr = ffs_bufwr,
189 };
190
191 static int
192 ffs_checkrange(struct mount *mp, uint32_t ino)
193 {
194 struct fs *fs = VFSTOUFS(mp)->um_fs;
195
196 if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg) {
197 DPRINTF("out of range %u\n", ino);
198 return ESTALE;
199 }
200
201 /*
202 * Need to check if inode is initialized because ffsv2 does
203 * lazy initialization and we can get here from nfs_fhtovp
204 */
205 if (fs->fs_magic != FS_UFS2_MAGIC)
206 return 0;
207
208 struct buf *bp;
209 int cg = ino_to_cg(fs, ino);
210 struct ufsmount *ump = VFSTOUFS(mp);
211
212 int error = bread(ump->um_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
213 (int)fs->fs_cgsize, B_MODIFY, &bp);
214 if (error) {
215 DPRINTF("error %d reading cg %d ino %u\n", error, cg, ino);
216 return error;
217 }
218
219 const int needswap = UFS_FSNEEDSWAP(fs);
220
221 struct cg *cgp = (struct cg *)bp->b_data;
222 if (!cg_chkmagic(cgp, needswap)) {
223 brelse(bp, 0);
224 DPRINTF("bad cylinder group magic cg %d ino %u\n", cg, ino);
225 return ESTALE;
226 }
227
228 int32_t initediblk = ufs_rw32(cgp->cg_initediblk, needswap);
229 brelse(bp, 0);
230
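/*
 * Inodes in cylinder group cg are numbered from cg * fs_ipg; only the
 * first cg_initediblk of them have been initialized on disk, so anything
 * beyond that cannot name a valid inode (and a file handle for it is stale).
 */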
231 if (cg * fs->fs_ipg + initediblk < ino) {
232 DPRINTF("cg=%d fs->fs_ipg=%d initediblk=%d ino=%u\n",
233 cg, fs->fs_ipg, initediblk, ino);
234 return ESTALE;
235 }
236 return 0;
237 }
238
239 static int
240 ffs_snapshot_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
241 void *arg0, void *arg1, void *arg2, void *arg3)
242 {
243 vnode_t *vp = arg2;
244 int result = KAUTH_RESULT_DEFER;
245
246 if (action != KAUTH_SYSTEM_FS_SNAPSHOT)
247 return result;
248
249 if (VTOI(vp)->i_uid == kauth_cred_geteuid(cred))
250 result = KAUTH_RESULT_ALLOW;
251
252 return result;
253 }
254
255 SYSCTL_SETUP(ffs_sysctl_setup, "ffs sysctls")
256 {
257 #ifdef UFS_EXTATTR
258 extern int ufs_extattr_autocreate;
259 #endif
260 extern int ffs_log_changeopt;
261
262 sysctl_createv(clog, 0, NULL, NULL,
263 CTLFLAG_PERMANENT,
264 CTLTYPE_NODE, "ffs",
265 SYSCTL_DESCR("Berkeley Fast File System"),
266 NULL, 0, NULL, 0,
267 CTL_VFS, 1, CTL_EOL);
268 /*
269 * @@@ should we even bother with these first three?
270 */
271 sysctl_createv(clog, 0, NULL, NULL,
272 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
273 CTLTYPE_INT, "doclusterread", NULL,
274 sysctl_notavail, 0, NULL, 0,
275 CTL_VFS, 1, FFS_CLUSTERREAD, CTL_EOL);
276 sysctl_createv(clog, 0, NULL, NULL,
277 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
278 CTLTYPE_INT, "doclusterwrite", NULL,
279 sysctl_notavail, 0, NULL, 0,
280 CTL_VFS, 1, FFS_CLUSTERWRITE, CTL_EOL);
281 sysctl_createv(clog, 0, NULL, NULL,
282 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
283 CTLTYPE_INT, "doreallocblks", NULL,
284 sysctl_notavail, 0, NULL, 0,
285 CTL_VFS, 1, FFS_REALLOCBLKS, CTL_EOL);
286 #if 0
287 sysctl_createv(clog, 0, NULL, NULL,
288 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
289 CTLTYPE_INT, "doasyncfree",
290 SYSCTL_DESCR("Release dirty blocks asynchronously"),
291 NULL, 0, &doasyncfree, 0,
292 CTL_VFS, 1, FFS_ASYNCFREE, CTL_EOL);
293 #endif
294 sysctl_createv(clog, 0, NULL, NULL,
295 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
296 CTLTYPE_INT, "log_changeopt",
297 SYSCTL_DESCR("Log changes in optimization strategy"),
298 NULL, 0, &ffs_log_changeopt, 0,
299 CTL_VFS, 1, FFS_LOG_CHANGEOPT, CTL_EOL);
300 #ifdef UFS_EXTATTR
301 sysctl_createv(clog, 0, NULL, NULL,
302 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
303 CTLTYPE_INT, "extattr_autocreate",
304 SYSCTL_DESCR("Size of attribute for "
305 "backing file autocreation"),
306 NULL, 0, &ufs_extattr_autocreate, 0,
307 CTL_VFS, 1, FFS_EXTATTR_AUTOCREATE, CTL_EOL);
308
309 #endif /* UFS_EXTATTR */
310 }
311
312 static int
313 ffs_modcmd(modcmd_t cmd, void *arg)
314 {
315 int error;
316
317 #if 0
318 extern int doasyncfree;
319 #endif
320
321 switch (cmd) {
322 case MODULE_CMD_INIT:
323 error = vfs_attach(&ffs_vfsops);
324 if (error != 0)
325 break;
326
327 ffs_snapshot_listener = kauth_listen_scope(KAUTH_SCOPE_SYSTEM,
328 ffs_snapshot_cb, NULL);
329 if (ffs_snapshot_listener == NULL)
330 printf("ffs_modcmd: can't listen on system scope.\n");
331
332 break;
333 case MODULE_CMD_FINI:
334 error = vfs_detach(&ffs_vfsops);
335 if (error != 0)
336 break;
337 if (ffs_snapshot_listener != NULL)
338 kauth_unlisten_scope(ffs_snapshot_listener);
339 break;
340 default:
341 error = ENOTTY;
342 break;
343 }
344
345 return (error);
346 }
347
348 pool_cache_t ffs_inode_cache;
349 pool_cache_t ffs_dinode1_cache;
350 pool_cache_t ffs_dinode2_cache;
351
352 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, daddr_t);
353 static void ffs_oldfscompat_write(struct fs *, struct ufsmount *);
354
355 /*
356 * Called by main() when ffs is going to be mounted as root.
357 */
358
359 int
360 ffs_mountroot(void)
361 {
362 struct fs *fs;
363 struct mount *mp;
364 struct lwp *l = curlwp; /* XXX */
365 struct ufsmount *ump;
366 int error;
367
368 if (device_class(root_device) != DV_DISK)
369 return (ENODEV);
370
371 if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
372 vrele(rootvp);
373 return (error);
374 }
375
376 /*
377 * We always need to be able to mount the root file system.
378 */
379 mp->mnt_flag |= MNT_FORCE;
380 if ((error = ffs_mountfs(rootvp, mp, l)) != 0) {
381 vfs_unbusy(mp);
382 vfs_rele(mp);
383 return (error);
384 }
385 mp->mnt_flag &= ~MNT_FORCE;
386 mountlist_append(mp);
387 ump = VFSTOUFS(mp);
388 fs = ump->um_fs;
389 memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
390 (void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
391 (void)ffs_statvfs(mp, &mp->mnt_stat);
392 vfs_unbusy(mp);
393 setrootfstime((time_t)fs->fs_time);
394 return (0);
395 }
396
397 /*
398 * VFS Operations.
399 *
400 * mount system call
401 */
402 int
403 ffs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
404 {
405 struct lwp *l = curlwp;
406 struct vnode *devvp = NULL;
407 struct ufs_args *args = data;
408 struct ufsmount *ump = NULL;
409 struct fs *fs;
410 int error = 0, flags, update;
411 mode_t accessmode;
412
413 if (args == NULL) {
414 DPRINTF("NULL args");
415 return EINVAL;
416 }
417 if (*data_len < sizeof(*args)) {
418 DPRINTF("bad size args %zu != %zu", *data_len, sizeof(*args));
419 return EINVAL;
420 }
421
422 ump = VFSTOUFS(mp);
423 if ((mp->mnt_flag & (MNT_GETARGS|MNT_UPDATE)) && ump == NULL) {
424 DPRINTF("no ump");
425 return EIO;
426 }
427
428 if (mp->mnt_flag & MNT_GETARGS) {
429 args->fspec = NULL;
430 *data_len = sizeof *args;
431 return 0;
432 }
433
434 update = mp->mnt_flag & MNT_UPDATE;
435
436 /* Check arguments */
437 if (args->fspec == NULL) {
438 if (!update) {
439 /* New mounts must have a filename for the device */
440 DPRINTF("no filename for mount");
441 return EINVAL;
442 }
443 } else {
444 /*
445 * Look up the name and verify that it's sane.
446 */
447 error = namei_simple_user(args->fspec,
448 NSM_FOLLOW_NOEMULROOT, &devvp);
449 if (error != 0) {
450 DPRINTF("namei_simple_user returned %d", error);
451 return error;
452 }
453
454 /*
455 * Be sure this is a valid block device
456 */
457 if (devvp->v_type != VBLK) {
458 DPRINTF("non block device %d", devvp->v_type);
459 error = ENOTBLK;
460 goto fail;
461 }
462
463 if (bdevsw_lookup(devvp->v_rdev) == NULL) {
464 DPRINTF("can't find block device 0x%jx",
465 devvp->v_rdev);
466 error = ENXIO;
467 goto fail;
468 }
469
470 if (update) {
471 /*
472 * Be sure we're still naming the same device
473 * used for our initial mount
474 */
475 if (devvp != ump->um_devvp &&
476 devvp->v_rdev != ump->um_devvp->v_rdev) {
477 DPRINTF("wrong device 0x%jx != 0x%jx",
478 (uintmax_t)devvp->v_rdev,
479 (uintmax_t)ump->um_devvp->v_rdev);
480 error = EINVAL;
481 goto fail;
482 }
483 vrele(devvp);
484 devvp = NULL;
485 }
486 }
487
488 if (devvp == NULL) {
489 devvp = ump->um_devvp;
490 vref(devvp);
491 }
492
493 /*
494 * If mount by non-root, then verify that user has necessary
495 * permissions on the device.
496 *
497 * Permission to update a mount is checked higher, so here we presume
498 * updating the mount is okay (for example, as far as securelevel goes)
499 * which leaves us with the normal check.
500 */
501 accessmode = VREAD;
502 if (update ? (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
503 (mp->mnt_flag & MNT_RDONLY) == 0)
504 accessmode |= VWRITE;
505 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
506 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
507 KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp, KAUTH_ARG(accessmode));
508 VOP_UNLOCK(devvp);
509 if (error) {
510 DPRINTF("kauth returned %d", error);
511 goto fail;
512 }
513
514 #ifdef WAPBL
515 /* WAPBL can only be enabled on a r/w mount. */
516 if (((mp->mnt_flag & MNT_RDONLY) && !(mp->mnt_iflag & IMNT_WANTRDWR)) ||
517 (mp->mnt_iflag & IMNT_WANTRDONLY)) {
518 mp->mnt_flag &= ~MNT_LOG;
519 }
520 #else /* !WAPBL */
521 mp->mnt_flag &= ~MNT_LOG;
522 #endif /* !WAPBL */
523
524 if (!update) {
525 int xflags;
526
527 if (mp->mnt_flag & MNT_RDONLY)
528 xflags = FREAD;
529 else
530 xflags = FREAD | FWRITE;
531 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
532 error = VOP_OPEN(devvp, xflags, FSCRED);
533 VOP_UNLOCK(devvp);
534 if (error) {
535 DPRINTF("VOP_OPEN returned %d", error);
536 goto fail;
537 }
538 error = ffs_mountfs(devvp, mp, l);
539 if (error) {
540 DPRINTF("ffs_mountfs returned %d", error);
541 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
542 (void)VOP_CLOSE(devvp, xflags, NOCRED);
543 VOP_UNLOCK(devvp);
544 goto fail;
545 }
546
547 ump = VFSTOUFS(mp);
548 fs = ump->um_fs;
549 } else {
550 /*
551 * Update the mount.
552 */
553
554 /*
555 * The initial mount got a reference on this
556 * device, so drop the one obtained via
557 * namei(), above.
558 */
559 vrele(devvp);
560
561 ump = VFSTOUFS(mp);
562 fs = ump->um_fs;
563 if (fs->fs_ronly == 0 && (mp->mnt_iflag & IMNT_WANTRDONLY)) {
564 /*
565 * Changing from r/w to r/o
566 */
567 flags = WRITECLOSE;
568 if (mp->mnt_flag & MNT_FORCE)
569 flags |= FORCECLOSE;
570 error = ffs_flushfiles(mp, flags, l);
571 if (error)
572 return error;
573
574 error = UFS_WAPBL_BEGIN(mp);
575 if (error) {
576 DPRINTF("wapbl %d", error);
577 return error;
578 }
579
580 if (ffs_cgupdate(ump, MNT_WAIT) == 0 &&
581 fs->fs_clean & FS_WASCLEAN) {
582 if (mp->mnt_flag & MNT_SOFTDEP)
583 fs->fs_flags &= ~FS_DOSOFTDEP;
584 fs->fs_clean = FS_ISCLEAN;
585 (void) ffs_sbupdate(ump, MNT_WAIT);
586 }
587
588 UFS_WAPBL_END(mp);
589 }
590
591 #ifdef WAPBL
592 if ((mp->mnt_flag & MNT_LOG) == 0) {
593 error = ffs_wapbl_stop(mp, mp->mnt_flag & MNT_FORCE);
594 if (error) {
595 DPRINTF("ffs_wapbl_stop returned %d", error);
596 return error;
597 }
598 }
599 #endif /* WAPBL */
600
601 if (fs->fs_ronly == 0 && (mp->mnt_iflag & IMNT_WANTRDONLY)) {
602 /*
603 * Finish change from r/w to r/o
604 */
605 fs->fs_ronly = 1;
606 fs->fs_fmod = 0;
607 }
608
609 if (mp->mnt_flag & MNT_RELOAD) {
610 error = ffs_reload(mp, l->l_cred, l);
611 if (error) {
612 DPRINTF("ffs_reload returned %d", error);
613 return error;
614 }
615 }
616
617 if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
618 /*
619 * Changing from read-only to read/write
620 */
621 #ifndef QUOTA2
622 if (fs->fs_flags & FS_DOQUOTA2) {
623 ump->um_flags |= UFS_QUOTA2;
624 uprintf("%s: options QUOTA2 not enabled%s\n",
625 mp->mnt_stat.f_mntonname,
626 (mp->mnt_flag & MNT_FORCE) ? "" :
627 ", not mounting");
628 DPRINTF("ffs_quota2 %d", EINVAL);
629 return EINVAL;
630 }
631 #endif
632 fs->fs_ronly = 0;
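/* Shifting fs_clean left promotes FS_ISCLEAN to FS_WASCLEAN: the fs was clean and is now going read/write. */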
633 fs->fs_clean <<= 1;
634 fs->fs_fmod = 1;
635 #ifdef WAPBL
636 if (fs->fs_flags & FS_DOWAPBL) {
637 const char *nm = mp->mnt_stat.f_mntonname;
638 if (!mp->mnt_wapbl_replay) {
639 printf("%s: log corrupted;"
640 " replay cancelled\n", nm);
641 return EFTYPE;
642 }
643 printf("%s: replaying log to disk\n", nm);
644 error = wapbl_replay_write(mp->mnt_wapbl_replay,
645 devvp);
646 if (error) {
647 DPRINTF("%s: wapbl_replay_write %d",
648 nm, error);
649 return error;
650 }
651 wapbl_replay_stop(mp->mnt_wapbl_replay);
652 fs->fs_clean = FS_WASCLEAN;
653 }
654 #endif /* WAPBL */
655 if (fs->fs_snapinum[0] != 0)
656 ffs_snapshot_mount(mp);
657 }
658
659 #ifdef WAPBL
660 error = ffs_wapbl_start(mp);
661 if (error) {
662 DPRINTF("ffs_wapbl_start returned %d", error);
663 return error;
664 }
665 #endif /* WAPBL */
666
667 #ifdef QUOTA2
668 if (!fs->fs_ronly) {
669 error = ffs_quota2_mount(mp);
670 if (error) {
671 DPRINTF("ffs_quota2_mount returned %d", error);
672 return error;
673 }
674 }
675 #endif
676
677 if ((mp->mnt_flag & MNT_DISCARD) && !(ump->um_discarddata))
678 ump->um_discarddata = ffs_discard_init(devvp, fs);
679
680 if (args->fspec == NULL)
681 return 0;
682 }
683
684 error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
685 UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
686 if (error == 0)
687 (void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
688 sizeof(fs->fs_fsmnt));
689 else {
690 DPRINTF("set_statvfs_info returned %d", error);
691 }
692 fs->fs_flags &= ~FS_DOSOFTDEP;
693 if (fs->fs_fmod != 0) { /* XXX */
694 int err;
695
696 fs->fs_fmod = 0;
697 if (fs->fs_clean & FS_WASCLEAN)
698 fs->fs_time = time_second;
699 else {
700 printf("%s: file system not clean (fs_clean=%#x); "
701 "please fsck(8)\n", mp->mnt_stat.f_mntfromname,
702 fs->fs_clean);
703 printf("%s: lost blocks %" PRId64 " files %d\n",
704 mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
705 fs->fs_pendinginodes);
706 }
707 err = UFS_WAPBL_BEGIN(mp);
708 if (err == 0) {
709 (void) ffs_cgupdate(ump, MNT_WAIT);
710 UFS_WAPBL_END(mp);
711 }
712 }
713 if ((mp->mnt_flag & MNT_SOFTDEP) != 0) {
714 printf("%s: `-o softdep' is no longer supported, "
715 "consider `-o log'\n", mp->mnt_stat.f_mntfromname);
716 mp->mnt_flag &= ~MNT_SOFTDEP;
717 }
718
719 return (error);
720
721 fail:
722 vrele(devvp);
723 return (error);
724 }
725
726 /*
727 * Reload all incore data for a filesystem (used after running fsck on
728 * the root filesystem and finding things to fix). The filesystem must
729 * be mounted read-only.
730 *
731 * Things to do to update the mount:
732 * 1) invalidate all cached meta-data.
733 * 2) re-read superblock from disk.
734 * 3) re-read summary information from disk.
735 * 4) invalidate all inactive vnodes.
736 * 5) invalidate all cached file data.
737 * 6) re-read inode data for all active vnodes.
738 */
739 int
740 ffs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
741 {
742 struct vnode *vp, *devvp;
743 struct inode *ip;
744 void *space;
745 struct buf *bp;
746 struct fs *fs, *newfs;
747 int i, bsize, blks, error;
748 int32_t *lp, fs_sbsize;
749 struct ufsmount *ump;
750 daddr_t sblockloc;
751 struct vnode_iterator *marker;
752
753 if ((mp->mnt_flag & MNT_RDONLY) == 0)
754 return (EINVAL);
755
756 ump = VFSTOUFS(mp);
757
758 /*
759 * Step 1: invalidate all cached meta-data.
760 */
761 devvp = ump->um_devvp;
762 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
763 error = vinvalbuf(devvp, 0, cred, l, 0, 0);
764 VOP_UNLOCK(devvp);
765 if (error)
766 panic("%s: dirty1", __func__);
767
768 /*
769 * Step 2: re-read superblock from disk. XXX: We don't handle
770 * possibility that superblock moved. Which implies that we don't
771 * want its size to change either.
772 */
773 fs = ump->um_fs;
774 fs_sbsize = fs->fs_sbsize;
775 error = bread(devvp, fs->fs_sblockloc / DEV_BSIZE, fs_sbsize,
776 0, &bp);
777 if (error)
778 return (error);
779 newfs = kmem_alloc(fs_sbsize, KM_SLEEP);
780 memcpy(newfs, bp->b_data, fs_sbsize);
781
782 #ifdef FFS_EI
783 if (ump->um_flags & UFS_NEEDSWAP) {
784 ffs_sb_swap((struct fs *)bp->b_data, newfs);
785 newfs->fs_flags |= FS_SWAPPED;
786 } else
787 #endif
788 newfs->fs_flags &= ~FS_SWAPPED;
789
790 brelse(bp, 0);
791
792 if ((newfs->fs_magic != FS_UFS1_MAGIC) &&
793 (newfs->fs_magic != FS_UFS2_MAGIC)) {
794 kmem_free(newfs, fs_sbsize);
795 return (EIO); /* XXX needs translation */
796 }
797 if (!ffs_superblock_validate(newfs)) {
798 kmem_free(newfs, fs_sbsize);
799 return (EINVAL);
800 }
801
802 /*
803 * The current implementation doesn't handle the possibility that
804 * these values may have changed.
805 */
806 if ((newfs->fs_sbsize != fs_sbsize) ||
807 (newfs->fs_cssize != fs->fs_cssize) ||
808 (newfs->fs_contigsumsize != fs->fs_contigsumsize) ||
809 (newfs->fs_ncg != fs->fs_ncg)) {
810 kmem_free(newfs, fs_sbsize);
811 return (EINVAL);
812 }
813
814 /* Store off old fs_sblockloc for ffs_oldfscompat_read. */
815 sblockloc = fs->fs_sblockloc;
816 /*
817 * Copy pointer fields back into superblock before copying in XXX
818 * new superblock. These should really be in the ufsmount. XXX
819 * Note that important parameters (eg fs_ncg) are unchanged.
820 */
821 newfs->fs_csp = fs->fs_csp;
822 newfs->fs_maxcluster = fs->fs_maxcluster;
823 newfs->fs_contigdirs = fs->fs_contigdirs;
824 newfs->fs_ronly = fs->fs_ronly;
825 newfs->fs_active = fs->fs_active;
826 memcpy(fs, newfs, (u_int)fs_sbsize);
827 kmem_free(newfs, fs_sbsize);
828
829 /*
830 * Recheck for Apple UFS filesystem.
831 */
832 ump->um_flags &= ~UFS_ISAPPLEUFS;
833 if (ffs_is_appleufs(devvp, fs)) {
834 #ifdef APPLE_UFS
835 ump->um_flags |= UFS_ISAPPLEUFS;
836 #else
837 DPRINTF("AppleUFS not supported");
838 return (EIO); /* XXX: really? */
839 #endif
840 }
841
842 if (UFS_MPISAPPLEUFS(ump)) {
843 /* see comment about NeXT below */
844 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
845 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
846 mp->mnt_iflag |= IMNT_DTYPE;
847 } else {
848 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
849 ump->um_dirblksiz = UFS_DIRBLKSIZ;
850 if (ump->um_maxsymlinklen > 0)
851 mp->mnt_iflag |= IMNT_DTYPE;
852 else
853 mp->mnt_iflag &= ~IMNT_DTYPE;
854 }
855 ffs_oldfscompat_read(fs, ump, sblockloc);
856
857 mutex_enter(&ump->um_lock);
858 ump->um_maxfilesize = fs->fs_maxfilesize;
859 if (fs->fs_flags & ~(FS_KNOWN_FLAGS | FS_INTERNAL)) {
860 uprintf("%s: unknown ufs flags: 0x%08"PRIx32"%s\n",
861 mp->mnt_stat.f_mntonname, fs->fs_flags,
862 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
863 if ((mp->mnt_flag & MNT_FORCE) == 0) {
864 mutex_exit(&ump->um_lock);
865 return (EINVAL);
866 }
867 }
868 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
869 fs->fs_pendingblocks = 0;
870 fs->fs_pendinginodes = 0;
871 }
872 mutex_exit(&ump->um_lock);
873
874 ffs_statvfs(mp, &mp->mnt_stat);
875 /*
876 * Step 3: re-read summary information from disk.
877 */
878 blks = howmany(fs->fs_cssize, fs->fs_fsize);
879 space = fs->fs_csp;
880 for (i = 0; i < blks; i += fs->fs_frag) {
881 bsize = fs->fs_bsize;
882 if (i + fs->fs_frag > blks)
883 bsize = (blks - i) * fs->fs_fsize;
884 error = bread(devvp, FFS_FSBTODB(fs, fs->fs_csaddr + i), bsize,
885 0, &bp);
886 if (error) {
887 return (error);
888 }
889 #ifdef FFS_EI
890 if (UFS_FSNEEDSWAP(fs))
891 ffs_csum_swap((struct csum *)bp->b_data,
892 (struct csum *)space, bsize);
893 else
894 #endif
895 memcpy(space, bp->b_data, (size_t)bsize);
896 space = (char *)space + bsize;
897 brelse(bp, 0);
898 }
899 /*
900 * We no longer know anything about clusters per cylinder group.
901 */
902 if (fs->fs_contigsumsize > 0) {
903 lp = fs->fs_maxcluster;
904 for (i = 0; i < fs->fs_ncg; i++)
905 *lp++ = fs->fs_contigsumsize;
906 }
907
908 vfs_vnode_iterator_init(mp, &marker);
909 while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL))) {
910 /*
911 * Step 4: invalidate all inactive vnodes.
912 */
913 if (vrecycle(vp))
914 continue;
915 /*
916 * Step 5: invalidate all cached file data.
917 */
918 if (vn_lock(vp, LK_EXCLUSIVE)) {
919 vrele(vp);
920 continue;
921 }
922 if (vinvalbuf(vp, 0, cred, l, 0, 0))
923 panic("%s: dirty2", __func__);
924 /*
925 * Step 6: re-read inode data for all active vnodes.
926 */
927 ip = VTOI(vp);
928 error = bread(devvp, FFS_FSBTODB(fs, ino_to_fsba(fs, ip->i_number)),
929 (int)fs->fs_bsize, 0, &bp);
930 if (error) {
931 vput(vp);
932 break;
933 }
934 ffs_load_inode(bp, ip, fs, ip->i_number);
935 brelse(bp, 0);
936 vput(vp);
937 }
938 vfs_vnode_iterator_destroy(marker);
939 return (error);
940 }
941
942 /*
943 * Possible superblock locations ordered from most to least likely.
944 */
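/*
 * SBLOCKSEARCH typically expands to { SBLOCK_UFS2 (64 KB), SBLOCK_UFS1 (8 KB),
 * SBLOCK_FLOPPY (0), SBLOCK_PIGGY (256 KB), -1 }.
 */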
945 static const int sblock_try[] = SBLOCKSEARCH;
946
947
948 static int
949 ffs_superblock_validate(struct fs *fs)
950 {
951 int32_t i, fs_bshift = 0, fs_fshift = 0, fs_fragshift = 0, fs_frag;
952 int32_t fs_inopb;
953
954 /* Check the superblock size */
955 if (fs->fs_sbsize > SBLOCKSIZE || fs->fs_sbsize < sizeof(struct fs))
956 return 0;
957
958 /* Check the file system blocksize */
959 if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < MINBSIZE)
960 return 0;
961 if (!powerof2(fs->fs_bsize))
962 return 0;
963
964 /* Check the size of frag blocks */
965 if (!powerof2(fs->fs_fsize))
966 return 0;
967 if (fs->fs_fsize == 0)
968 return 0;
969
970 /*
971 * XXX: these values are just zero-checked to prevent obvious
972 * bugs. We need more strict checks.
973 */
974 if (fs->fs_size == 0 && fs->fs_old_size == 0)
975 return 0;
976 if (fs->fs_cssize == 0)
977 return 0;
978 if (fs->fs_ipg == 0)
979 return 0;
980 if (fs->fs_fpg == 0)
981 return 0;
982 if (fs->fs_ncg == 0)
983 return 0;
984 if (fs->fs_maxbpg == 0)
985 return 0;
986
987 /* Check the number of inodes per block */
988 if (fs->fs_magic == FS_UFS1_MAGIC)
989 fs_inopb = fs->fs_bsize / sizeof(struct ufs1_dinode);
990 else /* fs->fs_magic == FS_UFS2_MAGIC */
991 fs_inopb = fs->fs_bsize / sizeof(struct ufs2_dinode);
992 if (fs->fs_inopb != fs_inopb)
993 return 0;
994
995 /* Block size cannot be smaller than fragment size */
996 if (fs->fs_bsize < fs->fs_fsize)
997 return 0;
998
999 /* Compute fs_bshift and ensure it is consistent */
1000 for (i = fs->fs_bsize; i > 1; i >>= 1)
1001 fs_bshift++;
1002 if (fs->fs_bshift != fs_bshift)
1003 return 0;
1004
1005 /* Compute fs_fshift and ensure it is consistent */
1006 for (i = fs->fs_fsize; i > 1; i >>= 1)
1007 fs_fshift++;
1008 if (fs->fs_fshift != fs_fshift)
1009 return 0;
1010
1011 /* Compute fs_fragshift and ensure it is consistent */
1012 for (i = fs->fs_frag; i > 1; i >>= 1)
1013 fs_fragshift++;
1014 if (fs->fs_fragshift != fs_fragshift)
1015 return 0;
1016
1017 /* Check the masks */
1018 if (fs->fs_bmask != ~(fs->fs_bsize - 1))
1019 return 0;
1020 if (fs->fs_fmask != ~(fs->fs_fsize - 1))
1021 return 0;
1022
1023 /*
1024 * Now that the shifts and masks are sanitized, we can use the ffs_ API.
1025 */
1026
1027 /* Check the number of frag blocks */
1028 if ((fs_frag = ffs_numfrags(fs, fs->fs_bsize)) > MAXFRAG)
1029 return 0;
1030 if (fs->fs_frag != fs_frag)
1031 return 0;
1032
1033 /* Check the size of cylinder groups */
1034 if ((fs->fs_cgsize < sizeof(struct cg)) ||
1035 (fs->fs_cgsize > fs->fs_bsize))
1036 return 0;
1037
1038 return 1;
1039 }
1040
1041 static int
1042 ffs_is_appleufs(struct vnode *devvp, struct fs *fs)
1043 {
1044 struct dkwedge_info dkw;
1045 int ret = 0;
1046
1047 /*
1048 * First check to see if this is tagged as an Apple UFS filesystem
1049 * in the disklabel.
1050 */
1051 if (getdiskinfo(devvp, &dkw) == 0 &&
1052 strcmp(dkw.dkw_ptype, DKW_PTYPE_APPLEUFS) == 0)
1053 ret = 1;
1054 #ifdef APPLE_UFS
1055 else {
1056 struct appleufslabel *applefs;
1057 struct buf *bp;
1058 daddr_t blkno = APPLEUFS_LABEL_OFFSET / DEV_BSIZE;
1059 int error;
1060
1061 /*
1062 * Manually look for an Apple UFS label, and if a valid one
1063 * is found, then treat it like an Apple UFS filesystem anyway.
1064 */
1065 error = bread(devvp, blkno, APPLEUFS_LABEL_SIZE, 0, &bp);
1066 if (error) {
1067 DPRINTF("bread@0x%jx returned %d", (intmax_t)blkno, error);
1068 return 0;
1069 }
1070 applefs = (struct appleufslabel *)bp->b_data;
1071 error = ffs_appleufs_validate(fs->fs_fsmnt, applefs, NULL);
1072 if (error == 0)
1073 ret = 1;
1074 brelse(bp, 0);
1075 }
1076 #endif
1077
1078 return ret;
1079 }
1080
1081 /*
1082 * Common code for mount and mountroot
1083 */
1084 int
1085 ffs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
1086 {
1087 struct ufsmount *ump = NULL;
1088 struct buf *bp = NULL;
1089 struct fs *fs = NULL;
1090 dev_t dev;
1091 void *space;
1092 daddr_t sblockloc = 0;
1093 int blks, fstype = 0;
1094 int error, i, bsize, ronly, bset = 0;
1095 #ifdef FFS_EI
1096 int needswap = 0; /* keep gcc happy */
1097 #endif
1098 int32_t *lp;
1099 kauth_cred_t cred;
1100 u_int32_t allocsbsize, fs_sbsize = 0;
1101
1102 dev = devvp->v_rdev;
1103 cred = l ? l->l_cred : NOCRED;
1104
1105 /* Flush out any old buffers remaining from a previous use. */
1106 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1107 error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
1108 VOP_UNLOCK(devvp);
1109 if (error) {
1110 DPRINTF("vinvalbuf returned %d", error);
1111 return error;
1112 }
1113
1114 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
1115
1116 ump = kmem_zalloc(sizeof(*ump), KM_SLEEP);
1117 mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);
1118 error = ffs_snapshot_init(ump);
1119 if (error) {
1120 DPRINTF("ffs_snapshot_init returned %d", error);
1121 goto out;
1122 }
1123 ump->um_ops = &ffs_ufsops;
1124
1125 #ifdef WAPBL
1126 sbagain:
1127 #endif
1128 /*
1129 * Try reading the superblock in each of its possible locations.
1130 */
1131 for (i = 0; ; i++) {
1132 daddr_t fs_sblockloc;
1133
1134 if (bp != NULL) {
1135 brelse(bp, BC_NOCACHE);
1136 bp = NULL;
1137 }
1138 if (sblock_try[i] == -1) {
1139 DPRINTF("no superblock found");
1140 error = EINVAL;
1141 fs = NULL;
1142 goto out;
1143 }
1144
1145 error = bread(devvp, sblock_try[i] / DEV_BSIZE, SBLOCKSIZE,
1146 0, &bp);
1147 if (error) {
1148 DPRINTF("bread@0x%x returned %d",
1149 sblock_try[i] / DEV_BSIZE, error);
1150 fs = NULL;
1151 goto out;
1152 }
1153 fs = (struct fs *)bp->b_data;
1154
1155 sblockloc = sblock_try[i];
1156 DPRINTF("fs_magic 0x%x", fs->fs_magic);
1157
1158 /*
1159 * Swap: here, we swap fs->fs_sbsize in order to get the correct
1160 * size to read the superblock. Once read, we swap the whole
1161 * superblock structure.
1162 */
1163 if (fs->fs_magic == FS_UFS1_MAGIC) {
1164 fs_sbsize = fs->fs_sbsize;
1165 fstype = UFS1;
1166 #ifdef FFS_EI
1167 needswap = 0;
1168 } else if (fs->fs_magic == FS_UFS1_MAGIC_SWAPPED) {
1169 fs_sbsize = bswap32(fs->fs_sbsize);
1170 fstype = UFS1;
1171 needswap = 1;
1172 #endif
1173 } else if (fs->fs_magic == FS_UFS2_MAGIC) {
1174 fs_sbsize = fs->fs_sbsize;
1175 fstype = UFS2;
1176 #ifdef FFS_EI
1177 needswap = 0;
1178 } else if (fs->fs_magic == FS_UFS2_MAGIC_SWAPPED) {
1179 fs_sbsize = bswap32(fs->fs_sbsize);
1180 fstype = UFS2;
1181 needswap = 1;
1182 #endif
1183 } else
1184 continue;
1185
1186 /* fs->fs_sblockloc isn't defined for old filesystems */
1187 if (fstype == UFS1 && !(fs->fs_old_flags & FS_FLAGS_UPDATED)) {
1188 if (sblockloc == SBLOCK_UFS2)
1189 /*
1190 * This is likely to be the first alternate
1191 * in a filesystem with 64k blocks.
1192 * Don't use it.
1193 */
1194 continue;
1195 fs_sblockloc = sblockloc;
1196 } else {
1197 fs_sblockloc = fs->fs_sblockloc;
1198 #ifdef FFS_EI
1199 if (needswap)
1200 fs_sblockloc = bswap64(fs_sblockloc);
1201 #endif
1202 }
1203
1204 /* Check we haven't found an alternate superblock */
1205 if (fs_sblockloc != sblockloc)
1206 continue;
1207
1208 /* Check the superblock size */
1209 if (fs_sbsize > SBLOCKSIZE || fs_sbsize < sizeof(struct fs))
1210 continue;
1211 fs = kmem_alloc((u_long)fs_sbsize, KM_SLEEP);
1212 memcpy(fs, bp->b_data, fs_sbsize);
1213
1214 /* Swap the whole superblock structure, if necessary. */
1215 #ifdef FFS_EI
1216 if (needswap) {
1217 ffs_sb_swap((struct fs*)bp->b_data, fs);
1218 fs->fs_flags |= FS_SWAPPED;
1219 } else
1220 #endif
1221 fs->fs_flags &= ~FS_SWAPPED;
1222
1223 /*
1224 * Now that everything is swapped, the superblock is ready to
1225 * be sanitized.
1226 */
1227 if (!ffs_superblock_validate(fs)) {
1228 kmem_free(fs, fs_sbsize);
1229 continue;
1230 }
1231
1232 /* Ok seems to be a good superblock */
1233 break;
1234 }
1235
1236 ump->um_fs = fs;
1237
1238 #ifdef WAPBL
1239 if ((mp->mnt_wapbl_replay == 0) && (fs->fs_flags & FS_DOWAPBL)) {
1240 error = ffs_wapbl_replay_start(mp, fs, devvp);
1241 if (error && (mp->mnt_flag & MNT_FORCE) == 0) {
1242 DPRINTF("ffs_wapbl_replay_start returned %d", error);
1243 goto out;
1244 }
1245 if (!error) {
1246 if (!ronly) {
1247 /* XXX fsmnt may be stale. */
1248 printf("%s: replaying log to disk\n",
1249 fs->fs_fsmnt);
1250 error = wapbl_replay_write(mp->mnt_wapbl_replay,
1251 devvp);
1252 if (error) {
1253 DPRINTF("wapbl_replay_write returned %d",
1254 error);
1255 goto out;
1256 }
1257 wapbl_replay_stop(mp->mnt_wapbl_replay);
1258 fs->fs_clean = FS_WASCLEAN;
1259 } else {
1260 /* XXX fsmnt may be stale */
1261 printf("%s: replaying log to memory\n",
1262 fs->fs_fsmnt);
1263 }
1264
1265 /* Force a re-read of the superblock */
1266 brelse(bp, BC_INVAL);
1267 bp = NULL;
1268 kmem_free(fs, fs_sbsize);
1269 fs = NULL;
1270 goto sbagain;
1271 }
1272 }
1273 #else /* !WAPBL */
1274 if ((fs->fs_flags & FS_DOWAPBL) && (mp->mnt_flag & MNT_FORCE) == 0) {
1275 error = EPERM;
1276 DPRINTF("no force %d", error);
1277 goto out;
1278 }
1279 #endif /* !WAPBL */
1280
1281 ffs_oldfscompat_read(fs, ump, sblockloc);
1282 ump->um_maxfilesize = fs->fs_maxfilesize;
1283
1284 if (fs->fs_flags & ~(FS_KNOWN_FLAGS | FS_INTERNAL)) {
1285 uprintf("%s: unknown ufs flags: 0x%08"PRIx32"%s\n",
1286 mp->mnt_stat.f_mntonname, fs->fs_flags,
1287 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
1288 if ((mp->mnt_flag & MNT_FORCE) == 0) {
1289 error = EINVAL;
1290 DPRINTF("no force %d", error);
1291 goto out;
1292 }
1293 }
1294
1295 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1296 fs->fs_pendingblocks = 0;
1297 fs->fs_pendinginodes = 0;
1298 }
1299
1300 ump->um_fstype = fstype;
1301 if (fs->fs_sbsize < SBLOCKSIZE)
1302 brelse(bp, BC_INVAL);
1303 else
1304 brelse(bp, 0);
1305 bp = NULL;
1306
1307 if (ffs_is_appleufs(devvp, fs)) {
1308 #ifdef APPLE_UFS
1309 ump->um_flags |= UFS_ISAPPLEUFS;
1310 #else
1311 DPRINTF("AppleUFS not supported");
1312 error = EINVAL;
1313 goto out;
1314 #endif
1315 }
1316
1317 #if 0
1318 /*
1319 * XXX This code changes the behaviour of mounting dirty filesystems, to
1320 * XXX require "mount -f ..." to mount them. This doesn't match what
1321 * XXX mount(8) describes and is disabled for now.
1322 */
1323 /*
1324 * If the file system is not clean, don't allow it to be mounted
1325 * unless MNT_FORCE is specified. (Note: MNT_FORCE is always set
1326 * for the root file system.)
1327 */
1328 if (fs->fs_flags & FS_DOWAPBL) {
1329 /*
1330 * wapbl normally expects to be FS_WASCLEAN when the FS_DOWAPBL
1331 * bit is set, although there's a window in unmount where it
1332 * could be FS_ISCLEAN
1333 */
1334 if ((mp->mnt_flag & MNT_FORCE) == 0 &&
1335 (fs->fs_clean & (FS_WASCLEAN | FS_ISCLEAN)) == 0) {
1336 error = EPERM;
1337 goto out;
1338 }
1339 } else
1340 if ((fs->fs_clean & FS_ISCLEAN) == 0 &&
1341 (mp->mnt_flag & MNT_FORCE) == 0) {
1342 error = EPERM;
1343 goto out;
1344 }
1345 #endif
1346
1347 /*
1348 * Verify that we can access the last block in the fs
1349 * if we're mounting read/write.
1350 */
1351 if (!ronly) {
1352 error = bread(devvp, FFS_FSBTODB(fs, fs->fs_size - 1),
1353 fs->fs_fsize, 0, &bp);
1354 if (error) {
1355 DPRINTF("bread@0x%jx returned %d",
1356 (intmax_t)FFS_FSBTODB(fs, fs->fs_size - 1),
1357 error);
1358 bset = BC_INVAL;
1359 goto out;
1360 }
1361 if (bp->b_bcount != fs->fs_fsize) {
1362 DPRINTF("bcount %x != fsize %x", bp->b_bcount,
1363 fs->fs_fsize);
1364 error = EINVAL;
1365 bset = BC_INVAL;
1366 goto out;
1367 }
1368 brelse(bp, BC_INVAL);
1369 bp = NULL;
1370 }
1371
1372 fs->fs_ronly = ronly;
1373 /* Don't bump fs_clean if we're replaying journal */
1374 if (!((fs->fs_flags & FS_DOWAPBL) && (fs->fs_clean & FS_WASCLEAN))) {
1375 if (ronly == 0) {
1376 fs->fs_clean <<= 1;
1377 fs->fs_fmod = 1;
1378 }
1379 }
1380
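/*
 * fs_csp (the cylinder-group summary area), fs_maxcluster and
 * fs_contigdirs all live in a single allocation of allocsbsize bytes;
 * ffs_unmount() recomputes the same total in order to free it.
 */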
1381 bsize = fs->fs_cssize;
1382 blks = howmany(bsize, fs->fs_fsize);
1383 if (fs->fs_contigsumsize > 0)
1384 bsize += fs->fs_ncg * sizeof(int32_t);
1385 bsize += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1386 allocsbsize = bsize;
1387 space = kmem_alloc((u_long)allocsbsize, KM_SLEEP);
1388 fs->fs_csp = space;
1389
1390 for (i = 0; i < blks; i += fs->fs_frag) {
1391 bsize = fs->fs_bsize;
1392 if (i + fs->fs_frag > blks)
1393 bsize = (blks - i) * fs->fs_fsize;
1394 error = bread(devvp, FFS_FSBTODB(fs, fs->fs_csaddr + i), bsize,
1395 0, &bp);
1396 if (error) {
1397 DPRINTF("bread@0x%jx %d",
1398 (intmax_t)FFS_FSBTODB(fs, fs->fs_csaddr + i),
1399 error);
1400 goto out1;
1401 }
1402 #ifdef FFS_EI
1403 if (needswap)
1404 ffs_csum_swap((struct csum *)bp->b_data,
1405 (struct csum *)space, bsize);
1406 else
1407 #endif
1408 memcpy(space, bp->b_data, (u_int)bsize);
1409
1410 space = (char *)space + bsize;
1411 brelse(bp, 0);
1412 bp = NULL;
1413 }
1414 if (fs->fs_contigsumsize > 0) {
1415 fs->fs_maxcluster = lp = space;
1416 for (i = 0; i < fs->fs_ncg; i++)
1417 *lp++ = fs->fs_contigsumsize;
1418 space = lp;
1419 }
1420 bsize = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1421 fs->fs_contigdirs = space;
1422 space = (char *)space + bsize;
1423 memset(fs->fs_contigdirs, 0, bsize);
1424
1425 /* Compatibility for old filesystems - XXX */
1426 if (fs->fs_avgfilesize <= 0)
1427 fs->fs_avgfilesize = AVFILESIZ;
1428 if (fs->fs_avgfpdir <= 0)
1429 fs->fs_avgfpdir = AFPDIR;
1430 fs->fs_active = NULL;
1431
1432 mp->mnt_data = ump;
1433 mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
1434 mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_FFS);
1435 mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
1436 mp->mnt_stat.f_namemax = FFS_MAXNAMLEN;
1437 if (UFS_MPISAPPLEUFS(ump)) {
1438 /* NeXT used to keep short symlinks in the inode even
1439 * when using FS_42INODEFMT. In that case fs->fs_maxsymlinklen
1440 * is probably -1, but we still need to be able to identify
1441 * short symlinks.
1442 */
1443 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
1444 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
1445 mp->mnt_iflag |= IMNT_DTYPE;
1446 } else {
1447 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
1448 ump->um_dirblksiz = UFS_DIRBLKSIZ;
1449 if (ump->um_maxsymlinklen > 0)
1450 mp->mnt_iflag |= IMNT_DTYPE;
1451 else
1452 mp->mnt_iflag &= ~IMNT_DTYPE;
1453 }
1454 mp->mnt_fs_bshift = fs->fs_bshift;
1455 mp->mnt_dev_bshift = DEV_BSHIFT; /* XXX */
1456 mp->mnt_flag |= MNT_LOCAL;
1457 mp->mnt_iflag |= IMNT_MPSAFE | IMNT_CAN_RWTORO | IMNT_SHRLOOKUP |
1458 IMNT_NCLOOKUP;
1459 #ifdef FFS_EI
1460 if (needswap)
1461 ump->um_flags |= UFS_NEEDSWAP;
1462 #endif
1463 ump->um_mountp = mp;
1464 ump->um_dev = dev;
1465 ump->um_devvp = devvp;
1466 ump->um_nindir = fs->fs_nindir;
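/* fs_nindir is a power of two, so ffs() (find first set bit) minus one yields log2(fs_nindir). */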
1467 ump->um_lognindir = ffs(fs->fs_nindir) - 1;
1468 ump->um_bptrtodb = fs->fs_fshift - DEV_BSHIFT;
1469 ump->um_seqinc = fs->fs_frag;
1470 for (i = 0; i < MAXQUOTAS; i++)
1471 ump->um_quotas[i] = NULLVP;
1472 spec_node_setmountedfs(devvp, mp);
1473 if (ronly == 0 && fs->fs_snapinum[0] != 0)
1474 ffs_snapshot_mount(mp);
1475 #ifdef WAPBL
1476 if (!ronly) {
1477 KDASSERT(fs->fs_ronly == 0);
1478 /*
1479 * ffs_wapbl_start() needs mp->mnt_stat initialised if it
1480 * needs to create a new log file in-filesystem.
1481 */
1482 error = ffs_statvfs(mp, &mp->mnt_stat);
1483 if (error) {
1484 DPRINTF("ffs_statvfs returned %d", error);
1485 goto out1;
1486 }
1487
1488 error = ffs_wapbl_start(mp);
1489 if (error) {
1490 DPRINTF("ffs_wapbl_start returned %d", error);
1491 goto out1;
1492 }
1493 }
1494 #endif /* WAPBL */
1495 if (ronly == 0) {
1496 #ifdef QUOTA2
1497 error = ffs_quota2_mount(mp);
1498 if (error) {
1499 DPRINTF("ffs_quota2_mount returned %d", error);
1500 goto out1;
1501 }
1502 #else
1503 if (fs->fs_flags & FS_DOQUOTA2) {
1504 ump->um_flags |= UFS_QUOTA2;
1505 uprintf("%s: options QUOTA2 not enabled%s\n",
1506 mp->mnt_stat.f_mntonname,
1507 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
1508 if ((mp->mnt_flag & MNT_FORCE) == 0) {
1509 error = EINVAL;
1510 DPRINTF("quota disabled %d", error);
1511 goto out1;
1512 }
1513 }
1514 #endif
1515 }
1516
1517 if (mp->mnt_flag & MNT_DISCARD)
1518 ump->um_discarddata = ffs_discard_init(devvp, fs);
1519
1520 return (0);
1521 out1:
1522 kmem_free(fs->fs_csp, allocsbsize);
1523 out:
1524 #ifdef WAPBL
1525 if (mp->mnt_wapbl_replay) {
1526 wapbl_replay_stop(mp->mnt_wapbl_replay);
1527 wapbl_replay_free(mp->mnt_wapbl_replay);
1528 mp->mnt_wapbl_replay = 0;
1529 }
1530 #endif
1531
1532 if (fs)
1533 kmem_free(fs, fs->fs_sbsize);
1534 spec_node_setmountedfs(devvp, NULL);
1535 if (bp)
1536 brelse(bp, bset);
1537 if (ump) {
1538 if (ump->um_oldfscompat)
1539 kmem_free(ump->um_oldfscompat, 512 + 3*sizeof(int32_t));
1540 mutex_destroy(&ump->um_lock);
1541 kmem_free(ump, sizeof(*ump));
1542 mp->mnt_data = NULL;
1543 }
1544 return (error);
1545 }
1546
1547 /*
1548 * Sanity checks for loading old filesystem superblocks.
1549 * See ffs_oldfscompat_write below for unwound actions.
1550 *
1551 * XXX - Parts get retired eventually.
1552 * Unfortunately new bits get added.
1553 */
1554 static void
1555 ffs_oldfscompat_read(struct fs *fs, struct ufsmount *ump, daddr_t sblockloc)
1556 {
1557 off_t maxfilesize;
1558 int32_t *extrasave;
1559
1560 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1561 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1562 return;
1563
1564 if (!ump->um_oldfscompat)
1565 ump->um_oldfscompat = kmem_alloc(512 + 3*sizeof(int32_t),
1566 KM_SLEEP);
1567
1568 memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
1569 extrasave = ump->um_oldfscompat;
1570 extrasave += 512/sizeof(int32_t);
1571 extrasave[0] = fs->fs_old_npsect;
1572 extrasave[1] = fs->fs_old_interleave;
1573 extrasave[2] = fs->fs_old_trackskew;
1574
1575 /* These fields will be overwritten by their
1576 * original values in ffs_oldfscompat_write, so it is harmless
1577 * to modify them here.
1578 */
1579 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
1580 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
1581 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
1582 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
1583
1584 fs->fs_maxbsize = fs->fs_bsize;
1585 fs->fs_time = fs->fs_old_time;
1586 fs->fs_size = fs->fs_old_size;
1587 fs->fs_dsize = fs->fs_old_dsize;
1588 fs->fs_csaddr = fs->fs_old_csaddr;
1589 fs->fs_sblockloc = sblockloc;
1590
1591 fs->fs_flags = fs->fs_old_flags | (fs->fs_flags & FS_INTERNAL);
1592
1593 if (fs->fs_old_postblformat == FS_42POSTBLFMT) {
1594 fs->fs_old_nrpos = 8;
1595 fs->fs_old_npsect = fs->fs_old_nsect;
1596 fs->fs_old_interleave = 1;
1597 fs->fs_old_trackskew = 0;
1598 }
1599
1600 if (fs->fs_magic == FS_UFS1_MAGIC &&
1601 fs->fs_old_inodefmt < FS_44INODEFMT) {
1602 fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
1603 fs->fs_qbmask = ~fs->fs_bmask;
1604 fs->fs_qfmask = ~fs->fs_fmask;
1605 }
1606
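/* UFS1 block pointers are signed 32-bit, so cap the file size at 2^31 blocks. */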
1607 maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;
1608 if (fs->fs_maxfilesize > maxfilesize)
1609 fs->fs_maxfilesize = maxfilesize;
1610
1611 /* Compatibility for old filesystems */
1612 if (fs->fs_avgfilesize <= 0)
1613 fs->fs_avgfilesize = AVFILESIZ;
1614 if (fs->fs_avgfpdir <= 0)
1615 fs->fs_avgfpdir = AFPDIR;
1616
1617 #if 0
1618 if (bigcgs) {
1619 fs->fs_save_cgsize = fs->fs_cgsize;
1620 fs->fs_cgsize = fs->fs_bsize;
1621 }
1622 #endif
1623 }
1624
1625 /*
1626 * Unwinding superblock updates for old filesystems.
1627 * See ffs_oldfscompat_read above for details.
1628 *
1629 * XXX - Parts get retired eventually.
1630 * Unfortunately new bits get added.
1631 */
1632 static void
1633 ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
1634 {
1635 int32_t *extrasave;
1636
1637 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1638 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1639 return;
1640
1641 fs->fs_old_time = fs->fs_time;
1642 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
1643 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
1644 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
1645 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
1646 fs->fs_old_flags = fs->fs_flags;
1647
1648 #if 0
1649 if (bigcgs) {
1650 fs->fs_cgsize = fs->fs_save_cgsize;
1651 }
1652 #endif
1653
1654 memcpy(&fs->fs_old_postbl_start, ump->um_oldfscompat, 512);
1655 extrasave = ump->um_oldfscompat;
1656 extrasave += 512/sizeof(int32_t);
1657 fs->fs_old_npsect = extrasave[0];
1658 fs->fs_old_interleave = extrasave[1];
1659 fs->fs_old_trackskew = extrasave[2];
1660
1661 }
1662
1663 /*
1664 * unmount vfs operation
1665 */
1666 int
1667 ffs_unmount(struct mount *mp, int mntflags)
1668 {
1669 struct lwp *l = curlwp;
1670 struct ufsmount *ump = VFSTOUFS(mp);
1671 struct fs *fs = ump->um_fs;
1672 int error, flags;
1673 u_int32_t bsize;
1674 #ifdef WAPBL
1675 extern int doforce;
1676 #endif
1677
1678 if (ump->um_discarddata) {
1679 ffs_discard_finish(ump->um_discarddata, mntflags);
1680 ump->um_discarddata = NULL;
1681 }
1682
1683 flags = 0;
1684 if (mntflags & MNT_FORCE)
1685 flags |= FORCECLOSE;
1686 if ((error = ffs_flushfiles(mp, flags, l)) != 0)
1687 return (error);
1688 error = UFS_WAPBL_BEGIN(mp);
1689 if (error == 0)
1690 if (fs->fs_ronly == 0 &&
1691 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
1692 fs->fs_clean & FS_WASCLEAN) {
1693 fs->fs_clean = FS_ISCLEAN;
1694 fs->fs_fmod = 0;
1695 (void) ffs_sbupdate(ump, MNT_WAIT);
1696 }
1697 if (error == 0)
1698 UFS_WAPBL_END(mp);
1699 #ifdef WAPBL
1700 KASSERT(!(mp->mnt_wapbl_replay && mp->mnt_wapbl));
1701 if (mp->mnt_wapbl_replay) {
1702 KDASSERT(fs->fs_ronly);
1703 wapbl_replay_stop(mp->mnt_wapbl_replay);
1704 wapbl_replay_free(mp->mnt_wapbl_replay);
1705 mp->mnt_wapbl_replay = 0;
1706 }
1707 error = ffs_wapbl_stop(mp, doforce && (mntflags & MNT_FORCE));
1708 if (error) {
1709 return error;
1710 }
1711 #endif /* WAPBL */
1712
1713 if (ump->um_devvp->v_type != VBAD)
1714 spec_node_setmountedfs(ump->um_devvp, NULL);
1715 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1716 (void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD | FWRITE,
1717 NOCRED);
1718 vput(ump->um_devvp);
1719
1720 bsize = fs->fs_cssize;
1721 if (fs->fs_contigsumsize > 0)
1722 bsize += fs->fs_ncg * sizeof(int32_t);
1723 bsize += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1724 kmem_free(fs->fs_csp, bsize);
1725
1726 kmem_free(fs, fs->fs_sbsize);
1727 if (ump->um_oldfscompat != NULL)
1728 kmem_free(ump->um_oldfscompat, 512 + 3*sizeof(int32_t));
1729 mutex_destroy(&ump->um_lock);
1730 ffs_snapshot_fini(ump);
1731 kmem_free(ump, sizeof(*ump));
1732 mp->mnt_data = NULL;
1733 mp->mnt_flag &= ~MNT_LOCAL;
1734 return (0);
1735 }
1736
1737 /*
1738 * Flush out all the files in a filesystem.
1739 */
1740 int
1741 ffs_flushfiles(struct mount *mp, int flags, struct lwp *l)
1742 {
1743 extern int doforce;
1744 struct ufsmount *ump;
1745 int error;
1746
1747 if (!doforce)
1748 flags &= ~FORCECLOSE;
1749 ump = VFSTOUFS(mp);
1750 #ifdef QUOTA
1751 if ((error = quota1_umount(mp, flags)) != 0)
1752 return (error);
1753 #endif
1754 #ifdef QUOTA2
1755 if ((error = quota2_umount(mp, flags)) != 0)
1756 return (error);
1757 #endif
1758 #ifdef UFS_EXTATTR
1759 if (ump->um_fstype == UFS1) {
1760 if (ump->um_extattr.uepm_flags & UFS_EXTATTR_UEPM_STARTED)
1761 ufs_extattr_stop(mp, l);
1762 if (ump->um_extattr.uepm_flags & UFS_EXTATTR_UEPM_INITIALIZED)
1763 ufs_extattr_uepm_destroy(&ump->um_extattr);
1764 mp->mnt_flag &= ~MNT_EXTATTR;
1765 }
1766 #endif
1767 if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
1768 return (error);
1769 ffs_snapshot_unmount(mp);
1770 /*
1771 * Flush all the files.
1772 */
1773 error = vflush(mp, NULLVP, flags);
1774 if (error)
1775 return (error);
1776 /*
1777 * Flush filesystem metadata.
1778 */
1779 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1780 error = VOP_FSYNC(ump->um_devvp, l->l_cred, FSYNC_WAIT, 0, 0);
1781 VOP_UNLOCK(ump->um_devvp);
1782 if (flags & FORCECLOSE) /* XXXDBJ */
1783 error = 0;
1784
1785 #ifdef WAPBL
1786 if (error)
1787 return error;
1788 if (mp->mnt_wapbl) {
1789 error = wapbl_flush(mp->mnt_wapbl, 1);
1790 if (flags & FORCECLOSE)
1791 error = 0;
1792 }
1793 #endif
1794
1795 return (error);
1796 }
1797
1798 /*
1799 * Get file system statistics.
1800 */
1801 int
1802 ffs_statvfs(struct mount *mp, struct statvfs *sbp)
1803 {
1804 struct ufsmount *ump;
1805 struct fs *fs;
1806
1807 ump = VFSTOUFS(mp);
1808 fs = ump->um_fs;
1809 mutex_enter(&ump->um_lock);
1810 sbp->f_bsize = fs->fs_bsize;
1811 sbp->f_frsize = fs->fs_fsize;
1812 sbp->f_iosize = fs->fs_bsize;
1813 sbp->f_blocks = fs->fs_dsize;
1814 sbp->f_bfree = ffs_blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
1815 fs->fs_cstotal.cs_nffree + FFS_DBTOFSB(fs, fs->fs_pendingblocks);
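/* fs_minfree percent of the data blocks is held in reserve; it is subtracted from f_bavail below. */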
1816 sbp->f_bresvd = ((u_int64_t) fs->fs_dsize * (u_int64_t)
1817 fs->fs_minfree) / (u_int64_t) 100;
1818 if (sbp->f_bfree > sbp->f_bresvd)
1819 sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
1820 else
1821 sbp->f_bavail = 0;
1822 sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
1823 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1824 sbp->f_favail = sbp->f_ffree;
1825 sbp->f_fresvd = 0;
1826 mutex_exit(&ump->um_lock);
1827 copy_statvfs_info(sbp, mp);
1828
1829 return (0);
1830 }
1831
1832 struct ffs_sync_ctx {
1833 int waitfor;
1834 };
1835
1836 static bool
1837 ffs_sync_selector(void *cl, struct vnode *vp)
1838 {
1839 struct ffs_sync_ctx *c = cl;
1840 struct inode *ip;
1841
1842 KASSERT(mutex_owned(vp->v_interlock));
1843
1844 ip = VTOI(vp);
1845 /*
1846 * Skip the vnode/inode if inaccessible.
1847 */
1848 if (ip == NULL || vp->v_type == VNON)
1849 return false;
1850
1851 /*
1852 * We deliberately update inode times here. This will
1853 * prevent a massive queue of updates accumulating, only
1854 * to be handled by a call to unmount.
1855 *
1856 * XXX It would be better to have the syncer trickle these
1857 * out. Adjustment needed to allow registering vnodes for
1858 * sync when the vnode is clean, but the inode dirty. Or
1859 * have ufs itself trickle out inode updates.
1860 *
1861 * If doing a lazy sync, we don't care about metadata or
1862 * data updates, because they are handled by each vnode's
1863 * synclist entry. In this case we are only interested in
1864 * writing back modified inodes.
1865 */
1866 if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE |
1867 IN_MODIFY | IN_MODIFIED | IN_ACCESSED)) == 0 &&
1868 (c->waitfor == MNT_LAZY || (LIST_EMPTY(&vp->v_dirtyblkhd) &&
1869 (vp->v_iflag & VI_ONWORKLST) == 0)))
1870 return false;
1871
1872 return true;
1873 }
1874
1875 /*
1876 * Go through the disk queues to initiate sandbagged IO;
1877 * go through the inodes to write those that have been modified;
1878 * initiate the writing of the super block if it has been modified.
1879 *
1880 * Note: we are always called with the filesystem marked `MPBUSY'.
1881 */
1882 int
1883 ffs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
1884 {
1885 struct vnode *vp;
1886 struct ufsmount *ump = VFSTOUFS(mp);
1887 struct fs *fs;
1888 struct vnode_iterator *marker;
1889 int error, allerror = 0;
1890 struct ffs_sync_ctx ctx;
1891
1892 fs = ump->um_fs;
1893 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
1894 panic("%s: rofs mod, fs=%s", __func__, fs->fs_fsmnt);
1895 }
1896
1897 /*
1898 * Write back each (modified) inode.
1899 */
1900 vfs_vnode_iterator_init(mp, &marker);
1901
1902 ctx.waitfor = waitfor;
1903 while ((vp = vfs_vnode_iterator_next(marker, ffs_sync_selector, &ctx)))
1904 {
1905 error = vn_lock(vp,
1906 LK_EXCLUSIVE | (waitfor == MNT_LAZY ? LK_NOWAIT : 0));
1907 if (error) {
1908 vrele(vp);
1909 continue;
1910 }
1911 if (waitfor == MNT_LAZY) {
1912 error = UFS_WAPBL_BEGIN(vp->v_mount);
1913 if (!error) {
1914 error = ffs_update(vp, NULL, NULL,
1915 UPDATE_CLOSE);
1916 UFS_WAPBL_END(vp->v_mount);
1917 }
1918 } else {
1919 error = VOP_FSYNC(vp, cred, FSYNC_NOLOG |
1920 (waitfor == MNT_WAIT ? FSYNC_WAIT : 0), 0, 0);
1921 }
1922 if (error)
1923 allerror = error;
1924 vput(vp);
1925 }
1926 vfs_vnode_iterator_destroy(marker);
1927
1928 /*
1929 * Force stale file system control information to be flushed.
1930 */
1931 if (waitfor != MNT_LAZY && (ump->um_devvp->v_numoutput > 0 ||
1932 !LIST_EMPTY(&ump->um_devvp->v_dirtyblkhd))) {
1933 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1934 if ((error = VOP_FSYNC(ump->um_devvp, cred,
1935 (waitfor == MNT_WAIT ? FSYNC_WAIT : 0) | FSYNC_NOLOG,
1936 0, 0)) != 0)
1937 allerror = error;
1938 VOP_UNLOCK(ump->um_devvp);
1939 }
1940 #if defined(QUOTA) || defined(QUOTA2)
1941 qsync(mp);
1942 #endif
1943 /*
1944 * Write back modified superblock.
1945 */
1946 if (fs->fs_fmod != 0) {
1947 fs->fs_fmod = 0;
1948 fs->fs_time = time_second;
1949 error = UFS_WAPBL_BEGIN(mp);
1950 if (error)
1951 allerror = error;
1952 else {
1953 if ((error = ffs_cgupdate(ump, waitfor)))
1954 allerror = error;
1955 UFS_WAPBL_END(mp);
1956 }
1957 }
1958
1959 #ifdef WAPBL
1960 if (mp->mnt_wapbl) {
1961 error = wapbl_flush(mp->mnt_wapbl, (waitfor == MNT_WAIT));
1962 if (error)
1963 allerror = error;
1964 }
1965 #endif
1966
1967 return (allerror);
1968 }
1969
1970 /*
1971 * Load inode from disk and initialize vnode.
1972 */
1973 static int
1974 ffs_init_vnode(struct ufsmount *ump, struct vnode *vp, ino_t ino)
1975 {
1976 struct fs *fs;
1977 struct inode *ip;
1978 struct buf *bp;
1979 int error;
1980
1981 fs = ump->um_fs;
1982
1983 /* Read in the disk contents for the inode. */
1984 error = bread(ump->um_devvp, FFS_FSBTODB(fs, ino_to_fsba(fs, ino)),
1985 (int)fs->fs_bsize, 0, &bp);
1986 if (error)
1987 return error;
1988
1989 /* Allocate and initialize inode. */
1990 ip = pool_cache_get(ffs_inode_cache, PR_WAITOK);
1991 memset(ip, 0, sizeof(struct inode));
1992 ip->i_ump = ump;
1993 ip->i_fs = fs;
1994 ip->i_dev = ump->um_dev;
1995 ip->i_number = ino;
1996 if (ump->um_fstype == UFS1)
1997 ip->i_din.ffs1_din = pool_cache_get(ffs_dinode1_cache,
1998 PR_WAITOK);
1999 else
2000 ip->i_din.ffs2_din = pool_cache_get(ffs_dinode2_cache,
2001 PR_WAITOK);
2002 ffs_load_inode(bp, ip, fs, ino);
2003 brelse(bp, 0);
2004 ip->i_vnode = vp;
2005 #if defined(QUOTA) || defined(QUOTA2)
2006 ufsquota_init(ip);
2007 #endif
2008
2009 /* Initialise vnode with this inode. */
2010 vp->v_tag = VT_UFS;
2011 vp->v_op = ffs_vnodeop_p;
2012 vp->v_vflag |= VV_LOCKSWORK;
2013 vp->v_data = ip;
2014
2015 /* Initialize genfs node. */
2016 genfs_node_init(vp, &ffs_genfsops);
2017
2018 return 0;
2019 }
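
/*
 * Worked example of the bread() above (a sketch assuming typical UFS2
 * parameters): the read fetches the whole fs_bsize block that holds the
 * inode, i.e. fs_inopb on-disk inodes at once, and ffs_load_inode() then
 * picks out the entry at index ino_to_fsbo(fs, ino) within the buffer.
 * With 32 KB blocks and 256-byte ufs2_dinodes, fs_inopb is 128, so inode
 * 130 is entry 130 % 128 == 2 in the second inode block of its cylinder
 * group.
 */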
2020
2021 /*
2022 * Undo ffs_init_vnode().
2023 */
2024 static void
2025 ffs_deinit_vnode(struct ufsmount *ump, struct vnode *vp)
2026 {
2027 struct inode *ip = VTOI(vp);
2028
2029 genfs_node_destroy(vp);
2030 vp->v_data = NULL;
2031
2032 if (ump->um_fstype == UFS1)
2033 pool_cache_put(ffs_dinode1_cache, ip->i_din.ffs1_din);
2034 else
2035 pool_cache_put(ffs_dinode2_cache, ip->i_din.ffs2_din);
2036 pool_cache_put(ffs_inode_cache, ip);
2037 }
2038
2039 /*
2040 * Read an inode from disk and initialize this vnode / inode pair.
2041	 * The caller ensures that no other thread will try to load this inode.
2042 */
2043 int
2044 ffs_loadvnode(struct mount *mp, struct vnode *vp,
2045 const void *key, size_t key_len, const void **new_key)
2046 {
2047 ino_t ino;
2048 struct fs *fs;
2049 struct inode *ip;
2050 struct ufsmount *ump;
2051 int error;
2052
2053 KASSERT(key_len == sizeof(ino));
2054 memcpy(&ino, key, key_len);
2055 ump = VFSTOUFS(mp);
2056 fs = ump->um_fs;
2057
2058 error = ffs_init_vnode(ump, vp, ino);
2059 if (error)
2060 return error;
2061
2062 ip = VTOI(vp);
2063 if (ip->i_mode == 0) {
2064 ffs_deinit_vnode(ump, vp);
2065
2066 return ENOENT;
2067 }
2068
2069 /* Initialize the vnode from the inode. */
2070 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
2071
2072 /* Finish inode initialization. */
2073 ip->i_devvp = ump->um_devvp;
2074 vref(ip->i_devvp);
2075
2076 /*
2077 * Ensure that uid and gid are correct. This is a temporary
2078 * fix until fsck has been changed to do the update.
2079 */
2080
2081 if (fs->fs_magic == FS_UFS1_MAGIC && /* XXX */
2082 fs->fs_old_inodefmt < FS_44INODEFMT) { /* XXX */
2083 ip->i_uid = ip->i_ffs1_ouid; /* XXX */
2084 ip->i_gid = ip->i_ffs1_ogid; /* XXX */
2085 } /* XXX */
2086 uvm_vnp_setsize(vp, ip->i_size);
2087 cache_enter_id(vp, ip->i_mode, ip->i_uid, ip->i_gid);
2088 *new_key = &ip->i_number;
2089 return 0;
2090 }
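
/*
 * Usage sketch (assumed call pattern, not taken from this file): callers
 * do not invoke ffs_loadvnode() directly.  A lookup by inode number goes
 * through the vnode cache, roughly:
 *
 *	ino_t ino = ...;
 *	struct vnode *vp;
 *
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *
 * vcache_get() calls VFS_LOADVNODE() (this function) only when the inode
 * is not already cached and serializes loads per key, which is why the
 * comment above can promise that no other thread is loading the same
 * inode concurrently.
 */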
2091
2092 /*
2093 * Create a new inode on disk and initialize this vnode / inode pair.
2094 */
2095 int
2096 ffs_newvnode(struct mount *mp, struct vnode *dvp, struct vnode *vp,
2097 struct vattr *vap, kauth_cred_t cred, void *extra,
2098 size_t *key_len, const void **new_key)
2099 {
2100 ino_t ino;
2101 struct fs *fs;
2102 struct inode *ip;
2103 struct timespec ts;
2104 struct ufsmount *ump;
2105 int error, mode;
2106
2107 KASSERT(dvp->v_mount == mp);
2108 KASSERT(vap->va_type != VNON);
2109
2110 *key_len = sizeof(ino);
2111 ump = VFSTOUFS(mp);
2112 fs = ump->um_fs;
2113 mode = MAKEIMODE(vap->va_type, vap->va_mode);
2114
2115 /* Allocate fresh inode. */
2116 error = ffs_valloc(dvp, mode, cred, &ino);
2117 if (error)
2118 return error;
2119
2120 /* Attach inode to vnode. */
2121 error = ffs_init_vnode(ump, vp, ino);
2122 if (error) {
2123 if (UFS_WAPBL_BEGIN(mp) == 0) {
2124 ffs_vfree(dvp, ino, mode);
2125 UFS_WAPBL_END(mp);
2126 }
2127 return error;
2128 }
2129
2130 ip = VTOI(vp);
2131 if (ip->i_mode) {
2132 panic("%s: dup alloc ino=%" PRId64 " on %s: mode %o/%o "
2133 "gen %x/%x size %" PRIx64 " blocks %" PRIx64,
2134 __func__, ino, fs->fs_fsmnt, DIP(ip, mode), ip->i_mode,
2135 DIP(ip, gen), ip->i_gen, DIP(ip, size), DIP(ip, blocks));
2136 }
2137 if (DIP(ip, size) || DIP(ip, blocks)) {
2138 printf("%s: ino=%" PRId64 " on %s: "
2139 "gen %x/%x has non zero blocks %" PRIx64 " or size %"
2140 PRIx64 "\n",
2141 __func__, ino, fs->fs_fsmnt, DIP(ip, gen), ip->i_gen,
2142 DIP(ip, blocks), DIP(ip, size));
2143 if ((ip)->i_ump->um_fstype == UFS1)
2144 panic("%s: dirty filesystem?", __func__);
2145 DIP_ASSIGN(ip, blocks, 0);
2146 DIP_ASSIGN(ip, size, 0);
2147 }
2148
2149 /* Set uid / gid. */
2150 if (cred == NOCRED || cred == FSCRED) {
2151 ip->i_gid = 0;
2152 ip->i_uid = 0;
2153 } else {
2154 ip->i_gid = VTOI(dvp)->i_gid;
2155 ip->i_uid = kauth_cred_geteuid(cred);
2156 }
2157 DIP_ASSIGN(ip, gid, ip->i_gid);
2158 DIP_ASSIGN(ip, uid, ip->i_uid);
2159
2160 #if defined(QUOTA) || defined(QUOTA2)
2161 error = UFS_WAPBL_BEGIN(mp);
2162 if (error) {
2163 ffs_deinit_vnode(ump, vp);
2164
2165 return error;
2166 }
2167 error = chkiq(ip, 1, cred, 0);
2168 if (error) {
2169 ffs_vfree(dvp, ino, mode);
2170 UFS_WAPBL_END(mp);
2171 ffs_deinit_vnode(ump, vp);
2172
2173 return error;
2174 }
2175 UFS_WAPBL_END(mp);
2176 #endif
2177
2178 /* Set type and finalize. */
2179 ip->i_flags = 0;
2180 DIP_ASSIGN(ip, flags, 0);
2181 ip->i_mode = mode;
2182 DIP_ASSIGN(ip, mode, mode);
2183 if (vap->va_rdev != VNOVAL) {
2184 /*
2185 * Want to be able to use this to make badblock
2186 * inodes, so don't truncate the dev number.
2187 */
2188 if (ump->um_fstype == UFS1)
2189 ip->i_ffs1_rdev = ufs_rw32(vap->va_rdev,
2190 UFS_MPNEEDSWAP(ump));
2191 else
2192 ip->i_ffs2_rdev = ufs_rw64(vap->va_rdev,
2193 UFS_MPNEEDSWAP(ump));
2194 }
2195 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
2196 ip->i_devvp = ump->um_devvp;
2197 vref(ip->i_devvp);
2198
2199 /* Set up a new generation number for this inode. */
2200 ip->i_gen++;
2201 DIP_ASSIGN(ip, gen, ip->i_gen);
2202 if (fs->fs_magic == FS_UFS2_MAGIC) {
2203 vfs_timestamp(&ts);
2204 ip->i_ffs2_birthtime = ts.tv_sec;
2205 ip->i_ffs2_birthnsec = ts.tv_nsec;
2206 }
2207
2208 uvm_vnp_setsize(vp, ip->i_size);
2209 cache_enter_id(vp, ip->i_mode, ip->i_uid, ip->i_gid);
2210 *new_key = &ip->i_number;
2211 return 0;
2212 }
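
/*
 * Usage sketch (assumed call pattern): directory-side code such as
 * ufs_makeinode() reaches this function through the vnode cache, roughly
 *
 *	error = vcache_new(mp, dvp, vap, cred, extra, &vp);
 *
 * vcache_new() allocates the fresh vnode, calls VFS_NEWVNODE() (this
 * function) to allocate and initialize the on-disk inode, and then
 * publishes the vnode under the key returned via *new_key (the inode
 * number), so that later vcache_get() lookups find it.
 */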
2213
2214 /*
2215 * File handle to vnode
2216 *
2217 * Have to be really careful about stale file handles:
2218 * - check that the inode number is valid
2219 * - call ffs_vget() to get the locked inode
2220 * - check for an unallocated inode (i_mode == 0)
2221 * - check that the given client host has export rights and return
2222	 * those rights via exflagsp and credanonp
2223 */
2224 int
2225 ffs_fhtovp(struct mount *mp, struct fid *fhp, int lktype, struct vnode **vpp)
2226 {
2227 struct ufid ufh;
2228 int error;
2229
2230 if (fhp->fid_len != sizeof(struct ufid))
2231 return EINVAL;
2232
2233 memcpy(&ufh, fhp, sizeof(ufh));
2234 if ((error = ffs_checkrange(mp, ufh.ufid_ino)) != 0)
2235 return error;
2236
2237 return (ufs_fhtovp(mp, &ufh, lktype, vpp));
2238 }
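
/*
 * Note on the stale-handle checks above: ffs_checkrange() rejects inode
 * numbers a handle could not legitimately contain for this file system,
 * so a corrupted or forged handle cannot make us read outside the inode
 * area.  A handle for a file that has since been deleted and whose inode
 * has been reused is expected to be caught in ufs_fhtovp(), which
 * compares ufid_gen against the inode's current generation and returns
 * ESTALE on mismatch.
 */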
2239
2240 /*
2241 * Vnode pointer to File handle
2242 */
2243 /* ARGSUSED */
2244 int
2245 ffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
2246 {
2247 struct inode *ip;
2248 struct ufid ufh;
2249
2250 if (*fh_size < sizeof(struct ufid)) {
2251 *fh_size = sizeof(struct ufid);
2252 return E2BIG;
2253 }
2254 ip = VTOI(vp);
2255 *fh_size = sizeof(struct ufid);
2256 memset(&ufh, 0, sizeof(ufh));
2257 ufh.ufid_len = sizeof(struct ufid);
2258 ufh.ufid_ino = ip->i_number;
2259 ufh.ufid_gen = ip->i_gen;
2260 memcpy(fhp, &ufh, sizeof(ufh));
2261 return (0);
2262 }
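
/*
 * Usage sketch of the size-probe convention above (hypothetical caller,
 * not from this file): a caller that does not know the handle size can
 * pass a too-small *fh_size, learn the required size from the E2BIG
 * return, and retry:
 *
 *	size_t fh_size = 0;
 *	struct fid *fhp;
 *
 *	if (VFS_VPTOFH(vp, NULL, &fh_size) == E2BIG) {
 *		fhp = kmem_zalloc(fh_size, KM_SLEEP);
 *		error = VFS_VPTOFH(vp, fhp, &fh_size);
 *	}
 *
 * The kernel's file-handle composition helpers follow essentially this
 * pattern when building handles for export.
 */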
2263
2264 void
2265 ffs_init(void)
2266 {
2267 if (ffs_initcount++ > 0)
2268 return;
2269
2270 ffs_inode_cache = pool_cache_init(sizeof(struct inode), 0, 0, 0,
2271 "ffsino", NULL, IPL_NONE, NULL, NULL, NULL);
2272 ffs_dinode1_cache = pool_cache_init(sizeof(struct ufs1_dinode), 0, 0, 0,
2273 "ffsdino1", NULL, IPL_NONE, NULL, NULL, NULL);
2274 ffs_dinode2_cache = pool_cache_init(sizeof(struct ufs2_dinode), 0, 0, 0,
2275 "ffsdino2", NULL, IPL_NONE, NULL, NULL, NULL);
2276 ufs_init();
2277 }
2278
2279 void
2280 ffs_reinit(void)
2281 {
2282 ufs_reinit();
2283 }
2284
2285 void
2286 ffs_done(void)
2287 {
2288 if (--ffs_initcount > 0)
2289 return;
2290
2291 ufs_done();
2292 pool_cache_destroy(ffs_dinode2_cache);
2293 pool_cache_destroy(ffs_dinode1_cache);
2294 pool_cache_destroy(ffs_inode_cache);
2295 }
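
/*
 * ffs_init()/ffs_done() are reference counted (ffs_initcount) so the pool
 * caches and the shared UFS state are set up exactly once even if FFS is
 * attached more than once (e.g. built in and as a module).  The caches
 * created in ffs_init() are destroyed here, in reverse order, once the
 * last user goes away.
 */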
2296
2297 /*
2298 * Write a superblock and associated information back to disk.
2299 */
2300 int
2301 ffs_sbupdate(struct ufsmount *mp, int waitfor)
2302 {
2303 struct fs *fs = mp->um_fs;
2304 struct buf *bp;
2305 int error;
2306 u_int32_t saveflag;
2307
2308 error = ffs_getblk(mp->um_devvp,
2309 fs->fs_sblockloc / DEV_BSIZE, FFS_NOBLK,
2310 fs->fs_sbsize, false, &bp);
2311 if (error)
2312 return error;
2313 saveflag = fs->fs_flags & FS_INTERNAL;
2314 fs->fs_flags &= ~FS_INTERNAL;
2315
2316 memcpy(bp->b_data, fs, fs->fs_sbsize);
2317
2318 ffs_oldfscompat_write((struct fs *)bp->b_data, mp);
2319 #ifdef FFS_EI
2320 if (mp->um_flags & UFS_NEEDSWAP)
2321 ffs_sb_swap((struct fs *)bp->b_data, (struct fs *)bp->b_data);
2322 #endif
2323 fs->fs_flags |= saveflag;
2324
2325 if (waitfor == MNT_WAIT)
2326 error = bwrite(bp);
2327 else
2328 bawrite(bp);
2329 return (error);
2330 }
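
/*
 * Note on the flag handling above: FS_INTERNAL masks the superblock flag
 * bits that are meaningful in core only; they are cleared before the
 * in-core superblock is copied into the buffer, so they never reach the
 * disk, and are restored afterwards.  With MNT_WAIT the write goes out
 * synchronously through bwrite() and its error is returned to the caller;
 * otherwise bawrite() merely queues an asynchronous write, so a failure
 * can only be noticed on a later synchronous flush.
 */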
2331
2332 int
2333 ffs_cgupdate(struct ufsmount *mp, int waitfor)
2334 {
2335 struct fs *fs = mp->um_fs;
2336 struct buf *bp;
2337 int blks;
2338 void *space;
2339 int i, size, error = 0, allerror = 0;
2340
2341 UFS_WAPBL_JLOCK_ASSERT(mp->um_mountp);
2342
2343 allerror = ffs_sbupdate(mp, waitfor);
2344 blks = howmany(fs->fs_cssize, fs->fs_fsize);
2345 space = fs->fs_csp;
2346 for (i = 0; i < blks; i += fs->fs_frag) {
2347 size = fs->fs_bsize;
2348 if (i + fs->fs_frag > blks)
2349 size = (blks - i) * fs->fs_fsize;
2350 error = ffs_getblk(mp->um_devvp, FFS_FSBTODB(fs, fs->fs_csaddr + i),
2351 FFS_NOBLK, size, false, &bp);
2352 if (error)
2353 break;
2354 #ifdef FFS_EI
2355 if (mp->um_flags & UFS_NEEDSWAP)
2356 ffs_csum_swap((struct csum*)space,
2357 (struct csum*)bp->b_data, size);
2358 else
2359 #endif
2360 memcpy(bp->b_data, space, (u_int)size);
2361 space = (char *)space + size;
2362 if (waitfor == MNT_WAIT)
2363 error = bwrite(bp);
2364 else
2365 bawrite(bp);
2366 }
2367 if (!allerror && error)
2368 allerror = error;
2369 return (allerror);
2370 }
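
/*
 * Worked example of the summary-area loop above (a sketch assuming common
 * newfs defaults of fs_bsize 16384, fs_fsize 2048 and fs_frag 8): a
 * summary area of fs_cssize 20480 bytes gives
 * blks = howmany(20480, 2048) = 10 fragments.  The first pass (i = 0)
 * writes a full 16384-byte block; the second (i = 8) writes the remaining
 * (10 - 8) * 2048 = 4096 bytes.
 */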
2371
2372 int
2373 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
2374 int attrnamespace, const char *attrname)
2375 {
2376 #ifdef UFS_EXTATTR
2377 /*
2378 * File-backed extended attributes are only supported on UFS1.
2379 * UFS2 has native extended attributes.
2380 */
2381 if (VFSTOUFS(mp)->um_fstype == UFS1)
2382 return (ufs_extattrctl(mp, cmd, vp, attrnamespace, attrname));
2383 #endif
2384 return (vfs_stdextattrctl(mp, cmd, vp, attrnamespace, attrname));
2385 }
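
/*
 * Background note: UFS1 has no on-disk storage reserved for extended
 * attributes, so they are kept in ordinary backing files managed through
 * ufs_extattrctl().  UFS2 keeps extended attributes in each inode's own
 * extattr area and needs no such control operations, so the request falls
 * through to the generic vfs_stdextattrctl(), which simply refuses it.
 */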
2386
2387 /*
2388	 * Sync the block device vnode backing a mounted file system.
2389 */
2390 static int
2391 ffs_vfs_fsync(vnode_t *vp, int flags)
2392 {
2393 int error, i, pflags;
2394 #ifdef WAPBL
2395 struct mount *mp;
2396 #endif
2397
2398 KASSERT(vp->v_type == VBLK);
2399 KASSERT(spec_node_getmountedfs(vp) != NULL);
2400
2401 /*
2402 * Flush all dirty data associated with the vnode.
2403 */
2404 pflags = PGO_ALLPAGES | PGO_CLEANIT;
2405 if ((flags & FSYNC_WAIT) != 0)
2406 pflags |= PGO_SYNCIO;
2407 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
2408 error = VOP_PUTPAGES(vp, 0, 0, pflags);
2409 if (error)
2410 return error;
2411
2412 #ifdef WAPBL
2413 mp = spec_node_getmountedfs(vp);
2414 if (mp && mp->mnt_wapbl) {
2415 /*
2416 * Don't bother writing out metadata if the syncer is
2417 * making the request. We will let the sync vnode
2418 * write it out in a single burst through a call to
2419 * VFS_SYNC().
2420 */
2421 if ((flags & (FSYNC_DATAONLY | FSYNC_LAZY | FSYNC_NOLOG)) != 0)
2422 return 0;
2423
2424 /*
2425 * Don't flush the log if the vnode being flushed
2426 * contains no dirty buffers that could be in the log.
2427 */
2428 if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
2429 error = wapbl_flush(mp->mnt_wapbl, 0);
2430 if (error)
2431 return error;
2432 }
2433
2434 if ((flags & FSYNC_WAIT) != 0) {
2435 mutex_enter(vp->v_interlock);
2436 while (vp->v_numoutput)
2437 cv_wait(&vp->v_cv, vp->v_interlock);
2438 mutex_exit(vp->v_interlock);
2439 }
2440
2441 return 0;
2442 }
2443 #endif /* WAPBL */
2444
2445 error = vflushbuf(vp, flags);
2446 if (error == 0 && (flags & FSYNC_CACHE) != 0) {
2447 i = 1;
2448 (void)VOP_IOCTL(vp, DIOCCACHESYNC, &i, FWRITE,
2449 kauth_cred_get());
2450 }
2451
2452 return error;
2453 }
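
/*
 * Note on the FSYNC_CACHE case above: once the buffers have been flushed,
 * a DIOCCACHESYNC ioctl asks the underlying disk driver to flush its
 * volatile write cache as well (the ioctl takes an int argument, set to 1
 * here).  The return value is deliberately ignored, presumably because
 * not every driver implements the ioctl.
 */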
2454