ffs_vfsops.c revision 1.366

1 /* $NetBSD: ffs_vfsops.c,v 1.366 2020/03/16 21:20:12 pgoyette Exp $ */
2
3 /*-
4 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Wasabi Systems, Inc, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1989, 1991, 1993, 1994
34 * The Regents of the University of California. All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
61 */
62
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.366 2020/03/16 21:20:12 pgoyette Exp $");
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_ffs.h"
68 #include "opt_quota.h"
69 #include "opt_wapbl.h"
70 #endif
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/namei.h>
75 #include <sys/proc.h>
76 #include <sys/kernel.h>
77 #include <sys/vnode.h>
78 #include <sys/socket.h>
79 #include <sys/mount.h>
80 #include <sys/buf.h>
81 #include <sys/device.h>
82 #include <sys/disk.h>
83 #include <sys/file.h>
84 #include <sys/disklabel.h>
85 #include <sys/ioctl.h>
86 #include <sys/errno.h>
87 #include <sys/kmem.h>
88 #include <sys/pool.h>
89 #include <sys/lock.h>
90 #include <sys/sysctl.h>
91 #include <sys/conf.h>
92 #include <sys/kauth.h>
93 #include <sys/wapbl.h>
94 #include <sys/module.h>
95
96 #include <miscfs/genfs/genfs.h>
97 #include <miscfs/specfs/specdev.h>
98
99 #include <ufs/ufs/quota.h>
100 #include <ufs/ufs/ufsmount.h>
101 #include <ufs/ufs/inode.h>
102 #include <ufs/ufs/dir.h>
103 #include <ufs/ufs/ufs_extern.h>
104 #include <ufs/ufs/ufs_bswap.h>
105 #include <ufs/ufs/ufs_wapbl.h>
106
107 #include <ufs/ffs/fs.h>
108 #include <ufs/ffs/ffs_extern.h>
109
110 #ifdef WAPBL
111 MODULE(MODULE_CLASS_VFS, ffs, "ufs,wapbl");
112 #else
113 MODULE(MODULE_CLASS_VFS, ffs, "ufs");
114 #endif
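/*
 * A note on the MODULE() declarations above: the third argument is the
 * module dependency list, so an ffs module can only be loaded once the
 * ufs module (and, when built with options WAPBL, the wapbl module) is
 * present; the kernel module loader resolves those names before calling
 * ffs_modcmd() below.
 */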
115
116 static int ffs_vfs_fsync(vnode_t *, int);
117 static int ffs_superblock_validate(struct fs *);
118 static int ffs_is_appleufs(struct vnode *, struct fs *);
119
120 static int ffs_init_vnode(struct ufsmount *, struct vnode *, ino_t);
121 static void ffs_deinit_vnode(struct ufsmount *, struct vnode *);
122
123 static kauth_listener_t ffs_snapshot_listener;
124
125 /* how many times ffs_init() was called */
126 int ffs_initcount = 0;
127
128 #ifdef DEBUG_FFS_MOUNT
129 #define DPRINTF(_fmt, args...) printf("%s: " _fmt "\n", __func__, ##args)
130 #else
131 #define DPRINTF(_fmt, args...) do {} while (/*CONSTCOND*/0)
132 #endif
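/*
 * DPRINTF() is compiled away unless the kernel is built with
 * DEBUG_FFS_MOUNT defined.  Note that the macro already appends a
 * newline, so call sites that pass their own "\n" simply emit an extra
 * blank line.
 */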
133
134 extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
135 extern const struct vnodeopv_desc ffs_specop_opv_desc;
136 extern const struct vnodeopv_desc ffs_fifoop_opv_desc;
137
138 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
139 &ffs_vnodeop_opv_desc,
140 &ffs_specop_opv_desc,
141 &ffs_fifoop_opv_desc,
142 NULL,
143 };
144
145 struct vfsops ffs_vfsops = {
146 .vfs_name = MOUNT_FFS,
147 .vfs_min_mount_data = sizeof (struct ufs_args),
148 .vfs_mount = ffs_mount,
149 .vfs_start = ufs_start,
150 .vfs_unmount = ffs_unmount,
151 .vfs_root = ufs_root,
152 .vfs_quotactl = ufs_quotactl,
153 .vfs_statvfs = ffs_statvfs,
154 .vfs_sync = ffs_sync,
155 .vfs_vget = ufs_vget,
156 .vfs_loadvnode = ffs_loadvnode,
157 .vfs_newvnode = ffs_newvnode,
158 .vfs_fhtovp = ffs_fhtovp,
159 .vfs_vptofh = ffs_vptofh,
160 .vfs_init = ffs_init,
161 .vfs_reinit = ffs_reinit,
162 .vfs_done = ffs_done,
163 .vfs_mountroot = ffs_mountroot,
164 .vfs_snapshot = ffs_snapshot,
165 .vfs_extattrctl = ffs_extattrctl,
166 .vfs_suspendctl = genfs_suspendctl,
167 .vfs_renamelock_enter = genfs_renamelock_enter,
168 .vfs_renamelock_exit = genfs_renamelock_exit,
169 .vfs_fsync = ffs_vfs_fsync,
170 .vfs_opv_descs = ffs_vnodeopv_descs
171 };
172
173 static const struct genfs_ops ffs_genfsops = {
174 .gop_size = ffs_gop_size,
175 .gop_alloc = ufs_gop_alloc,
176 .gop_write = genfs_gop_write,
177 .gop_markupdate = ufs_gop_markupdate,
178 .gop_putrange = genfs_gop_putrange,
179 };
180
181 static const struct ufs_ops ffs_ufsops = {
182 .uo_itimes = ffs_itimes,
183 .uo_update = ffs_update,
184 .uo_truncate = ffs_truncate,
185 .uo_balloc = ffs_balloc,
186 .uo_snapgone = ffs_snapgone,
187 .uo_bufrd = ffs_bufrd,
188 .uo_bufwr = ffs_bufwr,
189 };
190
191 static int
192 ffs_checkrange(struct mount *mp, uint32_t ino)
193 {
194 struct fs *fs = VFSTOUFS(mp)->um_fs;
195
196 if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg) {
197 DPRINTF("out of range %u\n", ino);
198 return ESTALE;
199 }
200
201 /*
202 * Need to check whether the inode is initialized, because FFSv2 does
203 * lazy initialization and we can get here from nfs_fhtovp.
204 */
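/*
 * cg_initediblk, read from the cylinder group below, records how far
 * inode initialization has progressed in that group.  Purely as an
 * illustration, with fs_ipg == 32768 and cg == 2, an inode number well
 * beyond 65536 + cg_initediblk refers to a dinode block that has never
 * been written to disk, and such a request is answered with ESTALE.
 */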
205 if (fs->fs_magic != FS_UFS2_MAGIC)
206 return 0;
207
208 struct buf *bp;
209 int cg = ino_to_cg(fs, ino);
210 struct ufsmount *ump = VFSTOUFS(mp);
211
212 int error = bread(ump->um_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
213 (int)fs->fs_cgsize, B_MODIFY, &bp);
214 if (error) {
215 DPRINTF("error %d reading cg %d ino %u\n", error, cg, ino);
216 return error;
217 }
218
219 const int needswap = UFS_FSNEEDSWAP(fs);
220
221 struct cg *cgp = (struct cg *)bp->b_data;
222 if (!cg_chkmagic(cgp, needswap)) {
223 brelse(bp, 0);
224 DPRINTF("bad cylinder group magic cg %d ino %u\n", cg, ino);
225 return ESTALE;
226 }
227
228 int32_t initediblk = ufs_rw32(cgp->cg_initediblk, needswap);
229 brelse(bp, 0);
230
231 if (cg * fs->fs_ipg + initediblk < ino) {
232 DPRINTF("cg=%d fs->fs_ipg=%d initediblk=%d ino=%u\n",
233 cg, fs->fs_ipg, initediblk, ino);
234 return ESTALE;
235 }
236 return 0;
237 }
238
239 static int
240 ffs_snapshot_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
241 void *arg0, void *arg1, void *arg2, void *arg3)
242 {
243 vnode_t *vp = arg2;
244 int result = KAUTH_RESULT_DEFER;
245
246 if (action != KAUTH_SYSTEM_FS_SNAPSHOT)
247 return result;
248
249 if (VTOI(vp)->i_uid == kauth_cred_geteuid(cred))
250 result = KAUTH_RESULT_ALLOW;
251
252 return result;
253 }
254
255 SYSCTL_SETUP(ffs_sysctl_setup, "ffs sysctls")
256 {
257 #ifdef UFS_EXTATTR
258 extern int ufs_extattr_autocreate;
259 #endif
260 extern int ffs_log_changeopt;
261
262 sysctl_createv(clog, 0, NULL, NULL,
263 CTLFLAG_PERMANENT,
264 CTLTYPE_NODE, "ffs",
265 SYSCTL_DESCR("Berkeley Fast File System"),
266 NULL, 0, NULL, 0,
267 CTL_VFS, 1, CTL_EOL);
268 /*
269 * @@@ should we even bother with these first three?
270 */
271 sysctl_createv(clog, 0, NULL, NULL,
272 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
273 CTLTYPE_INT, "doclusterread", NULL,
274 sysctl_notavail, 0, NULL, 0,
275 CTL_VFS, 1, FFS_CLUSTERREAD, CTL_EOL);
276 sysctl_createv(clog, 0, NULL, NULL,
277 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
278 CTLTYPE_INT, "doclusterwrite", NULL,
279 sysctl_notavail, 0, NULL, 0,
280 CTL_VFS, 1, FFS_CLUSTERWRITE, CTL_EOL);
281 sysctl_createv(clog, 0, NULL, NULL,
282 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
283 CTLTYPE_INT, "doreallocblks", NULL,
284 sysctl_notavail, 0, NULL, 0,
285 CTL_VFS, 1, FFS_REALLOCBLKS, CTL_EOL);
286 #if 0
287 sysctl_createv(clog, 0, NULL, NULL,
288 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
289 CTLTYPE_INT, "doasyncfree",
290 SYSCTL_DESCR("Release dirty blocks asynchronously"),
291 NULL, 0, &doasyncfree, 0,
292 CTL_VFS, 1, FFS_ASYNCFREE, CTL_EOL);
293 #endif
294 sysctl_createv(clog, 0, NULL, NULL,
295 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
296 CTLTYPE_INT, "log_changeopt",
297 SYSCTL_DESCR("Log changes in optimization strategy"),
298 NULL, 0, &ffs_log_changeopt, 0,
299 CTL_VFS, 1, FFS_LOG_CHANGEOPT, CTL_EOL);
300 #ifdef UFS_EXTATTR
301 sysctl_createv(clog, 0, NULL, NULL,
302 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
303 CTLTYPE_INT, "extattr_autocreate",
304 SYSCTL_DESCR("Size of attribute for "
305 "backing file autocreation"),
306 NULL, 0, &ufs_extattr_autocreate, 0,
307 CTL_VFS, 1, FFS_EXTATTR_AUTOCREATE, CTL_EOL);
308
309 #endif /* UFS_EXTATTR */
310 }
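/*
 * The nodes created above appear under vfs.ffs.  For example (command
 * lines given for illustration only, assuming a kernel with options
 * UFS_EXTATTR):
 *
 *	sysctl vfs.ffs.log_changeopt
 *	sysctl -w vfs.ffs.extattr_autocreate=131072
 *
 * The first three entries (doclusterread, doclusterwrite and
 * doreallocblks) are kept only for compatibility and are stubbed out
 * via sysctl_notavail().
 */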
311
312 static int
313 ffs_modcmd(modcmd_t cmd, void *arg)
314 {
315 int error;
316
317 #if 0
318 extern int doasyncfree;
319 #endif
320
321 switch (cmd) {
322 case MODULE_CMD_INIT:
323 error = vfs_attach(&ffs_vfsops);
324 if (error != 0)
325 break;
326
327 ffs_snapshot_listener = kauth_listen_scope(KAUTH_SCOPE_SYSTEM,
328 ffs_snapshot_cb, NULL);
329 if (ffs_snapshot_listener == NULL)
330 printf("ffs_modcmd: can't listen on system scope.\n");
331
332 break;
333 case MODULE_CMD_FINI:
334 error = vfs_detach(&ffs_vfsops);
335 if (error != 0)
336 break;
337 if (ffs_snapshot_listener != NULL)
338 kauth_unlisten_scope(ffs_snapshot_listener);
339 break;
340 default:
341 error = ENOTTY;
342 break;
343 }
344
345 return (error);
346 }
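/*
 * When ffs is built as a module rather than into the kernel, this entry
 * point is typically exercised with something like
 *
 *	modload ffs	(MODULE_CMD_INIT)
 *	modunload ffs	(MODULE_CMD_FINI)
 *
 * The modload(8)/modunload(8) commands are given for illustration; the
 * same paths are taken when the kernel autoloads the module at mount
 * time.
 */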
347
348 pool_cache_t ffs_inode_cache;
349 pool_cache_t ffs_dinode1_cache;
350 pool_cache_t ffs_dinode2_cache;
351
352 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, daddr_t);
353 static void ffs_oldfscompat_write(struct fs *, struct ufsmount *);
354
355 /*
356 * Called by main() when ffs is going to be mounted as root.
357 */
358
359 int
360 ffs_mountroot(void)
361 {
362 struct fs *fs;
363 struct mount *mp;
364 struct lwp *l = curlwp; /* XXX */
365 struct ufsmount *ump;
366 int error;
367
368 if (device_class(root_device) != DV_DISK)
369 return (ENODEV);
370
371 if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
372 vrele(rootvp);
373 return (error);
374 }
375
376 /*
377 * We always need to be able to mount the root file system.
378 */
379 mp->mnt_flag |= MNT_FORCE;
380 if ((error = ffs_mountfs(rootvp, mp, l)) != 0) {
381 vfs_unbusy(mp);
382 vfs_rele(mp);
383 return (error);
384 }
385 mp->mnt_flag &= ~MNT_FORCE;
386 mountlist_append(mp);
387 ump = VFSTOUFS(mp);
388 fs = ump->um_fs;
389 memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
390 (void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
391 (void)ffs_statvfs(mp, &mp->mnt_stat);
392 vfs_unbusy(mp);
393 setrootfstime((time_t)fs->fs_time);
394 return (0);
395 }
396
397 /*
398 * VFS Operations.
399 *
400 * mount system call
401 */
402 int
403 ffs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
404 {
405 struct lwp *l = curlwp;
406 struct vnode *devvp = NULL;
407 struct ufs_args *args = data;
408 struct ufsmount *ump = NULL;
409 struct fs *fs;
410 int error = 0, flags, update;
411 mode_t accessmode;
412
413 if (args == NULL) {
414 DPRINTF("NULL args");
415 return EINVAL;
416 }
417 if (*data_len < sizeof(*args)) {
418 DPRINTF("bad size args %zu != %zu", *data_len, sizeof(*args));
419 return EINVAL;
420 }
421
422 ump = VFSTOUFS(mp);
423 if ((mp->mnt_flag & (MNT_GETARGS|MNT_UPDATE)) && ump == NULL) {
424 DPRINTF("no ump");
425 return EIO;
426 }
427
428 if (mp->mnt_flag & MNT_GETARGS) {
429 args->fspec = NULL;
430 *data_len = sizeof *args;
431 return 0;
432 }
433
434 update = mp->mnt_flag & MNT_UPDATE;
435
436 /* Check arguments */
437 if (args->fspec == NULL) {
438 if (!update) {
439 /* New mounts must have a filename for the device */
440 DPRINTF("no filename for mount");
441 return EINVAL;
442 }
443 } else {
444 /*
445 * Look up the name and verify that it's sane.
446 */
447 error = namei_simple_user(args->fspec,
448 NSM_FOLLOW_NOEMULROOT, &devvp);
449 if (error != 0) {
450 DPRINTF("namei_simple_user returned %d", error);
451 return error;
452 }
453
454 /*
455 * Be sure this is a valid block device
456 */
457 if (devvp->v_type != VBLK) {
458 DPRINTF("non block device %d", devvp->v_type);
459 error = ENOTBLK;
460 goto fail;
461 }
462
463 if (bdevsw_lookup(devvp->v_rdev) == NULL) {
464 DPRINTF("can't find block device 0x%jx",
465 (uintmax_t)devvp->v_rdev);
466 error = ENXIO;
467 goto fail;
468 }
469
470 if (update) {
471 /*
472 * Be sure we're still naming the same device
473 * used for our initial mount
474 */
475 if (devvp != ump->um_devvp &&
476 devvp->v_rdev != ump->um_devvp->v_rdev) {
477 DPRINTF("wrong device 0x%jx != 0x%jx",
478 (uintmax_t)devvp->v_rdev,
479 (uintmax_t)ump->um_devvp->v_rdev);
480 error = EINVAL;
481 goto fail;
482 }
483 vrele(devvp);
484 devvp = NULL;
485 }
486 }
487
488 if (devvp == NULL) {
489 devvp = ump->um_devvp;
490 vref(devvp);
491 }
492
493 /*
494 * If mount by non-root, then verify that user has necessary
495 * permissions on the device.
496 *
497 * Permission to update a mount is checked higher up, so here we presume
498 * that updating the mount is okay (for example, as far as securelevel
499 * goes), which leaves us with the normal check.
500 */
501 accessmode = VREAD;
502 if (update ? (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
503 (mp->mnt_flag & MNT_RDONLY) == 0)
504 accessmode |= VWRITE;
505 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
506 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
507 KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp, KAUTH_ARG(accessmode));
508 VOP_UNLOCK(devvp);
509 if (error) {
510 DPRINTF("kauth returned %d", error);
511 goto fail;
512 }
513
514 #ifdef WAPBL
515 /* WAPBL can only be enabled on a r/w mount. */
516 if (((mp->mnt_flag & MNT_RDONLY) && !(mp->mnt_iflag & IMNT_WANTRDWR)) ||
517 (mp->mnt_iflag & IMNT_WANTRDONLY)) {
518 mp->mnt_flag &= ~MNT_LOG;
519 }
520 #else /* !WAPBL */
521 mp->mnt_flag &= ~MNT_LOG;
522 #endif /* !WAPBL */
523
524 if (!update) {
525 int xflags;
526
527 if (mp->mnt_flag & MNT_RDONLY)
528 xflags = FREAD;
529 else
530 xflags = FREAD | FWRITE;
531 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
532 error = VOP_OPEN(devvp, xflags, FSCRED);
533 VOP_UNLOCK(devvp);
534 if (error) {
535 DPRINTF("VOP_OPEN returned %d", error);
536 goto fail;
537 }
538 error = ffs_mountfs(devvp, mp, l);
539 if (error) {
540 DPRINTF("ffs_mountfs returned %d", error);
541 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
542 (void)VOP_CLOSE(devvp, xflags, NOCRED);
543 VOP_UNLOCK(devvp);
544 goto fail;
545 }
546
547 ump = VFSTOUFS(mp);
548 fs = ump->um_fs;
549 } else {
550 /*
551 * Update the mount.
552 */
553
554 /*
555 * The initial mount got a reference on this
556 * device, so drop the one obtained via
557 * namei(), above.
558 */
559 vrele(devvp);
560
561 ump = VFSTOUFS(mp);
562 fs = ump->um_fs;
563 if (fs->fs_ronly == 0 && (mp->mnt_iflag & IMNT_WANTRDONLY)) {
564 /*
565 * Changing from r/w to r/o
566 */
567 flags = WRITECLOSE;
568 if (mp->mnt_flag & MNT_FORCE)
569 flags |= FORCECLOSE;
570 error = ffs_flushfiles(mp, flags, l);
571 if (error)
572 return error;
573
574 error = UFS_WAPBL_BEGIN(mp);
575 if (error) {
576 DPRINTF("wapbl %d", error);
577 return error;
578 }
579
580 if (ffs_cgupdate(ump, MNT_WAIT) == 0 &&
581 fs->fs_clean & FS_WASCLEAN) {
582 if (mp->mnt_flag & MNT_SOFTDEP)
583 fs->fs_flags &= ~FS_DOSOFTDEP;
584 fs->fs_clean = FS_ISCLEAN;
585 (void) ffs_sbupdate(ump, MNT_WAIT);
586 }
587
588 UFS_WAPBL_END(mp);
589 }
590
591 #ifdef WAPBL
592 if ((mp->mnt_flag & MNT_LOG) == 0) {
593 error = ffs_wapbl_stop(mp, mp->mnt_flag & MNT_FORCE);
594 if (error) {
595 DPRINTF("ffs_wapbl_stop returned %d", error);
596 return error;
597 }
598 }
599 #endif /* WAPBL */
600
601 if (fs->fs_ronly == 0 && (mp->mnt_iflag & IMNT_WANTRDONLY)) {
602 /*
603 * Finish change from r/w to r/o
604 */
605 fs->fs_ronly = 1;
606 fs->fs_fmod = 0;
607 }
608
609 if (mp->mnt_flag & MNT_RELOAD) {
610 error = ffs_reload(mp, l->l_cred, l);
611 if (error) {
612 DPRINTF("ffs_reload returned %d", error);
613 return error;
614 }
615 }
616
617 if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
618 /*
619 * Changing from read-only to read/write
620 */
621 #ifndef QUOTA2
622 if (fs->fs_flags & FS_DOQUOTA2) {
623 ump->um_flags |= UFS_QUOTA2;
624 uprintf("%s: options QUOTA2 not enabled%s\n",
625 mp->mnt_stat.f_mntonname,
626 (mp->mnt_flag & MNT_FORCE) ? "" :
627 ", not mounting");
628 DPRINTF("ffs_quota2 %d", EINVAL);
629 return EINVAL;
630 }
631 #endif
632 fs->fs_ronly = 0;
633 fs->fs_clean <<= 1;
634 fs->fs_fmod = 1;
635 #ifdef WAPBL
636 if (fs->fs_flags & FS_DOWAPBL) {
637 const char *nm = mp->mnt_stat.f_mntonname;
638 if (!mp->mnt_wapbl_replay) {
639 printf("%s: log corrupted;"
640 " replay cancelled\n", nm);
641 return EFTYPE;
642 }
643 printf("%s: replaying log to disk\n", nm);
644 error = wapbl_replay_write(mp->mnt_wapbl_replay,
645 devvp);
646 if (error) {
647 DPRINTF("%s: wapbl_replay_write %d",
648 nm, error);
649 return error;
650 }
651 wapbl_replay_stop(mp->mnt_wapbl_replay);
652 fs->fs_clean = FS_WASCLEAN;
653 }
654 #endif /* WAPBL */
655 if (fs->fs_snapinum[0] != 0)
656 ffs_snapshot_mount(mp);
657 }
658
659 #ifdef WAPBL
660 error = ffs_wapbl_start(mp);
661 if (error) {
662 DPRINTF("ffs_wapbl_start returned %d", error);
663 return error;
664 }
665 #endif /* WAPBL */
666
667 #ifdef QUOTA2
668 if (!fs->fs_ronly) {
669 error = ffs_quota2_mount(mp);
670 if (error) {
671 DPRINTF("ffs_quota2_mount returned %d", error);
672 return error;
673 }
674 }
675 #endif
676
677 if ((mp->mnt_flag & MNT_DISCARD) && !(ump->um_discarddata))
678 ump->um_discarddata = ffs_discard_init(devvp, fs);
679
680 if (args->fspec == NULL)
681 return 0;
682 }
683
684 error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
685 UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
686 if (error == 0)
687 (void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
688 sizeof(fs->fs_fsmnt));
689 else {
690 DPRINTF("set_statvfs_info returned %d", error);
691 }
692 fs->fs_flags &= ~FS_DOSOFTDEP;
693 if (fs->fs_fmod != 0) { /* XXX */
694 int err;
695
696 fs->fs_fmod = 0;
697 if (fs->fs_clean & FS_WASCLEAN)
698 fs->fs_time = time_second;
699 else {
700 printf("%s: file system not clean (fs_clean=%#x); "
701 "please fsck(8)\n", mp->mnt_stat.f_mntfromname,
702 fs->fs_clean);
703 printf("%s: lost blocks %" PRId64 " files %d\n",
704 mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
705 fs->fs_pendinginodes);
706 }
707 err = UFS_WAPBL_BEGIN(mp);
708 if (err == 0) {
709 (void) ffs_cgupdate(ump, MNT_WAIT);
710 UFS_WAPBL_END(mp);
711 }
712 }
713 if ((mp->mnt_flag & MNT_SOFTDEP) != 0) {
714 printf("%s: `-o softdep' is no longer supported, "
715 "consider `-o log'\n", mp->mnt_stat.f_mntfromname);
716 mp->mnt_flag &= ~MNT_SOFTDEP;
717 }
718
719 return (error);
720
721 fail:
722 vrele(devvp);
723 return (error);
724 }
725
726 /*
727 * Reload all incore data for a filesystem (used after running fsck on
728 * the root filesystem and finding things to fix). The filesystem must
729 * be mounted read-only.
730 *
731 * Things to do to update the mount:
732 * 1) invalidate all cached meta-data.
733 * 2) re-read superblock from disk.
734 * 3) re-read summary information from disk.
735 * 4) invalidate all inactive vnodes.
736 * 5) invalidate all cached file data.
737 * 6) re-read inode data for all active vnodes.
738 */
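/*
 * This path is normally reached through ffs_mount() when MNT_RELOAD is
 * set, e.g. "mount -u -o reload /" after fsck(8) has patched the
 * read-only mounted root file system; the example command is given for
 * illustration only.
 */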
739 int
740 ffs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
741 {
742 struct vnode *vp, *devvp;
743 struct inode *ip;
744 void *space;
745 struct buf *bp;
746 struct fs *fs, *newfs;
747 int i, bsize, blks, error;
748 int32_t *lp, fs_sbsize;
749 struct ufsmount *ump;
750 daddr_t sblockloc;
751 struct vnode_iterator *marker;
752
753 if ((mp->mnt_flag & MNT_RDONLY) == 0)
754 return (EINVAL);
755
756 ump = VFSTOUFS(mp);
757
758 /*
759 * Step 1: invalidate all cached meta-data.
760 */
761 devvp = ump->um_devvp;
762 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
763 error = vinvalbuf(devvp, 0, cred, l, 0, 0);
764 VOP_UNLOCK(devvp);
765 if (error)
766 panic("%s: dirty1", __func__);
767
768 /*
769 * Step 2: re-read superblock from disk. XXX: We don't handle the
770 * possibility that the superblock moved, which implies that we don't
771 * want its size to change either.
772 */
773 fs = ump->um_fs;
774 fs_sbsize = fs->fs_sbsize;
775 error = bread(devvp, fs->fs_sblockloc / DEV_BSIZE, fs_sbsize,
776 0, &bp);
777 if (error)
778 return (error);
779 newfs = kmem_alloc(fs_sbsize, KM_SLEEP);
780 memcpy(newfs, bp->b_data, fs_sbsize);
781
782 #ifdef FFS_EI
783 if (ump->um_flags & UFS_NEEDSWAP) {
784 ffs_sb_swap((struct fs *)bp->b_data, newfs);
785 newfs->fs_flags |= FS_SWAPPED;
786 } else
787 #endif
788 newfs->fs_flags &= ~FS_SWAPPED;
789
790 brelse(bp, 0);
791
792 if ((newfs->fs_magic != FS_UFS1_MAGIC) &&
793 (newfs->fs_magic != FS_UFS2_MAGIC)) {
794 kmem_free(newfs, fs_sbsize);
795 return (EIO); /* XXX needs translation */
796 }
797 if (!ffs_superblock_validate(newfs)) {
798 kmem_free(newfs, fs_sbsize);
799 return (EINVAL);
800 }
801
802 /*
803 * The current implementation doesn't handle the possibility that
804 * these values may have changed.
805 */
806 if ((newfs->fs_sbsize != fs_sbsize) ||
807 (newfs->fs_cssize != fs->fs_cssize) ||
808 (newfs->fs_contigsumsize != fs->fs_contigsumsize) ||
809 (newfs->fs_ncg != fs->fs_ncg)) {
810 kmem_free(newfs, fs_sbsize);
811 return (EINVAL);
812 }
813
814 /* Store off old fs_sblockloc for fs_oldfscompat_read. */
815 sblockloc = fs->fs_sblockloc;
816 /*
817 * Copy pointer fields back into superblock before copying in XXX
818 * new superblock. These should really be in the ufsmount. XXX
819 * Note that important parameters (eg fs_ncg) are unchanged.
820 */
821 newfs->fs_csp = fs->fs_csp;
822 newfs->fs_maxcluster = fs->fs_maxcluster;
823 newfs->fs_contigdirs = fs->fs_contigdirs;
824 newfs->fs_ronly = fs->fs_ronly;
825 newfs->fs_active = fs->fs_active;
826 memcpy(fs, newfs, (u_int)fs_sbsize);
827 kmem_free(newfs, fs_sbsize);
828
829 /*
830 * Recheck for Apple UFS filesystem.
831 */
832 ump->um_flags &= ~UFS_ISAPPLEUFS;
833 if (ffs_is_appleufs(devvp, fs)) {
834 #ifdef APPLE_UFS
835 ump->um_flags |= UFS_ISAPPLEUFS;
836 #else
837 DPRINTF("AppleUFS not supported");
838 return (EIO); /* XXX: really? */
839 #endif
840 }
841
842 if (UFS_MPISAPPLEUFS(ump)) {
843 /* see comment about NeXT below */
844 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
845 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
846 mp->mnt_iflag |= IMNT_DTYPE;
847 } else {
848 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
849 ump->um_dirblksiz = UFS_DIRBLKSIZ;
850 if (ump->um_maxsymlinklen > 0)
851 mp->mnt_iflag |= IMNT_DTYPE;
852 else
853 mp->mnt_iflag &= ~IMNT_DTYPE;
854 }
855 ffs_oldfscompat_read(fs, ump, sblockloc);
856
857 mutex_enter(&ump->um_lock);
858 ump->um_maxfilesize = fs->fs_maxfilesize;
859 if (fs->fs_flags & ~(FS_KNOWN_FLAGS | FS_INTERNAL)) {
860 uprintf("%s: unknown ufs flags: 0x%08"PRIx32"%s\n",
861 mp->mnt_stat.f_mntonname, fs->fs_flags,
862 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
863 if ((mp->mnt_flag & MNT_FORCE) == 0) {
864 mutex_exit(&ump->um_lock);
865 return (EINVAL);
866 }
867 }
868 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
869 fs->fs_pendingblocks = 0;
870 fs->fs_pendinginodes = 0;
871 }
872 mutex_exit(&ump->um_lock);
873
874 ffs_statvfs(mp, &mp->mnt_stat);
875 /*
876 * Step 3: re-read summary information from disk.
877 */
878 blks = howmany(fs->fs_cssize, fs->fs_fsize);
879 space = fs->fs_csp;
880 for (i = 0; i < blks; i += fs->fs_frag) {
881 bsize = fs->fs_bsize;
882 if (i + fs->fs_frag > blks)
883 bsize = (blks - i) * fs->fs_fsize;
884 error = bread(devvp, FFS_FSBTODB(fs, fs->fs_csaddr + i), bsize,
885 0, &bp);
886 if (error) {
887 return (error);
888 }
889 #ifdef FFS_EI
890 if (UFS_FSNEEDSWAP(fs))
891 ffs_csum_swap((struct csum *)bp->b_data,
892 (struct csum *)space, bsize);
893 else
894 #endif
895 memcpy(space, bp->b_data, (size_t)bsize);
896 space = (char *)space + bsize;
897 brelse(bp, 0);
898 }
899 /*
900 * We no longer know anything about clusters per cylinder group.
901 */
902 if (fs->fs_contigsumsize > 0) {
903 lp = fs->fs_maxcluster;
904 for (i = 0; i < fs->fs_ncg; i++)
905 *lp++ = fs->fs_contigsumsize;
906 }
907
908 vfs_vnode_iterator_init(mp, &marker);
909 while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL))) {
910 /*
911 * Step 4: invalidate all inactive vnodes.
912 */
913 if (vrecycle(vp))
914 continue;
915 /*
916 * Step 5: invalidate all cached file data.
917 */
918 if (vn_lock(vp, LK_EXCLUSIVE)) {
919 vrele(vp);
920 continue;
921 }
922 if (vinvalbuf(vp, 0, cred, l, 0, 0))
923 panic("%s: dirty2", __func__);
924 /*
925 * Step 6: re-read inode data for all active vnodes.
926 */
927 ip = VTOI(vp);
928 error = bread(devvp, FFS_FSBTODB(fs, ino_to_fsba(fs, ip->i_number)),
929 (int)fs->fs_bsize, 0, &bp);
930 if (error) {
931 vput(vp);
932 break;
933 }
934 ffs_load_inode(bp, ip, fs, ip->i_number);
935 brelse(bp, 0);
936 vput(vp);
937 }
938 vfs_vnode_iterator_destroy(marker);
939 return (error);
940 }
941
942 /*
943 * Possible superblock locations ordered from most to least likely.
944 */
945 static const int sblock_try[] = SBLOCKSEARCH;
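/*
 * SBLOCKSEARCH comes from <ufs/ffs/fs.h> and conventionally expands to
 * { SBLOCK_UFS2, SBLOCK_UFS1, SBLOCK_FLOPPY, SBLOCK_PIGGY, -1 }, i.e.
 * byte offsets 65536, 8192, 0 and 262144, terminated by -1.  The exact
 * list is owned by fs.h, so treat these numbers as illustrative.
 */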
946
947
948 static int
949 ffs_superblock_validate(struct fs *fs)
950 {
951 int32_t i, fs_bshift = 0, fs_fshift = 0, fs_fragshift = 0, fs_frag;
952 int32_t fs_inopb;
953
954 /* Check the superblock size */
955 if (fs->fs_sbsize > SBLOCKSIZE || fs->fs_sbsize < sizeof(struct fs))
956 return 0;
957
958 /* Check the file system blocksize */
959 if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < MINBSIZE)
960 return 0;
961 if (!powerof2(fs->fs_bsize))
962 return 0;
963
964 /* Check the size of frag blocks */
965 if (!powerof2(fs->fs_fsize))
966 return 0;
967 if (fs->fs_fsize == 0)
968 return 0;
969
970 /*
971 * XXX: these values are just zero-checked to prevent obvious
972 * bugs. We need more strict checks.
973 */
974 if (fs->fs_size == 0 && fs->fs_old_size == 0)
975 return 0;
976 if (fs->fs_cssize == 0)
977 return 0;
978 if (fs->fs_ipg == 0)
979 return 0;
980 if (fs->fs_fpg == 0)
981 return 0;
982 if (fs->fs_ncg == 0)
983 return 0;
984 if (fs->fs_maxbpg == 0)
985 return 0;
986
987 /* Check the number of inodes per block */
988 if (fs->fs_magic == FS_UFS1_MAGIC)
989 fs_inopb = fs->fs_bsize / sizeof(struct ufs1_dinode);
990 else /* fs->fs_magic == FS_UFS2_MAGIC */
991 fs_inopb = fs->fs_bsize / sizeof(struct ufs2_dinode);
992 if (fs->fs_inopb != fs_inopb)
993 return 0;
994
995 /* Block size cannot be smaller than fragment size */
996 if (fs->fs_bsize < fs->fs_fsize)
997 return 0;
998
999 /* Compute fs_bshift and ensure it is consistent */
1000 for (i = fs->fs_bsize; i > 1; i >>= 1)
1001 fs_bshift++;
1002 if (fs->fs_bshift != fs_bshift)
1003 return 0;
1004
1005 /* Compute fs_fshift and ensure it is consistent */
1006 for (i = fs->fs_fsize; i > 1; i >>= 1)
1007 fs_fshift++;
1008 if (fs->fs_fshift != fs_fshift)
1009 return 0;
1010
1011 /* Compute fs_fragshift and ensure it is consistent */
1012 for (i = fs->fs_frag; i > 1; i >>= 1)
1013 fs_fragshift++;
1014 if (fs->fs_fragshift != fs_fragshift)
1015 return 0;
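/*
 * Worked example of the three shift checks above: a file system with
 * fs_bsize = 16384 and fs_fsize = 2048 must advertise fs_bshift = 14,
 * fs_fshift = 11, fs_frag = 8 and fs_fragshift = 3, and (below)
 * fs_bmask = ~(16384 - 1) and fs_fmask = ~(2048 - 1).
 */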
1016
1017 /* Check the masks */
1018 if (fs->fs_bmask != ~(fs->fs_bsize - 1))
1019 return 0;
1020 if (fs->fs_fmask != ~(fs->fs_fsize - 1))
1021 return 0;
1022
1023 /*
1024 * Now that the shifts and masks are sanitized, we can use the ffs_ API.
1025 */
1026
1027 /* Check the number of frag blocks */
1028 if ((fs_frag = ffs_numfrags(fs, fs->fs_bsize)) > MAXFRAG)
1029 return 0;
1030 if (fs->fs_frag != fs_frag)
1031 return 0;
1032
1033 /* Check the size of cylinder groups */
1034 if ((fs->fs_cgsize < sizeof(struct cg)) ||
1035 (fs->fs_cgsize > fs->fs_bsize))
1036 return 0;
1037
1038 return 1;
1039 }
1040
1041 static int
1042 ffs_is_appleufs(struct vnode *devvp, struct fs *fs)
1043 {
1044 struct dkwedge_info dkw;
1045 int ret = 0;
1046
1047 /*
1048 * First check to see if this is tagged as an Apple UFS filesystem
1049 * in the disklabel.
1050 */
1051 if (getdiskinfo(devvp, &dkw) == 0 &&
1052 strcmp(dkw.dkw_ptype, DKW_PTYPE_APPLEUFS) == 0)
1053 ret = 1;
1054 #ifdef APPLE_UFS
1055 else {
1056 struct appleufslabel *applefs;
1057 struct buf *bp;
1058 daddr_t blkno = APPLEUFS_LABEL_OFFSET / DEV_BSIZE;
1059 int error;
1060
1061 /*
1062 * Manually look for an Apple UFS label, and if a valid one
1063 * is found, then treat it like an Apple UFS filesystem anyway.
1064 */
1065 error = bread(devvp, blkno, APPLEUFS_LABEL_SIZE, 0, &bp);
1066 if (error) {
1067 DPRINTF("bread@0x%jx returned %d", (intmax_t)blkno, error);
1068 return 0;
1069 }
1070 applefs = (struct appleufslabel *)bp->b_data;
1071 error = ffs_appleufs_validate(fs->fs_fsmnt, applefs, NULL);
1072 if (error == 0)
1073 ret = 1;
1074 brelse(bp, 0);
1075 }
1076 #endif
1077
1078 return ret;
1079 }
1080
1081 /*
1082 * Common code for mount and mountroot
1083 */
1084 int
1085 ffs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
1086 {
1087 struct ufsmount *ump = NULL;
1088 struct buf *bp = NULL;
1089 struct fs *fs = NULL;
1090 dev_t dev;
1091 void *space;
1092 daddr_t sblockloc = 0;
1093 int blks, fstype = 0;
1094 int error, i, bsize, ronly, bset = 0;
1095 #ifdef FFS_EI
1096 int needswap = 0; /* keep gcc happy */
1097 #endif
1098 int32_t *lp;
1099 kauth_cred_t cred;
1100 u_int32_t allocsbsize, fs_sbsize = 0;
1101
1102 dev = devvp->v_rdev;
1103 cred = l ? l->l_cred : NOCRED;
1104
1105 /* Flush out any old buffers remaining from a previous use. */
1106 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1107 error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
1108 VOP_UNLOCK(devvp);
1109 if (error) {
1110 DPRINTF("vinvalbuf returned %d", error);
1111 return error;
1112 }
1113
1114 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
1115
1116 ump = kmem_zalloc(sizeof(*ump), KM_SLEEP);
1117 mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);
1118 error = ffs_snapshot_init(ump);
1119 if (error) {
1120 DPRINTF("ffs_snapshot_init returned %d", error);
1121 goto out;
1122 }
1123 ump->um_ops = &ffs_ufsops;
1124
1125 #ifdef WAPBL
1126 sbagain:
1127 #endif
1128 /*
1129 * Try reading the superblock in each of its possible locations.
1130 */
1131 for (i = 0; ; i++) {
1132 daddr_t fs_sblockloc;
1133
1134 if (bp != NULL) {
1135 brelse(bp, BC_NOCACHE);
1136 bp = NULL;
1137 }
1138 if (sblock_try[i] == -1) {
1139 DPRINTF("no superblock found");
1140 error = EINVAL;
1141 fs = NULL;
1142 goto out;
1143 }
1144
1145 error = bread(devvp, sblock_try[i] / DEV_BSIZE, SBLOCKSIZE,
1146 0, &bp);
1147 if (error) {
1148 DPRINTF("bread@0x%x returned %d",
1149 sblock_try[i] / DEV_BSIZE, error);
1150 fs = NULL;
1151 goto out;
1152 }
1153 fs = (struct fs *)bp->b_data;
1154
1155 sblockloc = sblock_try[i];
1156 DPRINTF("fs_magic 0x%x", fs->fs_magic);
1157
1158 /*
1159 * Swap: here, we swap fs->fs_sbsize in order to get the correct
1160 * size to read the superblock. Once read, we swap the whole
1161 * superblock structure.
1162 */
1163 if (fs->fs_magic == FS_UFS1_MAGIC) {
1164 fs_sbsize = fs->fs_sbsize;
1165 fstype = UFS1;
1166 #ifdef FFS_EI
1167 needswap = 0;
1168 } else if (fs->fs_magic == FS_UFS1_MAGIC_SWAPPED) {
1169 fs_sbsize = bswap32(fs->fs_sbsize);
1170 fstype = UFS1;
1171 needswap = 1;
1172 #endif
1173 } else if (fs->fs_magic == FS_UFS2_MAGIC) {
1174 fs_sbsize = fs->fs_sbsize;
1175 fstype = UFS2;
1176 #ifdef FFS_EI
1177 needswap = 0;
1178 } else if (fs->fs_magic == FS_UFS2_MAGIC_SWAPPED) {
1179 fs_sbsize = bswap32(fs->fs_sbsize);
1180 fstype = UFS2;
1181 needswap = 1;
1182 #endif
1183 } else
1184 continue;
1185
1186 /* fs->fs_sblockloc isn't defined for old filesystems */
1187 if (fstype == UFS1 && !(fs->fs_old_flags & FS_FLAGS_UPDATED)) {
1188 if (sblockloc == SBLOCK_UFS2)
1189 /*
1190 * This is likely to be the first alternate
1191 * in a filesystem with 64k blocks.
1192 * Don't use it.
1193 */
1194 continue;
1195 fs_sblockloc = sblockloc;
1196 } else {
1197 fs_sblockloc = fs->fs_sblockloc;
1198 #ifdef FFS_EI
1199 if (needswap)
1200 fs_sblockloc = bswap64(fs_sblockloc);
1201 #endif
1202 }
1203
1204 /* Check we haven't found an alternate superblock */
1205 if (fs_sblockloc != sblockloc)
1206 continue;
1207
1208 /* Check the superblock size */
1209 if (fs_sbsize > SBLOCKSIZE || fs_sbsize < sizeof(struct fs))
1210 continue;
1211 fs = kmem_alloc((u_long)fs_sbsize, KM_SLEEP);
1212 memcpy(fs, bp->b_data, fs_sbsize);
1213
1214 /* Swap the whole superblock structure, if necessary. */
1215 #ifdef FFS_EI
1216 if (needswap) {
1217 ffs_sb_swap((struct fs*)bp->b_data, fs);
1218 fs->fs_flags |= FS_SWAPPED;
1219 } else
1220 #endif
1221 fs->fs_flags &= ~FS_SWAPPED;
1222
1223 /*
1224 * Now that everything is swapped, the superblock is ready to
1225 * be sanitized.
1226 */
1227 if (!ffs_superblock_validate(fs)) {
1228 kmem_free(fs, fs_sbsize);
1229 continue;
1230 }
1231
1232 /* Ok seems to be a good superblock */
1233 break;
1234 }
1235
1236 ump->um_fs = fs;
1237
1238 #ifdef WAPBL
1239 if ((mp->mnt_wapbl_replay == 0) && (fs->fs_flags & FS_DOWAPBL)) {
1240 error = ffs_wapbl_replay_start(mp, fs, devvp);
1241 if (error && (mp->mnt_flag & MNT_FORCE) == 0) {
1242 DPRINTF("ffs_wapbl_replay_start returned %d", error);
1243 goto out;
1244 }
1245 if (!error) {
1246 if (!ronly) {
1247 /* XXX fsmnt may be stale. */
1248 printf("%s: replaying log to disk\n",
1249 fs->fs_fsmnt);
1250 error = wapbl_replay_write(mp->mnt_wapbl_replay,
1251 devvp);
1252 if (error) {
1253 DPRINTF("wapbl_replay_write returned %d",
1254 error);
1255 goto out;
1256 }
1257 wapbl_replay_stop(mp->mnt_wapbl_replay);
1258 fs->fs_clean = FS_WASCLEAN;
1259 } else {
1260 /* XXX fsmnt may be stale */
1261 printf("%s: replaying log to memory\n",
1262 fs->fs_fsmnt);
1263 }
1264
1265 /* Force a re-read of the superblock */
1266 brelse(bp, BC_INVAL);
1267 bp = NULL;
1268 kmem_free(fs, fs_sbsize);
1269 fs = NULL;
1270 goto sbagain;
1271 }
1272 }
1273 #else /* !WAPBL */
1274 if ((fs->fs_flags & FS_DOWAPBL) && (mp->mnt_flag & MNT_FORCE) == 0) {
1275 error = EPERM;
1276 DPRINTF("no force %d", error);
1277 goto out;
1278 }
1279 #endif /* !WAPBL */
1280
1281 ffs_oldfscompat_read(fs, ump, sblockloc);
1282 ump->um_maxfilesize = fs->fs_maxfilesize;
1283
1284 if (fs->fs_flags & ~(FS_KNOWN_FLAGS | FS_INTERNAL)) {
1285 uprintf("%s: unknown ufs flags: 0x%08"PRIx32"%s\n",
1286 mp->mnt_stat.f_mntonname, fs->fs_flags,
1287 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
1288 if ((mp->mnt_flag & MNT_FORCE) == 0) {
1289 error = EINVAL;
1290 DPRINTF("no force %d", error);
1291 goto out;
1292 }
1293 }
1294
1295 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1296 fs->fs_pendingblocks = 0;
1297 fs->fs_pendinginodes = 0;
1298 }
1299
1300 ump->um_fstype = fstype;
1301 if (fs->fs_sbsize < SBLOCKSIZE)
1302 brelse(bp, BC_INVAL);
1303 else
1304 brelse(bp, 0);
1305 bp = NULL;
1306
1307 if (ffs_is_appleufs(devvp, fs)) {
1308 #ifdef APPLE_UFS
1309 ump->um_flags |= UFS_ISAPPLEUFS;
1310 #else
1311 DPRINTF("AppleUFS not supported");
1312 error = EINVAL;
1313 goto out;
1314 #endif
1315 }
1316
1317 #if 0
1318 /*
1319 * XXX This code changes the behaviour of mounting dirty filesystems, to
1320 * XXX require "mount -f ..." to mount them. This doesn't match what
1321 * XXX mount(8) describes and is disabled for now.
1322 */
1323 /*
1324 * If the file system is not clean, don't allow it to be mounted
1325 * unless MNT_FORCE is specified. (Note: MNT_FORCE is always set
1326 * for the root file system.)
1327 */
1328 if (fs->fs_flags & FS_DOWAPBL) {
1329 /*
1330 * wapbl normally expects to be FS_WASCLEAN when the FS_DOWAPBL
1331 * bit is set, although there's a window in unmount where it
1332 * could be FS_ISCLEAN
1333 */
1334 if ((mp->mnt_flag & MNT_FORCE) == 0 &&
1335 (fs->fs_clean & (FS_WASCLEAN | FS_ISCLEAN)) == 0) {
1336 error = EPERM;
1337 goto out;
1338 }
1339 } else
1340 if ((fs->fs_clean & FS_ISCLEAN) == 0 &&
1341 (mp->mnt_flag & MNT_FORCE) == 0) {
1342 error = EPERM;
1343 goto out;
1344 }
1345 #endif
1346
1347 /*
1348 * Verify that we can access the last block in the fs
1349 * if we're mounting read/write.
1350 */
1351 if (!ronly) {
1352 error = bread(devvp, FFS_FSBTODB(fs, fs->fs_size - 1),
1353 fs->fs_fsize, 0, &bp);
1354 if (error) {
1355 DPRINTF("bread@0x%jx returned %d",
1356 (intmax_t)FFS_FSBTODB(fs, fs->fs_size - 1),
1357 error);
1358 bset = BC_INVAL;
1359 goto out;
1360 }
1361 if (bp->b_bcount != fs->fs_fsize) {
1362 DPRINTF("bcount %x != fsize %x", bp->b_bcount,
1363 fs->fs_fsize);
1364 error = EINVAL;
1365 bset = BC_INVAL;
1366 goto out;
1367 }
1368 brelse(bp, BC_INVAL);
1369 bp = NULL;
1370 }
1371
1372 fs->fs_ronly = ronly;
1373 /* Don't bump fs_clean if we're replaying journal */
1374 if (!((fs->fs_flags & FS_DOWAPBL) && (fs->fs_clean & FS_WASCLEAN))) {
1375 if (ronly == 0) {
1376 fs->fs_clean <<= 1;
1377 fs->fs_fmod = 1;
1378 }
1379 }
1380
1381 bsize = fs->fs_cssize;
1382 blks = howmany(bsize, fs->fs_fsize);
1383 if (fs->fs_contigsumsize > 0)
1384 bsize += fs->fs_ncg * sizeof(int32_t);
1385 bsize += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1386 allocsbsize = bsize;
1387 space = kmem_alloc((u_long)allocsbsize, KM_SLEEP);
1388 fs->fs_csp = space;
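/*
 * The single allocation above is carved up by the code below in this
 * order: the cylinder-group summary area (fs_cssize bytes), then, if
 * fs_contigsumsize > 0, one int32_t per cylinder group for
 * fs_maxcluster, and finally one fs_contigdirs byte per cylinder group.
 */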
1389
1390 for (i = 0; i < blks; i += fs->fs_frag) {
1391 bsize = fs->fs_bsize;
1392 if (i + fs->fs_frag > blks)
1393 bsize = (blks - i) * fs->fs_fsize;
1394 error = bread(devvp, FFS_FSBTODB(fs, fs->fs_csaddr + i), bsize,
1395 0, &bp);
1396 if (error) {
1397 DPRINTF("bread@0x%jx %d",
1398 (intmax_t)FFS_FSBTODB(fs, fs->fs_csaddr + i),
1399 error);
1400 goto out1;
1401 }
1402 #ifdef FFS_EI
1403 if (needswap)
1404 ffs_csum_swap((struct csum *)bp->b_data,
1405 (struct csum *)space, bsize);
1406 else
1407 #endif
1408 memcpy(space, bp->b_data, (u_int)bsize);
1409
1410 space = (char *)space + bsize;
1411 brelse(bp, 0);
1412 bp = NULL;
1413 }
1414 if (fs->fs_contigsumsize > 0) {
1415 fs->fs_maxcluster = lp = space;
1416 for (i = 0; i < fs->fs_ncg; i++)
1417 *lp++ = fs->fs_contigsumsize;
1418 space = lp;
1419 }
1420 bsize = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1421 fs->fs_contigdirs = space;
1422 space = (char *)space + bsize;
1423 memset(fs->fs_contigdirs, 0, bsize);
1424
1425 /* Compatibility for old filesystems - XXX */
1426 if (fs->fs_avgfilesize <= 0)
1427 fs->fs_avgfilesize = AVFILESIZ;
1428 if (fs->fs_avgfpdir <= 0)
1429 fs->fs_avgfpdir = AFPDIR;
1430 fs->fs_active = NULL;
1431
1432 mp->mnt_data = ump;
1433 mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
1434 mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_FFS);
1435 mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
1436 mp->mnt_stat.f_namemax = FFS_MAXNAMLEN;
1437 if (UFS_MPISAPPLEUFS(ump)) {
1438 /* NeXT used to keep short symlinks in the inode even
1439 * when using FS_42INODEFMT. In that case fs->fs_maxsymlinklen
1440 * is probably -1, but we still need to be able to identify
1441 * short symlinks.
1442 */
1443 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
1444 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
1445 mp->mnt_iflag |= IMNT_DTYPE;
1446 } else {
1447 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
1448 ump->um_dirblksiz = UFS_DIRBLKSIZ;
1449 if (ump->um_maxsymlinklen > 0)
1450 mp->mnt_iflag |= IMNT_DTYPE;
1451 else
1452 mp->mnt_iflag &= ~IMNT_DTYPE;
1453 }
1454 mp->mnt_fs_bshift = fs->fs_bshift;
1455 mp->mnt_dev_bshift = DEV_BSHIFT; /* XXX */
1456 mp->mnt_flag |= MNT_LOCAL;
1457 mp->mnt_iflag |= IMNT_MPSAFE | IMNT_CAN_RWTORO;
1458 #ifdef FFS_EI
1459 if (needswap)
1460 ump->um_flags |= UFS_NEEDSWAP;
1461 #endif
1462 ump->um_mountp = mp;
1463 ump->um_dev = dev;
1464 ump->um_devvp = devvp;
1465 ump->um_nindir = fs->fs_nindir;
1466 ump->um_lognindir = ffs(fs->fs_nindir) - 1;
1467 ump->um_bptrtodb = fs->fs_fshift - DEV_BSHIFT;
1468 ump->um_seqinc = fs->fs_frag;
1469 for (i = 0; i < MAXQUOTAS; i++)
1470 ump->um_quotas[i] = NULLVP;
1471 spec_node_setmountedfs(devvp, mp);
1472 if (ronly == 0 && fs->fs_snapinum[0] != 0)
1473 ffs_snapshot_mount(mp);
1474 #ifdef WAPBL
1475 if (!ronly) {
1476 KDASSERT(fs->fs_ronly == 0);
1477 /*
1478 * ffs_wapbl_start() needs mp->mnt_stat initialised if it
1479 * needs to create a new log file in-filesystem.
1480 */
1481 error = ffs_statvfs(mp, &mp->mnt_stat);
1482 if (error) {
1483 DPRINTF("ffs_statvfs returned %d", error);
1484 goto out1;
1485 }
1486
1487 error = ffs_wapbl_start(mp);
1488 if (error) {
1489 DPRINTF("ffs_wapbl_start returned %d", error);
1490 goto out1;
1491 }
1492 }
1493 #endif /* WAPBL */
1494 if (ronly == 0) {
1495 #ifdef QUOTA2
1496 error = ffs_quota2_mount(mp);
1497 if (error) {
1498 DPRINTF("ffs_quota2_mount returned %d", error);
1499 goto out1;
1500 }
1501 #else
1502 if (fs->fs_flags & FS_DOQUOTA2) {
1503 ump->um_flags |= UFS_QUOTA2;
1504 uprintf("%s: options QUOTA2 not enabled%s\n",
1505 mp->mnt_stat.f_mntonname,
1506 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
1507 if ((mp->mnt_flag & MNT_FORCE) == 0) {
1508 error = EINVAL;
1509 DPRINTF("quota disabled %d", error);
1510 goto out1;
1511 }
1512 }
1513 #endif
1514 }
1515
1516 if (mp->mnt_flag & MNT_DISCARD)
1517 ump->um_discarddata = ffs_discard_init(devvp, fs);
1518
1519 return (0);
1520 out1:
1521 kmem_free(fs->fs_csp, allocsbsize);
1522 out:
1523 #ifdef WAPBL
1524 if (mp->mnt_wapbl_replay) {
1525 wapbl_replay_stop(mp->mnt_wapbl_replay);
1526 wapbl_replay_free(mp->mnt_wapbl_replay);
1527 mp->mnt_wapbl_replay = 0;
1528 }
1529 #endif
1530
1531 if (fs)
1532 kmem_free(fs, fs->fs_sbsize);
1533 spec_node_setmountedfs(devvp, NULL);
1534 if (bp)
1535 brelse(bp, bset);
1536 if (ump) {
1537 if (ump->um_oldfscompat)
1538 kmem_free(ump->um_oldfscompat, 512 + 3*sizeof(int32_t));
1539 mutex_destroy(&ump->um_lock);
1540 kmem_free(ump, sizeof(*ump));
1541 mp->mnt_data = NULL;
1542 }
1543 return (error);
1544 }
1545
1546 /*
1547 * Sanity checks for loading old filesystem superblocks.
1548 * See ffs_oldfscompat_write below for unwound actions.
1549 *
1550 * XXX - Parts get retired eventually.
1551 * Unfortunately new bits get added.
1552 */
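/*
 * The save area hung off ump->um_oldfscompat holds the 512 bytes that
 * used to be the rotational layout tables (fs_old_postbl_start),
 * followed by three int32_t values: fs_old_npsect, fs_old_interleave
 * and fs_old_trackskew, in that order; ffs_oldfscompat_write() below
 * restores them verbatim.
 */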
1553 static void
1554 ffs_oldfscompat_read(struct fs *fs, struct ufsmount *ump, daddr_t sblockloc)
1555 {
1556 off_t maxfilesize;
1557 int32_t *extrasave;
1558
1559 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1560 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1561 return;
1562
1563 if (!ump->um_oldfscompat)
1564 ump->um_oldfscompat = kmem_alloc(512 + 3*sizeof(int32_t),
1565 KM_SLEEP);
1566
1567 memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
1568 extrasave = ump->um_oldfscompat;
1569 extrasave += 512/sizeof(int32_t);
1570 extrasave[0] = fs->fs_old_npsect;
1571 extrasave[1] = fs->fs_old_interleave;
1572 extrasave[2] = fs->fs_old_trackskew;
1573
1574 /* These fields will be overwritten by their
1575 * original values in ffs_oldfscompat_write(), so it is harmless
1576 * to modify them here.
1577 */
1578 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
1579 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
1580 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
1581 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
1582
1583 fs->fs_maxbsize = fs->fs_bsize;
1584 fs->fs_time = fs->fs_old_time;
1585 fs->fs_size = fs->fs_old_size;
1586 fs->fs_dsize = fs->fs_old_dsize;
1587 fs->fs_csaddr = fs->fs_old_csaddr;
1588 fs->fs_sblockloc = sblockloc;
1589
1590 fs->fs_flags = fs->fs_old_flags | (fs->fs_flags & FS_INTERNAL);
1591
1592 if (fs->fs_old_postblformat == FS_42POSTBLFMT) {
1593 fs->fs_old_nrpos = 8;
1594 fs->fs_old_npsect = fs->fs_old_nsect;
1595 fs->fs_old_interleave = 1;
1596 fs->fs_old_trackskew = 0;
1597 }
1598
1599 if (fs->fs_magic == FS_UFS1_MAGIC &&
1600 fs->fs_old_inodefmt < FS_44INODEFMT) {
1601 fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
1602 fs->fs_qbmask = ~fs->fs_bmask;
1603 fs->fs_qfmask = ~fs->fs_fmask;
1604 }
1605
1606 maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;
1607 if (fs->fs_maxfilesize > maxfilesize)
1608 fs->fs_maxfilesize = maxfilesize;
1609
1610 /* Compatibility for old filesystems */
1611 if (fs->fs_avgfilesize <= 0)
1612 fs->fs_avgfilesize = AVFILESIZ;
1613 if (fs->fs_avgfpdir <= 0)
1614 fs->fs_avgfpdir = AFPDIR;
1615
1616 #if 0
1617 if (bigcgs) {
1618 fs->fs_save_cgsize = fs->fs_cgsize;
1619 fs->fs_cgsize = fs->fs_bsize;
1620 }
1621 #endif
1622 }
1623
1624 /*
1625 * Unwinding superblock updates for old filesystems.
1626 * See ffs_oldfscompat_read above for details.
1627 *
1628 * XXX - Parts get retired eventually.
1629 * Unfortunately new bits get added.
1630 */
1631 static void
1632 ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
1633 {
1634 int32_t *extrasave;
1635
1636 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1637 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1638 return;
1639
1640 fs->fs_old_time = fs->fs_time;
1641 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
1642 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
1643 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
1644 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
1645 fs->fs_old_flags = fs->fs_flags;
1646
1647 #if 0
1648 if (bigcgs) {
1649 fs->fs_cgsize = fs->fs_save_cgsize;
1650 }
1651 #endif
1652
1653 memcpy(&fs->fs_old_postbl_start, ump->um_oldfscompat, 512);
1654 extrasave = ump->um_oldfscompat;
1655 extrasave += 512/sizeof(int32_t);
1656 fs->fs_old_npsect = extrasave[0];
1657 fs->fs_old_interleave = extrasave[1];
1658 fs->fs_old_trackskew = extrasave[2];
1659
1660 }
1661
1662 /*
1663 * unmount vfs operation
1664 */
1665 int
1666 ffs_unmount(struct mount *mp, int mntflags)
1667 {
1668 struct lwp *l = curlwp;
1669 struct ufsmount *ump = VFSTOUFS(mp);
1670 struct fs *fs = ump->um_fs;
1671 int error, flags;
1672 u_int32_t bsize;
1673 #ifdef WAPBL
1674 extern int doforce;
1675 #endif
1676
1677 if (ump->um_discarddata) {
1678 ffs_discard_finish(ump->um_discarddata, mntflags);
1679 ump->um_discarddata = NULL;
1680 }
1681
1682 flags = 0;
1683 if (mntflags & MNT_FORCE)
1684 flags |= FORCECLOSE;
1685 if ((error = ffs_flushfiles(mp, flags, l)) != 0)
1686 return (error);
1687 error = UFS_WAPBL_BEGIN(mp);
1688 if (error == 0)
1689 if (fs->fs_ronly == 0 &&
1690 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
1691 fs->fs_clean & FS_WASCLEAN) {
1692 fs->fs_clean = FS_ISCLEAN;
1693 fs->fs_fmod = 0;
1694 (void) ffs_sbupdate(ump, MNT_WAIT);
1695 }
1696 if (error == 0)
1697 UFS_WAPBL_END(mp);
1698 #ifdef WAPBL
1699 KASSERT(!(mp->mnt_wapbl_replay && mp->mnt_wapbl));
1700 if (mp->mnt_wapbl_replay) {
1701 KDASSERT(fs->fs_ronly);
1702 wapbl_replay_stop(mp->mnt_wapbl_replay);
1703 wapbl_replay_free(mp->mnt_wapbl_replay);
1704 mp->mnt_wapbl_replay = 0;
1705 }
1706 error = ffs_wapbl_stop(mp, doforce && (mntflags & MNT_FORCE));
1707 if (error) {
1708 return error;
1709 }
1710 #endif /* WAPBL */
1711
1712 if (ump->um_devvp->v_type != VBAD)
1713 spec_node_setmountedfs(ump->um_devvp, NULL);
1714 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1715 (void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD | FWRITE,
1716 NOCRED);
1717 vput(ump->um_devvp);
1718
1719 bsize = fs->fs_cssize;
1720 if (fs->fs_contigsumsize > 0)
1721 bsize += fs->fs_ncg * sizeof(int32_t);
1722 bsize += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1723 kmem_free(fs->fs_csp, bsize);
1724
1725 kmem_free(fs, fs->fs_sbsize);
1726 if (ump->um_oldfscompat != NULL)
1727 kmem_free(ump->um_oldfscompat, 512 + 3*sizeof(int32_t));
1728 mutex_destroy(&ump->um_lock);
1729 ffs_snapshot_fini(ump);
1730 kmem_free(ump, sizeof(*ump));
1731 mp->mnt_data = NULL;
1732 mp->mnt_flag &= ~MNT_LOCAL;
1733 return (0);
1734 }
1735
1736 /*
1737 * Flush out all the files in a filesystem.
1738 */
1739 int
1740 ffs_flushfiles(struct mount *mp, int flags, struct lwp *l)
1741 {
1742 extern int doforce;
1743 struct ufsmount *ump;
1744 int error;
1745
1746 if (!doforce)
1747 flags &= ~FORCECLOSE;
1748 ump = VFSTOUFS(mp);
1749 #ifdef QUOTA
1750 if ((error = quota1_umount(mp, flags)) != 0)
1751 return (error);
1752 #endif
1753 #ifdef QUOTA2
1754 if ((error = quota2_umount(mp, flags)) != 0)
1755 return (error);
1756 #endif
1757 #ifdef UFS_EXTATTR
1758 if (ump->um_fstype == UFS1) {
1759 if (ump->um_extattr.uepm_flags & UFS_EXTATTR_UEPM_STARTED)
1760 ufs_extattr_stop(mp, l);
1761 if (ump->um_extattr.uepm_flags & UFS_EXTATTR_UEPM_INITIALIZED)
1762 ufs_extattr_uepm_destroy(&ump->um_extattr);
1763 mp->mnt_flag &= ~MNT_EXTATTR;
1764 }
1765 #endif
1766 if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
1767 return (error);
1768 ffs_snapshot_unmount(mp);
1769 /*
1770 * Flush all the files.
1771 */
1772 error = vflush(mp, NULLVP, flags);
1773 if (error)
1774 return (error);
1775 /*
1776 * Flush filesystem metadata.
1777 */
1778 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1779 error = VOP_FSYNC(ump->um_devvp, l->l_cred, FSYNC_WAIT, 0, 0);
1780 VOP_UNLOCK(ump->um_devvp);
1781 if (flags & FORCECLOSE) /* XXXDBJ */
1782 error = 0;
1783
1784 #ifdef WAPBL
1785 if (error)
1786 return error;
1787 if (mp->mnt_wapbl) {
1788 error = wapbl_flush(mp->mnt_wapbl, 1);
1789 if (flags & FORCECLOSE)
1790 error = 0;
1791 }
1792 #endif
1793
1794 return (error);
1795 }
1796
1797 /*
1798 * Get file system statistics.
1799 */
1800 int
1801 ffs_statvfs(struct mount *mp, struct statvfs *sbp)
1802 {
1803 struct ufsmount *ump;
1804 struct fs *fs;
1805
1806 ump = VFSTOUFS(mp);
1807 fs = ump->um_fs;
1808 mutex_enter(&ump->um_lock);
1809 sbp->f_bsize = fs->fs_bsize;
1810 sbp->f_frsize = fs->fs_fsize;
1811 sbp->f_iosize = fs->fs_bsize;
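/*
 * The counts below are in units of f_frsize (fs_fsize) fragments;
 * e.g. with fs_minfree = 5, f_bresvd works out to 5% of fs_dsize.
 */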
1812 sbp->f_blocks = fs->fs_dsize;
1813 sbp->f_bfree = ffs_blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
1814 fs->fs_cstotal.cs_nffree + FFS_DBTOFSB(fs, fs->fs_pendingblocks);
1815 sbp->f_bresvd = ((u_int64_t) fs->fs_dsize * (u_int64_t)
1816 fs->fs_minfree) / (u_int64_t) 100;
1817 if (sbp->f_bfree > sbp->f_bresvd)
1818 sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
1819 else
1820 sbp->f_bavail = 0;
1821 sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
1822 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1823 sbp->f_favail = sbp->f_ffree;
1824 sbp->f_fresvd = 0;
1825 mutex_exit(&ump->um_lock);
1826 copy_statvfs_info(sbp, mp);
1827
1828 return (0);
1829 }
1830
1831 struct ffs_sync_ctx {
1832 int waitfor;
1833 };
1834
1835 static bool
1836 ffs_sync_selector(void *cl, struct vnode *vp)
1837 {
1838 struct ffs_sync_ctx *c = cl;
1839 struct inode *ip;
1840
1841 KASSERT(mutex_owned(vp->v_interlock));
1842
1843 ip = VTOI(vp);
1844 /*
1845 * Skip the vnode/inode if inaccessible.
1846 */
1847 if (ip == NULL || vp->v_type == VNON)
1848 return false;
1849
1850 /*
1851 * We deliberately update inode times here. This will
1852 * prevent a massive queue of updates accumulating, only
1853 * to be handled by a call to unmount.
1854 *
1855 * XXX It would be better to have the syncer trickle these
1856 * out. Adjustment needed to allow registering vnodes for
1857 * sync when the vnode is clean, but the inode dirty. Or
1858 * have ufs itself trickle out inode updates.
1859 *
1860 * If doing a lazy sync, we don't care about metadata or
1861 * data updates, because they are handled by each vnode's
1862 * synclist entry. In this case we are only interested in
1863 * writing back modified inodes.
1864 */
1865 if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE |
1866 IN_MODIFY | IN_MODIFIED | IN_ACCESSED)) == 0 &&
1867 (c->waitfor == MNT_LAZY || (LIST_EMPTY(&vp->v_dirtyblkhd) &&
1868 (vp->v_iflag & VI_ONWORKLST) == 0)))
1869 return false;
1870
1871 return true;
1872 }
1873
1874 /*
1875 * Go through the disk queues to initiate sandbagged IO;
1876 * go through the inodes to write those that have been modified;
1877 * initiate the writing of the super block if it has been modified.
1878 *
1879 * Note: we are always called with the filesystem marked `MPBUSY'.
1880 */
1881 int
1882 ffs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
1883 {
1884 struct vnode *vp;
1885 struct ufsmount *ump = VFSTOUFS(mp);
1886 struct fs *fs;
1887 struct vnode_iterator *marker;
1888 int error, allerror = 0;
1889 struct ffs_sync_ctx ctx;
1890
1891 fs = ump->um_fs;
1892 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
1893 panic("%s: rofs mod, fs=%s", __func__, fs->fs_fsmnt);
1894 }
1895
1896 /*
1897 * Write back each (modified) inode.
1898 */
1899 vfs_vnode_iterator_init(mp, &marker);
1900
1901 ctx.waitfor = waitfor;
1902 while ((vp = vfs_vnode_iterator_next(marker, ffs_sync_selector, &ctx)))
1903 {
1904 error = vn_lock(vp,
1905 LK_EXCLUSIVE | (waitfor == MNT_LAZY ? LK_NOWAIT : 0));
1906 if (error) {
1907 vrele(vp);
1908 continue;
1909 }
1910 if (waitfor == MNT_LAZY) {
1911 error = UFS_WAPBL_BEGIN(vp->v_mount);
1912 if (!error) {
1913 error = ffs_update(vp, NULL, NULL,
1914 UPDATE_CLOSE);
1915 UFS_WAPBL_END(vp->v_mount);
1916 }
1917 } else {
1918 error = VOP_FSYNC(vp, cred, FSYNC_NOLOG |
1919 (waitfor == MNT_WAIT ? FSYNC_WAIT : 0), 0, 0);
1920 }
1921 if (error)
1922 allerror = error;
1923 vput(vp);
1924 }
1925 vfs_vnode_iterator_destroy(marker);
1926
1927 /*
1928 * Force stale file system control information to be flushed.
1929 */
1930 if (waitfor != MNT_LAZY && (ump->um_devvp->v_numoutput > 0 ||
1931 !LIST_EMPTY(&ump->um_devvp->v_dirtyblkhd))) {
1932 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1933 if ((error = VOP_FSYNC(ump->um_devvp, cred,
1934 (waitfor == MNT_WAIT ? FSYNC_WAIT : 0) | FSYNC_NOLOG,
1935 0, 0)) != 0)
1936 allerror = error;
1937 VOP_UNLOCK(ump->um_devvp);
1938 }
1939 #if defined(QUOTA) || defined(QUOTA2)
1940 qsync(mp);
1941 #endif
1942 /*
1943 * Write back modified superblock.
1944 */
1945 if (fs->fs_fmod != 0) {
1946 fs->fs_fmod = 0;
1947 fs->fs_time = time_second;
1948 error = UFS_WAPBL_BEGIN(mp);
1949 if (error)
1950 allerror = error;
1951 else {
1952 if ((error = ffs_cgupdate(ump, waitfor)))
1953 allerror = error;
1954 UFS_WAPBL_END(mp);
1955 }
1956 }
1957
1958 #ifdef WAPBL
1959 if (mp->mnt_wapbl) {
1960 error = wapbl_flush(mp->mnt_wapbl, (waitfor == MNT_WAIT));
1961 if (error)
1962 allerror = error;
1963 }
1964 #endif
1965
1966 return (allerror);
1967 }
1968
1969 /*
1970 * Load inode from disk and initialize vnode.
1971 */
1972 static int
1973 ffs_init_vnode(struct ufsmount *ump, struct vnode *vp, ino_t ino)
1974 {
1975 struct fs *fs;
1976 struct inode *ip;
1977 struct buf *bp;
1978 int error;
1979
1980 fs = ump->um_fs;
1981
1982 /* Read in the disk contents for the inode. */
1983 error = bread(ump->um_devvp, FFS_FSBTODB(fs, ino_to_fsba(fs, ino)),
1984 (int)fs->fs_bsize, 0, &bp);
1985 if (error)
1986 return error;
1987
1988 /* Allocate and initialize inode. */
1989 ip = pool_cache_get(ffs_inode_cache, PR_WAITOK);
1990 memset(ip, 0, sizeof(struct inode));
1991 ip->i_ump = ump;
1992 ip->i_fs = fs;
1993 ip->i_dev = ump->um_dev;
1994 ip->i_number = ino;
1995 if (ump->um_fstype == UFS1)
1996 ip->i_din.ffs1_din = pool_cache_get(ffs_dinode1_cache,
1997 PR_WAITOK);
1998 else
1999 ip->i_din.ffs2_din = pool_cache_get(ffs_dinode2_cache,
2000 PR_WAITOK);
2001 ffs_load_inode(bp, ip, fs, ino);
2002 brelse(bp, 0);
2003 ip->i_vnode = vp;
2004 #if defined(QUOTA) || defined(QUOTA2)
2005 ufsquota_init(ip);
2006 #endif
2007
2008 /* Initialise vnode with this inode. */
2009 vp->v_tag = VT_UFS;
2010 vp->v_op = ffs_vnodeop_p;
2011 vp->v_vflag |= VV_LOCKSWORK;
2012 vp->v_data = ip;
2013
2014 /* Initialize genfs node. */
2015 genfs_node_init(vp, &ffs_genfsops);
2016
2017 return 0;
2018 }
2019
2020 /*
2021 * Undo ffs_init_vnode().
2022 */
2023 static void
2024 ffs_deinit_vnode(struct ufsmount *ump, struct vnode *vp)
2025 {
2026 struct inode *ip = VTOI(vp);
2027
2028 genfs_node_destroy(vp);
2029 vp->v_data = NULL;
2030
2031 if (ump->um_fstype == UFS1)
2032 pool_cache_put(ffs_dinode1_cache, ip->i_din.ffs1_din);
2033 else
2034 pool_cache_put(ffs_dinode2_cache, ip->i_din.ffs2_din);
2035 pool_cache_put(ffs_inode_cache, ip);
2036 }
2037
2038 /*
2039 * Read an inode from disk and initialize this vnode / inode pair.
2040  * The caller ensures no other thread will try to load this inode concurrently.
2041 */
2042 int
2043 ffs_loadvnode(struct mount *mp, struct vnode *vp,
2044 const void *key, size_t key_len, const void **new_key)
2045 {
2046 ino_t ino;
2047 struct fs *fs;
2048 struct inode *ip;
2049 struct ufsmount *ump;
2050 int error;
2051
2052 KASSERT(key_len == sizeof(ino));
2053 memcpy(&ino, key, key_len);
2054 ump = VFSTOUFS(mp);
2055 fs = ump->um_fs;
2056
2057 error = ffs_init_vnode(ump, vp, ino);
2058 if (error)
2059 return error;
2060
2061 ip = VTOI(vp);
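	/*
	 * An inode with a zero mode is not allocated on disk; refuse
	 * to load it.
	 */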
2062 if (ip->i_mode == 0) {
2063 ffs_deinit_vnode(ump, vp);
2064
2065 return ENOENT;
2066 }
2067
2068 /* Initialize the vnode from the inode. */
2069 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
2070
2071 /* Finish inode initialization. */
2072 ip->i_devvp = ump->um_devvp;
2073 vref(ip->i_devvp);
2074
2075 /*
2076 * Ensure that uid and gid are correct. This is a temporary
2077 * fix until fsck has been changed to do the update.
2078 */
2079
2080 if (fs->fs_magic == FS_UFS1_MAGIC && /* XXX */
2081 fs->fs_old_inodefmt < FS_44INODEFMT) { /* XXX */
2082 ip->i_uid = ip->i_ffs1_ouid; /* XXX */
2083 ip->i_gid = ip->i_ffs1_ogid; /* XXX */
2084 } /* XXX */
2085 uvm_vnp_setsize(vp, ip->i_size);
2086 *new_key = &ip->i_number;
2087 return 0;
2088 }
2089
2090 /*
2091 * Create a new inode on disk and initialize this vnode / inode pair.
2092 */
2093 int
2094 ffs_newvnode(struct mount *mp, struct vnode *dvp, struct vnode *vp,
2095 struct vattr *vap, kauth_cred_t cred, void *extra,
2096 size_t *key_len, const void **new_key)
2097 {
2098 ino_t ino;
2099 struct fs *fs;
2100 struct inode *ip;
2101 struct timespec ts;
2102 struct ufsmount *ump;
2103 int error, mode;
2104
2105 KASSERT(dvp->v_mount == mp);
2106 KASSERT(vap->va_type != VNON);
2107
2108 *key_len = sizeof(ino);
2109 ump = VFSTOUFS(mp);
2110 fs = ump->um_fs;
2111 mode = MAKEIMODE(vap->va_type, vap->va_mode);
2112
2113 /* Allocate fresh inode. */
2114 error = ffs_valloc(dvp, mode, cred, &ino);
2115 if (error)
2116 return error;
2117
2118 /* Attach inode to vnode. */
2119 error = ffs_init_vnode(ump, vp, ino);
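	/*
	 * Could not set up the in-core inode; release the on-disk
	 * inode that was just allocated.
	 */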
2120 if (error) {
2121 if (UFS_WAPBL_BEGIN(mp) == 0) {
2122 ffs_vfree(dvp, ino, mode);
2123 UFS_WAPBL_END(mp);
2124 }
2125 return error;
2126 }
2127
2128 ip = VTOI(vp);
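	/*
	 * Sanity check: a freshly allocated inode must not appear to
	 * be in use and should carry no stale size or block count.
	 */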
2129 if (ip->i_mode) {
2130 panic("%s: dup alloc ino=%" PRId64 " on %s: mode %o/%o "
2131 "gen %x/%x size %" PRIx64 " blocks %" PRIx64,
2132 __func__, ino, fs->fs_fsmnt, DIP(ip, mode), ip->i_mode,
2133 DIP(ip, gen), ip->i_gen, DIP(ip, size), DIP(ip, blocks));
2134 }
2135 if (DIP(ip, size) || DIP(ip, blocks)) {
2136 printf("%s: ino=%" PRId64 " on %s: "
2137 "gen %x/%x has non zero blocks %" PRIx64 " or size %"
2138 PRIx64 "\n",
2139 __func__, ino, fs->fs_fsmnt, DIP(ip, gen), ip->i_gen,
2140 DIP(ip, blocks), DIP(ip, size));
2141 if ((ip)->i_ump->um_fstype == UFS1)
2142 panic("%s: dirty filesystem?", __func__);
2143 DIP_ASSIGN(ip, blocks, 0);
2144 DIP_ASSIGN(ip, size, 0);
2145 }
2146
2147 /* Set uid / gid. */
2148 if (cred == NOCRED || cred == FSCRED) {
2149 ip->i_gid = 0;
2150 ip->i_uid = 0;
2151 } else {
2152 ip->i_gid = VTOI(dvp)->i_gid;
2153 ip->i_uid = kauth_cred_geteuid(cred);
2154 }
2155 DIP_ASSIGN(ip, gid, ip->i_gid);
2156 DIP_ASSIGN(ip, uid, ip->i_uid);
2157
2158 #if defined(QUOTA) || defined(QUOTA2)
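	/*
	 * Charge the new inode against the owner's quota; on failure,
	 * back out by freeing the on-disk inode and undoing the vnode
	 * initialization.
	 */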
2159 error = UFS_WAPBL_BEGIN(mp);
2160 if (error) {
2161 ffs_deinit_vnode(ump, vp);
2162
2163 return error;
2164 }
2165 error = chkiq(ip, 1, cred, 0);
2166 if (error) {
2167 ffs_vfree(dvp, ino, mode);
2168 UFS_WAPBL_END(mp);
2169 ffs_deinit_vnode(ump, vp);
2170
2171 return error;
2172 }
2173 UFS_WAPBL_END(mp);
2174 #endif
2175
2176 /* Set type and finalize. */
2177 ip->i_flags = 0;
2178 DIP_ASSIGN(ip, flags, 0);
2179 ip->i_mode = mode;
2180 DIP_ASSIGN(ip, mode, mode);
2181 if (vap->va_rdev != VNOVAL) {
2182 /*
2183 * Want to be able to use this to make badblock
2184 * inodes, so don't truncate the dev number.
2185 */
2186 if (ump->um_fstype == UFS1)
2187 ip->i_ffs1_rdev = ufs_rw32(vap->va_rdev,
2188 UFS_MPNEEDSWAP(ump));
2189 else
2190 ip->i_ffs2_rdev = ufs_rw64(vap->va_rdev,
2191 UFS_MPNEEDSWAP(ump));
2192 }
2193 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
2194 ip->i_devvp = ump->um_devvp;
2195 vref(ip->i_devvp);
2196
2197 /* Set up a new generation number for this inode. */
2198 ip->i_gen++;
2199 DIP_ASSIGN(ip, gen, ip->i_gen);
2200 if (fs->fs_magic == FS_UFS2_MAGIC) {
2201 vfs_timestamp(&ts);
2202 ip->i_ffs2_birthtime = ts.tv_sec;
2203 ip->i_ffs2_birthnsec = ts.tv_nsec;
2204 }
2205
2206 uvm_vnp_setsize(vp, ip->i_size);
2207 *new_key = &ip->i_number;
2208 return 0;
2209 }
2210
2211 /*
2212 * File handle to vnode
2213 *
2214  * Have to be really careful about stale file handles:
2215  * - check that the inode number is within range (ffs_checkrange())
2216  * - hand off to ufs_fhtovp() to get the locked vnode
2217  * - ufs_fhtovp() rejects unallocated inodes (i_mode == 0) and
2218  *   generation number mismatches, so a stale handle is refused
2219  *   rather than mapped to a recycled inode
2220 */
2221 int
2222 ffs_fhtovp(struct mount *mp, struct fid *fhp, int lktype, struct vnode **vpp)
2223 {
2224 struct ufid ufh;
2225 int error;
2226
2227 if (fhp->fid_len != sizeof(struct ufid))
2228 return EINVAL;
2229
2230 memcpy(&ufh, fhp, sizeof(ufh));
2231 if ((error = ffs_checkrange(mp, ufh.ufid_ino)) != 0)
2232 return error;
2233
2234 return (ufs_fhtovp(mp, &ufh, lktype, vpp));
2235 }
2236
2237 /*
2238 * Vnode pointer to File handle
2239 */
2240 /* ARGSUSED */
2241 int
2242 ffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
2243 {
2244 struct inode *ip;
2245 struct ufid ufh;
2246
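	/*
	 * If the caller's buffer is too small, report the size a UFS
	 * file handle requires so the caller can retry.
	 */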
2247 if (*fh_size < sizeof(struct ufid)) {
2248 *fh_size = sizeof(struct ufid);
2249 return E2BIG;
2250 }
2251 ip = VTOI(vp);
2252 *fh_size = sizeof(struct ufid);
2253 memset(&ufh, 0, sizeof(ufh));
2254 ufh.ufid_len = sizeof(struct ufid);
2255 ufh.ufid_ino = ip->i_number;
2256 ufh.ufid_gen = ip->i_gen;
2257 memcpy(fhp, &ufh, sizeof(ufh));
2258 return (0);
2259 }
2260
2261 void
2262 ffs_init(void)
2263 {
2264 if (ffs_initcount++ > 0)
2265 return;
2266
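	/* Create pool caches for in-core inodes and both dinode formats. */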
2267 ffs_inode_cache = pool_cache_init(sizeof(struct inode), 0, 0, 0,
2268 "ffsino", NULL, IPL_NONE, NULL, NULL, NULL);
2269 ffs_dinode1_cache = pool_cache_init(sizeof(struct ufs1_dinode), 0, 0, 0,
2270 "ffsdino1", NULL, IPL_NONE, NULL, NULL, NULL);
2271 ffs_dinode2_cache = pool_cache_init(sizeof(struct ufs2_dinode), 0, 0, 0,
2272 "ffsdino2", NULL, IPL_NONE, NULL, NULL, NULL);
2273 ufs_init();
2274 }
2275
2276 void
2277 ffs_reinit(void)
2278 {
2279 ufs_reinit();
2280 }
2281
2282 void
2283 ffs_done(void)
2284 {
2285 if (--ffs_initcount > 0)
2286 return;
2287
2288 ufs_done();
2289 pool_cache_destroy(ffs_dinode2_cache);
2290 pool_cache_destroy(ffs_dinode1_cache);
2291 pool_cache_destroy(ffs_inode_cache);
2292 }
2293
2294 /*
2295  * Write the superblock back to disk.
2296 */
2297 int
2298 ffs_sbupdate(struct ufsmount *mp, int waitfor)
2299 {
2300 struct fs *fs = mp->um_fs;
2301 struct buf *bp;
2302 int error;
2303 u_int32_t saveflag;
2304
2305 error = ffs_getblk(mp->um_devvp,
2306 fs->fs_sblockloc / DEV_BSIZE, FFS_NOBLK,
2307 fs->fs_sbsize, false, &bp);
2308 if (error)
2309 return error;
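	/*
	 * Strip flags that are only meaningful in core so they are not
	 * written to disk; they are restored on the in-core superblock
	 * after the copy below.
	 */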
2310 saveflag = fs->fs_flags & FS_INTERNAL;
2311 fs->fs_flags &= ~FS_INTERNAL;
2312
2313 memcpy(bp->b_data, fs, fs->fs_sbsize);
2314
2315 ffs_oldfscompat_write((struct fs *)bp->b_data, mp);
2316 #ifdef FFS_EI
2317 if (mp->um_flags & UFS_NEEDSWAP)
2318 ffs_sb_swap((struct fs *)bp->b_data, (struct fs *)bp->b_data);
2319 #endif
2320 fs->fs_flags |= saveflag;
2321
2322 if (waitfor == MNT_WAIT)
2323 error = bwrite(bp);
2324 else
2325 bawrite(bp);
2326 return (error);
2327 }
2328
2329 int
2330 ffs_cgupdate(struct ufsmount *mp, int waitfor)
2331 {
2332 struct fs *fs = mp->um_fs;
2333 struct buf *bp;
2334 int blks;
2335 void *space;
2336 int i, size, error = 0, allerror = 0;
2337
2338 UFS_WAPBL_JLOCK_ASSERT(mp->um_mountp);
2339
2340 allerror = ffs_sbupdate(mp, waitfor);
2341 blks = howmany(fs->fs_cssize, fs->fs_fsize);
2342 space = fs->fs_csp;
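	/*
	 * Write the in-core cylinder group summary area (fs_csp) back
	 * to its on-disk location at fs_csaddr, one file system block
	 * at a time; the final chunk may be a partial block.
	 */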
2343 for (i = 0; i < blks; i += fs->fs_frag) {
2344 size = fs->fs_bsize;
2345 if (i + fs->fs_frag > blks)
2346 size = (blks - i) * fs->fs_fsize;
2347 error = ffs_getblk(mp->um_devvp, FFS_FSBTODB(fs, fs->fs_csaddr + i),
2348 FFS_NOBLK, size, false, &bp);
2349 if (error)
2350 break;
2351 #ifdef FFS_EI
2352 if (mp->um_flags & UFS_NEEDSWAP)
2353 ffs_csum_swap((struct csum*)space,
2354 (struct csum*)bp->b_data, size);
2355 else
2356 #endif
2357 memcpy(bp->b_data, space, (u_int)size);
2358 space = (char *)space + size;
2359 if (waitfor == MNT_WAIT)
2360 error = bwrite(bp);
2361 else
2362 bawrite(bp);
2363 }
2364 if (!allerror && error)
2365 allerror = error;
2366 return (allerror);
2367 }
2368
2369 int
2370 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
2371 int attrnamespace, const char *attrname)
2372 {
2373 #ifdef UFS_EXTATTR
2374 /*
2375 * File-backed extended attributes are only supported on UFS1.
2376 * UFS2 has native extended attributes.
2377 */
2378 if (VFSTOUFS(mp)->um_fstype == UFS1)
2379 return (ufs_extattrctl(mp, cmd, vp, attrnamespace, attrname));
2380 #endif
2381 return (vfs_stdextattrctl(mp, cmd, vp, attrnamespace, attrname));
2382 }
2383
2384 /*
2385  * Sync the block device vnode backing a mounted file system.
2386 */
2387 static int
2388 ffs_vfs_fsync(vnode_t *vp, int flags)
2389 {
2390 int error, i, pflags;
2391 #ifdef WAPBL
2392 struct mount *mp;
2393 #endif
2394
2395 KASSERT(vp->v_type == VBLK);
2396 KASSERT(spec_node_getmountedfs(vp) != NULL);
2397
2398 /*
2399 * Flush all dirty data associated with the vnode.
2400 */
2401 pflags = PGO_ALLPAGES | PGO_CLEANIT;
2402 if ((flags & FSYNC_WAIT) != 0)
2403 pflags |= PGO_SYNCIO;
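	/* VOP_PUTPAGES is entered with the uvm object lock held and releases it. */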
2404 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
2405 error = VOP_PUTPAGES(vp, 0, 0, pflags);
2406 if (error)
2407 return error;
2408
2409 #ifdef WAPBL
2410 mp = spec_node_getmountedfs(vp);
2411 if (mp && mp->mnt_wapbl) {
2412 /*
2413 * Don't bother writing out metadata if the syncer is
2414 * making the request. We will let the sync vnode
2415 * write it out in a single burst through a call to
2416 * VFS_SYNC().
2417 */
2418 if ((flags & (FSYNC_DATAONLY | FSYNC_LAZY | FSYNC_NOLOG)) != 0)
2419 return 0;
2420
2421 /*
2422 * Don't flush the log if the vnode being flushed
2423 * contains no dirty buffers that could be in the log.
2424 */
2425 if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
2426 error = wapbl_flush(mp->mnt_wapbl, 0);
2427 if (error)
2428 return error;
2429 }
2430
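		/*
		 * For a waited sync, let writes already issued to the
		 * device drain before returning.
		 */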
2431 if ((flags & FSYNC_WAIT) != 0) {
2432 mutex_enter(vp->v_interlock);
2433 while (vp->v_numoutput)
2434 cv_wait(&vp->v_cv, vp->v_interlock);
2435 mutex_exit(vp->v_interlock);
2436 }
2437
2438 return 0;
2439 }
2440 #endif /* WAPBL */
2441
2442 error = vflushbuf(vp, flags);
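	/*
	 * If requested, also ask the underlying device to flush its own
	 * write cache; any error from the ioctl is ignored.
	 */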
2443 if (error == 0 && (flags & FSYNC_CACHE) != 0) {
2444 i = 1;
2445 (void)VOP_IOCTL(vp, DIOCCACHESYNC, &i, FWRITE,
2446 kauth_cred_get());
2447 }
2448
2449 return error;
2450 }
2451