1 /* $NetBSD: ffs_vfsops.c,v 1.379 2022/12/21 18:58:25 chs Exp $ */
2
3 /*-
4 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Wasabi Systems, Inc, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1989, 1991, 1993, 1994
34 * The Regents of the University of California. All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
61 */
62
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.379 2022/12/21 18:58:25 chs Exp $");
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_ffs.h"
68 #include "opt_quota.h"
69 #include "opt_wapbl.h"
70 #endif
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/namei.h>
75 #include <sys/proc.h>
76 #include <sys/kernel.h>
77 #include <sys/vnode.h>
78 #include <sys/fstrans.h>
79 #include <sys/socket.h>
80 #include <sys/mount.h>
81 #include <sys/buf.h>
82 #include <sys/device.h>
83 #include <sys/disk.h>
84 #include <sys/file.h>
85 #include <sys/disklabel.h>
86 #include <sys/ioctl.h>
87 #include <sys/errno.h>
88 #include <sys/kmem.h>
89 #include <sys/pool.h>
90 #include <sys/lock.h>
91 #include <sys/sysctl.h>
92 #include <sys/conf.h>
93 #include <sys/kauth.h>
94 #include <sys/wapbl.h>
95 #include <sys/module.h>
96
97 #include <miscfs/genfs/genfs.h>
98 #include <miscfs/specfs/specdev.h>
99
100 #include <ufs/ufs/quota.h>
101 #include <ufs/ufs/ufsmount.h>
102 #include <ufs/ufs/inode.h>
103 #include <ufs/ufs/dir.h>
104 #include <ufs/ufs/ufs_extern.h>
105 #include <ufs/ufs/ufs_bswap.h>
106 #include <ufs/ufs/ufs_wapbl.h>
107
108 #include <ufs/ffs/fs.h>
109 #include <ufs/ffs/ffs_extern.h>
110
111 #ifdef WAPBL
112 MODULE(MODULE_CLASS_VFS, ffs, "ufs,wapbl");
113 #else
114 MODULE(MODULE_CLASS_VFS, ffs, "ufs");
115 #endif
116
117 static int ffs_vfs_fsync(vnode_t *, int);
118 static int ffs_superblock_validate(struct fs *);
119 static int ffs_is_appleufs(struct vnode *, struct fs *);
120
121 static int ffs_init_vnode(struct ufsmount *, struct vnode *, ino_t);
122 static void ffs_deinit_vnode(struct ufsmount *, struct vnode *);
123
124 static kauth_listener_t ffs_snapshot_listener;
125
126 /* how many times ffs_init() was called */
127 int ffs_initcount = 0;
128
129 #ifdef DEBUG_FFS_MOUNT
130 #define DPRINTF(_fmt, args...) printf("%s: " _fmt "\n", __func__, ##args)
131 #else
132 #define DPRINTF(_fmt, args...) do {} while (/*CONSTCOND*/0)
133 #endif
134
135 extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
136 extern const struct vnodeopv_desc ffs_specop_opv_desc;
137 extern const struct vnodeopv_desc ffs_fifoop_opv_desc;
138
139 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
140 &ffs_vnodeop_opv_desc,
141 &ffs_specop_opv_desc,
142 &ffs_fifoop_opv_desc,
143 NULL,
144 };
145
146 struct vfsops ffs_vfsops = {
147 .vfs_name = MOUNT_FFS,
148 .vfs_min_mount_data = sizeof (struct ufs_args),
149 .vfs_mount = ffs_mount,
150 .vfs_start = ufs_start,
151 .vfs_unmount = ffs_unmount,
152 .vfs_root = ufs_root,
153 .vfs_quotactl = ufs_quotactl,
154 .vfs_statvfs = ffs_statvfs,
155 .vfs_sync = ffs_sync,
156 .vfs_vget = ufs_vget,
157 .vfs_loadvnode = ffs_loadvnode,
158 .vfs_newvnode = ffs_newvnode,
159 .vfs_fhtovp = ffs_fhtovp,
160 .vfs_vptofh = ffs_vptofh,
161 .vfs_init = ffs_init,
162 .vfs_reinit = ffs_reinit,
163 .vfs_done = ffs_done,
164 .vfs_mountroot = ffs_mountroot,
165 .vfs_snapshot = ffs_snapshot,
166 .vfs_extattrctl = ffs_extattrctl,
167 .vfs_suspendctl = genfs_suspendctl,
168 .vfs_renamelock_enter = genfs_renamelock_enter,
169 .vfs_renamelock_exit = genfs_renamelock_exit,
170 .vfs_fsync = ffs_vfs_fsync,
171 .vfs_opv_descs = ffs_vnodeopv_descs
172 };
173
174 static const struct genfs_ops ffs_genfsops = {
175 .gop_size = ffs_gop_size,
176 .gop_alloc = ufs_gop_alloc,
177 .gop_write = genfs_gop_write,
178 .gop_markupdate = ufs_gop_markupdate,
179 .gop_putrange = genfs_gop_putrange,
180 };
181
182 static const struct ufs_ops ffs_ufsops = {
183 .uo_itimes = ffs_itimes,
184 .uo_update = ffs_update,
185 .uo_truncate = ffs_truncate,
186 .uo_balloc = ffs_balloc,
187 .uo_snapgone = ffs_snapgone,
188 .uo_bufrd = ffs_bufrd,
189 .uo_bufwr = ffs_bufwr,
190 };
191
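/*
 * Validate an inode number coming from a file handle: it must lie
 * within the file system, and on UFS2 the inode must already have
 * been initialized (UFS2 initializes inode blocks lazily).
 */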
192 static int
193 ffs_checkrange(struct mount *mp, ino_t ino)
194 {
195 struct fs *fs = VFSTOUFS(mp)->um_fs;
196
197 if (ino < UFS_ROOTINO || ino >= fs->fs_ncg * fs->fs_ipg) {
198 DPRINTF("out of range %u\n", ino);
199 return ESTALE;
200 }
201
202 /*
203 * Need to check if inode is initialized because ffsv2 does
204 * lazy initialization and we can get here from nfs_fhtovp
205 */
206 if (fs->fs_magic != FS_UFS2_MAGIC)
207 return 0;
208
209 struct buf *bp;
210 int cg = ino_to_cg(fs, ino);
211 struct ufsmount *ump = VFSTOUFS(mp);
212
213 int error = bread(ump->um_devvp, FFS_FSBTODB(fs, cgtod(fs, cg)),
214 (int)fs->fs_cgsize, B_MODIFY, &bp);
215 if (error) {
216 DPRINTF("error %d reading cg %d ino %u\n", error, cg, ino);
217 return error;
218 }
219
220 const int needswap = UFS_FSNEEDSWAP(fs);
221
222 struct cg *cgp = (struct cg *)bp->b_data;
223 if (!cg_chkmagic(cgp, needswap)) {
224 brelse(bp, 0);
225 DPRINTF("bad cylinder group magic cg %d ino %u\n", cg, ino);
226 return ESTALE;
227 }
228
229 int32_t initediblk = ufs_rw32(cgp->cg_initediblk, needswap);
230 brelse(bp, 0);
231
232 if (cg * fs->fs_ipg + initediblk < ino) {
233 DPRINTF("cg=%d fs->fs_ipg=%d initediblk=%d ino=%u\n",
234 cg, fs->fs_ipg, initediblk, ino);
235 return ESTALE;
236 }
237 return 0;
238 }
239
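/*
 * kauth(9) listener for KAUTH_SYSTEM_FS_SNAPSHOT requests: allow the
 * operation when the caller's effective uid owns the vnode, otherwise
 * defer the decision.
 */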
240 static int
241 ffs_snapshot_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
242 void *arg0, void *arg1, void *arg2, void *arg3)
243 {
244 vnode_t *vp = arg2;
245 int result = KAUTH_RESULT_DEFER;
246
247 if (action != KAUTH_SYSTEM_FS_SNAPSHOT)
248 return result;
249
250 if (VTOI(vp)->i_uid == kauth_cred_geteuid(cred))
251 result = KAUTH_RESULT_ALLOW;
252
253 return result;
254 }
255
256 SYSCTL_SETUP(ffs_sysctl_setup, "ffs sysctls")
257 {
258 #ifdef UFS_EXTATTR
259 extern int ufs_extattr_autocreate;
260 #endif
261 extern int ffs_log_changeopt;
262
263 sysctl_createv(clog, 0, NULL, NULL,
264 CTLFLAG_PERMANENT,
265 CTLTYPE_NODE, "ffs",
266 SYSCTL_DESCR("Berkeley Fast File System"),
267 NULL, 0, NULL, 0,
268 CTL_VFS, 1, CTL_EOL);
269 /*
270 * @@@ should we even bother with these first three?
271 */
272 sysctl_createv(clog, 0, NULL, NULL,
273 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
274 CTLTYPE_INT, "doclusterread", NULL,
275 sysctl_notavail, 0, NULL, 0,
276 CTL_VFS, 1, FFS_CLUSTERREAD, CTL_EOL);
277 sysctl_createv(clog, 0, NULL, NULL,
278 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
279 CTLTYPE_INT, "doclusterwrite", NULL,
280 sysctl_notavail, 0, NULL, 0,
281 CTL_VFS, 1, FFS_CLUSTERWRITE, CTL_EOL);
282 sysctl_createv(clog, 0, NULL, NULL,
283 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
284 CTLTYPE_INT, "doreallocblks", NULL,
285 sysctl_notavail, 0, NULL, 0,
286 CTL_VFS, 1, FFS_REALLOCBLKS, CTL_EOL);
287 #if 0
288 sysctl_createv(clog, 0, NULL, NULL,
289 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
290 CTLTYPE_INT, "doasyncfree",
291 SYSCTL_DESCR("Release dirty blocks asynchronously"),
292 NULL, 0, &doasyncfree, 0,
293 CTL_VFS, 1, FFS_ASYNCFREE, CTL_EOL);
294 #endif
295 sysctl_createv(clog, 0, NULL, NULL,
296 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
297 CTLTYPE_INT, "log_changeopt",
298 SYSCTL_DESCR("Log changes in optimization strategy"),
299 NULL, 0, &ffs_log_changeopt, 0,
300 CTL_VFS, 1, FFS_LOG_CHANGEOPT, CTL_EOL);
301 #ifdef UFS_EXTATTR
302 sysctl_createv(clog, 0, NULL, NULL,
303 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
304 CTLTYPE_INT, "extattr_autocreate",
305 SYSCTL_DESCR("Size of attribute for "
306 "backing file autocreation"),
307 NULL, 0, &ufs_extattr_autocreate, 0,
308 CTL_VFS, 1, FFS_EXTATTR_AUTOCREATE, CTL_EOL);
309
310 #endif /* UFS_EXTATTR */
311 }
312
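/*
 * Module command handler: attach or detach the ffs vfsops and the
 * snapshot kauth listener.
 */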
313 static int
314 ffs_modcmd(modcmd_t cmd, void *arg)
315 {
316 int error;
317
318 #if 0
319 extern int doasyncfree;
320 #endif
321
322 switch (cmd) {
323 case MODULE_CMD_INIT:
324 error = vfs_attach(&ffs_vfsops);
325 if (error != 0)
326 break;
327
328 ffs_snapshot_listener = kauth_listen_scope(KAUTH_SCOPE_SYSTEM,
329 ffs_snapshot_cb, NULL);
330 if (ffs_snapshot_listener == NULL)
331 printf("ffs_modcmd: can't listen on system scope.\n");
332
333 break;
334 case MODULE_CMD_FINI:
335 error = vfs_detach(&ffs_vfsops);
336 if (error != 0)
337 break;
338 if (ffs_snapshot_listener != NULL)
339 kauth_unlisten_scope(ffs_snapshot_listener);
340 break;
341 default:
342 error = ENOTTY;
343 break;
344 }
345
346 return (error);
347 }
348
349 pool_cache_t ffs_inode_cache;
350 pool_cache_t ffs_dinode1_cache;
351 pool_cache_t ffs_dinode2_cache;
352
353 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, daddr_t);
354 static void ffs_oldfscompat_write(struct fs *, struct ufsmount *);
355
356 /*
357 * Called by main() when ffs is going to be mounted as root.
358 */
359
360 int
361 ffs_mountroot(void)
362 {
363 struct fs *fs;
364 struct mount *mp;
365 struct lwp *l = curlwp; /* XXX */
366 struct ufsmount *ump;
367 int error;
368
369 if (device_class(root_device) != DV_DISK)
370 return (ENODEV);
371
372 if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
373 vrele(rootvp);
374 return (error);
375 }
376
377 /*
378 * We always need to be able to mount the root file system.
379 */
380 mp->mnt_flag |= MNT_FORCE;
381 if ((error = ffs_mountfs(rootvp, mp, l)) != 0) {
382 vfs_unbusy(mp);
383 vfs_rele(mp);
384 return (error);
385 }
386 mp->mnt_flag &= ~MNT_FORCE;
387 mountlist_append(mp);
388 ump = VFSTOUFS(mp);
389 fs = ump->um_fs;
390 memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
391 (void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
392 (void)ffs_statvfs(mp, &mp->mnt_stat);
393 vfs_unbusy(mp);
394 setrootfstime((time_t)fs->fs_time);
395 return (0);
396 }
397
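/*
 * Reconcile the ACL mount options with the superblock flags. POSIX.1e
 * and NFSv4 ACLs are mutually exclusive, and on UFS2 they require the
 * extended-attribute superblock format; ACL-enabled mounts also disable
 * shared and namecache-only lookups.
 */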
398 static int
399 ffs_acls(struct mount *mp, int fs_flags)
400 {
401 struct ufsmount *ump;
402
403 ump = VFSTOUFS(mp);
404 if (ump->um_fstype == UFS2 && (ump->um_flags & UFS_EA) == 0 &&
405 ((mp->mnt_flag & (MNT_POSIX1EACLS | MNT_NFS4ACLS)) != 0 ||
406 (fs_flags & (FS_POSIX1EACLS | FS_NFS4ACLS)) != 0)) {
407 printf("%s: ACLs requested but not supported by this fs\n",
408 mp->mnt_stat.f_mntonname);
409 return EINVAL;
410 }
411
412 if ((fs_flags & FS_POSIX1EACLS) != 0) {
413 #ifdef UFS_ACL
414 if (mp->mnt_flag & MNT_NFS4ACLS)
415 printf("WARNING: %s: POSIX.1e ACLs flag on fs conflicts "
416 "with \"nfsv4acls\" mount option; option ignored\n",
417 mp->mnt_stat.f_mntonname);
418 mp->mnt_flag &= ~MNT_NFS4ACLS;
419 mp->mnt_flag |= MNT_POSIX1EACLS;
420 #else
421 printf("WARNING: %s: POSIX.1e ACLs flag on fs but no "
422 "ACLs support\n", mp->mnt_stat.f_mntonname);
423 #endif
424 }
425 if ((fs_flags & FS_NFS4ACLS) != 0) {
426 #ifdef UFS_ACL
427 if (mp->mnt_flag & MNT_POSIX1EACLS)
428 printf("WARNING: %s: NFSv4 ACLs flag on fs conflicts "
429 "with \"posix1eacls\" mount option; option ignored\n",
430 mp->mnt_stat.f_mntonname);
431 mp->mnt_flag &= ~MNT_POSIX1EACLS;
432 mp->mnt_flag |= MNT_NFS4ACLS;
433
434 #else
435 printf("WARNING: %s: NFSv4 ACLs flag on fs but no "
436 "ACLs support\n", mp->mnt_stat.f_mntonname);
437 #endif
438 }
439 if ((mp->mnt_flag & (MNT_NFS4ACLS | MNT_POSIX1EACLS))
440 == (MNT_NFS4ACLS | MNT_POSIX1EACLS))
441 {
442 printf("%s: \"posix1eacls\" and \"nfsv4acls\" options "
443 "are mutually exclusive\n",
444 mp->mnt_stat.f_mntonname);
445 return EINVAL;
446 }
447
448 if (mp->mnt_flag & (MNT_NFS4ACLS | MNT_POSIX1EACLS))
449 mp->mnt_iflag &= ~(IMNT_SHRLOOKUP|IMNT_NCLOOKUP);
450 else
451 mp->mnt_iflag |= IMNT_SHRLOOKUP|IMNT_NCLOOKUP;
452 return 0;
453 }
454
455 /*
456 * VFS Operations.
457 *
458 * mount system call
459 */
460 int
461 ffs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
462 {
463 struct lwp *l = curlwp;
464 struct vnode *devvp = NULL;
465 struct ufs_args *args = data;
466 struct ufsmount *ump = NULL;
467 struct fs *fs;
468 int error = 0, flags, update;
469 mode_t accessmode;
470
471 if (args == NULL) {
472 DPRINTF("NULL args");
473 return EINVAL;
474 }
475 if (*data_len < sizeof(*args)) {
476 DPRINTF("bad size args %zu != %zu", *data_len, sizeof(*args));
477 return EINVAL;
478 }
479
480 ump = VFSTOUFS(mp);
481 if ((mp->mnt_flag & (MNT_GETARGS|MNT_UPDATE)) && ump == NULL) {
482 DPRINTF("no ump");
483 return EIO;
484 }
485
486 if (mp->mnt_flag & MNT_GETARGS) {
487 args->fspec = NULL;
488 *data_len = sizeof *args;
489 return 0;
490 }
491
492 update = mp->mnt_flag & MNT_UPDATE;
493
494 /* Check arguments */
495 if (args->fspec == NULL) {
496 if (!update) {
497 /* New mounts must have a filename for the device */
498 DPRINTF("no filename for mount");
499 return EINVAL;
500 }
501 } else {
502 /*
503 * Look up the name and verify that it's sane.
504 */
505 error = namei_simple_user(args->fspec,
506 NSM_FOLLOW_NOEMULROOT, &devvp);
507 if (error != 0) {
508 DPRINTF("namei_simple_user returned %d", error);
509 return error;
510 }
511
512 /*
513 * Be sure this is a valid block device
514 */
515 if (devvp->v_type != VBLK) {
516 DPRINTF("non block device %d", devvp->v_type);
517 error = ENOTBLK;
518 goto fail;
519 }
520
521 if (bdevsw_lookup(devvp->v_rdev) == NULL) {
522 DPRINTF("can't find block device 0x%jx",
523 devvp->v_rdev);
524 error = ENXIO;
525 goto fail;
526 }
527
528 if (update) {
529 /*
530 * Be sure we're still naming the same device
531 * used for our initial mount
532 */
533 if (devvp != ump->um_devvp &&
534 devvp->v_rdev != ump->um_devvp->v_rdev) {
535 DPRINTF("wrong device 0x%jx != 0x%jx",
536 (uintmax_t)devvp->v_rdev,
537 (uintmax_t)ump->um_devvp->v_rdev);
538 error = EINVAL;
539 goto fail;
540 }
541 vrele(devvp);
542 devvp = NULL;
543 }
544 }
545
546 if (devvp == NULL) {
547 devvp = ump->um_devvp;
548 vref(devvp);
549 }
550
551 /*
552 * If mount by non-root, then verify that user has necessary
553 * permissions on the device.
554 *
555 * Permission to update a mount is checked higher, so here we presume
556 * updating the mount is okay (for example, as far as securelevel goes)
557 * which leaves us with the normal check.
558 */
559 accessmode = VREAD;
560 if (update ? (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
561 (mp->mnt_flag & MNT_RDONLY) == 0)
562 accessmode |= VWRITE;
563 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
564 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
565 KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp, KAUTH_ARG(accessmode));
566 VOP_UNLOCK(devvp);
567 if (error) {
568 DPRINTF("kauth returned %d", error);
569 goto fail;
570 }
571
572 #ifdef WAPBL
573 /* WAPBL can only be enabled on a r/w mount. */
574 if (((mp->mnt_flag & MNT_RDONLY) && !(mp->mnt_iflag & IMNT_WANTRDWR)) ||
575 (mp->mnt_iflag & IMNT_WANTRDONLY)) {
576 mp->mnt_flag &= ~MNT_LOG;
577 }
578 #else /* !WAPBL */
579 mp->mnt_flag &= ~MNT_LOG;
580 #endif /* !WAPBL */
581
582 error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
583 UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
584 if (error)
585 goto fail;
586
587 if (!update) {
588 int xflags;
589
590 if (mp->mnt_flag & MNT_RDONLY)
591 xflags = FREAD;
592 else
593 xflags = FREAD | FWRITE;
594 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
595 error = VOP_OPEN(devvp, xflags, FSCRED);
596 VOP_UNLOCK(devvp);
597 if (error) {
598 DPRINTF("VOP_OPEN returned %d", error);
599 goto fail;
600 }
601 /* Need fstrans_start() for assertion in ufs_strategy(). */
602 if ((mp->mnt_flag & MNT_RDONLY) == 0)
603 fstrans_start(mp);
604 error = ffs_mountfs(devvp, mp, l);
605 if ((mp->mnt_flag & MNT_RDONLY) == 0)
606 fstrans_done(mp);
607 if (error) {
608 DPRINTF("ffs_mountfs returned %d", error);
609 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
610 (void)VOP_CLOSE(devvp, xflags, NOCRED);
611 VOP_UNLOCK(devvp);
612 goto fail;
613 }
614
615 ump = VFSTOUFS(mp);
616 fs = ump->um_fs;
617 } else {
618 /*
619 * Update the mount.
620 */
621
622 /*
623 * The initial mount got a reference on this
624 * device, so drop the one obtained via
625 * namei(), above.
626 */
627 vrele(devvp);
628
629 ump = VFSTOUFS(mp);
630 fs = ump->um_fs;
631 if (fs->fs_ronly == 0 && (mp->mnt_iflag & IMNT_WANTRDONLY)) {
632 /*
633 * Changing from r/w to r/o
634 */
635 flags = WRITECLOSE;
636 if (mp->mnt_flag & MNT_FORCE)
637 flags |= FORCECLOSE;
638 error = ffs_flushfiles(mp, flags, l);
639 if (error)
640 return error;
641
642 error = UFS_WAPBL_BEGIN(mp);
643 if (error) {
644 DPRINTF("wapbl %d", error);
645 return error;
646 }
647
648 if (ffs_cgupdate(ump, MNT_WAIT) == 0 &&
649 fs->fs_clean & FS_WASCLEAN) {
650 if (mp->mnt_flag & MNT_SOFTDEP)
651 fs->fs_flags &= ~FS_DOSOFTDEP;
652 fs->fs_clean = FS_ISCLEAN;
653 (void) ffs_sbupdate(ump, MNT_WAIT);
654 }
655
656 UFS_WAPBL_END(mp);
657 }
658
659 #ifdef WAPBL
660 if ((mp->mnt_flag & MNT_LOG) == 0) {
661 error = ffs_wapbl_stop(mp, mp->mnt_flag & MNT_FORCE);
662 if (error) {
663 DPRINTF("ffs_wapbl_stop returned %d", error);
664 return error;
665 }
666 }
667 #endif /* WAPBL */
668
669 if (fs->fs_ronly == 0 && (mp->mnt_iflag & IMNT_WANTRDONLY)) {
670 /*
671 * Finish change from r/w to r/o
672 */
673 fs->fs_ronly = 1;
674 fs->fs_fmod = 0;
675 }
676
677 error = ffs_acls(mp, fs->fs_flags);
678 if (error)
679 return error;
680 if (mp->mnt_flag & MNT_RELOAD) {
681 error = ffs_reload(mp, l->l_cred, l);
682 if (error) {
683 DPRINTF("ffs_reload returned %d", error);
684 return error;
685 }
686 }
687
688 if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
689 /*
690 * Changing from read-only to read/write
691 */
692 #ifndef QUOTA2
693 if (fs->fs_flags & FS_DOQUOTA2) {
694 ump->um_flags |= UFS_QUOTA2;
695 uprintf("%s: options QUOTA2 not enabled%s\n",
696 mp->mnt_stat.f_mntonname,
697 (mp->mnt_flag & MNT_FORCE) ? "" :
698 ", not mounting");
699 DPRINTF("ffs_quota2 %d", EINVAL);
700 return EINVAL;
701 }
702 #endif
703 fs->fs_ronly = 0;
704 fs->fs_clean =
705 fs->fs_clean == FS_ISCLEAN ? FS_WASCLEAN : 0;
706 fs->fs_fmod = 1;
707 #ifdef WAPBL
708 if (fs->fs_flags & FS_DOWAPBL) {
709 const char *nm = mp->mnt_stat.f_mntonname;
710 if (!mp->mnt_wapbl_replay) {
711 printf("%s: log corrupted;"
712 " replay cancelled\n", nm);
713 return EFTYPE;
714 }
715 printf("%s: replaying log to disk\n", nm);
716 error = wapbl_replay_write(mp->mnt_wapbl_replay,
717 devvp);
718 if (error) {
719 DPRINTF("%s: wapbl_replay_write %d",
720 nm, error);
721 return error;
722 }
723 wapbl_replay_stop(mp->mnt_wapbl_replay);
724 fs->fs_clean = FS_WASCLEAN;
725 }
726 #endif /* WAPBL */
727 if (fs->fs_snapinum[0] != 0)
728 ffs_snapshot_mount(mp);
729 }
730
731 #ifdef WAPBL
732 error = ffs_wapbl_start(mp);
733 if (error) {
734 DPRINTF("ffs_wapbl_start returned %d", error);
735 return error;
736 }
737 #endif /* WAPBL */
738
739 #ifdef QUOTA2
740 if (!fs->fs_ronly) {
741 error = ffs_quota2_mount(mp);
742 if (error) {
743 DPRINTF("ffs_quota2_mount returned %d", error);
744 return error;
745 }
746 }
747 #endif
748
749 if ((mp->mnt_flag & MNT_DISCARD) && !(ump->um_discarddata))
750 ump->um_discarddata = ffs_discard_init(devvp, fs);
751
752 if (args->fspec == NULL)
753 return 0;
754 }
755
756 (void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
757 sizeof(fs->fs_fsmnt));
758
759 fs->fs_flags &= ~FS_DOSOFTDEP;
760
761 if ((fs->fs_ronly && (fs->fs_clean & FS_ISCLEAN) == 0) ||
762 (!fs->fs_ronly && (fs->fs_clean & FS_WASCLEAN) == 0)) {
763 printf("%s: file system not clean (fs_clean=%#x); "
764 "please fsck(8)\n", mp->mnt_stat.f_mntfromname,
765 fs->fs_clean);
766 }
767
768 if (fs->fs_fmod != 0) {
769 int err;
770
771 KASSERT(!fs->fs_ronly);
772
773 if (fs->fs_clean & FS_WASCLEAN)
774 fs->fs_time = time_second;
775 fs->fs_fmod = 0;
776 err = UFS_WAPBL_BEGIN(mp);
777 if (err == 0) {
778 (void) ffs_cgupdate(ump, MNT_WAIT);
779 UFS_WAPBL_END(mp);
780 }
781 }
782 if ((mp->mnt_flag & MNT_SOFTDEP) != 0) {
783 printf("%s: `-o softdep' is no longer supported, "
784 "consider `-o log'\n", mp->mnt_stat.f_mntfromname);
785 mp->mnt_flag &= ~MNT_SOFTDEP;
786 }
787
788 return (error);
789
790 fail:
791 vrele(devvp);
792 return (error);
793 }
794
795 /*
796 * Reload all incore data for a filesystem (used after running fsck on
797 * the root filesystem and finding things to fix). The filesystem must
798 * be mounted read-only.
799 *
800 * Things to do to update the mount:
801 * 1) invalidate all cached meta-data.
802 * 2) re-read superblock from disk.
803 * 3) re-read summary information from disk.
804 * 4) invalidate all inactive vnodes.
805 * 5) invalidate all cached file data.
806 * 6) re-read inode data for all active vnodes.
807 */
808 int
809 ffs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
810 {
811 struct vnode *vp, *devvp;
812 struct inode *ip;
813 void *space;
814 struct buf *bp;
815 struct fs *fs, *newfs;
816 int i, bsize, blks, error;
817 int32_t *lp, fs_sbsize;
818 struct ufsmount *ump;
819 daddr_t sblockloc;
820 struct vnode_iterator *marker;
821
822 if ((mp->mnt_flag & MNT_RDONLY) == 0)
823 return (EINVAL);
824
825 ump = VFSTOUFS(mp);
826
827 /*
828 * Step 1: invalidate all cached meta-data.
829 */
830 devvp = ump->um_devvp;
831 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
832 error = vinvalbuf(devvp, 0, cred, l, 0, 0);
833 VOP_UNLOCK(devvp);
834 if (error)
835 panic("%s: dirty1", __func__);
836
837 /*
838 * Step 2: re-read superblock from disk. XXX: We don't handle the
839 * possibility that the superblock moved, which implies that we don't
840 * want its size to change either.
841 */
842 fs = ump->um_fs;
843 fs_sbsize = fs->fs_sbsize;
844 error = bread(devvp, fs->fs_sblockloc / DEV_BSIZE, fs_sbsize,
845 0, &bp);
846 if (error)
847 return (error);
848 newfs = kmem_alloc(fs_sbsize, KM_SLEEP);
849 memcpy(newfs, bp->b_data, fs_sbsize);
850
851 #ifdef FFS_EI
852 if (ump->um_flags & UFS_NEEDSWAP) {
853 ffs_sb_swap((struct fs *)bp->b_data, newfs);
854 newfs->fs_flags |= FS_SWAPPED;
855 } else
856 #endif
857 newfs->fs_flags &= ~FS_SWAPPED;
858
859 brelse(bp, 0);
860
861 /* Allow converting from UFS2 to UFS2EA but not vice versa. */
862 if (newfs->fs_magic == FS_UFS2EA_MAGIC) {
863 ump->um_flags |= UFS_EA;
864 newfs->fs_magic = FS_UFS2_MAGIC;
865 } else {
866 if ((ump->um_flags & UFS_EA) != 0)
867 return EINVAL;
868 }
869
870 if ((newfs->fs_magic != FS_UFS1_MAGIC) &&
871 (newfs->fs_magic != FS_UFS2_MAGIC)) {
872 kmem_free(newfs, fs_sbsize);
873 return (EIO); /* XXX needs translation */
874 }
875 if (!ffs_superblock_validate(newfs)) {
876 kmem_free(newfs, fs_sbsize);
877 return (EINVAL);
878 }
879
880 /*
881 * The current implementation doesn't handle the possibility that
882 * these values may have changed.
883 */
884 if ((newfs->fs_sbsize != fs_sbsize) ||
885 (newfs->fs_cssize != fs->fs_cssize) ||
886 (newfs->fs_contigsumsize != fs->fs_contigsumsize) ||
887 (newfs->fs_ncg != fs->fs_ncg)) {
888 kmem_free(newfs, fs_sbsize);
889 return (EINVAL);
890 }
891
892 /* Store off old fs_sblockloc for ffs_oldfscompat_read. */
893 sblockloc = fs->fs_sblockloc;
894 /*
895 * Copy pointer fields back into superblock before copying in XXX
896 * new superblock. These should really be in the ufsmount. XXX
897 * Note that important parameters (eg fs_ncg) are unchanged.
898 */
899 newfs->fs_csp = fs->fs_csp;
900 newfs->fs_maxcluster = fs->fs_maxcluster;
901 newfs->fs_contigdirs = fs->fs_contigdirs;
902 newfs->fs_ronly = fs->fs_ronly;
903 newfs->fs_active = fs->fs_active;
904 memcpy(fs, newfs, (u_int)fs_sbsize);
905 kmem_free(newfs, fs_sbsize);
906
907 /*
908 * Recheck for Apple UFS filesystem.
909 */
910 ump->um_flags &= ~UFS_ISAPPLEUFS;
911 if (ffs_is_appleufs(devvp, fs)) {
912 #ifdef APPLE_UFS
913 ump->um_flags |= UFS_ISAPPLEUFS;
914 #else
915 DPRINTF("AppleUFS not supported");
916 return (EIO); /* XXX: really? */
917 #endif
918 }
919
920 if (UFS_MPISAPPLEUFS(ump)) {
921 /* see comment about NeXT below */
922 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
923 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
924 mp->mnt_iflag |= IMNT_DTYPE;
925 } else {
926 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
927 ump->um_dirblksiz = UFS_DIRBLKSIZ;
928 if (ump->um_maxsymlinklen > 0)
929 mp->mnt_iflag |= IMNT_DTYPE;
930 else
931 mp->mnt_iflag &= ~IMNT_DTYPE;
932 }
933 ffs_oldfscompat_read(fs, ump, sblockloc);
934
935 mutex_enter(&ump->um_lock);
936 ump->um_maxfilesize = fs->fs_maxfilesize;
937 if (fs->fs_flags & ~(FS_KNOWN_FLAGS | FS_INTERNAL)) {
938 uprintf("%s: unknown ufs flags: 0x%08"PRIx32"%s\n",
939 mp->mnt_stat.f_mntonname, fs->fs_flags,
940 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
941 if ((mp->mnt_flag & MNT_FORCE) == 0) {
942 mutex_exit(&ump->um_lock);
943 return (EINVAL);
944 }
945 }
946
947 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
948 fs->fs_pendingblocks = 0;
949 fs->fs_pendinginodes = 0;
950 }
951 mutex_exit(&ump->um_lock);
952
953 ffs_statvfs(mp, &mp->mnt_stat);
954 /*
955 * Step 3: re-read summary information from disk.
956 */
957 blks = howmany(fs->fs_cssize, fs->fs_fsize);
958 space = fs->fs_csp;
959 for (i = 0; i < blks; i += fs->fs_frag) {
960 bsize = fs->fs_bsize;
961 if (i + fs->fs_frag > blks)
962 bsize = (blks - i) * fs->fs_fsize;
963 error = bread(devvp, FFS_FSBTODB(fs, fs->fs_csaddr + i), bsize,
964 0, &bp);
965 if (error) {
966 return (error);
967 }
968 #ifdef FFS_EI
969 if (UFS_FSNEEDSWAP(fs))
970 ffs_csum_swap((struct csum *)bp->b_data,
971 (struct csum *)space, bsize);
972 else
973 #endif
974 memcpy(space, bp->b_data, (size_t)bsize);
975 space = (char *)space + bsize;
976 brelse(bp, 0);
977 }
978 /*
979 * We no longer know anything about clusters per cylinder group.
980 */
981 if (fs->fs_contigsumsize > 0) {
982 lp = fs->fs_maxcluster;
983 for (i = 0; i < fs->fs_ncg; i++)
984 *lp++ = fs->fs_contigsumsize;
985 }
986
987 vfs_vnode_iterator_init(mp, &marker);
988 while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL))) {
989 /*
990 * Step 4: invalidate all inactive vnodes.
991 */
992 if (vrecycle(vp))
993 continue;
994 /*
995 * Step 5: invalidate all cached file data.
996 */
997 if (vn_lock(vp, LK_EXCLUSIVE)) {
998 vrele(vp);
999 continue;
1000 }
1001 if (vinvalbuf(vp, 0, cred, l, 0, 0))
1002 panic("%s: dirty2", __func__);
1003 /*
1004 * Step 6: re-read inode data for all active vnodes.
1005 */
1006 ip = VTOI(vp);
1007 error = bread(devvp, FFS_FSBTODB(fs, ino_to_fsba(fs, ip->i_number)),
1008 (int)fs->fs_bsize, 0, &bp);
1009 if (error) {
1010 vput(vp);
1011 break;
1012 }
1013 ffs_load_inode(bp, ip, fs, ip->i_number);
1014 brelse(bp, 0);
1015 vput(vp);
1016 }
1017 vfs_vnode_iterator_destroy(marker);
1018 return (error);
1019 }
1020
1021 /*
1022 * Possible superblock locations ordered from most to least likely.
1023 */
1024 static const int sblock_try[] = SBLOCKSEARCH;
1025
1026
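/*
 * Sanity-check a superblock that was just read from disk so that its
 * sizes, shifts and masks can be trusted by the rest of the mount code.
 */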
1027 static int
1028 ffs_superblock_validate(struct fs *fs)
1029 {
1030 int32_t i, fs_bshift = 0, fs_fshift = 0, fs_fragshift = 0, fs_frag;
1031 int32_t fs_inopb;
1032
1033 /* Check the superblock size */
1034 if (fs->fs_sbsize > SBLOCKSIZE || fs->fs_sbsize < sizeof(struct fs))
1035 return 0;
1036
1037 /* Check the file system blocksize */
1038 if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < MINBSIZE)
1039 return 0;
1040 if (!powerof2(fs->fs_bsize))
1041 return 0;
1042
1043 /* Check the size of frag blocks */
1044 if (!powerof2(fs->fs_fsize))
1045 return 0;
1046 if (fs->fs_fsize == 0)
1047 return 0;
1048
1049 /*
1050 * XXX: these values are just zero-checked to prevent obvious
1051 * bugs. We need more strict checks.
1052 */
1053 if (fs->fs_size == 0 && fs->fs_old_size == 0)
1054 return 0;
1055 if (fs->fs_cssize == 0)
1056 return 0;
1057 if (fs->fs_ipg == 0)
1058 return 0;
1059 if (fs->fs_fpg == 0)
1060 return 0;
1061 if (fs->fs_ncg == 0)
1062 return 0;
1063 if (fs->fs_maxbpg == 0)
1064 return 0;
1065
1066 /* Check the number of inodes per block */
1067 if (fs->fs_magic == FS_UFS1_MAGIC)
1068 fs_inopb = fs->fs_bsize / sizeof(struct ufs1_dinode);
1069 else /* fs->fs_magic == FS_UFS2_MAGIC */
1070 fs_inopb = fs->fs_bsize / sizeof(struct ufs2_dinode);
1071 if (fs->fs_inopb != fs_inopb)
1072 return 0;
1073
1074 /* Block size cannot be smaller than fragment size */
1075 if (fs->fs_bsize < fs->fs_fsize)
1076 return 0;
1077
1078 /* Compute fs_bshift and ensure it is consistent */
1079 for (i = fs->fs_bsize; i > 1; i >>= 1)
1080 fs_bshift++;
1081 if (fs->fs_bshift != fs_bshift)
1082 return 0;
1083
1084 /* Compute fs_fshift and ensure it is consistent */
1085 for (i = fs->fs_fsize; i > 1; i >>= 1)
1086 fs_fshift++;
1087 if (fs->fs_fshift != fs_fshift)
1088 return 0;
1089
1090 /* Compute fs_fragshift and ensure it is consistent */
1091 for (i = fs->fs_frag; i > 1; i >>= 1)
1092 fs_fragshift++;
1093 if (fs->fs_fragshift != fs_fragshift)
1094 return 0;
1095
1096 /* Check the masks */
1097 if (fs->fs_bmask != ~(fs->fs_bsize - 1))
1098 return 0;
1099 if (fs->fs_fmask != ~(fs->fs_fsize - 1))
1100 return 0;
1101
1102 /*
1103 * Now that the shifts and masks are sanitized, we can use the ffs_ API.
1104 */
1105
1106 /* Check the number of frag blocks */
1107 if ((fs_frag = ffs_numfrags(fs, fs->fs_bsize)) > MAXFRAG)
1108 return 0;
1109 if (fs->fs_frag != fs_frag)
1110 return 0;
1111
1112 /* Check the size of cylinder groups */
1113 if ((fs->fs_cgsize < sizeof(struct cg)) ||
1114 (fs->fs_cgsize > fs->fs_bsize))
1115 return 0;
1116
1117 return 1;
1118 }
1119
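/*
 * Decide whether the volume holds an Apple UFS file system, either from
 * the wedge/partition type or from an on-disk Apple UFS label.
 */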
1120 static int
1121 ffs_is_appleufs(struct vnode *devvp, struct fs *fs)
1122 {
1123 struct dkwedge_info dkw;
1124 int ret = 0;
1125
1126 /*
1127 * First check to see if this is tagged as an Apple UFS filesystem
1128 * in the disklabel.
1129 */
1130 if (getdiskinfo(devvp, &dkw) == 0 &&
1131 strcmp(dkw.dkw_ptype, DKW_PTYPE_APPLEUFS) == 0)
1132 ret = 1;
1133 #ifdef APPLE_UFS
1134 else {
1135 struct appleufslabel *applefs;
1136 struct buf *bp;
1137 daddr_t blkno = APPLEUFS_LABEL_OFFSET / DEV_BSIZE;
1138 int error;
1139
1140 /*
1141 * Manually look for an Apple UFS label, and if a valid one
1142 * is found, then treat it like an Apple UFS filesystem anyway.
1143 */
1144 error = bread(devvp, blkno, APPLEUFS_LABEL_SIZE, 0, &bp);
1145 if (error) {
1146 DPRINTF("bread@0x%jx returned %d", (intmax_t)blkno, error);
1147 return 0;
1148 }
1149 applefs = (struct appleufslabel *)bp->b_data;
1150 error = ffs_appleufs_validate(fs->fs_fsmnt, applefs, NULL);
1151 if (error == 0)
1152 ret = 1;
1153 brelse(bp, 0);
1154 }
1155 #endif
1156
1157 return ret;
1158 }
1159
1160 /*
1161 * Common code for mount and mountroot
1162 */
1163 int
1164 ffs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
1165 {
1166 struct ufsmount *ump = NULL;
1167 struct buf *bp = NULL;
1168 struct fs *fs = NULL;
1169 dev_t dev;
1170 void *space;
1171 daddr_t sblockloc = 0;
1172 int blks, fstype = 0;
1173 int error, i, bsize, ronly, bset = 0;
1174 #ifdef FFS_EI
1175 int needswap = 0; /* keep gcc happy */
1176 #endif
1177 int32_t *lp;
1178 kauth_cred_t cred;
1179 u_int32_t allocsbsize, fs_sbsize = 0;
1180
1181 dev = devvp->v_rdev;
1182 cred = l ? l->l_cred : NOCRED;
1183
1184 /* Flush out any old buffers remaining from a previous use. */
1185 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1186 error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
1187 VOP_UNLOCK(devvp);
1188 if (error) {
1189 DPRINTF("vinvalbuf returned %d", error);
1190 return error;
1191 }
1192
1193 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
1194
1195 ump = kmem_zalloc(sizeof(*ump), KM_SLEEP);
1196 mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);
1197 error = ffs_snapshot_init(ump);
1198 if (error) {
1199 DPRINTF("ffs_snapshot_init returned %d", error);
1200 goto out;
1201 }
1202 ump->um_ops = &ffs_ufsops;
1203
1204 #ifdef WAPBL
1205 sbagain:
1206 #endif
1207 /*
1208 * Try reading the superblock in each of its possible locations.
1209 */
1210 for (i = 0; ; i++) {
1211 daddr_t fs_sblockloc;
1212
1213 if (bp != NULL) {
1214 brelse(bp, BC_NOCACHE);
1215 bp = NULL;
1216 }
1217 if (sblock_try[i] == -1) {
1218 DPRINTF("no superblock found");
1219 error = EINVAL;
1220 fs = NULL;
1221 goto out;
1222 }
1223
1224 error = bread(devvp, sblock_try[i] / DEV_BSIZE, SBLOCKSIZE,
1225 0, &bp);
1226 if (error) {
1227 DPRINTF("bread@0x%x returned %d",
1228 sblock_try[i] / DEV_BSIZE, error);
1229 fs = NULL;
1230 goto out;
1231 }
1232 fs = (struct fs *)bp->b_data;
1233
1234 sblockloc = sblock_try[i];
1235 DPRINTF("fs_magic 0x%x", fs->fs_magic);
1236
1237 /*
1238 * Swap: here, we swap fs->fs_sbsize in order to get the correct
1239 * size to read the superblock. Once read, we swap the whole
1240 * superblock structure.
1241 */
1242 if (fs->fs_magic == FS_UFS2EA_MAGIC) {
1243 ump->um_flags |= UFS_EA;
1244 fs->fs_magic = FS_UFS2_MAGIC;
1245 } else if (fs->fs_magic == FS_UFS2EA_MAGIC_SWAPPED) {
1246 ump->um_flags |= UFS_EA;
1247 fs->fs_magic = FS_UFS2_MAGIC_SWAPPED;
1248 }
1249 if (fs->fs_magic == FS_UFS1_MAGIC) {
1250 fs_sbsize = fs->fs_sbsize;
1251 fstype = UFS1;
1252 #ifdef FFS_EI
1253 needswap = 0;
1254 } else if (fs->fs_magic == FS_UFS1_MAGIC_SWAPPED) {
1255 fs_sbsize = bswap32(fs->fs_sbsize);
1256 fstype = UFS1;
1257 needswap = 1;
1258 #endif
1259 } else if (fs->fs_magic == FS_UFS2_MAGIC) {
1260 fs_sbsize = fs->fs_sbsize;
1261 fstype = UFS2;
1262 #ifdef FFS_EI
1263 needswap = 0;
1264 } else if (fs->fs_magic == FS_UFS2_MAGIC_SWAPPED) {
1265 fs_sbsize = bswap32(fs->fs_sbsize);
1266 fstype = UFS2;
1267 needswap = 1;
1268 #endif
1269 } else
1270 continue;
1271
1272 /* fs->fs_sblockloc isn't defined for old filesystems */
1273 if (fstype == UFS1 && !(fs->fs_old_flags & FS_FLAGS_UPDATED)) {
1274 if (sblockloc == SBLOCK_UFS2)
1275 /*
1276 * This is likely to be the first alternate
1277 * in a filesystem with 64k blocks.
1278 * Don't use it.
1279 */
1280 continue;
1281 fs_sblockloc = sblockloc;
1282 } else {
1283 fs_sblockloc = fs->fs_sblockloc;
1284 #ifdef FFS_EI
1285 if (needswap)
1286 fs_sblockloc = bswap64(fs_sblockloc);
1287 #endif
1288 }
1289
1290 /* Check we haven't found an alternate superblock */
1291 if (fs_sblockloc != sblockloc)
1292 continue;
1293
1294 /* Check the superblock size */
1295 if (fs_sbsize > SBLOCKSIZE || fs_sbsize < sizeof(struct fs))
1296 continue;
1297 fs = kmem_alloc((u_long)fs_sbsize, KM_SLEEP);
1298 memcpy(fs, bp->b_data, fs_sbsize);
1299
1300 /* Swap the whole superblock structure, if necessary. */
1301 #ifdef FFS_EI
1302 if (needswap) {
1303 ffs_sb_swap((struct fs*)bp->b_data, fs);
1304 fs->fs_flags |= FS_SWAPPED;
1305 } else
1306 #endif
1307 fs->fs_flags &= ~FS_SWAPPED;
1308
1309 /*
1310 * Now that everything is swapped, the superblock is ready to
1311 * be sanitized.
1312 */
1313 if (!ffs_superblock_validate(fs)) {
1314 kmem_free(fs, fs_sbsize);
1315 continue;
1316 }
1317
1318 /* Ok seems to be a good superblock */
1319 break;
1320 }
1321
1322 ump->um_fs = fs;
1323
1324 #ifdef WAPBL
1325 if ((mp->mnt_wapbl_replay == 0) && (fs->fs_flags & FS_DOWAPBL)) {
1326 error = ffs_wapbl_replay_start(mp, fs, devvp);
1327 if (error && (mp->mnt_flag & MNT_FORCE) == 0) {
1328 DPRINTF("ffs_wapbl_replay_start returned %d", error);
1329 goto out;
1330 }
1331 if (!error) {
1332 if (!ronly) {
1333 /* XXX fsmnt may be stale. */
1334 printf("%s: replaying log to disk\n",
1335 fs->fs_fsmnt);
1336 error = wapbl_replay_write(mp->mnt_wapbl_replay,
1337 devvp);
1338 if (error) {
1339 DPRINTF("wapbl_replay_write returned %d",
1340 error);
1341 goto out;
1342 }
1343 wapbl_replay_stop(mp->mnt_wapbl_replay);
1344 fs->fs_clean = FS_WASCLEAN;
1345 } else {
1346 /* XXX fsmnt may be stale */
1347 printf("%s: replaying log to memory\n",
1348 fs->fs_fsmnt);
1349 }
1350
1351 /* Force a re-read of the superblock */
1352 brelse(bp, BC_INVAL);
1353 bp = NULL;
1354 kmem_free(fs, fs_sbsize);
1355 fs = NULL;
1356 goto sbagain;
1357 }
1358 }
1359 #else /* !WAPBL */
1360 if ((fs->fs_flags & FS_DOWAPBL) && (mp->mnt_flag & MNT_FORCE) == 0) {
1361 error = EPERM;
1362 DPRINTF("no force %d", error);
1363 goto out;
1364 }
1365 #endif /* !WAPBL */
1366
1367 ffs_oldfscompat_read(fs, ump, sblockloc);
1368 ump->um_maxfilesize = fs->fs_maxfilesize;
1369
1370 if (fs->fs_flags & ~(FS_KNOWN_FLAGS | FS_INTERNAL)) {
1371 uprintf("%s: unknown ufs flags: 0x%08"PRIx32"%s\n",
1372 mp->mnt_stat.f_mntonname, fs->fs_flags,
1373 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
1374 if ((mp->mnt_flag & MNT_FORCE) == 0) {
1375 error = EINVAL;
1376 DPRINTF("no force %d", error);
1377 goto out;
1378 }
1379 }
1380
1381 fs->fs_fmod = 0;
1382 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1383 fs->fs_pendingblocks = 0;
1384 fs->fs_pendinginodes = 0;
1385 }
1386
1387 ump->um_fstype = fstype;
1388 if (fs->fs_sbsize < SBLOCKSIZE)
1389 brelse(bp, BC_INVAL);
1390 else
1391 brelse(bp, 0);
1392 bp = NULL;
1393
1394 if (ffs_is_appleufs(devvp, fs)) {
1395 #ifdef APPLE_UFS
1396 ump->um_flags |= UFS_ISAPPLEUFS;
1397 #else
1398 DPRINTF("AppleUFS not supported");
1399 error = EINVAL;
1400 goto out;
1401 #endif
1402 }
1403
1404 #if 0
1405 /*
1406 * XXX This code changes the behaviour of mounting dirty filesystems, to
1407 * XXX require "mount -f ..." to mount them. This doesn't match what
1408 * XXX mount(8) describes and is disabled for now.
1409 */
1410 /*
1411 * If the file system is not clean, don't allow it to be mounted
1412 * unless MNT_FORCE is specified. (Note: MNT_FORCE is always set
1413 * for the root file system.)
1414 */
1415 if (fs->fs_flags & FS_DOWAPBL) {
1416 /*
1417 * wapbl normally expects to be FS_WASCLEAN when the FS_DOWAPBL
1418 * bit is set, although there's a window in unmount where it
1419 * could be FS_ISCLEAN
1420 */
1421 if ((mp->mnt_flag & MNT_FORCE) == 0 &&
1422 (fs->fs_clean & (FS_WASCLEAN | FS_ISCLEAN)) == 0) {
1423 error = EPERM;
1424 goto out;
1425 }
1426 } else
1427 if ((fs->fs_clean & FS_ISCLEAN) == 0 &&
1428 (mp->mnt_flag & MNT_FORCE) == 0) {
1429 error = EPERM;
1430 goto out;
1431 }
1432 #endif
1433
1434 /*
1435 * Verify that we can access the last block in the fs
1436 * if we're mounting read/write.
1437 */
1438 if (!ronly) {
1439 error = bread(devvp, FFS_FSBTODB(fs, fs->fs_size - 1),
1440 fs->fs_fsize, 0, &bp);
1441 if (error) {
1442 DPRINTF("bread@0x%jx returned %d",
1443 (intmax_t)FFS_FSBTODB(fs, fs->fs_size - 1),
1444 error);
1445 bset = BC_INVAL;
1446 goto out;
1447 }
1448 if (bp->b_bcount != fs->fs_fsize) {
1449 DPRINTF("bcount %x != fsize %x", bp->b_bcount,
1450 fs->fs_fsize);
1451 error = EINVAL;
1452 bset = BC_INVAL;
1453 goto out;
1454 }
1455 brelse(bp, BC_INVAL);
1456 bp = NULL;
1457 }
1458
1459 fs->fs_ronly = ronly;
1460 /* Don't bump fs_clean if we're replaying journal */
1461 if (!((fs->fs_flags & FS_DOWAPBL) && (fs->fs_clean & FS_WASCLEAN))) {
1462 if (ronly == 0) {
1463 fs->fs_clean =
1464 fs->fs_clean == FS_ISCLEAN ? FS_WASCLEAN : 0;
1465 fs->fs_fmod = 1;
1466 }
1467 }
1468
1469 bsize = fs->fs_cssize;
1470 blks = howmany(bsize, fs->fs_fsize);
1471 if (fs->fs_contigsumsize > 0)
1472 bsize += fs->fs_ncg * sizeof(int32_t);
1473 bsize += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1474 allocsbsize = bsize;
1475 space = kmem_alloc((u_long)allocsbsize, KM_SLEEP);
1476 fs->fs_csp = space;
1477
1478 for (i = 0; i < blks; i += fs->fs_frag) {
1479 bsize = fs->fs_bsize;
1480 if (i + fs->fs_frag > blks)
1481 bsize = (blks - i) * fs->fs_fsize;
1482 error = bread(devvp, FFS_FSBTODB(fs, fs->fs_csaddr + i), bsize,
1483 0, &bp);
1484 if (error) {
1485 DPRINTF("bread@0x%jx %d",
1486 (intmax_t)FFS_FSBTODB(fs, fs->fs_csaddr + i),
1487 error);
1488 goto out1;
1489 }
1490 #ifdef FFS_EI
1491 if (needswap)
1492 ffs_csum_swap((struct csum *)bp->b_data,
1493 (struct csum *)space, bsize);
1494 else
1495 #endif
1496 memcpy(space, bp->b_data, (u_int)bsize);
1497
1498 space = (char *)space + bsize;
1499 brelse(bp, 0);
1500 bp = NULL;
1501 }
1502 if (fs->fs_contigsumsize > 0) {
1503 fs->fs_maxcluster = lp = space;
1504 for (i = 0; i < fs->fs_ncg; i++)
1505 *lp++ = fs->fs_contigsumsize;
1506 space = lp;
1507 }
1508 bsize = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1509 fs->fs_contigdirs = space;
1510 space = (char *)space + bsize;
1511 memset(fs->fs_contigdirs, 0, bsize);
1512
1513 /* Compatibility for old filesystems - XXX */
1514 if (fs->fs_avgfilesize <= 0)
1515 fs->fs_avgfilesize = AVFILESIZ;
1516 if (fs->fs_avgfpdir <= 0)
1517 fs->fs_avgfpdir = AFPDIR;
1518 fs->fs_active = NULL;
1519
1520 mp->mnt_data = ump;
1521 mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
1522 mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_FFS);
1523 mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
1524 mp->mnt_stat.f_namemax = FFS_MAXNAMLEN;
1525 if (UFS_MPISAPPLEUFS(ump)) {
1526 /* NeXT used to keep short symlinks in the inode even
1527 * when using FS_42INODEFMT. In that case fs->fs_maxsymlinklen
1528 * is probably -1, but we still need to be able to identify
1529 * short symlinks.
1530 */
1531 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
1532 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
1533 mp->mnt_iflag |= IMNT_DTYPE;
1534 } else {
1535 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
1536 ump->um_dirblksiz = UFS_DIRBLKSIZ;
1537 if (ump->um_maxsymlinklen > 0)
1538 mp->mnt_iflag |= IMNT_DTYPE;
1539 else
1540 mp->mnt_iflag &= ~IMNT_DTYPE;
1541 }
1542 mp->mnt_fs_bshift = fs->fs_bshift;
1543 mp->mnt_dev_bshift = DEV_BSHIFT; /* XXX */
1544 mp->mnt_flag |= MNT_LOCAL;
1545 mp->mnt_iflag |= IMNT_MPSAFE | IMNT_CAN_RWTORO | IMNT_SHRLOOKUP |
1546 IMNT_NCLOOKUP;
1547 #ifdef FFS_EI
1548 if (needswap)
1549 ump->um_flags |= UFS_NEEDSWAP;
1550 #endif
1551 error = ffs_acls(mp, fs->fs_flags);
1552 if (error)
1553 goto out1;
1554 ump->um_mountp = mp;
1555 ump->um_dev = dev;
1556 ump->um_devvp = devvp;
1557 ump->um_nindir = fs->fs_nindir;
1558 ump->um_lognindir = ffs(fs->fs_nindir) - 1;
1559 ump->um_bptrtodb = fs->fs_fshift - DEV_BSHIFT;
1560 ump->um_seqinc = fs->fs_frag;
1561 for (i = 0; i < MAXQUOTAS; i++)
1562 ump->um_quotas[i] = NULLVP;
1563 spec_node_setmountedfs(devvp, mp);
1564 if (ronly == 0 && fs->fs_snapinum[0] != 0)
1565 ffs_snapshot_mount(mp);
1566 #ifdef WAPBL
1567 if (!ronly) {
1568 KDASSERT(fs->fs_ronly == 0);
1569 /*
1570 * ffs_wapbl_start() needs mp->mnt_stat initialised if it
1571 * needs to create a new log file in-filesystem.
1572 */
1573 error = ffs_statvfs(mp, &mp->mnt_stat);
1574 if (error) {
1575 DPRINTF("ffs_statvfs returned %d", error);
1576 goto out1;
1577 }
1578
1579 error = ffs_wapbl_start(mp);
1580 if (error) {
1581 DPRINTF("ffs_wapbl_start returned %d", error);
1582 goto out1;
1583 }
1584 }
1585 #endif /* WAPBL */
1586 if (ronly == 0) {
1587 #ifdef QUOTA2
1588 error = ffs_quota2_mount(mp);
1589 if (error) {
1590 DPRINTF("ffs_quota2_mount returned %d", error);
1591 goto out1;
1592 }
1593 #else
1594 if (fs->fs_flags & FS_DOQUOTA2) {
1595 ump->um_flags |= UFS_QUOTA2;
1596 uprintf("%s: options QUOTA2 not enabled%s\n",
1597 mp->mnt_stat.f_mntonname,
1598 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
1599 if ((mp->mnt_flag & MNT_FORCE) == 0) {
1600 error = EINVAL;
1601 DPRINTF("quota disabled %d", error);
1602 goto out1;
1603 }
1604 }
1605 #endif
1606 }
1607
1608 if (mp->mnt_flag & MNT_DISCARD)
1609 ump->um_discarddata = ffs_discard_init(devvp, fs);
1610
1611 return (0);
1612 out1:
1613 kmem_free(fs->fs_csp, allocsbsize);
1614 out:
1615 #ifdef WAPBL
1616 if (mp->mnt_wapbl_replay) {
1617 wapbl_replay_stop(mp->mnt_wapbl_replay);
1618 wapbl_replay_free(mp->mnt_wapbl_replay);
1619 mp->mnt_wapbl_replay = 0;
1620 }
1621 #endif
1622
1623 if (fs)
1624 kmem_free(fs, fs->fs_sbsize);
1625 spec_node_setmountedfs(devvp, NULL);
1626 if (bp)
1627 brelse(bp, bset);
1628 if (ump) {
1629 if (ump->um_oldfscompat)
1630 kmem_free(ump->um_oldfscompat, 512 + 3*sizeof(int32_t));
1631 mutex_destroy(&ump->um_lock);
1632 kmem_free(ump, sizeof(*ump));
1633 mp->mnt_data = NULL;
1634 }
1635 return (error);
1636 }
1637
1638 /*
1639 * Sanity checks for loading old filesystem superblocks.
1640 * See ffs_oldfscompat_write below for unwound actions.
1641 *
1642 * XXX - Parts get retired eventually.
1643 * Unfortunately new bits get added.
1644 */
1645 static void
1646 ffs_oldfscompat_read(struct fs *fs, struct ufsmount *ump, daddr_t sblockloc)
1647 {
1648 off_t maxfilesize;
1649 int32_t *extrasave;
1650
1651 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1652 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1653 return;
1654
1655 if (!ump->um_oldfscompat)
1656 ump->um_oldfscompat = kmem_alloc(512 + 3*sizeof(int32_t),
1657 KM_SLEEP);
1658
1659 memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
1660 extrasave = ump->um_oldfscompat;
1661 extrasave += 512/sizeof(int32_t);
1662 extrasave[0] = fs->fs_old_npsect;
1663 extrasave[1] = fs->fs_old_interleave;
1664 extrasave[2] = fs->fs_old_trackskew;
1665
1666 /* These fields will be overwritten by their
1667 * original values in ffs_oldfscompat_write, so it is harmless
1668 * to modify them here.
1669 */
1670 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
1671 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
1672 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
1673 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
1674
1675 fs->fs_maxbsize = fs->fs_bsize;
1676 fs->fs_time = fs->fs_old_time;
1677 fs->fs_size = fs->fs_old_size;
1678 fs->fs_dsize = fs->fs_old_dsize;
1679 fs->fs_csaddr = fs->fs_old_csaddr;
1680 fs->fs_sblockloc = sblockloc;
1681
1682 fs->fs_flags = fs->fs_old_flags | (fs->fs_flags & FS_INTERNAL);
1683
1684 if (fs->fs_old_postblformat == FS_42POSTBLFMT) {
1685 fs->fs_old_nrpos = 8;
1686 fs->fs_old_npsect = fs->fs_old_nsect;
1687 fs->fs_old_interleave = 1;
1688 fs->fs_old_trackskew = 0;
1689 }
1690
1691 if (fs->fs_magic == FS_UFS1_MAGIC &&
1692 fs->fs_old_inodefmt < FS_44INODEFMT) {
1693 fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
1694 fs->fs_qbmask = ~fs->fs_bmask;
1695 fs->fs_qfmask = ~fs->fs_fmask;
1696 }
1697
1698 maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;
1699 if (fs->fs_maxfilesize > maxfilesize)
1700 fs->fs_maxfilesize = maxfilesize;
1701
1702 /* Compatibility for old filesystems */
1703 if (fs->fs_avgfilesize <= 0)
1704 fs->fs_avgfilesize = AVFILESIZ;
1705 if (fs->fs_avgfpdir <= 0)
1706 fs->fs_avgfpdir = AFPDIR;
1707
1708 #if 0
1709 if (bigcgs) {
1710 fs->fs_save_cgsize = fs->fs_cgsize;
1711 fs->fs_cgsize = fs->fs_bsize;
1712 }
1713 #endif
1714 }
1715
1716 /*
1717 * Unwinding superblock updates for old filesystems.
1718 * See ffs_oldfscompat_read above for details.
1719 *
1720 * XXX - Parts get retired eventually.
1721 * Unfortunately new bits get added.
1722 */
1723 static void
1724 ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
1725 {
1726 int32_t *extrasave;
1727
1728 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1729 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1730 return;
1731
1732 fs->fs_old_time = fs->fs_time;
1733 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
1734 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
1735 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
1736 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
1737 fs->fs_old_flags = fs->fs_flags;
1738
1739 #if 0
1740 if (bigcgs) {
1741 fs->fs_cgsize = fs->fs_save_cgsize;
1742 }
1743 #endif
1744
1745 memcpy(&fs->fs_old_postbl_start, ump->um_oldfscompat, 512);
1746 extrasave = ump->um_oldfscompat;
1747 extrasave += 512/sizeof(int32_t);
1748 fs->fs_old_npsect = extrasave[0];
1749 fs->fs_old_interleave = extrasave[1];
1750 fs->fs_old_trackskew = extrasave[2];
1751
1752 }
1753
1754 /*
1755 * unmount vfs operation
1756 */
1757 int
1758 ffs_unmount(struct mount *mp, int mntflags)
1759 {
1760 struct lwp *l = curlwp;
1761 struct ufsmount *ump = VFSTOUFS(mp);
1762 struct fs *fs = ump->um_fs;
1763 int error, flags;
1764 u_int32_t bsize;
1765 #ifdef WAPBL
1766 extern int doforce;
1767 #endif
1768
1769 if (ump->um_discarddata) {
1770 ffs_discard_finish(ump->um_discarddata, mntflags);
1771 ump->um_discarddata = NULL;
1772 }
1773
1774 flags = 0;
1775 if (mntflags & MNT_FORCE)
1776 flags |= FORCECLOSE;
1777 if ((error = ffs_flushfiles(mp, flags, l)) != 0)
1778 return (error);
1779 error = UFS_WAPBL_BEGIN(mp);
1780 if (error == 0)
1781 if (fs->fs_ronly == 0 &&
1782 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
1783 fs->fs_clean & FS_WASCLEAN) {
1784 fs->fs_clean = FS_ISCLEAN;
1785 fs->fs_fmod = 0;
1786 (void) ffs_sbupdate(ump, MNT_WAIT);
1787 }
1788 if (error == 0)
1789 UFS_WAPBL_END(mp);
1790 #ifdef WAPBL
1791 KASSERT(!(mp->mnt_wapbl_replay && mp->mnt_wapbl));
1792 if (mp->mnt_wapbl_replay) {
1793 KDASSERT(fs->fs_ronly);
1794 wapbl_replay_stop(mp->mnt_wapbl_replay);
1795 wapbl_replay_free(mp->mnt_wapbl_replay);
1796 mp->mnt_wapbl_replay = 0;
1797 }
1798 error = ffs_wapbl_stop(mp, doforce && (mntflags & MNT_FORCE));
1799 if (error) {
1800 return error;
1801 }
1802 #endif /* WAPBL */
1803
1804 if (ump->um_devvp->v_type != VBAD)
1805 spec_node_setmountedfs(ump->um_devvp, NULL);
1806 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1807 (void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD | FWRITE,
1808 NOCRED);
1809 vput(ump->um_devvp);
1810
1811 bsize = fs->fs_cssize;
1812 if (fs->fs_contigsumsize > 0)
1813 bsize += fs->fs_ncg * sizeof(int32_t);
1814 bsize += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1815 kmem_free(fs->fs_csp, bsize);
1816
1817 kmem_free(fs, fs->fs_sbsize);
1818 if (ump->um_oldfscompat != NULL)
1819 kmem_free(ump->um_oldfscompat, 512 + 3*sizeof(int32_t));
1820 mutex_destroy(&ump->um_lock);
1821 ffs_snapshot_fini(ump);
1822 kmem_free(ump, sizeof(*ump));
1823 mp->mnt_data = NULL;
1824 mp->mnt_flag &= ~MNT_LOCAL;
1825 return (0);
1826 }
1827
1828 /*
1829 * Flush out all the files in a filesystem.
1830 */
1831 int
1832 ffs_flushfiles(struct mount *mp, int flags, struct lwp *l)
1833 {
1834 extern int doforce;
1835 struct ufsmount *ump;
1836 int error;
1837
1838 if (!doforce)
1839 flags &= ~FORCECLOSE;
1840 ump = VFSTOUFS(mp);
1841 #ifdef QUOTA
1842 if ((error = quota1_umount(mp, flags)) != 0)
1843 return (error);
1844 #endif
1845 #ifdef QUOTA2
1846 if ((error = quota2_umount(mp, flags)) != 0)
1847 return (error);
1848 #endif
1849 #ifdef UFS_EXTATTR
1850 if (ump->um_fstype == UFS1) {
1851 if (ump->um_extattr.uepm_flags & UFS_EXTATTR_UEPM_STARTED)
1852 ufs_extattr_stop(mp, l);
1853 if (ump->um_extattr.uepm_flags & UFS_EXTATTR_UEPM_INITIALIZED)
1854 ufs_extattr_uepm_destroy(&ump->um_extattr);
1855 mp->mnt_flag &= ~MNT_EXTATTR;
1856 }
1857 #endif
1858 if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
1859 return (error);
1860 ffs_snapshot_unmount(mp);
1861 /*
1862 * Flush all the files.
1863 */
1864 error = vflush(mp, NULLVP, flags);
1865 if (error)
1866 return (error);
1867 /*
1868 * Flush filesystem metadata.
1869 */
1870 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1871 error = VOP_FSYNC(ump->um_devvp, l->l_cred, FSYNC_WAIT, 0, 0);
1872 VOP_UNLOCK(ump->um_devvp);
1873 if (flags & FORCECLOSE) /* XXXDBJ */
1874 error = 0;
1875
1876 #ifdef WAPBL
1877 if (error)
1878 return error;
1879 if (mp->mnt_wapbl) {
1880 error = wapbl_flush(mp->mnt_wapbl, 1);
1881 if (flags & FORCECLOSE)
1882 error = 0;
1883 }
1884 #endif
1885
1886 return (error);
1887 }
1888
1889 /*
1890 * Get file system statistics.
1891 */
1892 int
1893 ffs_statvfs(struct mount *mp, struct statvfs *sbp)
1894 {
1895 struct ufsmount *ump;
1896 struct fs *fs;
1897
1898 ump = VFSTOUFS(mp);
1899 fs = ump->um_fs;
1900 mutex_enter(&ump->um_lock);
1901 sbp->f_bsize = fs->fs_bsize;
1902 sbp->f_frsize = fs->fs_fsize;
1903 sbp->f_iosize = fs->fs_bsize;
1904 sbp->f_blocks = fs->fs_dsize;
1905 sbp->f_bfree = ffs_blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
1906 fs->fs_cstotal.cs_nffree + FFS_DBTOFSB(fs, fs->fs_pendingblocks);
1907 sbp->f_bresvd = ((u_int64_t) fs->fs_dsize * (u_int64_t)
1908 fs->fs_minfree) / (u_int64_t) 100;
1909 if (sbp->f_bfree > sbp->f_bresvd)
1910 sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
1911 else
1912 sbp->f_bavail = 0;
1913 sbp->f_files = fs->fs_ncg * fs->fs_ipg - UFS_ROOTINO;
1914 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1915 sbp->f_favail = sbp->f_ffree;
1916 sbp->f_fresvd = 0;
1917 mutex_exit(&ump->um_lock);
1918 copy_statvfs_info(sbp, mp);
1919
1920 return (0);
1921 }
1922
1923 struct ffs_sync_ctx {
1924 int waitfor;
1925 };
1926
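/*
 * Vnode iterator selector for ffs_sync(): pick the vnodes whose inode
 * times or cached data still need to be written back.
 */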
1927 static bool
1928 ffs_sync_selector(void *cl, struct vnode *vp)
1929 {
1930 struct ffs_sync_ctx *c = cl;
1931 struct inode *ip;
1932
1933 KASSERT(mutex_owned(vp->v_interlock));
1934
1935 ip = VTOI(vp);
1936 /*
1937 * Skip the vnode/inode if inaccessible.
1938 */
1939 if (ip == NULL || vp->v_type == VNON)
1940 return false;
1941
1942 /*
1943 * We deliberately update inode times here. This will
1944 * prevent a massive queue of updates accumulating, only
1945 * to be handled by a call to unmount.
1946 *
1947 * XXX It would be better to have the syncer trickle these
1948 * out. Adjustment needed to allow registering vnodes for
1949 * sync when the vnode is clean, but the inode dirty. Or
1950 * have ufs itself trickle out inode updates.
1951 *
1952 * If doing a lazy sync, we don't care about metadata or
1953 * data updates, because they are handled by each vnode's
1954 * synclist entry. In this case we are only interested in
1955 * writing back modified inodes.
1956 */
1957 if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE |
1958 IN_MODIFY | IN_MODIFIED | IN_ACCESSED)) == 0 &&
1959 (c->waitfor == MNT_LAZY || (LIST_EMPTY(&vp->v_dirtyblkhd) &&
1960 (vp->v_iflag & VI_ONWORKLST) == 0)))
1961 return false;
1962
1963 return true;
1964 }
1965
1966 /*
1967 * Go through the disk queues to initiate sandbagged IO;
1968 * go through the inodes to write those that have been modified;
1969 * initiate the writing of the super block if it has been modified.
1970 *
1971 * Note: we are always called with the filesystem marked `MPBUSY'.
1972 */
1973 int
1974 ffs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
1975 {
1976 struct vnode *vp;
1977 struct ufsmount *ump = VFSTOUFS(mp);
1978 struct fs *fs;
1979 struct vnode_iterator *marker;
1980 int error, allerror = 0;
1981 struct ffs_sync_ctx ctx;
1982
1983 fs = ump->um_fs;
1984 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
1985 panic("%s: rofs mod, fs=%s", __func__, fs->fs_fsmnt);
1986 }
1987
1988 /*
1989 * Write back each (modified) inode.
1990 */
1991 vfs_vnode_iterator_init(mp, &marker);
1992
1993 ctx.waitfor = waitfor;
1994 while ((vp = vfs_vnode_iterator_next(marker, ffs_sync_selector, &ctx)))
1995 {
1996 error = vn_lock(vp,
1997 LK_EXCLUSIVE | (waitfor == MNT_LAZY ? LK_NOWAIT : 0));
1998 if (error) {
1999 vrele(vp);
2000 continue;
2001 }
2002 if (waitfor == MNT_LAZY) {
2003 error = UFS_WAPBL_BEGIN(vp->v_mount);
2004 if (!error) {
2005 error = ffs_update(vp, NULL, NULL,
2006 UPDATE_CLOSE);
2007 UFS_WAPBL_END(vp->v_mount);
2008 }
2009 } else {
2010 error = VOP_FSYNC(vp, cred, FSYNC_NOLOG |
2011 (waitfor == MNT_WAIT ? FSYNC_WAIT : 0), 0, 0);
2012 }
2013 if (error)
2014 allerror = error;
2015 vput(vp);
2016 }
2017 vfs_vnode_iterator_destroy(marker);
2018
2019 /*
2020 * Force stale file system control information to be flushed.
2021 */
2022 if (waitfor != MNT_LAZY && (ump->um_devvp->v_numoutput > 0 ||
2023 !LIST_EMPTY(&ump->um_devvp->v_dirtyblkhd))) {
2024 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
2025 if ((error = VOP_FSYNC(ump->um_devvp, cred,
2026 (waitfor == MNT_WAIT ? FSYNC_WAIT : 0) | FSYNC_NOLOG,
2027 0, 0)) != 0)
2028 allerror = error;
2029 VOP_UNLOCK(ump->um_devvp);
2030 }
2031 #if defined(QUOTA) || defined(QUOTA2)
2032 qsync(mp);
2033 #endif
2034 /*
2035 * Write back modified superblock.
2036 */
2037 if (fs->fs_fmod != 0) {
2038 fs->fs_fmod = 0;
2039 fs->fs_time = time_second;
2040 error = UFS_WAPBL_BEGIN(mp);
2041 if (error)
2042 allerror = error;
2043 else {
2044 if ((error = ffs_cgupdate(ump, waitfor)))
2045 allerror = error;
2046 UFS_WAPBL_END(mp);
2047 }
2048 }
2049
2050 #ifdef WAPBL
2051 if (mp->mnt_wapbl) {
2052 error = wapbl_flush(mp->mnt_wapbl, (waitfor == MNT_WAIT));
2053 if (error)
2054 allerror = error;
2055 }
2056 #endif
2057
2058 return (allerror);
2059 }
2060
2061 /*
2062 * Load inode from disk and initialize vnode.
2063 */
2064 static int
2065 ffs_init_vnode(struct ufsmount *ump, struct vnode *vp, ino_t ino)
2066 {
2067 struct fs *fs;
2068 struct inode *ip;
2069 struct buf *bp;
2070 int error;
2071
2072 fs = ump->um_fs;
2073
2074 /* Read in the disk contents for the inode. */
2075 error = bread(ump->um_devvp, FFS_FSBTODB(fs, ino_to_fsba(fs, ino)),
2076 (int)fs->fs_bsize, 0, &bp);
2077 if (error)
2078 return error;
2079
2080 /* Allocate and initialize inode. */
2081 ip = pool_cache_get(ffs_inode_cache, PR_WAITOK);
2082 memset(ip, 0, sizeof(struct inode));
2083 ip->i_ump = ump;
2084 ip->i_fs = fs;
2085 ip->i_dev = ump->um_dev;
2086 ip->i_number = ino;
2087 if (ump->um_fstype == UFS1)
2088 ip->i_din.ffs1_din = pool_cache_get(ffs_dinode1_cache,
2089 PR_WAITOK);
2090 else
2091 ip->i_din.ffs2_din = pool_cache_get(ffs_dinode2_cache,
2092 PR_WAITOK);
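	/*
	 * ffs_load_inode() is expected to copy the on-disk dinode for
	 * this inode number out of the buffer into the dinode just
	 * allocated above (byte-swapping if the file system is
	 * opposite-endian) and to seed the in-core fields from it.
	 */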
2093 ffs_load_inode(bp, ip, fs, ino);
2094 brelse(bp, 0);
2095 ip->i_vnode = vp;
2096 #if defined(QUOTA) || defined(QUOTA2)
2097 ufsquota_init(ip);
2098 #endif
2099
2100 	/* Initialize vnode with this inode. */
2101 vp->v_tag = VT_UFS;
2102 vp->v_op = ffs_vnodeop_p;
2103 vp->v_data = ip;
2104
2105 /* Initialize genfs node. */
2106 genfs_node_init(vp, &ffs_genfsops);
2107
2108 return 0;
2109 }
2110
2111 /*
2112 * Undo ffs_init_vnode().
2113 */
2114 static void
2115 ffs_deinit_vnode(struct ufsmount *ump, struct vnode *vp)
2116 {
2117 struct inode *ip = VTOI(vp);
2118
2119 genfs_node_destroy(vp);
2120 vp->v_data = NULL;
2121
2122 if (ump->um_fstype == UFS1)
2123 pool_cache_put(ffs_dinode1_cache, ip->i_din.ffs1_din);
2124 else
2125 pool_cache_put(ffs_dinode2_cache, ip->i_din.ffs2_din);
2126 pool_cache_put(ffs_inode_cache, ip);
2127 }
2128
2129 /*
2130 * Read an inode from disk and initialize this vnode / inode pair.
2131 * Caller assures no other thread will try to load this inode.
2132 */
2133 int
2134 ffs_loadvnode(struct mount *mp, struct vnode *vp,
2135 const void *key, size_t key_len, const void **new_key)
2136 {
2137 ino_t ino;
2138 struct fs *fs;
2139 struct inode *ip;
2140 struct ufsmount *ump;
2141 int error;
2142
2143 KASSERT(key_len == sizeof(ino));
2144 memcpy(&ino, key, key_len);
2145 ump = VFSTOUFS(mp);
2146 fs = ump->um_fs;
2147
2148 error = ffs_init_vnode(ump, vp, ino);
2149 if (error)
2150 return error;
2151
2152 ip = VTOI(vp);
2153 if (ip->i_mode == 0) {
2154 ffs_deinit_vnode(ump, vp);
2155
2156 return ENOENT;
2157 }
2158
2159 /* Initialize the vnode from the inode. */
2160 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
2161
2162 /* Finish inode initialization. */
2163 ip->i_devvp = ump->um_devvp;
2164 vref(ip->i_devvp);
2165
2166 /*
2167 * Ensure that uid and gid are correct. This is a temporary
2168 * fix until fsck has been changed to do the update.
2169 */
2170
2171 if (fs->fs_magic == FS_UFS1_MAGIC && /* XXX */
2172 fs->fs_old_inodefmt < FS_44INODEFMT) { /* XXX */
2173 ip->i_uid = ip->i_ffs1_ouid; /* XXX */
2174 ip->i_gid = ip->i_ffs1_ogid; /* XXX */
2175 } /* XXX */
2176 uvm_vnp_setsize(vp, ip->i_size);
2177 cache_enter_id(vp, ip->i_mode, ip->i_uid, ip->i_gid, !HAS_ACLS(ip));
2178 *new_key = &ip->i_number;
2179 return 0;
2180 }
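
/*
 * Illustrative only: this loader is normally reached through the
 * vnode cache rather than called directly, roughly as in
 *
 *	ino_t ino = ...;
 *	struct vnode *vp;
 *	error = vcache_get(mp, &ino, sizeof(ino), &vp);
 *
 * so it runs once each time an inode is brought into the vnode
 * cache.  The key handed back through *new_key must outlive the
 * vnode, which is why the address of ip->i_number is returned
 * rather than that of the local copy.
 */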
2181
2182 /*
2183 * Create a new inode on disk and initialize this vnode / inode pair.
2184 */
2185 int
2186 ffs_newvnode(struct mount *mp, struct vnode *dvp, struct vnode *vp,
2187 struct vattr *vap, kauth_cred_t cred, void *extra,
2188 size_t *key_len, const void **new_key)
2189 {
2190 ino_t ino;
2191 struct fs *fs;
2192 struct inode *ip;
2193 struct timespec ts;
2194 struct ufsmount *ump;
2195 int error, mode;
2196
2197 KASSERT(dvp->v_mount == mp);
2198 KASSERT(vap->va_type != VNON);
2199
2200 *key_len = sizeof(ino);
2201 ump = VFSTOUFS(mp);
2202 fs = ump->um_fs;
2203 mode = MAKEIMODE(vap->va_type, vap->va_mode);
2204
2205 /* Allocate fresh inode. */
2206 error = ffs_valloc(dvp, mode, cred, &ino);
2207 if (error)
2208 return error;
2209
2210 /* Attach inode to vnode. */
2211 error = ffs_init_vnode(ump, vp, ino);
2212 if (error) {
2213 if (UFS_WAPBL_BEGIN(mp) == 0) {
2214 ffs_vfree(dvp, ino, mode);
2215 UFS_WAPBL_END(mp);
2216 }
2217 return error;
2218 }
2219
2220 ip = VTOI(vp);
2221 if (ip->i_mode) {
2222 panic("%s: dup alloc ino=%" PRId64 " on %s: mode %o/%o "
2223 "gen %x/%x size %" PRIx64 " blocks %" PRIx64,
2224 __func__, ino, fs->fs_fsmnt, DIP(ip, mode), ip->i_mode,
2225 DIP(ip, gen), ip->i_gen, DIP(ip, size), DIP(ip, blocks));
2226 }
2227 if (DIP(ip, size) || DIP(ip, blocks)) {
2228 printf("%s: ino=%" PRId64 " on %s: "
2229 		    "gen %x/%x has non-zero blocks %" PRIx64 " or size %"
2230 PRIx64 "\n",
2231 __func__, ino, fs->fs_fsmnt, DIP(ip, gen), ip->i_gen,
2232 DIP(ip, blocks), DIP(ip, size));
2233 if ((ip)->i_ump->um_fstype == UFS1)
2234 panic("%s: dirty filesystem?", __func__);
2235 DIP_ASSIGN(ip, blocks, 0);
2236 DIP_ASSIGN(ip, size, 0);
2237 }
2238
2239 /* Set uid / gid. */
2240 if (cred == NOCRED || cred == FSCRED) {
2241 ip->i_gid = 0;
2242 ip->i_uid = 0;
2243 } else {
2244 ip->i_gid = VTOI(dvp)->i_gid;
2245 ip->i_uid = kauth_cred_geteuid(cred);
2246 }
2247 DIP_ASSIGN(ip, gid, ip->i_gid);
2248 DIP_ASSIGN(ip, uid, ip->i_uid);
2249
2250 #if defined(QUOTA) || defined(QUOTA2)
2251 error = UFS_WAPBL_BEGIN(mp);
2252 if (error) {
2253 ffs_deinit_vnode(ump, vp);
2254
2255 return error;
2256 }
2257 error = chkiq(ip, 1, cred, 0);
2258 if (error) {
2259 ffs_vfree(dvp, ino, mode);
2260 UFS_WAPBL_END(mp);
2261 ffs_deinit_vnode(ump, vp);
2262
2263 return error;
2264 }
2265 UFS_WAPBL_END(mp);
2266 #endif
2267
2268 /* Set type and finalize. */
2269 ip->i_flags = 0;
2270 DIP_ASSIGN(ip, flags, 0);
2271 ip->i_mode = mode;
2272 DIP_ASSIGN(ip, mode, mode);
2273 if (vap->va_rdev != VNOVAL) {
2274 /*
2275 * Want to be able to use this to make badblock
2276 * inodes, so don't truncate the dev number.
2277 */
2278 if (ump->um_fstype == UFS1)
2279 ip->i_ffs1_rdev = ufs_rw32(vap->va_rdev,
2280 UFS_MPNEEDSWAP(ump));
2281 else
2282 ip->i_ffs2_rdev = ufs_rw64(vap->va_rdev,
2283 UFS_MPNEEDSWAP(ump));
2284 }
2285 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
2286 ip->i_devvp = ump->um_devvp;
2287 vref(ip->i_devvp);
2288
2289 /* Set up a new generation number for this inode. */
2290 ip->i_gen++;
2291 DIP_ASSIGN(ip, gen, ip->i_gen);
2292 if (fs->fs_magic == FS_UFS2_MAGIC) {
2293 vfs_timestamp(&ts);
2294 ip->i_ffs2_birthtime = ts.tv_sec;
2295 ip->i_ffs2_birthnsec = ts.tv_nsec;
2296 }
2297
2298 uvm_vnp_setsize(vp, ip->i_size);
2299 cache_enter_id(vp, ip->i_mode, ip->i_uid, ip->i_gid, !HAS_ACLS(ip));
2300 *new_key = &ip->i_number;
2301 return 0;
2302 }
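
/*
 * Illustrative only: as with ffs_loadvnode() above, callers are
 * expected to reach this through the vnode cache (vcache_new()),
 * which supplies the pre-allocated vnode and later publishes it
 * under the key returned via *new_key.
 */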
2303
2304 /*
2305 * File handle to vnode
2306 *
2307 * Have to be really careful about stale file handles:
2308 * - check that the inode number is valid
2309  * - call ufs_fhtovp() to get the locked vnode
2310  * - check for an unallocated inode (i_mode == 0)
2311  * - check that the generation number in the handle matches the
2312  *   inode's, so a stale handle fails with ESTALE
2313 */
2314 int
2315 ffs_fhtovp(struct mount *mp, struct fid *fhp, int lktype, struct vnode **vpp)
2316 {
2317 struct ufid ufh;
2318 int error;
2319
2320 if (fhp->fid_len != sizeof(struct ufid))
2321 return EINVAL;
2322
2323 memcpy(&ufh, fhp, sizeof(ufh));
2324 if ((error = ffs_checkrange(mp, ufh.ufid_ino)) != 0)
2325 return error;
2326
2327 return (ufs_fhtovp(mp, &ufh, lktype, vpp));
2328 }
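
/*
 * The generation number stored in the handle is expected to be
 * compared against the inode's i_gen inside ufs_fhtovp(), so a
 * handle that refers to a since-recycled inode comes back ESTALE
 * rather than resolving to the wrong file.
 */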
2329
2330 /*
2331 * Vnode pointer to File handle
2332 */
2333 /* ARGSUSED */
2334 int
2335 ffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
2336 {
2337 struct inode *ip;
2338 struct ufid ufh;
2339
2340 if (*fh_size < sizeof(struct ufid)) {
2341 *fh_size = sizeof(struct ufid);
2342 return E2BIG;
2343 }
2344 ip = VTOI(vp);
2345 *fh_size = sizeof(struct ufid);
2346 memset(&ufh, 0, sizeof(ufh));
2347 ufh.ufid_len = sizeof(struct ufid);
2348 ufh.ufid_ino = ip->i_number;
2349 ufh.ufid_gen = ip->i_gen;
2350 memcpy(fhp, &ufh, sizeof(ufh));
2351 return (0);
2352 }
2353
2354 void
2355 ffs_init(void)
2356 {
2357 if (ffs_initcount++ > 0)
2358 return;
2359
2360 ffs_inode_cache = pool_cache_init(sizeof(struct inode), 0, 0, 0,
2361 "ffsino", NULL, IPL_NONE, NULL, NULL, NULL);
2362 ffs_dinode1_cache = pool_cache_init(sizeof(struct ufs1_dinode), 0, 0, 0,
2363 "ffsdino1", NULL, IPL_NONE, NULL, NULL, NULL);
2364 ffs_dinode2_cache = pool_cache_init(sizeof(struct ufs2_dinode), 0, 0, 0,
2365 "ffsdino2", NULL, IPL_NONE, NULL, NULL, NULL);
2366 ufs_init();
2367 }
2368
2369 void
2370 ffs_reinit(void)
2371 {
2372 ufs_reinit();
2373 }
2374
2375 void
2376 ffs_done(void)
2377 {
2378 if (--ffs_initcount > 0)
2379 return;
2380
2381 ufs_done();
2382 pool_cache_destroy(ffs_dinode2_cache);
2383 pool_cache_destroy(ffs_dinode1_cache);
2384 pool_cache_destroy(ffs_inode_cache);
2385 }
2386
2387 /*
2388 * Write a superblock and associated information back to disk.
2389 */
2390 int
2391 ffs_sbupdate(struct ufsmount *mp, int waitfor)
2392 {
2393 struct fs *fs = mp->um_fs;
2394 struct buf *bp;
2395 int error;
2396 u_int32_t saveflag;
2397
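	/*
	 * fs_sblockloc is a byte offset (typically 65536 for UFS2 and
	 * 8192 for UFS1), so dividing by DEV_BSIZE (512) yields the
	 * device block of the superblock: 128 or 16 respectively.
	 */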
2398 error = ffs_getblk(mp->um_devvp,
2399 fs->fs_sblockloc / DEV_BSIZE, FFS_NOBLK,
2400 fs->fs_sbsize, false, &bp);
2401 if (error)
2402 return error;
2403 saveflag = fs->fs_flags & FS_INTERNAL;
2404 fs->fs_flags &= ~FS_INTERNAL;
2405
2406 memcpy(bp->b_data, fs, fs->fs_sbsize);
2407
2408 ffs_oldfscompat_write((struct fs *)bp->b_data, mp);
2409 if (mp->um_flags & UFS_EA) {
2410 struct fs *bfs = (struct fs *)bp->b_data;
2411 KASSERT(bfs->fs_magic == FS_UFS2_MAGIC);
2412 bfs->fs_magic = FS_UFS2EA_MAGIC;
2413 }
2414 #ifdef FFS_EI
2415 if (mp->um_flags & UFS_NEEDSWAP)
2416 ffs_sb_swap((struct fs *)bp->b_data, (struct fs *)bp->b_data);
2417 #endif
2418 fs->fs_flags |= saveflag;
2419
2420 if (waitfor == MNT_WAIT)
2421 error = bwrite(bp);
2422 else
2423 bawrite(bp);
2424 return (error);
2425 }
2426
2427 int
2428 ffs_cgupdate(struct ufsmount *mp, int waitfor)
2429 {
2430 struct fs *fs = mp->um_fs;
2431 struct buf *bp;
2432 int blks;
2433 void *space;
2434 int i, size, error = 0, allerror = 0;
2435
2436 UFS_WAPBL_JLOCK_ASSERT(mp->um_mountp);
2437
2438 allerror = ffs_sbupdate(mp, waitfor);
2439 blks = howmany(fs->fs_cssize, fs->fs_fsize);
2440 space = fs->fs_csp;
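	/*
	 * The cylinder-group summary area (fs_csp) is written back in
	 * fs_bsize chunks of fs_frag fragments each; the final chunk
	 * is trimmed to however many fragments of fs_cssize remain.
	 */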
2441 for (i = 0; i < blks; i += fs->fs_frag) {
2442 size = fs->fs_bsize;
2443 if (i + fs->fs_frag > blks)
2444 size = (blks - i) * fs->fs_fsize;
2445 error = ffs_getblk(mp->um_devvp, FFS_FSBTODB(fs, fs->fs_csaddr + i),
2446 FFS_NOBLK, size, false, &bp);
2447 if (error)
2448 break;
2449 #ifdef FFS_EI
2450 if (mp->um_flags & UFS_NEEDSWAP)
2451 ffs_csum_swap((struct csum*)space,
2452 (struct csum*)bp->b_data, size);
2453 else
2454 #endif
2455 memcpy(bp->b_data, space, (u_int)size);
2456 space = (char *)space + size;
2457 if (waitfor == MNT_WAIT)
2458 error = bwrite(bp);
2459 else
2460 bawrite(bp);
2461 }
2462 if (!allerror && error)
2463 allerror = error;
2464 return (allerror);
2465 }
2466
2467 int
2468 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
2469 int attrnamespace, const char *attrname)
2470 {
2471 #ifdef UFS_EXTATTR
2472 /*
2473 * File-backed extended attributes are only supported on UFS1.
2474 * UFS2 has native extended attributes.
2475 */
2476 if (VFSTOUFS(mp)->um_fstype == UFS1)
2477 return (ufs_extattrctl(mp, cmd, vp, attrnamespace, attrname));
2478 #endif
2479 return (vfs_stdextattrctl(mp, cmd, vp, attrnamespace, attrname));
2480 }
2481
2482 /*
2483  * Sync the block device vnode backing a mounted file system.
2484 */
2485 static int
2486 ffs_vfs_fsync(vnode_t *vp, int flags)
2487 {
2488 int error, i, pflags;
2489 #ifdef WAPBL
2490 struct mount *mp;
2491 #endif
2492
2493 KASSERT(vp->v_type == VBLK);
2494 KASSERT(spec_node_getmountedfs(vp) != NULL);
2495
2496 /*
2497 * Flush all dirty data associated with the vnode.
2498 */
2499 pflags = PGO_ALLPAGES | PGO_CLEANIT;
2500 if ((flags & FSYNC_WAIT) != 0)
2501 pflags |= PGO_SYNCIO;
2502 rw_enter(vp->v_uobj.vmobjlock, RW_WRITER);
2503 error = VOP_PUTPAGES(vp, 0, 0, pflags);
2504 if (error)
2505 return error;
2506
2507 #ifdef WAPBL
2508 mp = spec_node_getmountedfs(vp);
2509 if (mp && mp->mnt_wapbl) {
2510 /*
2511 * Don't bother writing out metadata if the syncer is
2512 * making the request. We will let the sync vnode
2513 * write it out in a single burst through a call to
2514 * VFS_SYNC().
2515 */
2516 if ((flags & (FSYNC_DATAONLY | FSYNC_LAZY | FSYNC_NOLOG)) != 0)
2517 return 0;
2518
2519 /*
2520 * Don't flush the log if the vnode being flushed
2521 * contains no dirty buffers that could be in the log.
2522 */
2523 if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
2524 VOP_UNLOCK(vp);
2525 error = wapbl_flush(mp->mnt_wapbl, 0);
2526 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2527 if (error)
2528 return error;
2529 }
2530
2531 if ((flags & FSYNC_WAIT) != 0) {
2532 mutex_enter(vp->v_interlock);
2533 while (vp->v_numoutput)
2534 cv_wait(&vp->v_cv, vp->v_interlock);
2535 mutex_exit(vp->v_interlock);
2536 }
2537
2538 return 0;
2539 }
2540 #endif /* WAPBL */
2541
2542 error = vflushbuf(vp, flags);
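	/*
	 * FSYNC_CACHE additionally asks the underlying drive to flush
	 * its volatile write cache via the DIOCCACHESYNC ioctl; the
	 * vnode lock is dropped while the ioctl runs.
	 */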
2543 if (error == 0 && (flags & FSYNC_CACHE) != 0) {
2544 i = 1;
2545 VOP_UNLOCK(vp);
2546 (void)VOP_IOCTL(vp, DIOCCACHESYNC, &i, FWRITE,
2547 kauth_cred_get());
2548 vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
2549 }
2550
2551 return error;
2552 }
2553