1 /* $NetBSD: ffs_vfsops.c,v 1.281 2012/12/20 08:03:44 hannken Exp $ */
2
3 /*-
4 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Wasabi Systems, Inc, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1989, 1991, 1993, 1994
34 * The Regents of the University of California. All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
61 */
62
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.281 2012/12/20 08:03:44 hannken Exp $");
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_ffs.h"
68 #include "opt_quota.h"
69 #include "opt_wapbl.h"
70 #endif
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/namei.h>
75 #include <sys/proc.h>
76 #include <sys/kernel.h>
77 #include <sys/vnode.h>
78 #include <sys/socket.h>
79 #include <sys/mount.h>
80 #include <sys/buf.h>
81 #include <sys/device.h>
82 #include <sys/disk.h>
83 #include <sys/mbuf.h>
84 #include <sys/file.h>
85 #include <sys/disklabel.h>
86 #include <sys/ioctl.h>
87 #include <sys/errno.h>
88 #include <sys/kmem.h>
89 #include <sys/pool.h>
90 #include <sys/lock.h>
91 #include <sys/sysctl.h>
92 #include <sys/conf.h>
93 #include <sys/kauth.h>
94 #include <sys/wapbl.h>
95 #include <sys/fstrans.h>
96 #include <sys/module.h>
97
98 #include <miscfs/genfs/genfs.h>
99 #include <miscfs/specfs/specdev.h>
100
101 #include <ufs/ufs/quota.h>
102 #include <ufs/ufs/ufsmount.h>
103 #include <ufs/ufs/inode.h>
104 #include <ufs/ufs/dir.h>
105 #include <ufs/ufs/ufs_extern.h>
106 #include <ufs/ufs/ufs_bswap.h>
107 #include <ufs/ufs/ufs_wapbl.h>
108
109 #include <ufs/ffs/fs.h>
110 #include <ufs/ffs/ffs_extern.h>
111
112 MODULE(MODULE_CLASS_VFS, ffs, NULL);
113
114 static int ffs_vfs_fsync(vnode_t *, int);
115
116 static struct sysctllog *ffs_sysctl_log;
117
118 static kauth_listener_t ffs_snapshot_listener;
119
120 /* how many times ffs_init() was called */
121 int ffs_initcount = 0;
122
123 extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
124 extern const struct vnodeopv_desc ffs_specop_opv_desc;
125 extern const struct vnodeopv_desc ffs_fifoop_opv_desc;
126
127 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
128 &ffs_vnodeop_opv_desc,
129 &ffs_specop_opv_desc,
130 &ffs_fifoop_opv_desc,
131 NULL,
132 };
133
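/*
 * Positional initializer: the entries follow the field order of
 * struct vfsops as declared in <sys/mount.h>.
 */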
134 struct vfsops ffs_vfsops = {
135 MOUNT_FFS,
136 sizeof (struct ufs_args),
137 ffs_mount,
138 ufs_start,
139 ffs_unmount,
140 ufs_root,
141 ufs_quotactl,
142 ffs_statvfs,
143 ffs_sync,
144 ffs_vget,
145 ffs_fhtovp,
146 ffs_vptofh,
147 ffs_init,
148 ffs_reinit,
149 ffs_done,
150 ffs_mountroot,
151 ffs_snapshot,
152 ffs_extattrctl,
153 ffs_suspendctl,
154 genfs_renamelock_enter,
155 genfs_renamelock_exit,
156 ffs_vfs_fsync,
157 ffs_vnodeopv_descs,
158 0,
159 { NULL, NULL },
160 };
161
162 static const struct genfs_ops ffs_genfsops = {
163 .gop_size = ffs_gop_size,
164 .gop_alloc = ufs_gop_alloc,
165 .gop_write = genfs_gop_write,
166 .gop_markupdate = ufs_gop_markupdate,
167 };
168
169 static const struct ufs_ops ffs_ufsops = {
170 .uo_itimes = ffs_itimes,
171 .uo_update = ffs_update,
172 .uo_truncate = ffs_truncate,
173 .uo_valloc = ffs_valloc,
174 .uo_vfree = ffs_vfree,
175 .uo_balloc = ffs_balloc,
176 .uo_unmark_vnode = (void (*)(vnode_t *))nullop,
177 };
178
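/*
 * kauth(9) listener on the system scope: allow fs_snapshot requests
 * when the caller's effective uid owns the snapshot vnode, otherwise
 * defer the decision to other listeners.
 */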
179 static int
180 ffs_snapshot_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
181 void *arg0, void *arg1, void *arg2, void *arg3)
182 {
183 vnode_t *vp = arg2;
184 int result = KAUTH_RESULT_DEFER;
185
186 if (action != KAUTH_SYSTEM_FS_SNAPSHOT)
187 return result;
188
189 if (VTOI(vp)->i_uid == kauth_cred_geteuid(cred))
190 result = KAUTH_RESULT_ALLOW;
191
192 return result;
193 }
194
195 static int
196 ffs_modcmd(modcmd_t cmd, void *arg)
197 {
198 int error;
199
200 #if 0
201 extern int doasyncfree;
202 #endif
203 #ifdef UFS_EXTATTR
204 extern int ufs_extattr_autocreate;
205 #endif
206 extern int ffs_log_changeopt;
207
208 switch (cmd) {
209 case MODULE_CMD_INIT:
210 error = vfs_attach(&ffs_vfsops);
211 if (error != 0)
212 break;
213
214 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
215 CTLFLAG_PERMANENT,
216 CTLTYPE_NODE, "vfs", NULL,
217 NULL, 0, NULL, 0,
218 CTL_VFS, CTL_EOL);
219 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
220 CTLFLAG_PERMANENT,
221 CTLTYPE_NODE, "ffs",
222 SYSCTL_DESCR("Berkeley Fast File System"),
223 NULL, 0, NULL, 0,
224 CTL_VFS, 1, CTL_EOL);
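/*
 * The sysctl nodes below all hang off the vfs.ffs node
 * (CTL_VFS, 1) created above.
 */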
225 /*
226 * @@@ should we even bother with these first three?
227 */
228 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
229 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
230 CTLTYPE_INT, "doclusterread", NULL,
231 sysctl_notavail, 0, NULL, 0,
232 CTL_VFS, 1, FFS_CLUSTERREAD, CTL_EOL);
233 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
234 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
235 CTLTYPE_INT, "doclusterwrite", NULL,
236 sysctl_notavail, 0, NULL, 0,
237 CTL_VFS, 1, FFS_CLUSTERWRITE, CTL_EOL);
238 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
239 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
240 CTLTYPE_INT, "doreallocblks", NULL,
241 sysctl_notavail, 0, NULL, 0,
242 CTL_VFS, 1, FFS_REALLOCBLKS, CTL_EOL);
243 #if 0
244 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
245 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
246 CTLTYPE_INT, "doasyncfree",
247 SYSCTL_DESCR("Release dirty blocks asynchronously"),
248 NULL, 0, &doasyncfree, 0,
249 CTL_VFS, 1, FFS_ASYNCFREE, CTL_EOL);
250 #endif
251 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
252 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
253 CTLTYPE_INT, "log_changeopt",
254 SYSCTL_DESCR("Log changes in optimization strategy"),
255 NULL, 0, &ffs_log_changeopt, 0,
256 CTL_VFS, 1, FFS_LOG_CHANGEOPT, CTL_EOL);
257 #ifdef UFS_EXTATTR
258 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
259 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
260 CTLTYPE_INT, "extattr_autocreate",
261 SYSCTL_DESCR("Size of attribute for "
262 "backing file autocreation"),
263 NULL, 0, &ufs_extattr_autocreate, 0,
264 CTL_VFS, 1, FFS_EXTATTR_AUTOCREATE, CTL_EOL);
265
266 #endif /* UFS_EXTATTR */
267
268 ffs_snapshot_listener = kauth_listen_scope(KAUTH_SCOPE_SYSTEM,
269 ffs_snapshot_cb, NULL);
270 if (ffs_snapshot_listener == NULL)
271 printf("ffs_modcmd: can't listen on system scope.\n");
272
273 break;
274 case MODULE_CMD_FINI:
275 error = vfs_detach(&ffs_vfsops);
276 if (error != 0)
277 break;
278 sysctl_teardown(&ffs_sysctl_log);
279 if (ffs_snapshot_listener != NULL)
280 kauth_unlisten_scope(ffs_snapshot_listener);
281 break;
282 default:
283 error = ENOTTY;
284 break;
285 }
286
287 return (error);
288 }
289
290 pool_cache_t ffs_inode_cache;
291 pool_cache_t ffs_dinode1_cache;
292 pool_cache_t ffs_dinode2_cache;
293
294 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, daddr_t);
295 static void ffs_oldfscompat_write(struct fs *, struct ufsmount *);
296
297 /*
298 * Called by main() when ffs is going to be mounted as root.
299 */
300
301 int
302 ffs_mountroot(void)
303 {
304 struct fs *fs;
305 struct mount *mp;
306 struct lwp *l = curlwp; /* XXX */
307 struct ufsmount *ump;
308 int error;
309
310 if (device_class(root_device) != DV_DISK)
311 return (ENODEV);
312
313 if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
314 vrele(rootvp);
315 return (error);
316 }
317
318 /*
319 * We always need to be able to mount the root file system.
320 */
321 mp->mnt_flag |= MNT_FORCE;
322 if ((error = ffs_mountfs(rootvp, mp, l)) != 0) {
323 vfs_unbusy(mp, false, NULL);
324 vfs_destroy(mp);
325 return (error);
326 }
327 mp->mnt_flag &= ~MNT_FORCE;
328 mutex_enter(&mountlist_lock);
329 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
330 mutex_exit(&mountlist_lock);
331 ump = VFSTOUFS(mp);
332 fs = ump->um_fs;
333 memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
334 (void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
335 (void)ffs_statvfs(mp, &mp->mnt_stat);
336 vfs_unbusy(mp, false, NULL);
337 setrootfstime((time_t)fs->fs_time);
338 return (0);
339 }
340
341 /*
342 * VFS Operations.
343 *
344 * mount system call
345 */
346 int
347 ffs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
348 {
349 struct lwp *l = curlwp;
350 struct vnode *devvp = NULL;
351 struct ufs_args *args = data;
352 struct ufsmount *ump = NULL;
353 struct fs *fs;
354 int error = 0, flags, update;
355 mode_t accessmode;
356
357 if (*data_len < sizeof *args)
358 return EINVAL;
359
360 if (mp->mnt_flag & MNT_GETARGS) {
361 ump = VFSTOUFS(mp);
362 if (ump == NULL)
363 return EIO;
364 args->fspec = NULL;
365 *data_len = sizeof *args;
366 return 0;
367 }
368
369 update = mp->mnt_flag & MNT_UPDATE;
370
371 /* Check arguments */
372 if (args->fspec != NULL) {
373 /*
374 * Look up the name and verify that it's sane.
375 */
376 error = namei_simple_user(args->fspec,
377 NSM_FOLLOW_NOEMULROOT, &devvp);
378 if (error != 0)
379 return (error);
380
381 if (!update) {
382 /*
383 * Be sure this is a valid block device
384 */
385 if (devvp->v_type != VBLK)
386 error = ENOTBLK;
387 else if (bdevsw_lookup(devvp->v_rdev) == NULL)
388 error = ENXIO;
389 } else {
390 /*
391 * Be sure we're still naming the same device
392 * used for our initial mount
393 */
394 ump = VFSTOUFS(mp);
395 if (devvp != ump->um_devvp) {
396 if (devvp->v_rdev != ump->um_devvp->v_rdev)
397 error = EINVAL;
398 else {
399 vrele(devvp);
400 devvp = ump->um_devvp;
401 vref(devvp);
402 }
403 }
404 }
405 } else {
406 if (!update) {
407 /* New mounts must have a filename for the device */
408 return (EINVAL);
409 } else {
410 /* Use the extant mount */
411 ump = VFSTOUFS(mp);
412 devvp = ump->um_devvp;
413 vref(devvp);
414 }
415 }
416
417 /*
418 * If mount by non-root, then verify that user has necessary
419 * permissions on the device.
420 *
421 * Permission to update a mount is checked higher, so here we presume
422 * updating the mount is okay (for example, as far as securelevel goes)
423 * which leaves us with the normal check.
424 */
425 if (error == 0) {
426 accessmode = VREAD;
427 if (update ?
428 (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
429 (mp->mnt_flag & MNT_RDONLY) == 0)
430 accessmode |= VWRITE;
431 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
432 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
433 KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp,
434 KAUTH_ARG(accessmode));
435 VOP_UNLOCK(devvp);
436 }
437
438 if (error) {
439 vrele(devvp);
440 return (error);
441 }
442
443 #ifdef WAPBL
444 /* WAPBL can only be enabled on a r/w mount. */
445 if ((mp->mnt_flag & MNT_RDONLY) && !(mp->mnt_iflag & IMNT_WANTRDWR)) {
446 mp->mnt_flag &= ~MNT_LOG;
447 }
448 #else /* !WAPBL */
449 mp->mnt_flag &= ~MNT_LOG;
450 #endif /* !WAPBL */
451
452 if (!update) {
453 int xflags;
454
455 if (mp->mnt_flag & MNT_RDONLY)
456 xflags = FREAD;
457 else
458 xflags = FREAD | FWRITE;
459 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
460 error = VOP_OPEN(devvp, xflags, FSCRED);
461 VOP_UNLOCK(devvp);
462 if (error)
463 goto fail;
464 error = ffs_mountfs(devvp, mp, l);
465 if (error) {
466 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
467 (void)VOP_CLOSE(devvp, xflags, NOCRED);
468 VOP_UNLOCK(devvp);
469 goto fail;
470 }
471
472 ump = VFSTOUFS(mp);
473 fs = ump->um_fs;
474 } else {
475 /*
476 * Update the mount.
477 */
478
479 /*
480 * The initial mount got a reference on this
481 * device, so drop the one obtained via
482 * namei(), above.
483 */
484 vrele(devvp);
485
486 ump = VFSTOUFS(mp);
487 fs = ump->um_fs;
488 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
489 /*
490 * Changing from r/w to r/o
491 */
492 flags = WRITECLOSE;
493 if (mp->mnt_flag & MNT_FORCE)
494 flags |= FORCECLOSE;
495 error = ffs_flushfiles(mp, flags, l);
496 if (error == 0)
497 error = UFS_WAPBL_BEGIN(mp);
498 if (error == 0 &&
499 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
500 fs->fs_clean & FS_WASCLEAN) {
501 if (mp->mnt_flag & MNT_SOFTDEP)
502 fs->fs_flags &= ~FS_DOSOFTDEP;
503 fs->fs_clean = FS_ISCLEAN;
504 (void) ffs_sbupdate(ump, MNT_WAIT);
505 }
506 if (error == 0)
507 UFS_WAPBL_END(mp);
508 if (error)
509 return (error);
510 }
511
512 #ifdef WAPBL
513 if ((mp->mnt_flag & MNT_LOG) == 0) {
514 error = ffs_wapbl_stop(mp, mp->mnt_flag & MNT_FORCE);
515 if (error)
516 return error;
517 }
518 #endif /* WAPBL */
519
520 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
521 /*
522 * Finish change from r/w to r/o
523 */
524 fs->fs_ronly = 1;
525 fs->fs_fmod = 0;
526 }
527
528 if (mp->mnt_flag & MNT_RELOAD) {
529 error = ffs_reload(mp, l->l_cred, l);
530 if (error)
531 return (error);
532 }
533
534 if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
535 /*
536 * Changing from read-only to read/write
537 */
538 #ifndef QUOTA2
539 if (fs->fs_flags & FS_DOQUOTA2) {
540 ump->um_flags |= UFS_QUOTA2;
541 uprintf("%s: options QUOTA2 not enabled%s\n",
542 mp->mnt_stat.f_mntonname,
543 (mp->mnt_flag & MNT_FORCE) ? "" :
544 ", not mounting");
545 return EINVAL;
546 }
547 #endif
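/*
 * The file system is about to be written: shifting fs_clean turns
 * FS_ISCLEAN into FS_WASCLEAN, and fs_fmod marks the superblock dirty.
 */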
548 fs->fs_ronly = 0;
549 fs->fs_clean <<= 1;
550 fs->fs_fmod = 1;
551 #ifdef WAPBL
552 if (fs->fs_flags & FS_DOWAPBL) {
553 printf("%s: replaying log to disk\n",
554 mp->mnt_stat.f_mntonname);
555 KDASSERT(mp->mnt_wapbl_replay);
556 error = wapbl_replay_write(mp->mnt_wapbl_replay,
557 devvp);
558 if (error) {
559 return error;
560 }
561 wapbl_replay_stop(mp->mnt_wapbl_replay);
562 fs->fs_clean = FS_WASCLEAN;
563 }
564 #endif /* WAPBL */
565 if (fs->fs_snapinum[0] != 0)
566 ffs_snapshot_mount(mp);
567 }
568
569 #ifdef WAPBL
570 error = ffs_wapbl_start(mp);
571 if (error)
572 return error;
573 #endif /* WAPBL */
574
575 #ifdef QUOTA2
576 if (!fs->fs_ronly) {
577 error = ffs_quota2_mount(mp);
578 if (error) {
579 return error;
580 }
581 }
582 #endif
583
584 if ((mp->mnt_flag & MNT_DISCARD) && !(ump->um_discarddata))
585 ump->um_discarddata = ffs_discard_init(devvp, fs);
586
587 if (args->fspec == NULL)
588 return 0;
589 }
590
591 error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
592 UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
593 if (error == 0)
594 (void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
595 sizeof(fs->fs_fsmnt));
596 fs->fs_flags &= ~FS_DOSOFTDEP;
597 if (fs->fs_fmod != 0) { /* XXX */
598 int err;
599
600 fs->fs_fmod = 0;
601 if (fs->fs_clean & FS_WASCLEAN)
602 fs->fs_time = time_second;
603 else {
604 printf("%s: file system not clean (fs_clean=%#x); "
605 "please fsck(8)\n", mp->mnt_stat.f_mntfromname,
606 fs->fs_clean);
607 printf("%s: lost blocks %" PRId64 " files %d\n",
608 mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
609 fs->fs_pendinginodes);
610 }
611 err = UFS_WAPBL_BEGIN(mp);
612 if (err == 0) {
613 (void) ffs_cgupdate(ump, MNT_WAIT);
614 UFS_WAPBL_END(mp);
615 }
616 }
617 if ((mp->mnt_flag & MNT_SOFTDEP) != 0) {
618 printf("%s: `-o softdep' is no longer supported, "
619 "consider `-o log'\n", mp->mnt_stat.f_mntfromname);
620 mp->mnt_flag &= ~MNT_SOFTDEP;
621 }
622
623 return (error);
624
625 fail:
626 vrele(devvp);
627 return (error);
628 }
629
630 /*
631 * Reload all incore data for a filesystem (used after running fsck on
632 * the root filesystem and finding things to fix). The filesystem must
633 * be mounted read-only.
634 *
635 * Things to do to update the mount:
636 * 1) invalidate all cached meta-data.
637 * 2) re-read superblock from disk.
638 * 3) re-read summary information from disk.
639 * 4) invalidate all inactive vnodes.
640 * 5) invalidate all cached file data.
641 * 6) re-read inode data for all active vnodes.
642 */
643 int
644 ffs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
645 {
646 struct vnode *vp, *mvp, *devvp;
647 struct inode *ip;
648 void *space;
649 struct buf *bp;
650 struct fs *fs, *newfs;
651 struct dkwedge_info dkw;
652 int i, bsize, blks, error;
653 int32_t *lp;
654 struct ufsmount *ump;
655 daddr_t sblockloc;
656
657 if ((mp->mnt_flag & MNT_RDONLY) == 0)
658 return (EINVAL);
659
660 ump = VFSTOUFS(mp);
661 /*
662 * Step 1: invalidate all cached meta-data.
663 */
664 devvp = ump->um_devvp;
665 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
666 error = vinvalbuf(devvp, 0, cred, l, 0, 0);
667 VOP_UNLOCK(devvp);
668 if (error)
669 panic("ffs_reload: dirty1");
670 /*
671 * Step 2: re-read superblock from disk.
672 */
673 fs = ump->um_fs;
674
675 /* XXX we don't handle possibility that superblock moved. */
676 error = bread(devvp, fs->fs_sblockloc / DEV_BSIZE, fs->fs_sbsize,
677 NOCRED, 0, &bp);
678 if (error) {
679 return (error);
680 }
681 newfs = kmem_alloc(fs->fs_sbsize, KM_SLEEP);
682 memcpy(newfs, bp->b_data, fs->fs_sbsize);
683 #ifdef FFS_EI
684 if (ump->um_flags & UFS_NEEDSWAP) {
685 ffs_sb_swap((struct fs*)bp->b_data, newfs);
686 fs->fs_flags |= FS_SWAPPED;
687 } else
688 #endif
689 fs->fs_flags &= ~FS_SWAPPED;
690 if ((newfs->fs_magic != FS_UFS1_MAGIC &&
691 newfs->fs_magic != FS_UFS2_MAGIC) ||
692 newfs->fs_bsize > MAXBSIZE ||
693 newfs->fs_bsize < sizeof(struct fs)) {
694 brelse(bp, 0);
695 kmem_free(newfs, fs->fs_sbsize);
696 return (EIO); /* XXX needs translation */
697 }
698 /* Store off old fs_sblockloc for ffs_oldfscompat_read(). */
699 sblockloc = fs->fs_sblockloc;
700 /*
701 * Copy pointer fields back into superblock before copying in XXX
702 * new superblock. These should really be in the ufsmount. XXX
703 * Note that important parameters (eg fs_ncg) are unchanged.
704 */
705 newfs->fs_csp = fs->fs_csp;
706 newfs->fs_maxcluster = fs->fs_maxcluster;
707 newfs->fs_contigdirs = fs->fs_contigdirs;
708 newfs->fs_ronly = fs->fs_ronly;
709 newfs->fs_active = fs->fs_active;
710 memcpy(fs, newfs, (u_int)fs->fs_sbsize);
711 brelse(bp, 0);
712 kmem_free(newfs, fs->fs_sbsize);
713
714 /* Recheck for apple UFS filesystem */
715 ump->um_flags &= ~UFS_ISAPPLEUFS;
716 /* First check to see if this is tagged as an Apple UFS filesystem
717 * in the disklabel
718 */
719 if (getdiskinfo(devvp, &dkw) == 0 &&
720 strcmp(dkw.dkw_ptype, DKW_PTYPE_APPLEUFS) == 0)
721 ump->um_flags |= UFS_ISAPPLEUFS;
722 #ifdef APPLE_UFS
723 else {
724 /* Manually look for an apple ufs label, and if a valid one
725 * is found, then treat it like an Apple UFS filesystem anyway
726 *
727 * EINVAL most likely indicates a block size or alignment problem;
728 * in that case it is unlikely that this is an Apple UFS filesystem.
729 */
730 error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / DEV_BSIZE),
731 APPLEUFS_LABEL_SIZE, cred, 0, &bp);
732 if (error && error != EINVAL) {
733 return (error);
734 }
735 if (error == 0) {
736 error = ffs_appleufs_validate(fs->fs_fsmnt,
737 (struct appleufslabel *)bp->b_data, NULL);
738 if (error == 0)
739 ump->um_flags |= UFS_ISAPPLEUFS;
740 brelse(bp, 0);
741 }
742 bp = NULL;
743 }
744 #else
745 if (ump->um_flags & UFS_ISAPPLEUFS)
746 return (EIO);
747 #endif
748
749 if (UFS_MPISAPPLEUFS(ump)) {
750 /* see comment about NeXT below */
751 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
752 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
753 mp->mnt_iflag |= IMNT_DTYPE;
754 } else {
755 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
756 ump->um_dirblksiz = DIRBLKSIZ;
757 if (ump->um_maxsymlinklen > 0)
758 mp->mnt_iflag |= IMNT_DTYPE;
759 else
760 mp->mnt_iflag &= ~IMNT_DTYPE;
761 }
762 ffs_oldfscompat_read(fs, ump, sblockloc);
763
764 mutex_enter(&ump->um_lock);
765 ump->um_maxfilesize = fs->fs_maxfilesize;
766 if (fs->fs_flags & ~(FS_KNOWN_FLAGS | FS_INTERNAL)) {
767 uprintf("%s: unknown ufs flags: 0x%08"PRIx32"%s\n",
768 mp->mnt_stat.f_mntonname, fs->fs_flags,
769 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
770 if ((mp->mnt_flag & MNT_FORCE) == 0) {
771 mutex_exit(&ump->um_lock);
772 return (EINVAL);
773 }
774 }
775 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
776 fs->fs_pendingblocks = 0;
777 fs->fs_pendinginodes = 0;
778 }
779 mutex_exit(&ump->um_lock);
780
781 ffs_statvfs(mp, &mp->mnt_stat);
782 /*
783 * Step 3: re-read summary information from disk.
784 */
785 blks = howmany(fs->fs_cssize, fs->fs_fsize);
786 space = fs->fs_csp;
787 for (i = 0; i < blks; i += fs->fs_frag) {
788 bsize = fs->fs_bsize;
789 if (i + fs->fs_frag > blks)
790 bsize = (blks - i) * fs->fs_fsize;
791 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), bsize,
792 NOCRED, 0, &bp);
793 if (error) {
794 return (error);
795 }
796 #ifdef FFS_EI
797 if (UFS_FSNEEDSWAP(fs))
798 ffs_csum_swap((struct csum *)bp->b_data,
799 (struct csum *)space, bsize);
800 else
801 #endif
802 memcpy(space, bp->b_data, (size_t)bsize);
803 space = (char *)space + bsize;
804 brelse(bp, 0);
805 }
806 if (fs->fs_snapinum[0] != 0)
807 ffs_snapshot_mount(mp);
808 /*
809 * We no longer know anything about clusters per cylinder group.
810 */
811 if (fs->fs_contigsumsize > 0) {
812 lp = fs->fs_maxcluster;
813 for (i = 0; i < fs->fs_ncg; i++)
814 *lp++ = fs->fs_contigsumsize;
815 }
816
817 /* Allocate a marker vnode. */
818 mvp = vnalloc(mp);
819 /*
820 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
821 * and vclean() can be called indirectly
822 */
823 mutex_enter(&mntvnode_lock);
824 loop:
825 for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
826 vmark(mvp, vp);
827 if (vp->v_mount != mp || vismarker(vp))
828 continue;
829 /*
830 * Step 4: invalidate all inactive vnodes.
831 */
832 if (vrecycle(vp, &mntvnode_lock, l)) {
833 mutex_enter(&mntvnode_lock);
834 (void)vunmark(mvp);
835 goto loop;
836 }
837 /*
838 * Step 5: invalidate all cached file data.
839 */
840 mutex_enter(vp->v_interlock);
841 mutex_exit(&mntvnode_lock);
842 if (vget(vp, LK_EXCLUSIVE)) {
843 (void)vunmark(mvp);
844 goto loop;
845 }
846 if (vinvalbuf(vp, 0, cred, l, 0, 0))
847 panic("ffs_reload: dirty2");
848 /*
849 * Step 6: re-read inode data for all active vnodes.
850 */
851 ip = VTOI(vp);
852 error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
853 (int)fs->fs_bsize, NOCRED, 0, &bp);
854 if (error) {
855 vput(vp);
856 (void)vunmark(mvp);
857 break;
858 }
859 ffs_load_inode(bp, ip, fs, ip->i_number);
860 brelse(bp, 0);
861 vput(vp);
862 mutex_enter(&mntvnode_lock);
863 }
864 mutex_exit(&mntvnode_lock);
865 vnfree(mvp);
866 return (error);
867 }
868
869 /*
870 * Possible superblock locations ordered from most to least likely.
871 */
872 static const int sblock_try[] = SBLOCKSEARCH;
873
874 /*
875 * Common code for mount and mountroot
876 */
877 int
878 ffs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
879 {
880 struct ufsmount *ump;
881 struct buf *bp;
882 struct fs *fs;
883 dev_t dev;
884 struct dkwedge_info dkw;
885 void *space;
886 daddr_t sblockloc, fsblockloc;
887 int blks, fstype;
888 int error, i, bsize, ronly, bset = 0;
889 #ifdef FFS_EI
890 int needswap = 0; /* keep gcc happy */
891 #endif
892 int32_t *lp;
893 kauth_cred_t cred;
894 u_int32_t sbsize = 8192; /* keep gcc happy */
895 u_int32_t allocsbsize;
896 int32_t fsbsize;
897
898 dev = devvp->v_rdev;
899 cred = l ? l->l_cred : NOCRED;
900
901 /* Flush out any old buffers remaining from a previous use. */
902 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
903 error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
904 VOP_UNLOCK(devvp);
905 if (error)
906 return (error);
907
908 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
909
910 bp = NULL;
911 ump = NULL;
912 fs = NULL;
913 sblockloc = 0;
914 fstype = 0;
915
916 error = fstrans_mount(mp);
917 if (error)
918 return error;
919
920 ump = kmem_zalloc(sizeof(*ump), KM_SLEEP);
921 mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);
922 error = ffs_snapshot_init(ump);
923 if (error)
924 goto out;
925 ump->um_ops = &ffs_ufsops;
926
927 #ifdef WAPBL
928 sbagain:
929 #endif
930 /*
931 * Try reading the superblock in each of its possible locations.
932 */
933 for (i = 0; ; i++) {
934 if (bp != NULL) {
935 brelse(bp, BC_NOCACHE);
936 bp = NULL;
937 }
938 if (sblock_try[i] == -1) {
939 error = EINVAL;
940 fs = NULL;
941 goto out;
942 }
943 error = bread(devvp, sblock_try[i] / DEV_BSIZE, SBLOCKSIZE, cred,
944 0, &bp);
945 if (error) {
946 fs = NULL;
947 goto out;
948 }
949 fs = (struct fs*)bp->b_data;
950 fsblockloc = sblockloc = sblock_try[i];
951 if (fs->fs_magic == FS_UFS1_MAGIC) {
952 sbsize = fs->fs_sbsize;
953 fstype = UFS1;
954 fsbsize = fs->fs_bsize;
955 #ifdef FFS_EI
956 needswap = 0;
957 } else if (fs->fs_magic == FS_UFS1_MAGIC_SWAPPED) {
958 sbsize = bswap32(fs->fs_sbsize);
959 fstype = UFS1;
960 fsbsize = bswap32(fs->fs_bsize);
961 needswap = 1;
962 #endif
963 } else if (fs->fs_magic == FS_UFS2_MAGIC) {
964 sbsize = fs->fs_sbsize;
965 fstype = UFS2;
966 fsbsize = fs->fs_bsize;
967 #ifdef FFS_EI
968 needswap = 0;
969 } else if (fs->fs_magic == FS_UFS2_MAGIC_SWAPPED) {
970 sbsize = bswap32(fs->fs_sbsize);
971 fstype = UFS2;
972 fsbsize = bswap32(fs->fs_bsize);
973 needswap = 1;
974 #endif
975 } else
976 continue;
977
978
979 /* fs->fs_sblockloc isn't defined for old filesystems */
980 if (fstype == UFS1 && !(fs->fs_old_flags & FS_FLAGS_UPDATED)) {
981 if (sblockloc == SBLOCK_UFS2)
982 /*
983 * This is likely to be the first alternate
984 * in a filesystem with 64k blocks.
985 * Don't use it.
986 */
987 continue;
988 fsblockloc = sblockloc;
989 } else {
990 fsblockloc = fs->fs_sblockloc;
991 #ifdef FFS_EI
992 if (needswap)
993 fsblockloc = bswap64(fsblockloc);
994 #endif
995 }
996
997 /* Check we haven't found an alternate superblock */
998 if (fsblockloc != sblockloc)
999 continue;
1000
1001 /* Validate size of superblock */
1002 if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs))
1003 continue;
1004
1005 /* Check that we can handle the file system blocksize */
1006 if (fsbsize > MAXBSIZE) {
1007 printf("ffs_mountfs: block size (%d) > MAXBSIZE (%d)\n",
1008 fsbsize, MAXBSIZE);
1009 continue;
1010 }
1011
1012 /* OK, this seems to be a good superblock. */
1013 break;
1014 }
1015
1016 fs = kmem_alloc((u_long)sbsize, KM_SLEEP);
1017 memcpy(fs, bp->b_data, sbsize);
1018 ump->um_fs = fs;
1019
1020 #ifdef FFS_EI
1021 if (needswap) {
1022 ffs_sb_swap((struct fs*)bp->b_data, fs);
1023 fs->fs_flags |= FS_SWAPPED;
1024 } else
1025 #endif
1026 fs->fs_flags &= ~FS_SWAPPED;
1027
1028 #ifdef WAPBL
1029 if ((mp->mnt_wapbl_replay == 0) && (fs->fs_flags & FS_DOWAPBL)) {
1030 error = ffs_wapbl_replay_start(mp, fs, devvp);
1031 if (error && (mp->mnt_flag & MNT_FORCE) == 0)
1032 goto out;
1033 if (!error) {
1034 if (!ronly) {
1035 /* XXX fsmnt may be stale. */
1036 printf("%s: replaying log to disk\n",
1037 fs->fs_fsmnt);
1038 error = wapbl_replay_write(mp->mnt_wapbl_replay,
1039 devvp);
1040 if (error)
1041 goto out;
1042 wapbl_replay_stop(mp->mnt_wapbl_replay);
1043 fs->fs_clean = FS_WASCLEAN;
1044 } else {
1045 /* XXX fsmnt may be stale */
1046 printf("%s: replaying log to memory\n",
1047 fs->fs_fsmnt);
1048 }
1049
1050 /* Force a re-read of the superblock */
1051 brelse(bp, BC_INVAL);
1052 bp = NULL;
1053 kmem_free(fs, sbsize);
1054 fs = NULL;
1055 goto sbagain;
1056 }
1057 }
1058 #else /* !WAPBL */
1059 if ((fs->fs_flags & FS_DOWAPBL) && (mp->mnt_flag & MNT_FORCE) == 0) {
1060 error = EPERM;
1061 goto out;
1062 }
1063 #endif /* !WAPBL */
1064
1065 ffs_oldfscompat_read(fs, ump, sblockloc);
1066 ump->um_maxfilesize = fs->fs_maxfilesize;
1067
1068 if (fs->fs_flags & ~(FS_KNOWN_FLAGS | FS_INTERNAL)) {
1069 uprintf("%s: unknown ufs flags: 0x%08"PRIx32"%s\n",
1070 mp->mnt_stat.f_mntonname, fs->fs_flags,
1071 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
1072 if ((mp->mnt_flag & MNT_FORCE) == 0) {
1073 error = EINVAL;
1074 goto out;
1075 }
1076 }
1077
1078 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1079 fs->fs_pendingblocks = 0;
1080 fs->fs_pendinginodes = 0;
1081 }
1082
1083 ump->um_fstype = fstype;
1084 if (fs->fs_sbsize < SBLOCKSIZE)
1085 brelse(bp, BC_INVAL);
1086 else
1087 brelse(bp, 0);
1088 bp = NULL;
1089
1090 /* First check to see if this is tagged as an Apple UFS filesystem
1091 * in the disklabel
1092 */
1093 if (getdiskinfo(devvp, &dkw) == 0 &&
1094 strcmp(dkw.dkw_ptype, DKW_PTYPE_APPLEUFS) == 0)
1095 ump->um_flags |= UFS_ISAPPLEUFS;
1096 #ifdef APPLE_UFS
1097 else {
1098 /* Manually look for an apple ufs label, and if a valid one
1099 * is found, then treat it like an Apple UFS filesystem anyway
1100 */
1101 error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / DEV_BSIZE),
1102 APPLEUFS_LABEL_SIZE, cred, 0, &bp);
1103 if (error)
1104 goto out;
1105 error = ffs_appleufs_validate(fs->fs_fsmnt,
1106 (struct appleufslabel *)bp->b_data, NULL);
1107 if (error == 0) {
1108 ump->um_flags |= UFS_ISAPPLEUFS;
1109 }
1110 brelse(bp, 0);
1111 bp = NULL;
1112 }
1113 #else
1114 if (ump->um_flags & UFS_ISAPPLEUFS) {
1115 error = EINVAL;
1116 goto out;
1117 }
1118 #endif
1119
1120 #if 0
1121 /*
1122 * XXX This code changes the behaviour of mounting dirty filesystems, to
1123 * XXX require "mount -f ..." to mount them. This doesn't match what
1124 * XXX mount(8) describes and is disabled for now.
1125 */
1126 /*
1127 * If the file system is not clean, don't allow it to be mounted
1128 * unless MNT_FORCE is specified. (Note: MNT_FORCE is always set
1129 * for the root file system.)
1130 */
1131 if (fs->fs_flags & FS_DOWAPBL) {
1132 /*
1133 * wapbl normally expects to be FS_WASCLEAN when the FS_DOWAPBL
1134 * bit is set, although there's a window in unmount where it
1135 * could be FS_ISCLEAN
1136 */
1137 if ((mp->mnt_flag & MNT_FORCE) == 0 &&
1138 (fs->fs_clean & (FS_WASCLEAN | FS_ISCLEAN)) == 0) {
1139 error = EPERM;
1140 goto out;
1141 }
1142 } else
1143 if ((fs->fs_clean & FS_ISCLEAN) == 0 &&
1144 (mp->mnt_flag & MNT_FORCE) == 0) {
1145 error = EPERM;
1146 goto out;
1147 }
1148 #endif
1149
1150 /*
1151 * verify that we can access the last block in the fs
1152 * if we're mounting read/write.
1153 */
1154
1155 if (!ronly) {
1156 error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize,
1157 cred, 0, &bp);
1158 if (bp->b_bcount != fs->fs_fsize)
1159 error = EINVAL;
1160 if (error) {
1161 bset = BC_INVAL;
1162 goto out;
1163 }
1164 brelse(bp, BC_INVAL);
1165 bp = NULL;
1166 }
1167
1168 fs->fs_ronly = ronly;
1169 /* Don't bump fs_clean if we're replaying journal */
1170 if (!((fs->fs_flags & FS_DOWAPBL) && (fs->fs_clean & FS_WASCLEAN)))
1171 if (ronly == 0) {
1172 fs->fs_clean <<= 1;
1173 fs->fs_fmod = 1;
1174 }
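/*
 * Allocate one contiguous area for the cylinder group summary info
 * (fs_csp), the per-cg cluster summary (fs_maxcluster) and the
 * per-cg directory counts (fs_contigdirs); it is freed as a unit
 * in ffs_unmount().
 */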
1175 bsize = fs->fs_cssize;
1176 blks = howmany(bsize, fs->fs_fsize);
1177 if (fs->fs_contigsumsize > 0)
1178 bsize += fs->fs_ncg * sizeof(int32_t);
1179 bsize += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1180 allocsbsize = bsize;
1181 space = kmem_alloc((u_long)allocsbsize, KM_SLEEP);
1182 fs->fs_csp = space;
1183 for (i = 0; i < blks; i += fs->fs_frag) {
1184 bsize = fs->fs_bsize;
1185 if (i + fs->fs_frag > blks)
1186 bsize = (blks - i) * fs->fs_fsize;
1187 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), bsize,
1188 cred, 0, &bp);
1189 if (error) {
1190 kmem_free(fs->fs_csp, allocsbsize);
1191 goto out;
1192 }
1193 #ifdef FFS_EI
1194 if (needswap)
1195 ffs_csum_swap((struct csum *)bp->b_data,
1196 (struct csum *)space, bsize);
1197 else
1198 #endif
1199 memcpy(space, bp->b_data, (u_int)bsize);
1200
1201 space = (char *)space + bsize;
1202 brelse(bp, 0);
1203 bp = NULL;
1204 }
1205 if (fs->fs_contigsumsize > 0) {
1206 fs->fs_maxcluster = lp = space;
1207 for (i = 0; i < fs->fs_ncg; i++)
1208 *lp++ = fs->fs_contigsumsize;
1209 space = lp;
1210 }
1211 bsize = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1212 fs->fs_contigdirs = space;
1213 space = (char *)space + bsize;
1214 memset(fs->fs_contigdirs, 0, bsize);
1215 /* Compatibility for old filesystems - XXX */
1216 if (fs->fs_avgfilesize <= 0)
1217 fs->fs_avgfilesize = AVFILESIZ;
1218 if (fs->fs_avgfpdir <= 0)
1219 fs->fs_avgfpdir = AFPDIR;
1220 fs->fs_active = NULL;
1221 mp->mnt_data = ump;
1222 mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
1223 mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_FFS);
1224 mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
1225 mp->mnt_stat.f_namemax = FFS_MAXNAMLEN;
1226 if (UFS_MPISAPPLEUFS(ump)) {
1227 /* NeXT used to keep short symlinks in the inode even
1228 * when using FS_42INODEFMT. In that case fs->fs_maxsymlinklen
1229 * is probably -1, but we still need to be able to identify
1230 * short symlinks.
1231 */
1232 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
1233 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
1234 mp->mnt_iflag |= IMNT_DTYPE;
1235 } else {
1236 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
1237 ump->um_dirblksiz = DIRBLKSIZ;
1238 if (ump->um_maxsymlinklen > 0)
1239 mp->mnt_iflag |= IMNT_DTYPE;
1240 else
1241 mp->mnt_iflag &= ~IMNT_DTYPE;
1242 }
1243 mp->mnt_fs_bshift = fs->fs_bshift;
1244 mp->mnt_dev_bshift = DEV_BSHIFT; /* XXX */
1245 mp->mnt_flag |= MNT_LOCAL;
1246 mp->mnt_iflag |= IMNT_MPSAFE;
1247 #ifdef FFS_EI
1248 if (needswap)
1249 ump->um_flags |= UFS_NEEDSWAP;
1250 #endif
1251 ump->um_mountp = mp;
1252 ump->um_dev = dev;
1253 ump->um_devvp = devvp;
1254 ump->um_nindir = fs->fs_nindir;
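/*
 * fs_nindir is a power of two, so ffs(9) (find first set bit)
 * yields log2(fs_nindir) + 1 here.
 */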
1255 ump->um_lognindir = ffs(fs->fs_nindir) - 1;
1256 ump->um_bptrtodb = fs->fs_fshift - DEV_BSHIFT;
1257 ump->um_seqinc = fs->fs_frag;
1258 for (i = 0; i < MAXQUOTAS; i++)
1259 ump->um_quotas[i] = NULLVP;
1260 devvp->v_specmountpoint = mp;
1261 if (ronly == 0 && fs->fs_snapinum[0] != 0)
1262 ffs_snapshot_mount(mp);
1263 #ifdef WAPBL
1264 if (!ronly) {
1265 KDASSERT(fs->fs_ronly == 0);
1266 /*
1267 * ffs_wapbl_start() needs mp->mnt_stat initialised if it
1268 * needs to create a new log file in-filesystem.
1269 */
1270 ffs_statvfs(mp, &mp->mnt_stat);
1271
1272 error = ffs_wapbl_start(mp);
1273 if (error) {
1274 kmem_free(fs->fs_csp, allocsbsize);
1275 goto out;
1276 }
1277 }
1278 #endif /* WAPBL */
1279 if (ronly == 0) {
1280 #ifdef QUOTA2
1281 error = ffs_quota2_mount(mp);
1282 if (error) {
1283 kmem_free(fs->fs_csp, allocsbsize);
1284 goto out;
1285 }
1286 #else
1287 if (fs->fs_flags & FS_DOQUOTA2) {
1288 ump->um_flags |= UFS_QUOTA2;
1289 uprintf("%s: options QUOTA2 not enabled%s\n",
1290 mp->mnt_stat.f_mntonname,
1291 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
1292 if ((mp->mnt_flag & MNT_FORCE) == 0) {
1293 error = EINVAL;
1294 kmem_free(fs->fs_csp, allocsbsize);
1295 goto out;
1296 }
1297 }
1298 #endif
1299 }
1300 #ifdef UFS_EXTATTR
1301 /*
1302 * Initialize file-backed extended attributes on UFS1 file
1303 * systems.
1304 */
1305 if (ump->um_fstype == UFS1)
1306 ufs_extattr_uepm_init(&ump->um_extattr);
1307 #endif /* UFS_EXTATTR */
1308
1309 if (mp->mnt_flag & MNT_DISCARD)
1310 ump->um_discarddata = ffs_discard_init(devvp, fs);
1311
1312 return (0);
1313 out:
1314 #ifdef WAPBL
1315 if (mp->mnt_wapbl_replay) {
1316 wapbl_replay_stop(mp->mnt_wapbl_replay);
1317 wapbl_replay_free(mp->mnt_wapbl_replay);
1318 mp->mnt_wapbl_replay = 0;
1319 }
1320 #endif
1321
1322 fstrans_unmount(mp);
1323 if (fs)
1324 kmem_free(fs, fs->fs_sbsize);
1325 devvp->v_specmountpoint = NULL;
1326 if (bp)
1327 brelse(bp, bset);
1328 if (ump) {
1329 if (ump->um_oldfscompat)
1330 kmem_free(ump->um_oldfscompat, 512 + 3*sizeof(int32_t));
1331 mutex_destroy(&ump->um_lock);
1332 kmem_free(ump, sizeof(*ump));
1333 mp->mnt_data = NULL;
1334 }
1335 return (error);
1336 }
1337
1338 /*
1339 * Sanity checks for loading old filesystem superblocks.
1340 * See ffs_oldfscompat_write below for unwound actions.
1341 *
1342 * XXX - Parts get retired eventually.
1343 * Unfortunately new bits get added.
1344 */
1345 static void
1346 ffs_oldfscompat_read(struct fs *fs, struct ufsmount *ump, daddr_t sblockloc)
1347 {
1348 off_t maxfilesize;
1349 int32_t *extrasave;
1350
1351 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1352 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1353 return;
1354
1355 if (!ump->um_oldfscompat)
1356 ump->um_oldfscompat = kmem_alloc(512 + 3*sizeof(int32_t),
1357 KM_SLEEP);
1358
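/*
 * Save the on-disk copies of the old rotational layout table and the
 * three old geometry fields so ffs_oldfscompat_write() can restore
 * them before the superblock goes back to disk.
 */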
1359 memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
1360 extrasave = ump->um_oldfscompat;
1361 extrasave += 512/sizeof(int32_t);
1362 extrasave[0] = fs->fs_old_npsect;
1363 extrasave[1] = fs->fs_old_interleave;
1364 extrasave[2] = fs->fs_old_trackskew;
1365
1366 /* These fields will be overwritten by their
1367 * original values in ffs_oldfscompat_write(), so it is harmless
1368 * to modify them here.
1369 */
1370 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
1371 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
1372 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
1373 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
1374
1375 fs->fs_maxbsize = fs->fs_bsize;
1376 fs->fs_time = fs->fs_old_time;
1377 fs->fs_size = fs->fs_old_size;
1378 fs->fs_dsize = fs->fs_old_dsize;
1379 fs->fs_csaddr = fs->fs_old_csaddr;
1380 fs->fs_sblockloc = sblockloc;
1381
1382 fs->fs_flags = fs->fs_old_flags | (fs->fs_flags & FS_INTERNAL);
1383
1384 if (fs->fs_old_postblformat == FS_42POSTBLFMT) {
1385 fs->fs_old_nrpos = 8;
1386 fs->fs_old_npsect = fs->fs_old_nsect;
1387 fs->fs_old_interleave = 1;
1388 fs->fs_old_trackskew = 0;
1389 }
1390
1391 if (fs->fs_old_inodefmt < FS_44INODEFMT) {
1392 fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
1393 fs->fs_qbmask = ~fs->fs_bmask;
1394 fs->fs_qfmask = ~fs->fs_fmask;
1395 }
1396
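/*
 * Clamp fs_maxfilesize to what a 32-bit (signed) block number can
 * address: 2^31 blocks of fs_bsize bytes.
 */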
1397 maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;
1398 if (fs->fs_maxfilesize > maxfilesize)
1399 fs->fs_maxfilesize = maxfilesize;
1400
1401 /* Compatibility for old filesystems */
1402 if (fs->fs_avgfilesize <= 0)
1403 fs->fs_avgfilesize = AVFILESIZ;
1404 if (fs->fs_avgfpdir <= 0)
1405 fs->fs_avgfpdir = AFPDIR;
1406
1407 #if 0
1408 if (bigcgs) {
1409 fs->fs_save_cgsize = fs->fs_cgsize;
1410 fs->fs_cgsize = fs->fs_bsize;
1411 }
1412 #endif
1413 }
1414
1415 /*
1416 * Unwinding superblock updates for old filesystems.
1417 * See ffs_oldfscompat_read above for details.
1418 *
1419 * XXX - Parts get retired eventually.
1420 * Unfortunately new bits get added.
1421 */
1422 static void
1423 ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
1424 {
1425 int32_t *extrasave;
1426
1427 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1428 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1429 return;
1430
1431 fs->fs_old_time = fs->fs_time;
1432 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
1433 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
1434 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
1435 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
1436 fs->fs_old_flags = fs->fs_flags;
1437
1438 #if 0
1439 if (bigcgs) {
1440 fs->fs_cgsize = fs->fs_save_cgsize;
1441 }
1442 #endif
1443
1444 memcpy(&fs->fs_old_postbl_start, ump->um_oldfscompat, 512);
1445 extrasave = ump->um_oldfscompat;
1446 extrasave += 512/sizeof(int32_t);
1447 fs->fs_old_npsect = extrasave[0];
1448 fs->fs_old_interleave = extrasave[1];
1449 fs->fs_old_trackskew = extrasave[2];
1450
1451 }
1452
1453 /*
1454 * unmount vfs operation
1455 */
1456 int
1457 ffs_unmount(struct mount *mp, int mntflags)
1458 {
1459 struct lwp *l = curlwp;
1460 struct ufsmount *ump = VFSTOUFS(mp);
1461 struct fs *fs = ump->um_fs;
1462 int error, flags;
1463 u_int32_t bsize;
1464 #ifdef WAPBL
1465 extern int doforce;
1466 #endif
1467
1468 if (ump->um_discarddata) {
1469 ffs_discard_finish(ump->um_discarddata, mntflags);
1470 ump->um_discarddata = NULL;
1471 }
1472
1473 flags = 0;
1474 if (mntflags & MNT_FORCE)
1475 flags |= FORCECLOSE;
1476 if ((error = ffs_flushfiles(mp, flags, l)) != 0)
1477 return (error);
1478 error = UFS_WAPBL_BEGIN(mp);
1479 if (error == 0)
1480 if (fs->fs_ronly == 0 &&
1481 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
1482 fs->fs_clean & FS_WASCLEAN) {
1483 fs->fs_clean = FS_ISCLEAN;
1484 fs->fs_fmod = 0;
1485 (void) ffs_sbupdate(ump, MNT_WAIT);
1486 }
1487 if (error == 0)
1488 UFS_WAPBL_END(mp);
1489 #ifdef WAPBL
1490 KASSERT(!(mp->mnt_wapbl_replay && mp->mnt_wapbl));
1491 if (mp->mnt_wapbl_replay) {
1492 KDASSERT(fs->fs_ronly);
1493 wapbl_replay_stop(mp->mnt_wapbl_replay);
1494 wapbl_replay_free(mp->mnt_wapbl_replay);
1495 mp->mnt_wapbl_replay = 0;
1496 }
1497 error = ffs_wapbl_stop(mp, doforce && (mntflags & MNT_FORCE));
1498 if (error) {
1499 return error;
1500 }
1501 #endif /* WAPBL */
1502
1503 if (ump->um_devvp->v_type != VBAD)
1504 ump->um_devvp->v_specmountpoint = NULL;
1505 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1506 (void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD | FWRITE,
1507 NOCRED);
1508 vput(ump->um_devvp);
1509
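/*
 * Free the combined summary area allocated in ffs_mountfs(): cg
 * summary info, cluster summary and per-cg directory counts.
 */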
1510 bsize = fs->fs_cssize;
1511 if (fs->fs_contigsumsize > 0)
1512 bsize += fs->fs_ncg * sizeof(int32_t);
1513 bsize += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1514 kmem_free(fs->fs_csp, bsize);
1515
1516 kmem_free(fs, fs->fs_sbsize);
1517 if (ump->um_oldfscompat != NULL)
1518 kmem_free(ump->um_oldfscompat, 512 + 3*sizeof(int32_t));
1519 mutex_destroy(&ump->um_lock);
1520 ffs_snapshot_fini(ump);
1521 kmem_free(ump, sizeof(*ump));
1522 mp->mnt_data = NULL;
1523 mp->mnt_flag &= ~MNT_LOCAL;
1524 fstrans_unmount(mp);
1525 return (0);
1526 }
1527
1528 /*
1529 * Flush out all the files in a filesystem.
1530 */
1531 int
1532 ffs_flushfiles(struct mount *mp, int flags, struct lwp *l)
1533 {
1534 extern int doforce;
1535 struct ufsmount *ump;
1536 int error;
1537
1538 if (!doforce)
1539 flags &= ~FORCECLOSE;
1540 ump = VFSTOUFS(mp);
1541 #ifdef QUOTA
1542 if ((error = quota1_umount(mp, flags)) != 0)
1543 return (error);
1544 #endif
1545 #ifdef QUOTA2
1546 if ((error = quota2_umount(mp, flags)) != 0)
1547 return (error);
1548 #endif
1549 #ifdef UFS_EXTATTR
1550 if (ump->um_fstype == UFS1) {
1551 if (ump->um_extattr.uepm_flags & UFS_EXTATTR_UEPM_STARTED)
1552 ufs_extattr_stop(mp, l);
1553 if (ump->um_extattr.uepm_flags & UFS_EXTATTR_UEPM_INITIALIZED)
1554 ufs_extattr_uepm_destroy(&ump->um_extattr);
1555 }
1556 #endif
1557 if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
1558 return (error);
1559 ffs_snapshot_unmount(mp);
1560 /*
1561 * Flush all the files.
1562 */
1563 error = vflush(mp, NULLVP, flags);
1564 if (error)
1565 return (error);
1566 /*
1567 * Flush filesystem metadata.
1568 */
1569 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1570 error = VOP_FSYNC(ump->um_devvp, l->l_cred, FSYNC_WAIT, 0, 0);
1571 VOP_UNLOCK(ump->um_devvp);
1572 if (flags & FORCECLOSE) /* XXXDBJ */
1573 error = 0;
1574
1575 #ifdef WAPBL
1576 if (error)
1577 return error;
1578 if (mp->mnt_wapbl) {
1579 error = wapbl_flush(mp->mnt_wapbl, 1);
1580 if (flags & FORCECLOSE)
1581 error = 0;
1582 }
1583 #endif
1584
1585 return (error);
1586 }
1587
1588 /*
1589 * Get file system statistics.
1590 */
1591 int
1592 ffs_statvfs(struct mount *mp, struct statvfs *sbp)
1593 {
1594 struct ufsmount *ump;
1595 struct fs *fs;
1596
1597 ump = VFSTOUFS(mp);
1598 fs = ump->um_fs;
1599 mutex_enter(&ump->um_lock);
1600 sbp->f_bsize = fs->fs_bsize;
1601 sbp->f_frsize = fs->fs_fsize;
1602 sbp->f_iosize = fs->fs_bsize;
1603 sbp->f_blocks = fs->fs_dsize;
1604 sbp->f_bfree = blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
1605 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
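/*
 * fs_minfree percent of the data blocks is held in reserve;
 * f_bavail below excludes that reservation.
 */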
1606 sbp->f_bresvd = ((u_int64_t) fs->fs_dsize * (u_int64_t)
1607 fs->fs_minfree) / (u_int64_t) 100;
1608 if (sbp->f_bfree > sbp->f_bresvd)
1609 sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
1610 else
1611 sbp->f_bavail = 0;
1612 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
1613 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1614 sbp->f_favail = sbp->f_ffree;
1615 sbp->f_fresvd = 0;
1616 mutex_exit(&ump->um_lock);
1617 copy_statvfs_info(sbp, mp);
1618
1619 return (0);
1620 }
1621
1622 /*
1623 * Go through the disk queues to initiate sandbagged IO;
1624 * go through the inodes to write those that have been modified;
1625 * initiate the writing of the super block if it has been modified.
1626 *
1627 * Note: we are always called with the filesystem marked `MPBUSY'.
1628 */
1629 int
1630 ffs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
1631 {
1632 struct vnode *vp, *mvp, *nvp;
1633 struct inode *ip;
1634 struct ufsmount *ump = VFSTOUFS(mp);
1635 struct fs *fs;
1636 int error, allerror = 0;
1637 bool is_suspending;
1638
1639 fs = ump->um_fs;
1640 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
1641 printf("fs = %s\n", fs->fs_fsmnt);
1642 panic("update: rofs mod");
1643 }
1644
1645 /* Allocate a marker vnode. */
1646 mvp = vnalloc(mp);
1647
1648 fstrans_start(mp, FSTRANS_SHARED);
1649 is_suspending = (fstrans_getstate(mp) == FSTRANS_SUSPENDING);
1650 /*
1651 * Write back each (modified) inode.
1652 */
1653 mutex_enter(&mntvnode_lock);
1654 loop:
1655 /*
1656 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
1657 * and vclean() can be called indirectly
1658 */
1659 for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
1660 nvp = TAILQ_NEXT(vp, v_mntvnodes);
1661 /*
1662 * If the vnode that we are about to sync is no longer
1663 * associated with this mount point, start over.
1664 */
1665 if (vp->v_mount != mp)
1666 goto loop;
1667 /*
1668 * Don't interfere with concurrent scans of this FS.
1669 */
1670 if (vismarker(vp))
1671 continue;
1672 mutex_enter(vp->v_interlock);
1673 ip = VTOI(vp);
1674
1675 /*
1676 * Skip the vnode/inode if inaccessible.
1677 */
1678 if (ip == NULL || (vp->v_iflag & (VI_XLOCK | VI_CLEAN)) != 0 ||
1679 vp->v_type == VNON) {
1680 mutex_exit(vp->v_interlock);
1681 continue;
1682 }
1683
1684 /*
1685 * We deliberately update inode times here. This will
1686 * prevent a massive queue of updates accumulating, only
1687 * to be handled by a call to unmount.
1688 *
1689 * XXX It would be better to have the syncer trickle these
1690 * out. Adjustment needed to allow registering vnodes for
1691 * sync when the vnode is clean, but the inode dirty. Or
1692 * have ufs itself trickle out inode updates.
1693 *
1694 * If doing a lazy sync, we don't care about metadata or
1695 * data updates, because they are handled by each vnode's
1696 * synclist entry. In this case we are only interested in
1697 * writing back modified inodes.
1698 */
1699 if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE |
1700 IN_MODIFY | IN_MODIFIED | IN_ACCESSED)) == 0 &&
1701 (waitfor == MNT_LAZY || (LIST_EMPTY(&vp->v_dirtyblkhd) &&
1702 UVM_OBJ_IS_CLEAN(&vp->v_uobj)))) {
1703 mutex_exit(vp->v_interlock);
1704 continue;
1705 }
1706 if (vp->v_type == VBLK && is_suspending) {
1707 mutex_exit(vp->v_interlock);
1708 continue;
1709 }
1710 vmark(mvp, vp);
1711 mutex_exit(&mntvnode_lock);
1712 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT);
1713 if (error) {
1714 mutex_enter(&mntvnode_lock);
1715 nvp = vunmark(mvp);
1716 if (error == ENOENT) {
1717 goto loop;
1718 }
1719 continue;
1720 }
1721 if (waitfor == MNT_LAZY) {
1722 error = UFS_WAPBL_BEGIN(vp->v_mount);
1723 if (!error) {
1724 error = ffs_update(vp, NULL, NULL,
1725 UPDATE_CLOSE);
1726 UFS_WAPBL_END(vp->v_mount);
1727 }
1728 } else {
1729 error = VOP_FSYNC(vp, cred, FSYNC_NOLOG |
1730 (waitfor == MNT_WAIT ? FSYNC_WAIT : 0), 0, 0);
1731 }
1732 if (error)
1733 allerror = error;
1734 vput(vp);
1735 mutex_enter(&mntvnode_lock);
1736 nvp = vunmark(mvp);
1737 }
1738 mutex_exit(&mntvnode_lock);
1739 /*
1740 * Force stale file system control information to be flushed.
1741 */
1742 if (waitfor != MNT_LAZY && (ump->um_devvp->v_numoutput > 0 ||
1743 !LIST_EMPTY(&ump->um_devvp->v_dirtyblkhd))) {
1744 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1745 if ((error = VOP_FSYNC(ump->um_devvp, cred,
1746 (waitfor == MNT_WAIT ? FSYNC_WAIT : 0) | FSYNC_NOLOG,
1747 0, 0)) != 0)
1748 allerror = error;
1749 VOP_UNLOCK(ump->um_devvp);
1750 if (allerror == 0 && waitfor == MNT_WAIT && !mp->mnt_wapbl) {
1751 mutex_enter(&mntvnode_lock);
1752 goto loop;
1753 }
1754 }
1755 #if defined(QUOTA) || defined(QUOTA2)
1756 qsync(mp);
1757 #endif
1758 /*
1759 * Write back modified superblock.
1760 */
1761 if (fs->fs_fmod != 0) {
1762 fs->fs_fmod = 0;
1763 fs->fs_time = time_second;
1764 error = UFS_WAPBL_BEGIN(mp);
1765 if (error)
1766 allerror = error;
1767 else {
1768 if ((error = ffs_cgupdate(ump, waitfor)))
1769 allerror = error;
1770 UFS_WAPBL_END(mp);
1771 }
1772 }
1773
1774 #ifdef WAPBL
1775 if (mp->mnt_wapbl) {
1776 error = wapbl_flush(mp->mnt_wapbl, 0);
1777 if (error)
1778 allerror = error;
1779 }
1780 #endif
1781
1782 fstrans_done(mp);
1783 vnfree(mvp);
1784 return (allerror);
1785 }
1786
1787 /*
1788 * Look up a FFS dinode number to find its incore vnode, otherwise read it
1789 * in from disk. If it is in core, wait for the lock bit to clear, then
1790 * return the inode locked. Detection and handling of mount points must be
1791 * done by the calling routine.
1792 */
1793 int
1794 ffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
1795 {
1796 struct fs *fs;
1797 struct inode *ip;
1798 struct ufsmount *ump;
1799 struct buf *bp;
1800 struct vnode *vp;
1801 dev_t dev;
1802 int error;
1803
1804 ump = VFSTOUFS(mp);
1805 dev = ump->um_dev;
1806
1807 retry:
1808 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
1809 return (0);
1810
1811 /* Allocate a new vnode/inode. */
1812 error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, NULL, &vp);
1813 if (error) {
1814 *vpp = NULL;
1815 return (error);
1816 }
1817 ip = pool_cache_get(ffs_inode_cache, PR_WAITOK);
1818
1819 /*
1820 * If someone beat us to it, put back the freshly allocated
1821 * vnode/inode pair and retry.
1822 */
1823 mutex_enter(&ufs_hashlock);
1824 if (ufs_ihashget(dev, ino, 0) != NULL) {
1825 mutex_exit(&ufs_hashlock);
1826 ungetnewvnode(vp);
1827 pool_cache_put(ffs_inode_cache, ip);
1828 goto retry;
1829 }
1830
1831 vp->v_vflag |= VV_LOCKSWORK;
1832
1833 /*
1834 * XXX MFS ends up here, too, to allocate an inode. Should we
1835 * XXX create another pool for MFS inodes?
1836 */
1837
1838 memset(ip, 0, sizeof(struct inode));
1839 vp->v_data = ip;
1840 ip->i_vnode = vp;
1841 ip->i_ump = ump;
1842 ip->i_fs = fs = ump->um_fs;
1843 ip->i_dev = dev;
1844 ip->i_number = ino;
1845 #if defined(QUOTA) || defined(QUOTA2)
1846 ufsquota_init(ip);
1847 #endif
1848
1849 /*
1850 * Initialize the genfs node; we might have to destroy it in
1851 * error branches.
1852 */
1853 genfs_node_init(vp, &ffs_genfsops);
1854
1855 /*
1856 * Put it onto its hash chain and lock it so that other requests for
1857 * this inode will block if they arrive while we are sleeping waiting
1858 * for old data structures to be purged or for the contents of the
1859 * disk portion of this inode to be read.
1860 */
1861
1862 ufs_ihashins(ip);
1863 mutex_exit(&ufs_hashlock);
1864
1865 /* Read in the disk contents for the inode, copy into the inode. */
1866 error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1867 (int)fs->fs_bsize, NOCRED, 0, &bp);
1868 if (error) {
1869
1870 /*
1871 * The inode does not contain anything useful, so it would
1872 * be misleading to leave it on its hash chain. With mode
1873 * still zero, it will be unlinked and returned to the free
1874 * list by vput().
1875 */
1876
1877 vput(vp);
1878 *vpp = NULL;
1879 return (error);
1880 }
1881 if (ip->i_ump->um_fstype == UFS1)
1882 ip->i_din.ffs1_din = pool_cache_get(ffs_dinode1_cache,
1883 PR_WAITOK);
1884 else
1885 ip->i_din.ffs2_din = pool_cache_get(ffs_dinode2_cache,
1886 PR_WAITOK);
1887 ffs_load_inode(bp, ip, fs, ino);
1888 brelse(bp, 0);
1889
1890 /*
1891 * Initialize the vnode from the inode, check for aliases.
1892 * Note that the underlying vnode may have changed.
1893 */
1894
1895 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
1896
1897 /*
1898 * Finish inode initialization now that aliasing has been resolved.
1899 */
1900
1901 ip->i_devvp = ump->um_devvp;
1902 vref(ip->i_devvp);
1903
1904 /*
1905 * Ensure that uid and gid are correct. This is a temporary
1906 * fix until fsck has been changed to do the update.
1907 */
1908
1909 if (fs->fs_old_inodefmt < FS_44INODEFMT) { /* XXX */
1910 ip->i_uid = ip->i_ffs1_ouid; /* XXX */
1911 ip->i_gid = ip->i_ffs1_ogid; /* XXX */
1912 } /* XXX */
1913 uvm_vnp_setsize(vp, ip->i_size);
1914 *vpp = vp;
1915 return (0);
1916 }
1917
1918 /*
1919 * File handle to vnode
1920 *
1921 * Have to be really careful about stale file handles:
1922 * - check that the inode number is valid
1923 * - call ffs_vget() to get the locked inode
1924 * - check for an unallocated inode (i_mode == 0)
1925 * - check that the given client host has export rights and return
1926 * those rights via exflagsp and credanonp
1927 */
1928 int
1929 ffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
1930 {
1931 struct ufid ufh;
1932 struct fs *fs;
1933
1934 if (fhp->fid_len != sizeof(struct ufid))
1935 return EINVAL;
1936
1937 memcpy(&ufh, fhp, sizeof(ufh));
1938 fs = VFSTOUFS(mp)->um_fs;
1939 if (ufh.ufid_ino < ROOTINO ||
1940 ufh.ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1941 return (ESTALE);
1942 return (ufs_fhtovp(mp, &ufh, vpp));
1943 }
1944
1945 /*
1946 * Vnode pointer to File handle
1947 */
1948 /* ARGSUSED */
1949 int
1950 ffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
1951 {
1952 struct inode *ip;
1953 struct ufid ufh;
1954
1955 if (*fh_size < sizeof(struct ufid)) {
1956 *fh_size = sizeof(struct ufid);
1957 return E2BIG;
1958 }
1959 ip = VTOI(vp);
1960 *fh_size = sizeof(struct ufid);
1961 memset(&ufh, 0, sizeof(ufh));
1962 ufh.ufid_len = sizeof(struct ufid);
1963 ufh.ufid_ino = ip->i_number;
1964 ufh.ufid_gen = ip->i_gen;
1965 memcpy(fhp, &ufh, sizeof(ufh));
1966 return (0);
1967 }
1968
1969 void
1970 ffs_init(void)
1971 {
1972 if (ffs_initcount++ > 0)
1973 return;
1974
1975 ffs_inode_cache = pool_cache_init(sizeof(struct inode), 0, 0, 0,
1976 "ffsino", NULL, IPL_NONE, NULL, NULL, NULL);
1977 ffs_dinode1_cache = pool_cache_init(sizeof(struct ufs1_dinode), 0, 0, 0,
1978 "ffsdino1", NULL, IPL_NONE, NULL, NULL, NULL);
1979 ffs_dinode2_cache = pool_cache_init(sizeof(struct ufs2_dinode), 0, 0, 0,
1980 "ffsdino2", NULL, IPL_NONE, NULL, NULL, NULL);
1981 ufs_init();
1982 }
1983
1984 void
1985 ffs_reinit(void)
1986 {
1987
1988 ufs_reinit();
1989 }
1990
1991 void
1992 ffs_done(void)
1993 {
1994 if (--ffs_initcount > 0)
1995 return;
1996
1997 ufs_done();
1998 pool_cache_destroy(ffs_dinode2_cache);
1999 pool_cache_destroy(ffs_dinode1_cache);
2000 pool_cache_destroy(ffs_inode_cache);
2001 }
2002
2003 /*
2004 * Write a superblock and associated information back to disk.
2005 */
2006 int
2007 ffs_sbupdate(struct ufsmount *mp, int waitfor)
2008 {
2009 struct fs *fs = mp->um_fs;
2010 struct buf *bp;
2011 int error = 0;
2012 u_int32_t saveflag;
2013
2014 error = ffs_getblk(mp->um_devvp,
2015 fs->fs_sblockloc / DEV_BSIZE, FFS_NOBLK,
2016 fs->fs_sbsize, false, &bp);
2017 if (error)
2018 return error;
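/*
 * In-core-only flags (FS_INTERNAL) must not reach the on-disk
 * superblock; strip them for the copy and restore them afterwards.
 */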
2019 saveflag = fs->fs_flags & FS_INTERNAL;
2020 fs->fs_flags &= ~FS_INTERNAL;
2021
2022 memcpy(bp->b_data, fs, fs->fs_sbsize);
2023
2024 ffs_oldfscompat_write((struct fs *)bp->b_data, mp);
2025 #ifdef FFS_EI
2026 if (mp->um_flags & UFS_NEEDSWAP)
2027 ffs_sb_swap((struct fs *)bp->b_data, (struct fs *)bp->b_data);
2028 #endif
2029 fs->fs_flags |= saveflag;
2030
2031 if (waitfor == MNT_WAIT)
2032 error = bwrite(bp);
2033 else
2034 bawrite(bp);
2035 return (error);
2036 }
2037
2038 int
2039 ffs_cgupdate(struct ufsmount *mp, int waitfor)
2040 {
2041 struct fs *fs = mp->um_fs;
2042 struct buf *bp;
2043 int blks;
2044 void *space;
2045 int i, size, error = 0, allerror = 0;
2046
2047 allerror = ffs_sbupdate(mp, waitfor);
2048 blks = howmany(fs->fs_cssize, fs->fs_fsize);
2049 space = fs->fs_csp;
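/*
 * Write the in-core cylinder group summary back one file system
 * block at a time; the final chunk may be shorter than fs_bsize.
 */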
2050 for (i = 0; i < blks; i += fs->fs_frag) {
2051 size = fs->fs_bsize;
2052 if (i + fs->fs_frag > blks)
2053 size = (blks - i) * fs->fs_fsize;
2054 error = ffs_getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
2055 FFS_NOBLK, size, false, &bp);
2056 if (error)
2057 break;
2058 #ifdef FFS_EI
2059 if (mp->um_flags & UFS_NEEDSWAP)
2060 ffs_csum_swap((struct csum*)space,
2061 (struct csum*)bp->b_data, size);
2062 else
2063 #endif
2064 memcpy(bp->b_data, space, (u_int)size);
2065 space = (char *)space + size;
2066 if (waitfor == MNT_WAIT)
2067 error = bwrite(bp);
2068 else
2069 bawrite(bp);
2070 }
2071 if (!allerror && error)
2072 allerror = error;
2073 return (allerror);
2074 }
2075
2076 int
2077 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
2078 int attrnamespace, const char *attrname)
2079 {
2080 #ifdef UFS_EXTATTR
2081 /*
2082 * File-backed extended attributes are only supported on UFS1.
2083 * UFS2 has native extended attributes.
2084 */
2085 if (VFSTOUFS(mp)->um_fstype == UFS1)
2086 return (ufs_extattrctl(mp, cmd, vp, attrnamespace, attrname));
2087 #endif
2088 return (vfs_stdextattrctl(mp, cmd, vp, attrnamespace, attrname));
2089 }
2090
2091 int
2092 ffs_suspendctl(struct mount *mp, int cmd)
2093 {
2094 int error;
2095 struct lwp *l = curlwp;
2096
2097 switch (cmd) {
2098 case SUSPEND_SUSPEND:
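/*
 * Move to SUSPENDING, flush the file system (and the WAPBL log,
 * if any), then mark it SUSPENDED; fall back to NORMAL on error.
 */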
2099 if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
2100 return error;
2101 error = ffs_sync(mp, MNT_WAIT, l->l_proc->p_cred);
2102 if (error == 0)
2103 error = fstrans_setstate(mp, FSTRANS_SUSPENDED);
2104 #ifdef WAPBL
2105 if (error == 0 && mp->mnt_wapbl)
2106 error = wapbl_flush(mp->mnt_wapbl, 1);
2107 #endif
2108 if (error != 0) {
2109 (void) fstrans_setstate(mp, FSTRANS_NORMAL);
2110 return error;
2111 }
2112 return 0;
2113
2114 case SUSPEND_RESUME:
2115 return fstrans_setstate(mp, FSTRANS_NORMAL);
2116
2117 default:
2118 return EINVAL;
2119 }
2120 }
2121
2122 /*
2123 * Sync the block device vnode backing a mounted file system.
2124 */
2125 static int
2126 ffs_vfs_fsync(vnode_t *vp, int flags)
2127 {
2128 int error, i, pflags;
2129 #ifdef WAPBL
2130 struct mount *mp;
2131 #endif
2132
2133 KASSERT(vp->v_type == VBLK);
2134 KASSERT(vp->v_specmountpoint != NULL);
2135
2136 /*
2137 * Flush all dirty data associated with the vnode.
2138 */
2139 pflags = PGO_ALLPAGES | PGO_CLEANIT;
2140 if ((flags & FSYNC_WAIT) != 0)
2141 pflags |= PGO_SYNCIO;
2142 mutex_enter(vp->v_interlock);
2143 error = VOP_PUTPAGES(vp, 0, 0, pflags);
2144 if (error)
2145 return error;
2146
2147 #ifdef WAPBL
2148 mp = vp->v_specmountpoint;
2149 if (mp && mp->mnt_wapbl) {
2150 /*
2151 * Don't bother writing out metadata if the syncer is
2152 * making the request. We will let the sync vnode
2153 * write it out in a single burst through a call to
2154 * VFS_SYNC().
2155 */
2156 if ((flags & (FSYNC_DATAONLY | FSYNC_LAZY | FSYNC_NOLOG)) != 0)
2157 return 0;
2158
2159 /*
2160 * Don't flush the log if the vnode being flushed
2161 * contains no dirty buffers that could be in the log.
2162 */
2163 if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
2164 error = wapbl_flush(mp->mnt_wapbl, 0);
2165 if (error)
2166 return error;
2167 }
2168
2169 if ((flags & FSYNC_WAIT) != 0) {
2170 mutex_enter(vp->v_interlock);
2171 while (vp->v_numoutput)
2172 cv_wait(&vp->v_cv, vp->v_interlock);
2173 mutex_exit(vp->v_interlock);
2174 }
2175
2176 return 0;
2177 }
2178 #endif /* WAPBL */
2179
2180 error = vflushbuf(vp, flags);
2181 if (error == 0 && (flags & FSYNC_CACHE) != 0) {
2182 i = 1;
2183 (void)VOP_IOCTL(vp, DIOCCACHESYNC, &i, FWRITE,
2184 kauth_cred_get());
2185 }
2186
2187 return error;
2188 }
2189