ffs_vfsops.c revision 1.269.2.3

1 /* $NetBSD: ffs_vfsops.c,v 1.269.2.3 2012/10/30 17:23:00 yamt Exp $ */
2
3 /*-
4 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Wasabi Systems, Inc, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1989, 1991, 1993, 1994
34 * The Regents of the University of California. All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
61 */
62
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.269.2.3 2012/10/30 17:23:00 yamt Exp $");
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_ffs.h"
68 #include "opt_quota.h"
69 #include "opt_wapbl.h"
70 #endif
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/namei.h>
75 #include <sys/proc.h>
76 #include <sys/kernel.h>
77 #include <sys/vnode.h>
78 #include <sys/socket.h>
79 #include <sys/mount.h>
80 #include <sys/buf.h>
81 #include <sys/device.h>
82 #include <sys/disk.h>
83 #include <sys/mbuf.h>
84 #include <sys/file.h>
85 #include <sys/disklabel.h>
86 #include <sys/ioctl.h>
87 #include <sys/errno.h>
88 #include <sys/kmem.h>
89 #include <sys/pool.h>
90 #include <sys/lock.h>
91 #include <sys/sysctl.h>
92 #include <sys/conf.h>
93 #include <sys/kauth.h>
94 #include <sys/wapbl.h>
95 #include <sys/fstrans.h>
96 #include <sys/module.h>
97
98 #include <miscfs/genfs/genfs.h>
99 #include <miscfs/specfs/specdev.h>
100
101 #include <ufs/ufs/quota.h>
102 #include <ufs/ufs/ufsmount.h>
103 #include <ufs/ufs/inode.h>
104 #include <ufs/ufs/dir.h>
105 #include <ufs/ufs/ufs_extern.h>
106 #include <ufs/ufs/ufs_bswap.h>
107 #include <ufs/ufs/ufs_wapbl.h>
108
109 #include <ufs/ffs/fs.h>
110 #include <ufs/ffs/ffs_extern.h>
111
112 MODULE(MODULE_CLASS_VFS, ffs, NULL);
113
114 static int ffs_vfs_fsync(vnode_t *, int);
115
116 static struct sysctllog *ffs_sysctl_log;
117
118 static kauth_listener_t ffs_snapshot_listener;
119
120 /* how many times ffs_init() was called */
121 int ffs_initcount = 0;
122
123 extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
124 extern const struct vnodeopv_desc ffs_specop_opv_desc;
125 extern const struct vnodeopv_desc ffs_fifoop_opv_desc;
126
127 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
128 &ffs_vnodeop_opv_desc,
129 &ffs_specop_opv_desc,
130 &ffs_fifoop_opv_desc,
131 NULL,
132 };
133
134 struct vfsops ffs_vfsops = {
135 MOUNT_FFS,
136 sizeof (struct ufs_args),
137 ffs_mount,
138 ufs_start,
139 ffs_unmount,
140 ufs_root,
141 ufs_quotactl,
142 ffs_statvfs,
143 ffs_sync,
144 ffs_vget,
145 ffs_fhtovp,
146 ffs_vptofh,
147 ffs_init,
148 ffs_reinit,
149 ffs_done,
150 ffs_mountroot,
151 ffs_snapshot,
152 ffs_extattrctl,
153 ffs_suspendctl,
154 genfs_renamelock_enter,
155 genfs_renamelock_exit,
156 ffs_vfs_fsync,
157 ffs_vnodeopv_descs,
158 0,
159 { NULL, NULL },
160 };
161
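/*
 * genfs(9) page-handling hooks and the UFS back-end operation vector
 * used by FFS vnodes.
 */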
162 static const struct genfs_ops ffs_genfsops = {
163 .gop_size = ffs_gop_size,
164 .gop_alloc = ufs_gop_alloc,
165 .gop_write = genfs_gop_write,
166 .gop_markupdate = ufs_gop_markupdate,
167 };
168
169 static const struct ufs_ops ffs_ufsops = {
170 .uo_itimes = ffs_itimes,
171 .uo_update = ffs_update,
172 .uo_truncate = ffs_truncate,
173 .uo_valloc = ffs_valloc,
174 .uo_vfree = ffs_vfree,
175 .uo_balloc = ffs_balloc,
176 .uo_unmark_vnode = (void (*)(vnode_t *))nullop,
177 };
178
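/*
 * kauth(9) system-scope listener: allow a KAUTH_SYSTEM_FS_SNAPSHOT
 * request when the caller's effective uid owns the inode of the vnode
 * passed in arg2; otherwise defer to other listeners.
 */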
179 static int
180 ffs_snapshot_cb(kauth_cred_t cred, kauth_action_t action, void *cookie,
181 void *arg0, void *arg1, void *arg2, void *arg3)
182 {
183 vnode_t *vp = arg2;
184 int result = KAUTH_RESULT_DEFER;
185
186 if (action != KAUTH_SYSTEM_FS_SNAPSHOT)
187 return result;
188
189 if (VTOI(vp)->i_uid == kauth_cred_geteuid(cred))
190 result = KAUTH_RESULT_ALLOW;
191
192 return result;
193 }
194
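/*
 * Module control: attach/detach the FFS vfsops, create and tear down
 * the vfs.ffs sysctl subtree, and register the kauth listener used for
 * snapshot permission checks.
 */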
195 static int
196 ffs_modcmd(modcmd_t cmd, void *arg)
197 {
198 int error;
199
200 #if 0
201 extern int doasyncfree;
202 #endif
203 #ifdef UFS_EXTATTR
204 extern int ufs_extattr_autocreate;
205 #endif
206 extern int ffs_log_changeopt;
207
208 switch (cmd) {
209 case MODULE_CMD_INIT:
210 error = vfs_attach(&ffs_vfsops);
211 if (error != 0)
212 break;
213
214 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
215 CTLFLAG_PERMANENT,
216 CTLTYPE_NODE, "vfs", NULL,
217 NULL, 0, NULL, 0,
218 CTL_VFS, CTL_EOL);
219 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
220 CTLFLAG_PERMANENT,
221 CTLTYPE_NODE, "ffs",
222 SYSCTL_DESCR("Berkeley Fast File System"),
223 NULL, 0, NULL, 0,
224 CTL_VFS, 1, CTL_EOL);
225 /*
226 * @@@ should we even bother with these first three?
227 */
228 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
229 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
230 CTLTYPE_INT, "doclusterread", NULL,
231 sysctl_notavail, 0, NULL, 0,
232 CTL_VFS, 1, FFS_CLUSTERREAD, CTL_EOL);
233 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
234 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
235 CTLTYPE_INT, "doclusterwrite", NULL,
236 sysctl_notavail, 0, NULL, 0,
237 CTL_VFS, 1, FFS_CLUSTERWRITE, CTL_EOL);
238 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
239 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
240 CTLTYPE_INT, "doreallocblks", NULL,
241 sysctl_notavail, 0, NULL, 0,
242 CTL_VFS, 1, FFS_REALLOCBLKS, CTL_EOL);
243 #if 0
244 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
245 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
246 CTLTYPE_INT, "doasyncfree",
247 SYSCTL_DESCR("Release dirty blocks asynchronously"),
248 NULL, 0, &doasyncfree, 0,
249 CTL_VFS, 1, FFS_ASYNCFREE, CTL_EOL);
250 #endif
251 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
252 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
253 CTLTYPE_INT, "log_changeopt",
254 SYSCTL_DESCR("Log changes in optimization strategy"),
255 NULL, 0, &ffs_log_changeopt, 0,
256 CTL_VFS, 1, FFS_LOG_CHANGEOPT, CTL_EOL);
257 #ifdef UFS_EXTATTR
258 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
259 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
260 CTLTYPE_INT, "extattr_autocreate",
261 SYSCTL_DESCR("Size of attribute for "
262 "backing file autocreation"),
263 NULL, 0, &ufs_extattr_autocreate, 0,
264 CTL_VFS, 1, FFS_EXTATTR_AUTOCREATE, CTL_EOL);
265
266 #endif /* UFS_EXTATTR */
267
268 ffs_snapshot_listener = kauth_listen_scope(KAUTH_SCOPE_SYSTEM,
269 ffs_snapshot_cb, NULL);
270 if (ffs_snapshot_listener == NULL)
271 printf("ffs_modcmd: can't listen on system scope.\n");
272
273 break;
274 case MODULE_CMD_FINI:
275 error = vfs_detach(&ffs_vfsops);
276 if (error != 0)
277 break;
278 sysctl_teardown(&ffs_sysctl_log);
279 if (ffs_snapshot_listener != NULL)
280 kauth_unlisten_scope(ffs_snapshot_listener);
281 break;
282 default:
283 error = ENOTTY;
284 break;
285 }
286
287 return (error);
288 }
289
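/* Pools for in-core inodes and for UFS1/UFS2 on-disk dinode copies. */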
290 pool_cache_t ffs_inode_cache;
291 pool_cache_t ffs_dinode1_cache;
292 pool_cache_t ffs_dinode2_cache;
293
294 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, daddr_t);
295 static void ffs_oldfscompat_write(struct fs *, struct ufsmount *);
296
297 /*
298 * Called by main() when ffs is going to be mounted as root.
299 */
300
301 int
302 ffs_mountroot(void)
303 {
304 struct fs *fs;
305 struct mount *mp;
306 struct lwp *l = curlwp; /* XXX */
307 struct ufsmount *ump;
308 int error;
309
310 if (device_class(root_device) != DV_DISK)
311 return (ENODEV);
312
313 if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
314 vrele(rootvp);
315 return (error);
316 }
317
318 /*
319 * We always need to be able to mount the root file system.
320 */
321 mp->mnt_flag |= MNT_FORCE;
322 if ((error = ffs_mountfs(rootvp, mp, l)) != 0) {
323 vfs_unbusy(mp, false, NULL);
324 vfs_destroy(mp);
325 return (error);
326 }
327 mp->mnt_flag &= ~MNT_FORCE;
328 mutex_enter(&mountlist_lock);
329 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
330 mutex_exit(&mountlist_lock);
331 ump = VFSTOUFS(mp);
332 fs = ump->um_fs;
333 memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
334 (void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
335 (void)ffs_statvfs(mp, &mp->mnt_stat);
336 vfs_unbusy(mp, false, NULL);
337 setrootfstime((time_t)fs->fs_time);
338 return (0);
339 }
340
341 /*
342 * VFS Operations.
343 *
344 * mount system call
345 */
346 int
347 ffs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
348 {
349 struct lwp *l = curlwp;
350 struct vnode *devvp = NULL;
351 struct ufs_args *args = data;
352 struct ufsmount *ump = NULL;
353 struct fs *fs;
354 int error = 0, flags, update;
355 mode_t accessmode;
356
357 if (*data_len < sizeof *args)
358 return EINVAL;
359
360 if (mp->mnt_flag & MNT_GETARGS) {
361 ump = VFSTOUFS(mp);
362 if (ump == NULL)
363 return EIO;
364 args->fspec = NULL;
365 *data_len = sizeof *args;
366 return 0;
367 }
368
369 update = mp->mnt_flag & MNT_UPDATE;
370
371 /* Check arguments */
372 if (args->fspec != NULL) {
373 /*
374 * Look up the name and verify that it's sane.
375 */
376 error = namei_simple_user(args->fspec,
377 NSM_FOLLOW_NOEMULROOT, &devvp);
378 if (error != 0)
379 return (error);
380
381 if (!update) {
382 /*
383 * Be sure this is a valid block device
384 */
385 if (devvp->v_type != VBLK)
386 error = ENOTBLK;
387 else if (bdevsw_lookup(devvp->v_rdev) == NULL)
388 error = ENXIO;
389 } else {
390 /*
391 * Be sure we're still naming the same device
392 * used for our initial mount
393 */
394 ump = VFSTOUFS(mp);
395 if (devvp != ump->um_devvp) {
396 if (devvp->v_rdev != ump->um_devvp->v_rdev)
397 error = EINVAL;
398 else {
399 vrele(devvp);
400 devvp = ump->um_devvp;
401 vref(devvp);
402 }
403 }
404 }
405 } else {
406 if (!update) {
407 /* New mounts must have a filename for the device */
408 return (EINVAL);
409 } else {
410 /* Use the extant mount */
411 ump = VFSTOUFS(mp);
412 devvp = ump->um_devvp;
413 vref(devvp);
414 }
415 }
416
417 /*
418 * If mount by non-root, then verify that user has necessary
419 * permissions on the device.
420 *
421 * Permission to update a mount is checked higher, so here we presume
422 * updating the mount is okay (for example, as far as securelevel goes)
423 * which leaves us with the normal check.
424 */
425 if (error == 0) {
426 accessmode = VREAD;
427 if (update ?
428 (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
429 (mp->mnt_flag & MNT_RDONLY) == 0)
430 accessmode |= VWRITE;
431 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
432 error = kauth_authorize_system(l->l_cred, KAUTH_SYSTEM_MOUNT,
433 KAUTH_REQ_SYSTEM_MOUNT_DEVICE, mp, devvp,
434 KAUTH_ARG(accessmode));
435 VOP_UNLOCK(devvp);
436 }
437
438 if (error) {
439 vrele(devvp);
440 return (error);
441 }
442
443 #ifdef WAPBL
444 /* WAPBL can only be enabled on a r/w mount. */
445 if ((mp->mnt_flag & MNT_RDONLY) && !(mp->mnt_iflag & IMNT_WANTRDWR)) {
446 mp->mnt_flag &= ~MNT_LOG;
447 }
448 #else /* !WAPBL */
449 mp->mnt_flag &= ~MNT_LOG;
450 #endif /* !WAPBL */
451
452 if (!update) {
453 int xflags;
454
455 if (mp->mnt_flag & MNT_RDONLY)
456 xflags = FREAD;
457 else
458 xflags = FREAD | FWRITE;
459 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
460 error = VOP_OPEN(devvp, xflags, FSCRED);
461 VOP_UNLOCK(devvp);
462 if (error)
463 goto fail;
464 error = ffs_mountfs(devvp, mp, l);
465 if (error) {
466 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
467 (void)VOP_CLOSE(devvp, xflags, NOCRED);
468 VOP_UNLOCK(devvp);
469 goto fail;
470 }
471
472 ump = VFSTOUFS(mp);
473 fs = ump->um_fs;
474 } else {
475 /*
476 * Update the mount.
477 */
478
479 /*
480 * The initial mount got a reference on this
481 * device, so drop the one obtained via
482 * namei(), above.
483 */
484 vrele(devvp);
485
486 ump = VFSTOUFS(mp);
487 fs = ump->um_fs;
488 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
489 /*
490 * Changing from r/w to r/o
491 */
492 flags = WRITECLOSE;
493 if (mp->mnt_flag & MNT_FORCE)
494 flags |= FORCECLOSE;
495 error = ffs_flushfiles(mp, flags, l);
496 if (error == 0)
497 error = UFS_WAPBL_BEGIN(mp);
498 if (error == 0 &&
499 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
500 fs->fs_clean & FS_WASCLEAN) {
501 if (mp->mnt_flag & MNT_SOFTDEP)
502 fs->fs_flags &= ~FS_DOSOFTDEP;
503 fs->fs_clean = FS_ISCLEAN;
504 (void) ffs_sbupdate(ump, MNT_WAIT);
505 }
506 if (error == 0)
507 UFS_WAPBL_END(mp);
508 if (error)
509 return (error);
510 }
511
512 #ifdef WAPBL
513 if ((mp->mnt_flag & MNT_LOG) == 0) {
514 error = ffs_wapbl_stop(mp, mp->mnt_flag & MNT_FORCE);
515 if (error)
516 return error;
517 }
518 #endif /* WAPBL */
519
520 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
521 /*
522 * Finish change from r/w to r/o
523 */
524 fs->fs_ronly = 1;
525 fs->fs_fmod = 0;
526 }
527
528 if (mp->mnt_flag & MNT_RELOAD) {
529 error = ffs_reload(mp, l->l_cred, l);
530 if (error)
531 return (error);
532 }
533
534 if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
535 /*
536 * Changing from read-only to read/write
537 */
538 #ifndef QUOTA2
539 if (fs->fs_flags & FS_DOQUOTA2) {
540 ump->um_flags |= UFS_QUOTA2;
541 uprintf("%s: options QUOTA2 not enabled%s\n",
542 mp->mnt_stat.f_mntonname,
543 (mp->mnt_flag & MNT_FORCE) ? "" :
544 ", not mounting");
545 return EINVAL;
546 }
547 #endif
548 fs->fs_ronly = 0;
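/*
 * Record that the file system was clean when it went read/write:
 * FS_ISCLEAN shifts up to FS_WASCLEAN.
 */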
549 fs->fs_clean <<= 1;
550 fs->fs_fmod = 1;
551 #ifdef WAPBL
552 if (fs->fs_flags & FS_DOWAPBL) {
553 printf("%s: replaying log to disk\n",
554 mp->mnt_stat.f_mntonname);
555 KDASSERT(mp->mnt_wapbl_replay);
556 error = wapbl_replay_write(mp->mnt_wapbl_replay,
557 devvp);
558 if (error) {
559 return error;
560 }
561 wapbl_replay_stop(mp->mnt_wapbl_replay);
562 fs->fs_clean = FS_WASCLEAN;
563 }
564 #endif /* WAPBL */
565 if (fs->fs_snapinum[0] != 0)
566 ffs_snapshot_mount(mp);
567 }
568
569 #ifdef WAPBL
570 error = ffs_wapbl_start(mp);
571 if (error)
572 return error;
573 #endif /* WAPBL */
574
575 #ifdef QUOTA2
576 if (!fs->fs_ronly) {
577 error = ffs_quota2_mount(mp);
578 if (error) {
579 return error;
580 }
581 }
582 #endif
583 if (args->fspec == NULL)
584 return 0;
585 }
586
587 error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
588 UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
589 if (error == 0)
590 (void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
591 sizeof(fs->fs_fsmnt));
592 fs->fs_flags &= ~FS_DOSOFTDEP;
593 if (fs->fs_fmod != 0) { /* XXX */
594 int err;
595
596 fs->fs_fmod = 0;
597 if (fs->fs_clean & FS_WASCLEAN)
598 fs->fs_time = time_second;
599 else {
600 printf("%s: file system not clean (fs_clean=%#x); "
601 "please fsck(8)\n", mp->mnt_stat.f_mntfromname,
602 fs->fs_clean);
603 printf("%s: lost blocks %" PRId64 " files %d\n",
604 mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
605 fs->fs_pendinginodes);
606 }
607 err = UFS_WAPBL_BEGIN(mp);
608 if (err == 0) {
609 (void) ffs_cgupdate(ump, MNT_WAIT);
610 UFS_WAPBL_END(mp);
611 }
612 }
613 if ((mp->mnt_flag & MNT_SOFTDEP) != 0) {
614 printf("%s: `-o softdep' is no longer supported, "
615 "consider `-o log'\n", mp->mnt_stat.f_mntfromname);
616 mp->mnt_flag &= ~MNT_SOFTDEP;
617 }
618
619 return (error);
620
621 fail:
622 vrele(devvp);
623 return (error);
624 }
625
626 /*
627 * Reload all incore data for a filesystem (used after running fsck on
628 * the root filesystem and finding things to fix). The filesystem must
629 * be mounted read-only.
630 *
631 * Things to do to update the mount:
632 * 1) invalidate all cached meta-data.
633 * 2) re-read superblock from disk.
634 * 3) re-read summary information from disk.
635 * 4) invalidate all inactive vnodes.
636 * 5) invalidate all cached file data.
637 * 6) re-read inode data for all active vnodes.
638 */
639 int
640 ffs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
641 {
642 struct vnode *vp, *mvp, *devvp;
643 struct inode *ip;
644 void *space;
645 struct buf *bp;
646 struct fs *fs, *newfs;
647 struct dkwedge_info dkw;
648 int i, bsize, blks, error;
649 int32_t *lp;
650 struct ufsmount *ump;
651 daddr_t sblockloc;
652
653 if ((mp->mnt_flag & MNT_RDONLY) == 0)
654 return (EINVAL);
655
656 ump = VFSTOUFS(mp);
657 /*
658 * Step 1: invalidate all cached meta-data.
659 */
660 devvp = ump->um_devvp;
661 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
662 error = vinvalbuf(devvp, 0, cred, l, 0, 0);
663 VOP_UNLOCK(devvp);
664 if (error)
665 panic("ffs_reload: dirty1");
666 /*
667 * Step 2: re-read superblock from disk.
668 */
669 fs = ump->um_fs;
670
671 /* XXX we don't handle possibility that superblock moved. */
672 error = bread(devvp, fs->fs_sblockloc / DEV_BSIZE, fs->fs_sbsize,
673 NOCRED, 0, &bp);
674 if (error) {
675 brelse(bp, 0);
676 return (error);
677 }
678 newfs = kmem_alloc(fs->fs_sbsize, KM_SLEEP);
679 memcpy(newfs, bp->b_data, fs->fs_sbsize);
680 #ifdef FFS_EI
681 if (ump->um_flags & UFS_NEEDSWAP) {
682 ffs_sb_swap((struct fs*)bp->b_data, newfs);
683 fs->fs_flags |= FS_SWAPPED;
684 } else
685 #endif
686 fs->fs_flags &= ~FS_SWAPPED;
687 if ((newfs->fs_magic != FS_UFS1_MAGIC &&
688 newfs->fs_magic != FS_UFS2_MAGIC)||
689 newfs->fs_bsize > MAXBSIZE ||
690 newfs->fs_bsize < sizeof(struct fs)) {
691 brelse(bp, 0);
692 kmem_free(newfs, fs->fs_sbsize);
693 return (EIO); /* XXX needs translation */
694 }
695 /* Store off old fs_sblockloc for fs_oldfscompat_read. */
696 sblockloc = fs->fs_sblockloc;
697 /*
698 * Copy pointer fields back into superblock before copying in XXX
699 * new superblock. These should really be in the ufsmount. XXX
700 * Note that important parameters (eg fs_ncg) are unchanged.
701 */
702 newfs->fs_csp = fs->fs_csp;
703 newfs->fs_maxcluster = fs->fs_maxcluster;
704 newfs->fs_contigdirs = fs->fs_contigdirs;
705 newfs->fs_ronly = fs->fs_ronly;
706 newfs->fs_active = fs->fs_active;
707 memcpy(fs, newfs, (u_int)fs->fs_sbsize);
708 brelse(bp, 0);
709 kmem_free(newfs, fs->fs_sbsize);
710
711 /* Recheck for apple UFS filesystem */
712 ump->um_flags &= ~UFS_ISAPPLEUFS;
713 /* First check to see if this is tagged as an Apple UFS filesystem
714 * in the disklabel
715 */
716 if (getdiskinfo(devvp, &dkw) == 0 &&
717 strcmp(dkw.dkw_ptype, DKW_PTYPE_APPLEUFS) == 0)
718 ump->um_flags |= UFS_ISAPPLEUFS;
719 #ifdef APPLE_UFS
720 else {
721 /* Manually look for an apple ufs label, and if a valid one
722 * is found, then treat it like an Apple UFS filesystem anyway
723 *
724 * EINVAL is most probably a blocksize or alignment problem,
725 * it is unlikely that this is an Apple UFS filesystem then.
726 */
727 error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / DEV_BSIZE),
728 APPLEUFS_LABEL_SIZE, cred, 0, &bp);
729 if (error && error != EINVAL) {
730 brelse(bp, 0);
731 return (error);
732 }
733 if (error == 0) {
734 error = ffs_appleufs_validate(fs->fs_fsmnt,
735 (struct appleufslabel *)bp->b_data, NULL);
736 if (error == 0)
737 ump->um_flags |= UFS_ISAPPLEUFS;
738 }
739 brelse(bp, 0);
740 bp = NULL;
741 }
742 #else
743 if (ump->um_flags & UFS_ISAPPLEUFS)
744 return (EIO);
745 #endif
746
747 if (UFS_MPISAPPLEUFS(ump)) {
748 /* see comment about NeXT below */
749 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
750 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
751 mp->mnt_iflag |= IMNT_DTYPE;
752 } else {
753 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
754 ump->um_dirblksiz = DIRBLKSIZ;
755 if (ump->um_maxsymlinklen > 0)
756 mp->mnt_iflag |= IMNT_DTYPE;
757 else
758 mp->mnt_iflag &= ~IMNT_DTYPE;
759 }
760 ffs_oldfscompat_read(fs, ump, sblockloc);
761
762 mutex_enter(&ump->um_lock);
763 ump->um_maxfilesize = fs->fs_maxfilesize;
764 if (fs->fs_flags & ~(FS_KNOWN_FLAGS | FS_INTERNAL)) {
765 uprintf("%s: unknown ufs flags: 0x%08"PRIx32"%s\n",
766 mp->mnt_stat.f_mntonname, fs->fs_flags,
767 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
768 if ((mp->mnt_flag & MNT_FORCE) == 0) {
769 mutex_exit(&ump->um_lock);
770 return (EINVAL);
771 }
772 }
773 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
774 fs->fs_pendingblocks = 0;
775 fs->fs_pendinginodes = 0;
776 }
777 mutex_exit(&ump->um_lock);
778
779 ffs_statvfs(mp, &mp->mnt_stat);
780 /*
781 * Step 3: re-read summary information from disk.
782 */
783 blks = howmany(fs->fs_cssize, fs->fs_fsize);
784 space = fs->fs_csp;
785 for (i = 0; i < blks; i += fs->fs_frag) {
786 bsize = fs->fs_bsize;
787 if (i + fs->fs_frag > blks)
788 bsize = (blks - i) * fs->fs_fsize;
789 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), bsize,
790 NOCRED, 0, &bp);
791 if (error) {
792 brelse(bp, 0);
793 return (error);
794 }
795 #ifdef FFS_EI
796 if (UFS_FSNEEDSWAP(fs))
797 ffs_csum_swap((struct csum *)bp->b_data,
798 (struct csum *)space, bsize);
799 else
800 #endif
801 memcpy(space, bp->b_data, (size_t)bsize);
802 space = (char *)space + bsize;
803 brelse(bp, 0);
804 }
805 if (fs->fs_snapinum[0] != 0)
806 ffs_snapshot_mount(mp);
807 /*
808 * We no longer know anything about clusters per cylinder group.
809 */
810 if (fs->fs_contigsumsize > 0) {
811 lp = fs->fs_maxcluster;
812 for (i = 0; i < fs->fs_ncg; i++)
813 *lp++ = fs->fs_contigsumsize;
814 }
815
816 /* Allocate a marker vnode. */
817 mvp = vnalloc(mp);
818 /*
819 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
820 * and vclean() can be called indirectly
821 */
822 mutex_enter(&mntvnode_lock);
823 loop:
824 for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
825 vmark(mvp, vp);
826 if (vp->v_mount != mp || vismarker(vp))
827 continue;
828 /*
829 * Step 4: invalidate all inactive vnodes.
830 */
831 if (vrecycle(vp, &mntvnode_lock, l)) {
832 mutex_enter(&mntvnode_lock);
833 (void)vunmark(mvp);
834 goto loop;
835 }
836 /*
837 * Step 5: invalidate all cached file data.
838 */
839 mutex_enter(vp->v_interlock);
840 mutex_exit(&mntvnode_lock);
841 if (vget(vp, LK_EXCLUSIVE)) {
842 (void)vunmark(mvp);
843 goto loop;
844 }
845 if (vinvalbuf(vp, 0, cred, l, 0, 0))
846 panic("ffs_reload: dirty2");
847 /*
848 * Step 6: re-read inode data for all active vnodes.
849 */
850 ip = VTOI(vp);
851 error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
852 (int)fs->fs_bsize, NOCRED, 0, &bp);
853 if (error) {
854 brelse(bp, 0);
855 vput(vp);
856 (void)vunmark(mvp);
857 break;
858 }
859 ffs_load_inode(bp, ip, fs, ip->i_number);
860 brelse(bp, 0);
861 vput(vp);
862 mutex_enter(&mntvnode_lock);
863 }
864 mutex_exit(&mntvnode_lock);
865 vnfree(mvp);
866 return (error);
867 }
868
869 /*
870 * Possible superblock locations ordered from most to least likely.
871 */
872 static const int sblock_try[] = SBLOCKSEARCH;
873
874 /*
875 * Common code for mount and mountroot
876 */
877 int
878 ffs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
879 {
880 struct ufsmount *ump;
881 struct buf *bp;
882 struct fs *fs;
883 dev_t dev;
884 struct dkwedge_info dkw;
885 void *space;
886 daddr_t sblockloc, fsblockloc;
887 int blks, fstype;
888 int error, i, bsize, ronly, bset = 0;
889 #ifdef FFS_EI
890 int needswap = 0; /* keep gcc happy */
891 #endif
892 int32_t *lp;
893 kauth_cred_t cred;
894 u_int32_t sbsize = 8192; /* keep gcc happy*/
895 u_int32_t allocsbsize;
896 int32_t fsbsize;
897
898 dev = devvp->v_rdev;
899 cred = l ? l->l_cred : NOCRED;
900
901 /* Flush out any old buffers remaining from a previous use. */
902 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
903 error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
904 VOP_UNLOCK(devvp);
905 if (error)
906 return (error);
907
908 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
909
910 bp = NULL;
911 ump = NULL;
912 fs = NULL;
913 sblockloc = 0;
914 fstype = 0;
915
916 error = fstrans_mount(mp);
917 if (error)
918 return error;
919
920 ump = kmem_zalloc(sizeof(*ump), KM_SLEEP);
921 mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);
922 error = ffs_snapshot_init(ump);
923 if (error)
924 goto out;
925 ump->um_ops = &ffs_ufsops;
926
927 #ifdef WAPBL
928 sbagain:
929 #endif
930 /*
931 * Try reading the superblock in each of its possible locations.
932 */
933 for (i = 0; ; i++) {
934 if (bp != NULL) {
935 brelse(bp, BC_NOCACHE);
936 bp = NULL;
937 }
938 if (sblock_try[i] == -1) {
939 error = EINVAL;
940 fs = NULL;
941 goto out;
942 }
943 error = bread(devvp, sblock_try[i] / DEV_BSIZE, SBLOCKSIZE, cred,
944 0, &bp);
945 if (error) {
946 fs = NULL;
947 goto out;
948 }
949 fs = (struct fs*)bp->b_data;
950 fsblockloc = sblockloc = sblock_try[i];
951 if (fs->fs_magic == FS_UFS1_MAGIC) {
952 sbsize = fs->fs_sbsize;
953 fstype = UFS1;
954 fsbsize = fs->fs_bsize;
955 #ifdef FFS_EI
956 needswap = 0;
957 } else if (fs->fs_magic == FS_UFS1_MAGIC_SWAPPED) {
958 sbsize = bswap32(fs->fs_sbsize);
959 fstype = UFS1;
960 fsbsize = bswap32(fs->fs_bsize);
961 needswap = 1;
962 #endif
963 } else if (fs->fs_magic == FS_UFS2_MAGIC) {
964 sbsize = fs->fs_sbsize;
965 fstype = UFS2;
966 fsbsize = fs->fs_bsize;
967 #ifdef FFS_EI
968 needswap = 0;
969 } else if (fs->fs_magic == FS_UFS2_MAGIC_SWAPPED) {
970 sbsize = bswap32(fs->fs_sbsize);
971 fstype = UFS2;
972 fsbsize = bswap32(fs->fs_bsize);
973 needswap = 1;
974 #endif
975 } else
976 continue;
977
978
979 /* fs->fs_sblockloc isn't defined for old filesystems */
980 if (fstype == UFS1 && !(fs->fs_old_flags & FS_FLAGS_UPDATED)) {
981 if (sblockloc == SBLOCK_UFS2)
982 /*
983 * This is likely to be the first alternate
984 * in a filesystem with 64k blocks.
985 * Don't use it.
986 */
987 continue;
988 fsblockloc = sblockloc;
989 } else {
990 fsblockloc = fs->fs_sblockloc;
991 #ifdef FFS_EI
992 if (needswap)
993 fsblockloc = bswap64(fsblockloc);
994 #endif
995 }
996
997 /* Check we haven't found an alternate superblock */
998 if (fsblockloc != sblockloc)
999 continue;
1000
1001 /* Validate size of superblock */
1002 if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs))
1003 continue;
1004
1005 /* Check that we can handle the file system blocksize */
1006 if (fsbsize > MAXBSIZE) {
1007 printf("ffs_mountfs: block size (%d) > MAXBSIZE (%d)\n",
1008 fsbsize, MAXBSIZE);
1009 continue;
1010 }
1011
1012 /* Ok seems to be a good superblock */
1013 break;
1014 }
1015
1016 fs = kmem_alloc((u_long)sbsize, KM_SLEEP);
1017 memcpy(fs, bp->b_data, sbsize);
1018 ump->um_fs = fs;
1019
1020 #ifdef FFS_EI
1021 if (needswap) {
1022 ffs_sb_swap((struct fs*)bp->b_data, fs);
1023 fs->fs_flags |= FS_SWAPPED;
1024 } else
1025 #endif
1026 fs->fs_flags &= ~FS_SWAPPED;
1027
1028 #ifdef WAPBL
1029 if ((mp->mnt_wapbl_replay == 0) && (fs->fs_flags & FS_DOWAPBL)) {
1030 error = ffs_wapbl_replay_start(mp, fs, devvp);
1031 if (error && (mp->mnt_flag & MNT_FORCE) == 0)
1032 goto out;
1033 if (!error) {
1034 if (!ronly) {
1035 /* XXX fsmnt may be stale. */
1036 printf("%s: replaying log to disk\n",
1037 fs->fs_fsmnt);
1038 error = wapbl_replay_write(mp->mnt_wapbl_replay,
1039 devvp);
1040 if (error)
1041 goto out;
1042 wapbl_replay_stop(mp->mnt_wapbl_replay);
1043 fs->fs_clean = FS_WASCLEAN;
1044 } else {
1045 /* XXX fsmnt may be stale */
1046 printf("%s: replaying log to memory\n",
1047 fs->fs_fsmnt);
1048 }
1049
1050 /* Force a re-read of the superblock */
1051 brelse(bp, BC_INVAL);
1052 bp = NULL;
1053 kmem_free(fs, sbsize);
1054 fs = NULL;
1055 goto sbagain;
1056 }
1057 }
1058 #else /* !WAPBL */
1059 if ((fs->fs_flags & FS_DOWAPBL) && (mp->mnt_flag & MNT_FORCE) == 0) {
1060 error = EPERM;
1061 goto out;
1062 }
1063 #endif /* !WAPBL */
1064
1065 ffs_oldfscompat_read(fs, ump, sblockloc);
1066 ump->um_maxfilesize = fs->fs_maxfilesize;
1067
1068 if (fs->fs_flags & ~(FS_KNOWN_FLAGS | FS_INTERNAL)) {
1069 uprintf("%s: unknown ufs flags: 0x%08"PRIx32"%s\n",
1070 mp->mnt_stat.f_mntonname, fs->fs_flags,
1071 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
1072 if ((mp->mnt_flag & MNT_FORCE) == 0) {
1073 error = EINVAL;
1074 goto out;
1075 }
1076 }
1077
1078 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1079 fs->fs_pendingblocks = 0;
1080 fs->fs_pendinginodes = 0;
1081 }
1082
1083 ump->um_fstype = fstype;
1084 if (fs->fs_sbsize < SBLOCKSIZE)
1085 brelse(bp, BC_INVAL);
1086 else
1087 brelse(bp, 0);
1088 bp = NULL;
1089
1090 /* First check to see if this is tagged as an Apple UFS filesystem
1091 * in the disklabel
1092 */
1093 if (getdiskinfo(devvp, &dkw) == 0 &&
1094 strcmp(dkw.dkw_ptype, DKW_PTYPE_APPLEUFS) == 0)
1095 ump->um_flags |= UFS_ISAPPLEUFS;
1096 #ifdef APPLE_UFS
1097 else {
1098 /* Manually look for an apple ufs label, and if a valid one
1099 * is found, then treat it like an Apple UFS filesystem anyway
1100 */
1101 error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / DEV_BSIZE),
1102 APPLEUFS_LABEL_SIZE, cred, 0, &bp);
1103 if (error)
1104 goto out;
1105 error = ffs_appleufs_validate(fs->fs_fsmnt,
1106 (struct appleufslabel *)bp->b_data, NULL);
1107 if (error == 0) {
1108 ump->um_flags |= UFS_ISAPPLEUFS;
1109 }
1110 brelse(bp, 0);
1111 bp = NULL;
1112 }
1113 #else
1114 if (ump->um_flags & UFS_ISAPPLEUFS) {
1115 error = EINVAL;
1116 goto out;
1117 }
1118 #endif
1119
1120 #if 0
1121 /*
1122 * XXX This code changes the behaviour of mounting dirty filesystems, to
1123 * XXX require "mount -f ..." to mount them. This doesn't match what
1124 * XXX mount(8) describes and is disabled for now.
1125 */
1126 /*
1127 * If the file system is not clean, don't allow it to be mounted
1128 * unless MNT_FORCE is specified. (Note: MNT_FORCE is always set
1129 * for the root file system.)
1130 */
1131 if (fs->fs_flags & FS_DOWAPBL) {
1132 /*
1133 * wapbl normally expects to be FS_WASCLEAN when the FS_DOWAPBL
1134 * bit is set, although there's a window in unmount where it
1135 * could be FS_ISCLEAN
1136 */
1137 if ((mp->mnt_flag & MNT_FORCE) == 0 &&
1138 (fs->fs_clean & (FS_WASCLEAN | FS_ISCLEAN)) == 0) {
1139 error = EPERM;
1140 goto out;
1141 }
1142 } else
1143 if ((fs->fs_clean & FS_ISCLEAN) == 0 &&
1144 (mp->mnt_flag & MNT_FORCE) == 0) {
1145 error = EPERM;
1146 goto out;
1147 }
1148 #endif
1149
1150 /*
1151 * verify that we can access the last block in the fs
1152 * if we're mounting read/write.
1153 */
1154
1155 if (!ronly) {
1156 error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize,
1157 cred, 0, &bp);
1158 if (bp->b_bcount != fs->fs_fsize)
1159 error = EINVAL;
1160 if (error) {
1161 bset = BC_INVAL;
1162 goto out;
1163 }
1164 brelse(bp, BC_INVAL);
1165 bp = NULL;
1166 }
1167
1168 fs->fs_ronly = ronly;
1169 /* Don't bump fs_clean if we're replaying journal */
1170 if (!((fs->fs_flags & FS_DOWAPBL) && (fs->fs_clean & FS_WASCLEAN)))
1171 if (ronly == 0) {
1172 fs->fs_clean <<= 1;
1173 fs->fs_fmod = 1;
1174 }
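/*
 * Allocate one contiguous buffer to hold the cylinder group summary
 * array (fs_csp), the per-cg cluster summary (fs_maxcluster) and the
 * per-cg fs_contigdirs counters, then read the summary information
 * in from disk.
 */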
1175 bsize = fs->fs_cssize;
1176 blks = howmany(bsize, fs->fs_fsize);
1177 if (fs->fs_contigsumsize > 0)
1178 bsize += fs->fs_ncg * sizeof(int32_t);
1179 bsize += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1180 allocsbsize = bsize;
1181 space = kmem_alloc((u_long)allocsbsize, KM_SLEEP);
1182 fs->fs_csp = space;
1183 for (i = 0; i < blks; i += fs->fs_frag) {
1184 bsize = fs->fs_bsize;
1185 if (i + fs->fs_frag > blks)
1186 bsize = (blks - i) * fs->fs_fsize;
1187 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), bsize,
1188 cred, 0, &bp);
1189 if (error) {
1190 kmem_free(fs->fs_csp, allocsbsize);
1191 goto out;
1192 }
1193 #ifdef FFS_EI
1194 if (needswap)
1195 ffs_csum_swap((struct csum *)bp->b_data,
1196 (struct csum *)space, bsize);
1197 else
1198 #endif
1199 memcpy(space, bp->b_data, (u_int)bsize);
1200
1201 space = (char *)space + bsize;
1202 brelse(bp, 0);
1203 bp = NULL;
1204 }
1205 if (fs->fs_contigsumsize > 0) {
1206 fs->fs_maxcluster = lp = space;
1207 for (i = 0; i < fs->fs_ncg; i++)
1208 *lp++ = fs->fs_contigsumsize;
1209 space = lp;
1210 }
1211 bsize = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1212 fs->fs_contigdirs = space;
1213 space = (char *)space + bsize;
1214 memset(fs->fs_contigdirs, 0, bsize);
1215 /* Compatibility for old filesystems - XXX */
1216 if (fs->fs_avgfilesize <= 0)
1217 fs->fs_avgfilesize = AVFILESIZ;
1218 if (fs->fs_avgfpdir <= 0)
1219 fs->fs_avgfpdir = AFPDIR;
1220 fs->fs_active = NULL;
1221 mp->mnt_data = ump;
1222 mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
1223 mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_FFS);
1224 mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
1225 mp->mnt_stat.f_namemax = FFS_MAXNAMLEN;
1226 if (UFS_MPISAPPLEUFS(ump)) {
1227 /* NeXT used to keep short symlinks in the inode even
1228 * when using FS_42INODEFMT. In that case fs->fs_maxsymlinklen
1229 * is probably -1, but we still need to be able to identify
1230 * short symlinks.
1231 */
1232 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
1233 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
1234 mp->mnt_iflag |= IMNT_DTYPE;
1235 } else {
1236 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
1237 ump->um_dirblksiz = DIRBLKSIZ;
1238 if (ump->um_maxsymlinklen > 0)
1239 mp->mnt_iflag |= IMNT_DTYPE;
1240 else
1241 mp->mnt_iflag &= ~IMNT_DTYPE;
1242 }
1243 mp->mnt_fs_bshift = fs->fs_bshift;
1244 mp->mnt_dev_bshift = DEV_BSHIFT; /* XXX */
1245 mp->mnt_flag |= MNT_LOCAL;
1246 mp->mnt_iflag |= IMNT_MPSAFE;
1247 #ifdef FFS_EI
1248 if (needswap)
1249 ump->um_flags |= UFS_NEEDSWAP;
1250 #endif
1251 ump->um_mountp = mp;
1252 ump->um_dev = dev;
1253 ump->um_devvp = devvp;
1254 ump->um_nindir = fs->fs_nindir;
1255 ump->um_lognindir = ffs(fs->fs_nindir) - 1;
1256 ump->um_bptrtodb = fs->fs_fshift - DEV_BSHIFT;
1257 ump->um_seqinc = fs->fs_frag;
1258 for (i = 0; i < MAXQUOTAS; i++)
1259 ump->um_quotas[i] = NULLVP;
1260 devvp->v_specmountpoint = mp;
1261 if (ronly == 0 && fs->fs_snapinum[0] != 0)
1262 ffs_snapshot_mount(mp);
1263 #ifdef WAPBL
1264 if (!ronly) {
1265 KDASSERT(fs->fs_ronly == 0);
1266 /*
1267 * ffs_wapbl_start() needs mp->mnt_stat initialised if it
1268 * needs to create a new log file in-filesystem.
1269 */
1270 ffs_statvfs(mp, &mp->mnt_stat);
1271
1272 error = ffs_wapbl_start(mp);
1273 if (error) {
1274 kmem_free(fs->fs_csp, allocsbsize);
1275 goto out;
1276 }
1277 }
1278 #endif /* WAPBL */
1279 if (ronly == 0) {
1280 #ifdef QUOTA2
1281 error = ffs_quota2_mount(mp);
1282 if (error) {
1283 kmem_free(fs->fs_csp, allocsbsize);
1284 goto out;
1285 }
1286 #else
1287 if (fs->fs_flags & FS_DOQUOTA2) {
1288 ump->um_flags |= UFS_QUOTA2;
1289 uprintf("%s: options QUOTA2 not enabled%s\n",
1290 mp->mnt_stat.f_mntonname,
1291 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
1292 if ((mp->mnt_flag & MNT_FORCE) == 0) {
1293 error = EINVAL;
1294 kmem_free(fs->fs_csp, allocsbsize);
1295 goto out;
1296 }
1297 }
1298 #endif
1299 }
1300 #ifdef UFS_EXTATTR
1301 /*
1302 * Initialize file-backed extended attributes on UFS1 file
1303 * systems.
1304 */
1305 if (ump->um_fstype == UFS1)
1306 ufs_extattr_uepm_init(&ump->um_extattr);
1307 #endif /* UFS_EXTATTR */
1308
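/* Set up background discarding of freed blocks ("-o discard"). */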
1309 if (mp->mnt_flag & MNT_DISCARD)
1310 ump->um_discarddata = ffs_discard_init(devvp, fs);
1311
1312 return (0);
1313 out:
1314 #ifdef WAPBL
1315 if (mp->mnt_wapbl_replay) {
1316 wapbl_replay_stop(mp->mnt_wapbl_replay);
1317 wapbl_replay_free(mp->mnt_wapbl_replay);
1318 mp->mnt_wapbl_replay = 0;
1319 }
1320 #endif
1321
1322 fstrans_unmount(mp);
1323 if (fs)
1324 kmem_free(fs, fs->fs_sbsize);
1325 devvp->v_specmountpoint = NULL;
1326 if (bp)
1327 brelse(bp, bset);
1328 if (ump) {
1329 if (ump->um_oldfscompat)
1330 kmem_free(ump->um_oldfscompat, 512 + 3*sizeof(int32_t));
1331 mutex_destroy(&ump->um_lock);
1332 kmem_free(ump, sizeof(*ump));
1333 mp->mnt_data = NULL;
1334 }
1335 return (error);
1336 }
1337
1338 /*
1339 * Sanity checks for loading old filesystem superblocks.
1340 * See ffs_oldfscompat_write below for unwound actions.
1341 *
1342 * XXX - Parts get retired eventually.
1343 * Unfortunately new bits get added.
1344 */
1345 static void
1346 ffs_oldfscompat_read(struct fs *fs, struct ufsmount *ump, daddr_t sblockloc)
1347 {
1348 off_t maxfilesize;
1349 int32_t *extrasave;
1350
1351 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1352 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1353 return;
1354
1355 if (!ump->um_oldfscompat)
1356 ump->um_oldfscompat = kmem_alloc(512 + 3*sizeof(int32_t),
1357 KM_SLEEP);
1358
1359 memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
1360 extrasave = ump->um_oldfscompat;
1361 extrasave += 512/sizeof(int32_t);
1362 extrasave[0] = fs->fs_old_npsect;
1363 extrasave[1] = fs->fs_old_interleave;
1364 extrasave[2] = fs->fs_old_trackskew;
1365
1366 /* These fields will be overwritten by their
1367 * original values in fs_oldfscompat_write, so it is harmless
1368 * to modify them here.
1369 */
1370 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
1371 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
1372 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
1373 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
1374
1375 fs->fs_maxbsize = fs->fs_bsize;
1376 fs->fs_time = fs->fs_old_time;
1377 fs->fs_size = fs->fs_old_size;
1378 fs->fs_dsize = fs->fs_old_dsize;
1379 fs->fs_csaddr = fs->fs_old_csaddr;
1380 fs->fs_sblockloc = sblockloc;
1381
1382 fs->fs_flags = fs->fs_old_flags | (fs->fs_flags & FS_INTERNAL);
1383
1384 if (fs->fs_old_postblformat == FS_42POSTBLFMT) {
1385 fs->fs_old_nrpos = 8;
1386 fs->fs_old_npsect = fs->fs_old_nsect;
1387 fs->fs_old_interleave = 1;
1388 fs->fs_old_trackskew = 0;
1389 }
1390
1391 if (fs->fs_old_inodefmt < FS_44INODEFMT) {
1392 fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
1393 fs->fs_qbmask = ~fs->fs_bmask;
1394 fs->fs_qfmask = ~fs->fs_fmask;
1395 }
1396
1397 maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;
1398 if (fs->fs_maxfilesize > maxfilesize)
1399 fs->fs_maxfilesize = maxfilesize;
1400
1401 /* Compatibility for old filesystems */
1402 if (fs->fs_avgfilesize <= 0)
1403 fs->fs_avgfilesize = AVFILESIZ;
1404 if (fs->fs_avgfpdir <= 0)
1405 fs->fs_avgfpdir = AFPDIR;
1406
1407 #if 0
1408 if (bigcgs) {
1409 fs->fs_save_cgsize = fs->fs_cgsize;
1410 fs->fs_cgsize = fs->fs_bsize;
1411 }
1412 #endif
1413 }
1414
1415 /*
1416 * Unwinding superblock updates for old filesystems.
1417 * See ffs_oldfscompat_read above for details.
1418 *
1419 * XXX - Parts get retired eventually.
1420 * Unfortunately new bits get added.
1421 */
1422 static void
1423 ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
1424 {
1425 int32_t *extrasave;
1426
1427 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1428 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1429 return;
1430
1431 fs->fs_old_time = fs->fs_time;
1432 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
1433 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
1434 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
1435 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
1436 fs->fs_old_flags = fs->fs_flags;
1437
1438 #if 0
1439 if (bigcgs) {
1440 fs->fs_cgsize = fs->fs_save_cgsize;
1441 }
1442 #endif
1443
1444 memcpy(&fs->fs_old_postbl_start, ump->um_oldfscompat, 512);
1445 extrasave = ump->um_oldfscompat;
1446 extrasave += 512/sizeof(int32_t);
1447 fs->fs_old_npsect = extrasave[0];
1448 fs->fs_old_interleave = extrasave[1];
1449 fs->fs_old_trackskew = extrasave[2];
1450
1451 }
1452
1453 /*
1454 * unmount vfs operation
1455 */
1456 int
1457 ffs_unmount(struct mount *mp, int mntflags)
1458 {
1459 struct lwp *l = curlwp;
1460 struct ufsmount *ump = VFSTOUFS(mp);
1461 struct fs *fs = ump->um_fs;
1462 int error, flags;
1463 u_int32_t bsize;
1464 #ifdef WAPBL
1465 extern int doforce;
1466 #endif
1467
1468 if (ump->um_discarddata) {
1469 ffs_discard_finish(ump->um_discarddata, mntflags);
1470 ump->um_discarddata = NULL;
1471 }
1472
1473 flags = 0;
1474 if (mntflags & MNT_FORCE)
1475 flags |= FORCECLOSE;
1476 if ((error = ffs_flushfiles(mp, flags, l)) != 0)
1477 return (error);
1478 error = UFS_WAPBL_BEGIN(mp);
1479 if (error == 0)
1480 if (fs->fs_ronly == 0 &&
1481 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
1482 fs->fs_clean & FS_WASCLEAN) {
1483 fs->fs_clean = FS_ISCLEAN;
1484 fs->fs_fmod = 0;
1485 (void) ffs_sbupdate(ump, MNT_WAIT);
1486 }
1487 if (error == 0)
1488 UFS_WAPBL_END(mp);
1489 #ifdef WAPBL
1490 KASSERT(!(mp->mnt_wapbl_replay && mp->mnt_wapbl));
1491 if (mp->mnt_wapbl_replay) {
1492 KDASSERT(fs->fs_ronly);
1493 wapbl_replay_stop(mp->mnt_wapbl_replay);
1494 wapbl_replay_free(mp->mnt_wapbl_replay);
1495 mp->mnt_wapbl_replay = 0;
1496 }
1497 error = ffs_wapbl_stop(mp, doforce && (mntflags & MNT_FORCE));
1498 if (error) {
1499 return error;
1500 }
1501 #endif /* WAPBL */
1502
1503 if (ump->um_devvp->v_type != VBAD)
1504 ump->um_devvp->v_specmountpoint = NULL;
1505 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1506 (void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD | FWRITE,
1507 NOCRED);
1508 vput(ump->um_devvp);
1509
1510 bsize = fs->fs_cssize;
1511 if (fs->fs_contigsumsize > 0)
1512 bsize += fs->fs_ncg * sizeof(int32_t);
1513 bsize += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1514 kmem_free(fs->fs_csp, bsize);
1515
1516 kmem_free(fs, fs->fs_sbsize);
1517 if (ump->um_oldfscompat != NULL)
1518 kmem_free(ump->um_oldfscompat, 512 + 3*sizeof(int32_t));
1519 mutex_destroy(&ump->um_lock);
1520 ffs_snapshot_fini(ump);
1521 kmem_free(ump, sizeof(*ump));
1522 mp->mnt_data = NULL;
1523 mp->mnt_flag &= ~MNT_LOCAL;
1524 fstrans_unmount(mp);
1525 return (0);
1526 }
1527
1528 /*
1529 * Flush out all the files in a filesystem.
1530 */
1531 int
1532 ffs_flushfiles(struct mount *mp, int flags, struct lwp *l)
1533 {
1534 extern int doforce;
1535 struct ufsmount *ump;
1536 int error;
1537
1538 if (!doforce)
1539 flags &= ~FORCECLOSE;
1540 ump = VFSTOUFS(mp);
1541 #ifdef QUOTA
1542 if ((error = quota1_umount(mp, flags)) != 0)
1543 return (error);
1544 #endif
1545 #ifdef QUOTA2
1546 if ((error = quota2_umount(mp, flags)) != 0)
1547 return (error);
1548 #endif
1549 #ifdef UFS_EXTATTR
1550 if (ump->um_fstype == UFS1) {
1551 if (ump->um_extattr.uepm_flags & UFS_EXTATTR_UEPM_STARTED)
1552 ufs_extattr_stop(mp, l);
1553 if (ump->um_extattr.uepm_flags & UFS_EXTATTR_UEPM_INITIALIZED)
1554 ufs_extattr_uepm_destroy(&ump->um_extattr);
1555 }
1556 #endif
1557 if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
1558 return (error);
1559 ffs_snapshot_unmount(mp);
1560 /*
1561 * Flush all the files.
1562 */
1563 error = vflush(mp, NULLVP, flags);
1564 if (error)
1565 return (error);
1566 /*
1567 * Flush filesystem metadata.
1568 */
1569 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1570 error = VOP_FSYNC(ump->um_devvp, l->l_cred, FSYNC_WAIT, 0, 0);
1571 VOP_UNLOCK(ump->um_devvp);
1572 if (flags & FORCECLOSE) /* XXXDBJ */
1573 error = 0;
1574
1575 #ifdef WAPBL
1576 if (error)
1577 return error;
1578 if (mp->mnt_wapbl) {
1579 error = wapbl_flush(mp->mnt_wapbl, 1);
1580 if (flags & FORCECLOSE)
1581 error = 0;
1582 }
1583 #endif
1584
1585 return (error);
1586 }
1587
1588 /*
1589 * Get file system statistics.
1590 */
1591 int
1592 ffs_statvfs(struct mount *mp, struct statvfs *sbp)
1593 {
1594 struct ufsmount *ump;
1595 struct fs *fs;
1596
1597 ump = VFSTOUFS(mp);
1598 fs = ump->um_fs;
1599 mutex_enter(&ump->um_lock);
1600 sbp->f_bsize = fs->fs_bsize;
1601 sbp->f_frsize = fs->fs_fsize;
1602 sbp->f_iosize = fs->fs_bsize;
1603 sbp->f_blocks = fs->fs_dsize;
1604 sbp->f_bfree = blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
1605 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
1606 sbp->f_bresvd = ((u_int64_t) fs->fs_dsize * (u_int64_t)
1607 fs->fs_minfree) / (u_int64_t) 100;
1608 if (sbp->f_bfree > sbp->f_bresvd)
1609 sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
1610 else
1611 sbp->f_bavail = 0;
1612 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
1613 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1614 sbp->f_favail = sbp->f_ffree;
1615 sbp->f_fresvd = 0;
1616 mutex_exit(&ump->um_lock);
1617 copy_statvfs_info(sbp, mp);
1618
1619 return (0);
1620 }
1621
1622 /*
1623 * Go through the disk queues to initiate sandbagged IO;
1624 * go through the inodes to write those that have been modified;
1625 * initiate the writing of the super block if it has been modified.
1626 *
1627 * Note: we are always called with the filesystem marked `MPBUSY'.
1628 */
1629 int
1630 ffs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
1631 {
1632 struct vnode *vp, *mvp, *nvp;
1633 struct inode *ip;
1634 struct ufsmount *ump = VFSTOUFS(mp);
1635 struct fs *fs;
1636 int error, allerror = 0;
1637 bool is_suspending;
1638
1639 fs = ump->um_fs;
1640 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
1641 printf("fs = %s\n", fs->fs_fsmnt);
1642 panic("update: rofs mod");
1643 }
1644
1645 /* Allocate a marker vnode. */
1646 mvp = vnalloc(mp);
1647
1648 fstrans_start(mp, FSTRANS_SHARED);
1649 is_suspending = (fstrans_getstate(mp) == FSTRANS_SUSPENDING);
1650 /*
1651 * Write back each (modified) inode.
1652 */
1653 mutex_enter(&mntvnode_lock);
1654 loop:
1655 /*
1656 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
1657 * and vclean() can be called indirectly
1658 */
1659 for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
1660 nvp = TAILQ_NEXT(vp, v_mntvnodes);
1661 /*
1662 * If the vnode that we are about to sync is no longer
1663 * associated with this mount point, start over.
1664 */
1665 if (vp->v_mount != mp)
1666 goto loop;
1667 /*
1668 * Don't interfere with concurrent scans of this FS.
1669 */
1670 if (vismarker(vp))
1671 continue;
1672 mutex_enter(vp->v_interlock);
1673 ip = VTOI(vp);
1674
1675 /*
1676 * Skip the vnode/inode if inaccessible.
1677 */
1678 if (ip == NULL || (vp->v_iflag & (VI_XLOCK | VI_CLEAN)) != 0 ||
1679 vp->v_type == VNON) {
1680 mutex_exit(vp->v_interlock);
1681 continue;
1682 }
1683
1684 /*
1685 * We deliberately update inode times here. This will
1686 * prevent a massive queue of updates accumulating, only
1687 * to be handled by a call to unmount.
1688 *
1689 * XXX It would be better to have the syncer trickle these
1690 * out. Adjustment needed to allow registering vnodes for
1691 * sync when the vnode is clean, but the inode dirty. Or
1692 * have ufs itself trickle out inode updates.
1693 *
1694 * If doing a lazy sync, we don't care about metadata or
1695 * data updates, because they are handled by each vnode's
1696 * synclist entry. In this case we are only interested in
1697 * writing back modified inodes.
1698 */
1699 if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE |
1700 IN_MODIFY | IN_MODIFIED | IN_ACCESSED)) == 0 &&
1701 (waitfor == MNT_LAZY || (LIST_EMPTY(&vp->v_dirtyblkhd) &&
1702 UVM_OBJ_IS_CLEAN(&vp->v_uobj)))) {
1703 mutex_exit(vp->v_interlock);
1704 continue;
1705 }
1706 if (vp->v_type == VBLK && is_suspending) {
1707 mutex_exit(vp->v_interlock);
1708 continue;
1709 }
1710 vmark(mvp, vp);
1711 mutex_exit(&mntvnode_lock);
1712 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT);
1713 if (error) {
1714 mutex_enter(&mntvnode_lock);
1715 nvp = vunmark(mvp);
1716 if (error == ENOENT) {
1717 goto loop;
1718 }
1719 continue;
1720 }
1721 if (waitfor == MNT_LAZY) {
1722 error = UFS_WAPBL_BEGIN(vp->v_mount);
1723 if (!error) {
1724 error = ffs_update(vp, NULL, NULL,
1725 UPDATE_CLOSE);
1726 UFS_WAPBL_END(vp->v_mount);
1727 }
1728 } else {
1729 error = VOP_FSYNC(vp, cred, FSYNC_NOLOG |
1730 (waitfor == MNT_WAIT ? FSYNC_WAIT : 0), 0, 0);
1731 }
1732 if (error)
1733 allerror = error;
1734 vput(vp);
1735 mutex_enter(&mntvnode_lock);
1736 nvp = vunmark(mvp);
1737 }
1738 mutex_exit(&mntvnode_lock);
1739 /*
1740 * Force stale file system control information to be flushed.
1741 */
1742 if (waitfor != MNT_LAZY && (ump->um_devvp->v_numoutput > 0 ||
1743 !LIST_EMPTY(&ump->um_devvp->v_dirtyblkhd))) {
1744 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1745 if ((error = VOP_FSYNC(ump->um_devvp, cred,
1746 (waitfor == MNT_WAIT ? FSYNC_WAIT : 0) | FSYNC_NOLOG,
1747 0, 0)) != 0)
1748 allerror = error;
1749 VOP_UNLOCK(ump->um_devvp);
1750 if (allerror == 0 && waitfor == MNT_WAIT && !mp->mnt_wapbl) {
1751 mutex_enter(&mntvnode_lock);
1752 goto loop;
1753 }
1754 }
1755 #if defined(QUOTA) || defined(QUOTA2)
1756 qsync(mp);
1757 #endif
1758 /*
1759 * Write back modified superblock.
1760 */
1761 if (fs->fs_fmod != 0) {
1762 fs->fs_fmod = 0;
1763 fs->fs_time = time_second;
1764 error = UFS_WAPBL_BEGIN(mp);
1765 if (error)
1766 allerror = error;
1767 else {
1768 if ((error = ffs_cgupdate(ump, waitfor)))
1769 allerror = error;
1770 UFS_WAPBL_END(mp);
1771 }
1772 }
1773
1774 #ifdef WAPBL
1775 if (mp->mnt_wapbl) {
1776 error = wapbl_flush(mp->mnt_wapbl, 0);
1777 if (error)
1778 allerror = error;
1779 }
1780 #endif
1781
1782 fstrans_done(mp);
1783 vnfree(mvp);
1784 return (allerror);
1785 }
1786
1787 /*
1788 * Look up a FFS dinode number to find its incore vnode, otherwise read it
1789 * in from disk. If it is in core, wait for the lock bit to clear, then
1790 * return the inode locked. Detection and handling of mount points must be
1791 * done by the calling routine.
1792 */
1793 int
1794 ffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
1795 {
1796 struct fs *fs;
1797 struct inode *ip;
1798 struct ufsmount *ump;
1799 struct buf *bp;
1800 struct vnode *vp;
1801 dev_t dev;
1802 int error;
1803
1804 ump = VFSTOUFS(mp);
1805 dev = ump->um_dev;
1806
1807 retry:
1808 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
1809 return (0);
1810
1811 /* Allocate a new vnode/inode. */
1812 error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, NULL, &vp);
1813 if (error) {
1814 *vpp = NULL;
1815 return (error);
1816 }
1817 ip = pool_cache_get(ffs_inode_cache, PR_WAITOK);
1818
1819 /*
1820 * If someone beat us to it, put back the freshly allocated
1821 * vnode/inode pair and retry.
1822 */
1823 mutex_enter(&ufs_hashlock);
1824 if (ufs_ihashget(dev, ino, 0) != NULL) {
1825 mutex_exit(&ufs_hashlock);
1826 ungetnewvnode(vp);
1827 pool_cache_put(ffs_inode_cache, ip);
1828 goto retry;
1829 }
1830
1831 vp->v_vflag |= VV_LOCKSWORK;
1832
1833 /*
1834 * XXX MFS ends up here, too, to allocate an inode. Should we
1835 * XXX create another pool for MFS inodes?
1836 */
1837
1838 memset(ip, 0, sizeof(struct inode));
1839 vp->v_data = ip;
1840 ip->i_vnode = vp;
1841 ip->i_ump = ump;
1842 ip->i_fs = fs = ump->um_fs;
1843 ip->i_dev = dev;
1844 ip->i_number = ino;
1845 #if defined(QUOTA) || defined(QUOTA2)
1846 ufsquota_init(ip);
1847 #endif
1848
1849 /*
1850 * Initialize genfs node, we might proceed to destroy it in
1851 * error branches.
1852 */
1853 genfs_node_init(vp, &ffs_genfsops);
1854
1855 /*
1856 * Put it onto its hash chain and lock it so that other requests for
1857 * this inode will block if they arrive while we are sleeping waiting
1858 * for old data structures to be purged or for the contents of the
1859 * disk portion of this inode to be read.
1860 */
1861
1862 ufs_ihashins(ip);
1863 mutex_exit(&ufs_hashlock);
1864
1865 /* Read in the disk contents for the inode, copy into the inode. */
1866 error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1867 (int)fs->fs_bsize, NOCRED, 0, &bp);
1868 if (error) {
1869
1870 /*
1871 * The inode does not contain anything useful, so it would
1872 * be misleading to leave it on its hash chain. With mode
1873 * still zero, it will be unlinked and returned to the free
1874 * list by vput().
1875 */
1876
1877 vput(vp);
1878 brelse(bp, 0);
1879 *vpp = NULL;
1880 return (error);
1881 }
1882 if (ip->i_ump->um_fstype == UFS1)
1883 ip->i_din.ffs1_din = pool_cache_get(ffs_dinode1_cache,
1884 PR_WAITOK);
1885 else
1886 ip->i_din.ffs2_din = pool_cache_get(ffs_dinode2_cache,
1887 PR_WAITOK);
1888 ffs_load_inode(bp, ip, fs, ino);
1889 brelse(bp, 0);
1890
1891 /*
1892 * Initialize the vnode from the inode, check for aliases.
1893 * Note that the underlying vnode may have changed.
1894 */
1895
1896 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
1897
1898 /*
1899 * Finish inode initialization now that aliasing has been resolved.
1900 */
1901
1902 ip->i_devvp = ump->um_devvp;
1903 vref(ip->i_devvp);
1904
1905 /*
1906 * Ensure that uid and gid are correct. This is a temporary
1907 * fix until fsck has been changed to do the update.
1908 */
1909
1910 if (fs->fs_old_inodefmt < FS_44INODEFMT) { /* XXX */
1911 ip->i_uid = ip->i_ffs1_ouid; /* XXX */
1912 ip->i_gid = ip->i_ffs1_ogid; /* XXX */
1913 } /* XXX */
1914 uvm_vnp_setsize(vp, ip->i_size);
1915 *vpp = vp;
1916 return (0);
1917 }
1918
1919 /*
1920 * File handle to vnode
1921 *
1922 * Have to be really careful about stale file handles:
1923 * - check that the inode number is valid
1924 * - call ffs_vget() to get the locked inode
1925 * - check for an unallocated inode (i_mode == 0)
1926 * - check that the given client host has export rights and return
1927 * those rights via exflagsp and credanonp
1928 */
1929 int
1930 ffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
1931 {
1932 struct ufid ufh;
1933 struct fs *fs;
1934
1935 if (fhp->fid_len != sizeof(struct ufid))
1936 return EINVAL;
1937
1938 memcpy(&ufh, fhp, sizeof(ufh));
1939 fs = VFSTOUFS(mp)->um_fs;
1940 if (ufh.ufid_ino < ROOTINO ||
1941 ufh.ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1942 return (ESTALE);
1943 return (ufs_fhtovp(mp, &ufh, vpp));
1944 }
1945
1946 /*
1947 * Vnode pointer to File handle
1948 */
1949 /* ARGSUSED */
1950 int
1951 ffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
1952 {
1953 struct inode *ip;
1954 struct ufid ufh;
1955
1956 if (*fh_size < sizeof(struct ufid)) {
1957 *fh_size = sizeof(struct ufid);
1958 return E2BIG;
1959 }
1960 ip = VTOI(vp);
1961 *fh_size = sizeof(struct ufid);
1962 memset(&ufh, 0, sizeof(ufh));
1963 ufh.ufid_len = sizeof(struct ufid);
1964 ufh.ufid_ino = ip->i_number;
1965 ufh.ufid_gen = ip->i_gen;
1966 memcpy(fhp, &ufh, sizeof(ufh));
1967 return (0);
1968 }
1969
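/*
 * Initialize the inode and dinode pool caches and the shared UFS code.
 * Reference counted via ffs_initcount so the work is done only once.
 */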
1970 void
1971 ffs_init(void)
1972 {
1973 if (ffs_initcount++ > 0)
1974 return;
1975
1976 ffs_inode_cache = pool_cache_init(sizeof(struct inode), 0, 0, 0,
1977 "ffsino", NULL, IPL_NONE, NULL, NULL, NULL);
1978 ffs_dinode1_cache = pool_cache_init(sizeof(struct ufs1_dinode), 0, 0, 0,
1979 "ffsdino1", NULL, IPL_NONE, NULL, NULL, NULL);
1980 ffs_dinode2_cache = pool_cache_init(sizeof(struct ufs2_dinode), 0, 0, 0,
1981 "ffsdino2", NULL, IPL_NONE, NULL, NULL, NULL);
1982 ufs_init();
1983 }
1984
1985 void
1986 ffs_reinit(void)
1987 {
1988
1989 ufs_reinit();
1990 }
1991
1992 void
1993 ffs_done(void)
1994 {
1995 if (--ffs_initcount > 0)
1996 return;
1997
1998 ufs_done();
1999 pool_cache_destroy(ffs_dinode2_cache);
2000 pool_cache_destroy(ffs_dinode1_cache);
2001 pool_cache_destroy(ffs_inode_cache);
2002 }
2003
2004 /*
2005 * Write a superblock and associated information back to disk.
2006 */
2007 int
2008 ffs_sbupdate(struct ufsmount *mp, int waitfor)
2009 {
2010 struct fs *fs = mp->um_fs;
2011 struct buf *bp;
2012 int error = 0;
2013 u_int32_t saveflag;
2014
2015 error = ffs_getblk(mp->um_devvp,
2016 fs->fs_sblockloc / DEV_BSIZE, FFS_NOBLK,
2017 fs->fs_sbsize, false, &bp);
2018 if (error)
2019 return error;
2020 saveflag = fs->fs_flags & FS_INTERNAL;
2021 fs->fs_flags &= ~FS_INTERNAL;
2022
2023 memcpy(bp->b_data, fs, fs->fs_sbsize);
2024
2025 ffs_oldfscompat_write((struct fs *)bp->b_data, mp);
2026 #ifdef FFS_EI
2027 if (mp->um_flags & UFS_NEEDSWAP)
2028 ffs_sb_swap((struct fs *)bp->b_data, (struct fs *)bp->b_data);
2029 #endif
2030 fs->fs_flags |= saveflag;
2031
2032 if (waitfor == MNT_WAIT)
2033 error = bwrite(bp);
2034 else
2035 bawrite(bp);
2036 return (error);
2037 }
2038
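/*
 * Write the superblock and the in-core cylinder group summary
 * information (fs_csp) back to disk.
 */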
2039 int
2040 ffs_cgupdate(struct ufsmount *mp, int waitfor)
2041 {
2042 struct fs *fs = mp->um_fs;
2043 struct buf *bp;
2044 int blks;
2045 void *space;
2046 int i, size, error = 0, allerror = 0;
2047
2048 allerror = ffs_sbupdate(mp, waitfor);
2049 blks = howmany(fs->fs_cssize, fs->fs_fsize);
2050 space = fs->fs_csp;
2051 for (i = 0; i < blks; i += fs->fs_frag) {
2052 size = fs->fs_bsize;
2053 if (i + fs->fs_frag > blks)
2054 size = (blks - i) * fs->fs_fsize;
2055 error = ffs_getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
2056 FFS_NOBLK, size, false, &bp);
2057 if (error)
2058 break;
2059 #ifdef FFS_EI
2060 if (mp->um_flags & UFS_NEEDSWAP)
2061 ffs_csum_swap((struct csum*)space,
2062 (struct csum*)bp->b_data, size);
2063 else
2064 #endif
2065 memcpy(bp->b_data, space, (u_int)size);
2066 space = (char *)space + size;
2067 if (waitfor == MNT_WAIT)
2068 error = bwrite(bp);
2069 else
2070 bawrite(bp);
2071 }
2072 if (!allerror && error)
2073 allerror = error;
2074 return (allerror);
2075 }
2076
2077 int
2078 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
2079 int attrnamespace, const char *attrname)
2080 {
2081 #ifdef UFS_EXTATTR
2082 /*
2083 * File-backed extended attributes are only supported on UFS1.
2084 * UFS2 has native extended attributes.
2085 */
2086 if (VFSTOUFS(mp)->um_fstype == UFS1)
2087 return (ufs_extattrctl(mp, cmd, vp, attrnamespace, attrname));
2088 #endif
2089 return (vfs_stdextattrctl(mp, cmd, vp, attrnamespace, attrname));
2090 }
2091
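/*
 * Suspend or resume file system operations via fstrans(9); a suspend
 * syncs the file system and flushes the WAPBL journal.
 */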
2092 int
2093 ffs_suspendctl(struct mount *mp, int cmd)
2094 {
2095 int error;
2096 struct lwp *l = curlwp;
2097
2098 switch (cmd) {
2099 case SUSPEND_SUSPEND:
2100 if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
2101 return error;
2102 error = ffs_sync(mp, MNT_WAIT, l->l_proc->p_cred);
2103 if (error == 0)
2104 error = fstrans_setstate(mp, FSTRANS_SUSPENDED);
2105 #ifdef WAPBL
2106 if (error == 0 && mp->mnt_wapbl)
2107 error = wapbl_flush(mp->mnt_wapbl, 1);
2108 #endif
2109 if (error != 0) {
2110 (void) fstrans_setstate(mp, FSTRANS_NORMAL);
2111 return error;
2112 }
2113 return 0;
2114
2115 case SUSPEND_RESUME:
2116 return fstrans_setstate(mp, FSTRANS_NORMAL);
2117
2118 default:
2119 return EINVAL;
2120 }
2121 }
2122
2123 /*
2124 * Synch vnode for a mounted file system.
2125 */
2126 static int
2127 ffs_vfs_fsync(vnode_t *vp, int flags)
2128 {
2129 int error, i, pflags;
2130 #ifdef WAPBL
2131 struct mount *mp;
2132 #endif
2133
2134 KASSERT(vp->v_type == VBLK);
2135 KASSERT(vp->v_specmountpoint != NULL);
2136
2137 /*
2138 * Flush all dirty data associated with the vnode.
2139 */
2140 pflags = PGO_ALLPAGES | PGO_CLEANIT;
2141 if ((flags & FSYNC_WAIT) != 0)
2142 pflags |= PGO_SYNCIO;
2143 mutex_enter(vp->v_interlock);
2144 error = VOP_PUTPAGES(vp, 0, 0, pflags);
2145 if (error)
2146 return error;
2147
2148 #ifdef WAPBL
2149 mp = vp->v_specmountpoint;
2150 if (mp && mp->mnt_wapbl) {
2151 /*
2152 * Don't bother writing out metadata if the syncer is
2153 * making the request. We will let the sync vnode
2154 * write it out in a single burst through a call to
2155 * VFS_SYNC().
2156 */
2157 if ((flags & (FSYNC_DATAONLY | FSYNC_LAZY | FSYNC_NOLOG)) != 0)
2158 return 0;
2159
2160 /*
2161 * Don't flush the log if the vnode being flushed
2162 * contains no dirty buffers that could be in the log.
2163 */
2164 if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
2165 error = wapbl_flush(mp->mnt_wapbl, 0);
2166 if (error)
2167 return error;
2168 }
2169
2170 if ((flags & FSYNC_WAIT) != 0) {
2171 mutex_enter(vp->v_interlock);
2172 while (vp->v_numoutput)
2173 cv_wait(&vp->v_cv, vp->v_interlock);
2174 mutex_exit(vp->v_interlock);
2175 }
2176
2177 return 0;
2178 }
2179 #endif /* WAPBL */
2180
2181 error = vflushbuf(vp, flags);
2182 if (error == 0 && (flags & FSYNC_CACHE) != 0) {
2183 i = 1;
2184 (void)VOP_IOCTL(vp, DIOCCACHESYNC, &i, FWRITE,
2185 kauth_cred_get());
2186 }
2187
2188 return error;
2189 }
2190