ffs_vfsops.c revision 1.239.2.3

1 /* $NetBSD: ffs_vfsops.c,v 1.239.2.3 2009/04/04 17:38:30 snj Exp $ */
2
3 /*-
4 * Copyright (c) 2008, 2009 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Wasabi Systems, Inc, and by Andrew Doran.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1989, 1991, 1993, 1994
34 * The Regents of the University of California. All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
61 */
62
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.239.2.3 2009/04/04 17:38:30 snj Exp $");
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_ffs.h"
68 #include "opt_quota.h"
69 #include "opt_softdep.h"
70 #include "opt_wapbl.h"
71 #endif
72
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/namei.h>
76 #include <sys/proc.h>
77 #include <sys/kernel.h>
78 #include <sys/vnode.h>
79 #include <sys/socket.h>
80 #include <sys/mount.h>
81 #include <sys/buf.h>
82 #include <sys/device.h>
83 #include <sys/mbuf.h>
84 #include <sys/file.h>
85 #include <sys/disklabel.h>
86 #include <sys/ioctl.h>
87 #include <sys/errno.h>
88 #include <sys/malloc.h>
89 #include <sys/pool.h>
90 #include <sys/lock.h>
91 #include <sys/sysctl.h>
92 #include <sys/conf.h>
93 #include <sys/kauth.h>
94 #include <sys/wapbl.h>
95 #include <sys/fstrans.h>
96 #include <sys/module.h>
97
98 #include <miscfs/genfs/genfs.h>
99 #include <miscfs/specfs/specdev.h>
100
101 #include <ufs/ufs/quota.h>
102 #include <ufs/ufs/ufsmount.h>
103 #include <ufs/ufs/inode.h>
104 #include <ufs/ufs/dir.h>
105 #include <ufs/ufs/ufs_extern.h>
106 #include <ufs/ufs/ufs_bswap.h>
107 #include <ufs/ufs/ufs_wapbl.h>
108
109 #include <ufs/ffs/fs.h>
110 #include <ufs/ffs/ffs_extern.h>
111
112 MODULE(MODULE_CLASS_VFS, ffs, NULL);
113
114 static int ffs_vfs_fsync(vnode_t *, int);
115
116 static struct sysctllog *ffs_sysctl_log;
117
118 /* how many times ffs_init() was called */
119 int ffs_initcount = 0;
120
121 extern kmutex_t ufs_hashlock;
122
123 extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
124 extern const struct vnodeopv_desc ffs_specop_opv_desc;
125 extern const struct vnodeopv_desc ffs_fifoop_opv_desc;
126
127 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
128 &ffs_vnodeop_opv_desc,
129 &ffs_specop_opv_desc,
130 &ffs_fifoop_opv_desc,
131 NULL,
132 };
133
134 struct vfsops ffs_vfsops = {
135 MOUNT_FFS,
136 sizeof (struct ufs_args),
137 ffs_mount,
138 ufs_start,
139 ffs_unmount,
140 ufs_root,
141 ufs_quotactl,
142 ffs_statvfs,
143 ffs_sync,
144 ffs_vget,
145 ffs_fhtovp,
146 ffs_vptofh,
147 ffs_init,
148 ffs_reinit,
149 ffs_done,
150 ffs_mountroot,
151 ffs_snapshot,
152 ffs_extattrctl,
153 ffs_suspendctl,
154 genfs_renamelock_enter,
155 genfs_renamelock_exit,
156 ffs_vfs_fsync,
157 ffs_vnodeopv_descs,
158 0,
159 { NULL, NULL },
160 };
161
162 static const struct genfs_ops ffs_genfsops = {
163 .gop_size = ffs_gop_size,
164 .gop_alloc = ufs_gop_alloc,
165 .gop_write = genfs_gop_write,
166 .gop_markupdate = ufs_gop_markupdate,
167 };
168
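/* FFS-specific implementations of the operations shared with the generic UFS code. */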
169 static const struct ufs_ops ffs_ufsops = {
170 .uo_itimes = ffs_itimes,
171 .uo_update = ffs_update,
172 .uo_truncate = ffs_truncate,
173 .uo_valloc = ffs_valloc,
174 .uo_vfree = ffs_vfree,
175 .uo_balloc = ffs_balloc,
176 };
177
178 static int
179 ffs_modcmd(modcmd_t cmd, void *arg)
180 {
181 int error;
182
183 #if 0
184 extern int doasyncfree;
185 #endif
186 extern int ffs_log_changeopt;
187
188 switch (cmd) {
189 case MODULE_CMD_INIT:
190 error = vfs_attach(&ffs_vfsops);
191 if (error != 0)
192 break;
193
194 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
195 CTLFLAG_PERMANENT,
196 CTLTYPE_NODE, "vfs", NULL,
197 NULL, 0, NULL, 0,
198 CTL_VFS, CTL_EOL);
199 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
200 CTLFLAG_PERMANENT,
201 CTLTYPE_NODE, "ffs",
202 SYSCTL_DESCR("Berkeley Fast File System"),
203 NULL, 0, NULL, 0,
204 CTL_VFS, 1, CTL_EOL);
205
206 /*
207 * @@@ should we even bother with these first three?
208 */
209 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
210 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
211 CTLTYPE_INT, "doclusterread", NULL,
212 sysctl_notavail, 0, NULL, 0,
213 CTL_VFS, 1, FFS_CLUSTERREAD, CTL_EOL);
214 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
215 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
216 CTLTYPE_INT, "doclusterwrite", NULL,
217 sysctl_notavail, 0, NULL, 0,
218 CTL_VFS, 1, FFS_CLUSTERWRITE, CTL_EOL);
219 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
220 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
221 CTLTYPE_INT, "doreallocblks", NULL,
222 sysctl_notavail, 0, NULL, 0,
223 CTL_VFS, 1, FFS_REALLOCBLKS, CTL_EOL);
224 #if 0
225 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
226 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
227 CTLTYPE_INT, "doasyncfree",
228 SYSCTL_DESCR("Release dirty blocks asynchronously"),
229 NULL, 0, &doasyncfree, 0,
230 CTL_VFS, 1, FFS_ASYNCFREE, CTL_EOL);
231 #endif
232 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
233 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
234 CTLTYPE_INT, "log_changeopt",
235 SYSCTL_DESCR("Log changes in optimization strategy"),
236 NULL, 0, &ffs_log_changeopt, 0,
237 CTL_VFS, 1, FFS_LOG_CHANGEOPT, CTL_EOL);
238 break;
239 case MODULE_CMD_FINI:
240 error = vfs_detach(&ffs_vfsops);
241 if (error != 0)
242 break;
243 sysctl_teardown(&ffs_sysctl_log);
244 break;
245 default:
246 error = ENOTTY;
247 break;
248 }
249
250 return (error);
251 }
252
253 pool_cache_t ffs_inode_cache;
254 pool_cache_t ffs_dinode1_cache;
255 pool_cache_t ffs_dinode2_cache;
256
257 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, daddr_t);
258 static void ffs_oldfscompat_write(struct fs *, struct ufsmount *);
259
260 /*
261 * Called by main() when ffs is going to be mounted as root.
262 */
263
264 int
265 ffs_mountroot(void)
266 {
267 struct fs *fs;
268 struct mount *mp;
269 struct lwp *l = curlwp; /* XXX */
270 struct ufsmount *ump;
271 int error;
272
273 if (device_class(root_device) != DV_DISK)
274 return (ENODEV);
275
276 if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
277 vrele(rootvp);
278 return (error);
279 }
280
281 /*
282 * We always need to be able to mount the root file system.
283 */
284 mp->mnt_flag |= MNT_FORCE;
285 if ((error = ffs_mountfs(rootvp, mp, l)) != 0) {
286 vfs_unbusy(mp, false, NULL);
287 vfs_destroy(mp);
288 return (error);
289 }
290 mp->mnt_flag &= ~MNT_FORCE;
291 mutex_enter(&mountlist_lock);
292 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
293 mutex_exit(&mountlist_lock);
294 ump = VFSTOUFS(mp);
295 fs = ump->um_fs;
296 memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
297 (void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
298 (void)ffs_statvfs(mp, &mp->mnt_stat);
299 vfs_unbusy(mp, false, NULL);
300 setrootfstime((time_t)fs->fs_time);
301 return (0);
302 }
303
304 /*
305 * VFS Operations.
306 *
307 * mount system call
308 */
309 int
310 ffs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
311 {
312 struct lwp *l = curlwp;
313 struct nameidata nd;
314 struct vnode *vp, *devvp = NULL;
315 struct ufs_args *args = data;
316 struct ufsmount *ump = NULL;
317 struct fs *fs;
318 int error = 0, flags, update;
319 mode_t accessmode;
320
321 if (*data_len < sizeof *args)
322 return EINVAL;
323
324 if (mp->mnt_flag & MNT_GETARGS) {
325 ump = VFSTOUFS(mp);
326 if (ump == NULL)
327 return EIO;
328 args->fspec = NULL;
329 *data_len = sizeof *args;
330 return 0;
331 }
332
333 #if !defined(SOFTDEP)
334 mp->mnt_flag &= ~MNT_SOFTDEP;
335 #endif
336
337 update = mp->mnt_flag & MNT_UPDATE;
338
339 /* Check arguments */
340 if (args->fspec != NULL) {
341 /*
342 * Look up the name and verify that it's sane.
343 */
344 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, args->fspec);
345 if ((error = namei(&nd)) != 0)
346 return (error);
347 devvp = nd.ni_vp;
348
349 if (!update) {
350 /*
351 * Be sure this is a valid block device
352 */
353 if (devvp->v_type != VBLK)
354 error = ENOTBLK;
355 else if (bdevsw_lookup(devvp->v_rdev) == NULL)
356 error = ENXIO;
357 } else {
358 /*
359 * Be sure we're still naming the same device
360 * used for our initial mount
361 */
362 ump = VFSTOUFS(mp);
363 if (devvp != ump->um_devvp) {
364 if (devvp->v_rdev != ump->um_devvp->v_rdev)
365 error = EINVAL;
366 else {
367 vrele(devvp);
368 devvp = ump->um_devvp;
369 vref(devvp);
370 }
371 }
372 }
373 } else {
374 if (!update) {
375 /* New mounts must have a filename for the device */
376 return (EINVAL);
377 } else {
378 /* Use the extant mount */
379 ump = VFSTOUFS(mp);
380 devvp = ump->um_devvp;
381 vref(devvp);
382 }
383 }
384
385 /*
386 * Mark the device and any existing vnodes as involved in
387 * softdep processing.
388 */
389 if ((mp->mnt_flag & MNT_SOFTDEP) != 0) {
390 devvp->v_uflag |= VU_SOFTDEP;
391 mutex_enter(&mntvnode_lock);
392 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
393 if (vp->v_mount != mp || vismarker(vp))
394 continue;
395 vp->v_uflag |= VU_SOFTDEP;
396 }
397 mutex_exit(&mntvnode_lock);
398 }
399
400 /*
401 * If mount by non-root, then verify that user has necessary
402 * permissions on the device.
403 */
404 if (error == 0 && kauth_authorize_generic(l->l_cred,
405 KAUTH_GENERIC_ISSUSER, NULL) != 0) {
406 accessmode = VREAD;
407 if (update ?
408 (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
409 (mp->mnt_flag & MNT_RDONLY) == 0)
410 accessmode |= VWRITE;
411 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
412 error = VOP_ACCESS(devvp, accessmode, l->l_cred);
413 VOP_UNLOCK(devvp, 0);
414 }
415
416 if (error) {
417 vrele(devvp);
418 return (error);
419 }
420
421 #ifdef WAPBL
422 /*
423 * WAPBL can only be enabled on a r/w mount
424 * that does not use softdep.
425 */
426 if ((mp->mnt_flag & MNT_RDONLY) && !(mp->mnt_iflag & IMNT_WANTRDWR)) {
427 mp->mnt_flag &= ~MNT_LOG;
428 }
429 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_LOG)) ==
430 (MNT_SOFTDEP | MNT_LOG)) {
431 printf("%s fs is journalled, ignoring soft update mode\n",
432 VFSTOUFS(mp)->um_fs->fs_fsmnt);
433 mp->mnt_flag &= ~MNT_SOFTDEP;
434 }
435 #else /* !WAPBL */
436 mp->mnt_flag &= ~MNT_LOG;
437 #endif /* !WAPBL */
438
439 if (!update) {
440 int xflags;
441
442 if (mp->mnt_flag & MNT_RDONLY)
443 xflags = FREAD;
444 else
445 xflags = FREAD | FWRITE;
446 error = VOP_OPEN(devvp, xflags, FSCRED);
447 if (error)
448 goto fail;
449 error = ffs_mountfs(devvp, mp, l);
450 if (error) {
451 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
452 (void)VOP_CLOSE(devvp, xflags, NOCRED);
453 VOP_UNLOCK(devvp, 0);
454 goto fail;
455 }
456
457 ump = VFSTOUFS(mp);
458 fs = ump->um_fs;
459 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
460 (MNT_SOFTDEP | MNT_ASYNC)) {
461 printf("%s fs uses soft updates, "
462 "ignoring async mode\n",
463 fs->fs_fsmnt);
464 mp->mnt_flag &= ~MNT_ASYNC;
465 }
466 } else {
467 /*
468 * Update the mount.
469 */
470
471 /*
472 * The initial mount got a reference on this
473 * device, so drop the one obtained via
474 * namei(), above.
475 */
476 vrele(devvp);
477
478 ump = VFSTOUFS(mp);
479 fs = ump->um_fs;
480 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
481 /*
482 * Changing from r/w to r/o
483 */
484 flags = WRITECLOSE;
485 if (mp->mnt_flag & MNT_FORCE)
486 flags |= FORCECLOSE;
487 if (mp->mnt_flag & MNT_SOFTDEP)
488 error = softdep_flushfiles(mp, flags, l);
489 else
490 error = ffs_flushfiles(mp, flags, l);
491 if (fs->fs_pendingblocks != 0 ||
492 fs->fs_pendinginodes != 0) {
493 printf("%s: update error: blocks %" PRId64
494 " files %d\n",
495 fs->fs_fsmnt, fs->fs_pendingblocks,
496 fs->fs_pendinginodes);
497 fs->fs_pendingblocks = 0;
498 fs->fs_pendinginodes = 0;
499 }
500 if (error == 0)
501 error = UFS_WAPBL_BEGIN(mp);
502 if (error == 0 &&
503 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
504 fs->fs_clean & FS_WASCLEAN) {
505 if (mp->mnt_flag & MNT_SOFTDEP)
506 fs->fs_flags &= ~FS_DOSOFTDEP;
507 fs->fs_clean = FS_ISCLEAN;
508 (void) ffs_sbupdate(ump, MNT_WAIT);
509 }
510 if (error == 0)
511 UFS_WAPBL_END(mp);
512 if (error)
513 return (error);
514 }
515
516 #ifdef WAPBL
517 if ((mp->mnt_flag & MNT_LOG) == 0) {
518 error = ffs_wapbl_stop(mp, mp->mnt_flag & MNT_FORCE);
519 if (error)
520 return error;
521 }
522 #endif /* WAPBL */
523
524 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
525 /*
526 * Finish change from r/w to r/o
527 */
528 fs->fs_ronly = 1;
529 fs->fs_fmod = 0;
530 }
531
532 /*
533 * Flush soft dependencies if disabling it via an update
534 * mount. This may leave some items to be processed,
535 * so don't do this yet XXX.
536 */
537 if ((fs->fs_flags & FS_DOSOFTDEP) &&
538 !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
539 #ifdef notyet
540 flags = WRITECLOSE;
541 if (mp->mnt_flag & MNT_FORCE)
542 flags |= FORCECLOSE;
543 error = softdep_flushfiles(mp, flags, l);
544 if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
545 fs->fs_flags &= ~FS_DOSOFTDEP;
546 (void) ffs_sbupdate(ump, MNT_WAIT);
547 #elif defined(SOFTDEP)
548 mp->mnt_flag |= MNT_SOFTDEP;
549 #endif
550 }
551
552 /*
553 * When upgrading to a softdep mount, we must first flush
554 * all vnodes. (not done yet -- see above)
555 */
556 if (!(fs->fs_flags & FS_DOSOFTDEP) &&
557 (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
558 #ifdef notyet
559 flags = WRITECLOSE;
560 if (mp->mnt_flag & MNT_FORCE)
561 flags |= FORCECLOSE;
562 error = ffs_flushfiles(mp, flags, l);
563 #else
564 mp->mnt_flag &= ~MNT_SOFTDEP;
565 #endif
566 }
567
568 if (mp->mnt_flag & MNT_RELOAD) {
569 error = ffs_reload(mp, l->l_cred, l);
570 if (error)
571 return (error);
572 }
573
574 if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
575 /*
576 * Changing from read-only to read/write
577 */
578 fs->fs_ronly = 0;
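			/*
			 * Shift FS_ISCLEAN (0x1) up to FS_WASCLEAN (0x2):
			 * remember that the file system was clean before
			 * this read/write upgrade.
			 */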
579 fs->fs_clean <<= 1;
580 fs->fs_fmod = 1;
581 if ((fs->fs_flags & FS_DOSOFTDEP)) {
582 error = softdep_mount(devvp, mp, fs,
583 l->l_cred);
584 if (error)
585 return (error);
586 }
587 #ifdef WAPBL
588 if (fs->fs_flags & FS_DOWAPBL) {
589 printf("%s: replaying log to disk\n",
590 fs->fs_fsmnt);
591 KDASSERT(mp->mnt_wapbl_replay);
592 error = wapbl_replay_write(mp->mnt_wapbl_replay,
593 devvp);
594 if (error) {
595 return error;
596 }
597 wapbl_replay_stop(mp->mnt_wapbl_replay);
598 fs->fs_clean = FS_WASCLEAN;
599 }
600 #endif /* WAPBL */
601 if (fs->fs_snapinum[0] != 0)
602 ffs_snapshot_mount(mp);
603 }
604
605 #ifdef WAPBL
606 error = ffs_wapbl_start(mp);
607 if (error)
608 return error;
609 #endif /* WAPBL */
610
611 if (args->fspec == NULL)
612 return EINVAL;
613 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
614 (MNT_SOFTDEP | MNT_ASYNC)) {
615 printf("%s fs uses soft updates, ignoring async mode\n",
616 fs->fs_fsmnt);
617 mp->mnt_flag &= ~MNT_ASYNC;
618 }
619 }
620
621 error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
622 UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
623 if (error == 0)
624 (void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
625 sizeof(fs->fs_fsmnt));
626 if (mp->mnt_flag & MNT_SOFTDEP)
627 fs->fs_flags |= FS_DOSOFTDEP;
628 else
629 fs->fs_flags &= ~FS_DOSOFTDEP;
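	/*
	 * If the superblock was modified during this mount or update,
	 * warn when the file system was not clean and write the cylinder
	 * group summaries back out under a journal transaction.
	 */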
630 if (fs->fs_fmod != 0) { /* XXX */
631 int err;
632
633 fs->fs_fmod = 0;
634 if (fs->fs_clean & FS_WASCLEAN)
635 fs->fs_time = time_second;
636 else {
637 printf("%s: file system not clean (fs_clean=%#x); "
638 "please fsck(8)\n", mp->mnt_stat.f_mntfromname,
639 fs->fs_clean);
640 printf("%s: lost blocks %" PRId64 " files %d\n",
641 mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
642 fs->fs_pendinginodes);
643 }
644 err = UFS_WAPBL_BEGIN(mp);
645 if (err == 0) {
646 (void) ffs_cgupdate(ump, MNT_WAIT);
647 UFS_WAPBL_END(mp);
648 }
649 }
650 return (error);
651
652 fail:
653 vrele(devvp);
654 return (error);
655 }
656
657 /*
658 * Reload all incore data for a filesystem (used after running fsck on
659 * the root filesystem and finding things to fix). The filesystem must
660 * be mounted read-only.
661 *
662 * Things to do to update the mount:
663 * 1) invalidate all cached meta-data.
664 * 2) re-read superblock from disk.
665 * 3) re-read summary information from disk.
666 * 4) invalidate all inactive vnodes.
667 * 5) invalidate all cached file data.
668 * 6) re-read inode data for all active vnodes.
669 */
670 int
671 ffs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
672 {
673 struct vnode *vp, *mvp, *devvp;
674 struct inode *ip;
675 void *space;
676 struct buf *bp;
677 struct fs *fs, *newfs;
678 struct partinfo dpart;
679 int i, blks, size, error;
680 int32_t *lp;
681 struct ufsmount *ump;
682 daddr_t sblockloc;
683
684 if ((mp->mnt_flag & MNT_RDONLY) == 0)
685 return (EINVAL);
686
687 ump = VFSTOUFS(mp);
688 /*
689 * Step 1: invalidate all cached meta-data.
690 */
691 devvp = ump->um_devvp;
692 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
693 error = vinvalbuf(devvp, 0, cred, l, 0, 0);
694 VOP_UNLOCK(devvp, 0);
695 if (error)
696 panic("ffs_reload: dirty1");
697 /*
698 * Step 2: re-read superblock from disk.
699 */
700 fs = ump->um_fs;
701 if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, NOCRED) != 0)
702 size = DEV_BSIZE;
703 else
704 size = dpart.disklab->d_secsize;
705 /* XXX we don't handle possibility that superblock moved. */
706 error = bread(devvp, fs->fs_sblockloc / size, fs->fs_sbsize,
707 NOCRED, 0, &bp);
708 if (error) {
709 brelse(bp, 0);
710 return (error);
711 }
712 newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
713 memcpy(newfs, bp->b_data, fs->fs_sbsize);
714 #ifdef FFS_EI
715 if (ump->um_flags & UFS_NEEDSWAP) {
716 ffs_sb_swap((struct fs*)bp->b_data, newfs);
717 fs->fs_flags |= FS_SWAPPED;
718 } else
719 #endif
720 fs->fs_flags &= ~FS_SWAPPED;
721 if ((newfs->fs_magic != FS_UFS1_MAGIC &&
722 newfs->fs_magic != FS_UFS2_MAGIC)||
723 newfs->fs_bsize > MAXBSIZE ||
724 newfs->fs_bsize < sizeof(struct fs)) {
725 brelse(bp, 0);
726 free(newfs, M_UFSMNT);
727 return (EIO); /* XXX needs translation */
728 }
729 /* Store off old fs_sblockloc for fs_oldfscompat_read. */
730 sblockloc = fs->fs_sblockloc;
731 /*
732 * Copy pointer fields back into superblock before copying in XXX
733 * new superblock. These should really be in the ufsmount. XXX
734 * Note that important parameters (eg fs_ncg) are unchanged.
735 */
736 newfs->fs_csp = fs->fs_csp;
737 newfs->fs_maxcluster = fs->fs_maxcluster;
738 newfs->fs_contigdirs = fs->fs_contigdirs;
739 newfs->fs_ronly = fs->fs_ronly;
740 newfs->fs_active = fs->fs_active;
741 memcpy(fs, newfs, (u_int)fs->fs_sbsize);
742 brelse(bp, 0);
743 free(newfs, M_UFSMNT);
744
745 /* Recheck for apple UFS filesystem */
746 ump->um_flags &= ~UFS_ISAPPLEUFS;
747 /* First check to see if this is tagged as an Apple UFS filesystem
748 * in the disklabel
749 */
750 if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) == 0) &&
751 (dpart.part->p_fstype == FS_APPLEUFS)) {
752 ump->um_flags |= UFS_ISAPPLEUFS;
753 }
754 #ifdef APPLE_UFS
755 else {
756 /* Manually look for an apple ufs label, and if a valid one
757 * is found, then treat it like an Apple UFS filesystem anyway
758 */
759 error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
760 APPLEUFS_LABEL_SIZE, cred, 0, &bp);
761 if (error) {
762 brelse(bp, 0);
763 return (error);
764 }
765 error = ffs_appleufs_validate(fs->fs_fsmnt,
766 (struct appleufslabel *)bp->b_data, NULL);
767 if (error == 0)
768 ump->um_flags |= UFS_ISAPPLEUFS;
769 brelse(bp, 0);
770 bp = NULL;
771 }
772 #else
773 if (ump->um_flags & UFS_ISAPPLEUFS)
774 return (EIO);
775 #endif
776
777 if (UFS_MPISAPPLEUFS(ump)) {
778 /* see comment about NeXT below */
779 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
780 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
781 mp->mnt_iflag |= IMNT_DTYPE;
782 } else {
783 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
784 ump->um_dirblksiz = DIRBLKSIZ;
785 if (ump->um_maxsymlinklen > 0)
786 mp->mnt_iflag |= IMNT_DTYPE;
787 else
788 mp->mnt_iflag &= ~IMNT_DTYPE;
789 }
790 ffs_oldfscompat_read(fs, ump, sblockloc);
791 mutex_enter(&ump->um_lock);
792 ump->um_maxfilesize = fs->fs_maxfilesize;
793
794 if (fs->fs_flags & ~(FS_KNOWN_FLAGS | FS_INTERNAL)) {
795 uprintf("%s: unknown ufs flags: 0x%08"PRIx32"%s\n",
796 mp->mnt_stat.f_mntonname, fs->fs_flags,
797 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
798 if ((mp->mnt_flag & MNT_FORCE) == 0) {
799 mutex_exit(&ump->um_lock);
800 return (EINVAL);
801 }
802 }
803
804 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
805 fs->fs_pendingblocks = 0;
806 fs->fs_pendinginodes = 0;
807 }
808 mutex_exit(&ump->um_lock);
809
810 ffs_statvfs(mp, &mp->mnt_stat);
811 /*
812 * Step 3: re-read summary information from disk.
813 */
814 blks = howmany(fs->fs_cssize, fs->fs_fsize);
815 space = fs->fs_csp;
816 for (i = 0; i < blks; i += fs->fs_frag) {
817 size = fs->fs_bsize;
818 if (i + fs->fs_frag > blks)
819 size = (blks - i) * fs->fs_fsize;
820 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
821 NOCRED, 0, &bp);
822 if (error) {
823 brelse(bp, 0);
824 return (error);
825 }
826 #ifdef FFS_EI
827 if (UFS_FSNEEDSWAP(fs))
828 ffs_csum_swap((struct csum *)bp->b_data,
829 (struct csum *)space, size);
830 else
831 #endif
832 memcpy(space, bp->b_data, (size_t)size);
833 space = (char *)space + size;
834 brelse(bp, 0);
835 }
836 if ((fs->fs_flags & FS_DOSOFTDEP))
837 softdep_mount(devvp, mp, fs, cred);
838 if (fs->fs_snapinum[0] != 0)
839 ffs_snapshot_mount(mp);
840 /*
841 * We no longer know anything about clusters per cylinder group.
842 */
843 if (fs->fs_contigsumsize > 0) {
844 lp = fs->fs_maxcluster;
845 for (i = 0; i < fs->fs_ncg; i++)
846 *lp++ = fs->fs_contigsumsize;
847 }
848
849 /* Allocate a marker vnode. */
850 if ((mvp = vnalloc(mp)) == NULL)
851 return ENOMEM;
852 /*
853 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
854 * and vclean() can be called indirectly
855 */
856 mutex_enter(&mntvnode_lock);
857 loop:
858 for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
859 vmark(mvp, vp);
860 if (vp->v_mount != mp || vismarker(vp))
861 continue;
862 /*
863 * Step 4: invalidate all inactive vnodes.
864 */
865 if (vrecycle(vp, &mntvnode_lock, l)) {
866 mutex_enter(&mntvnode_lock);
867 (void)vunmark(mvp);
868 goto loop;
869 }
870 /*
871 * Step 5: invalidate all cached file data.
872 */
873 mutex_enter(&vp->v_interlock);
874 mutex_exit(&mntvnode_lock);
875 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
876 (void)vunmark(mvp);
877 goto loop;
878 }
879 if (vinvalbuf(vp, 0, cred, l, 0, 0))
880 panic("ffs_reload: dirty2");
881 /*
882 * Step 6: re-read inode data for all active vnodes.
883 */
884 ip = VTOI(vp);
885 error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
886 (int)fs->fs_bsize, NOCRED, 0, &bp);
887 if (error) {
888 brelse(bp, 0);
889 vput(vp);
890 (void)vunmark(mvp);
891 break;
892 }
893 ffs_load_inode(bp, ip, fs, ip->i_number);
894 ip->i_ffs_effnlink = ip->i_nlink;
895 brelse(bp, 0);
896 vput(vp);
897 mutex_enter(&mntvnode_lock);
898 }
899 mutex_exit(&mntvnode_lock);
900 vnfree(mvp);
901 return (error);
902 }
903
904 /*
905 * Possible superblock locations ordered from most to least likely.
906 */
907 static const int sblock_try[] = SBLOCKSEARCH;
908
909 /*
910 * Common code for mount and mountroot
911 */
912 int
913 ffs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
914 {
915 struct ufsmount *ump;
916 struct buf *bp;
917 struct fs *fs;
918 dev_t dev;
919 struct partinfo dpart;
920 void *space;
921 daddr_t sblockloc, fsblockloc;
922 int blks, fstype;
923 int error, i, size, ronly, bset = 0;
924 #ifdef FFS_EI
925 int needswap = 0; /* keep gcc happy */
926 #endif
927 int32_t *lp;
928 kauth_cred_t cred;
929 	u_int32_t sbsize = 8192;	/* keep gcc happy */
930
931 dev = devvp->v_rdev;
932 cred = l ? l->l_cred : NOCRED;
933
934 /* Flush out any old buffers remaining from a previous use. */
935 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
936 error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
937 VOP_UNLOCK(devvp, 0);
938 if (error)
939 return (error);
940
941 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
942 if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) != 0)
943 size = DEV_BSIZE;
944 else
945 size = dpart.disklab->d_secsize;
946
947 bp = NULL;
948 ump = NULL;
949 fs = NULL;
950 sblockloc = 0;
951 fstype = 0;
952
953 error = fstrans_mount(mp);
954 if (error)
955 return error;
956
957 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
958 memset(ump, 0, sizeof *ump);
959 mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);
960 error = ffs_snapshot_init(ump);
961 if (error)
962 goto out;
963 ump->um_ops = &ffs_ufsops;
964
965 #ifdef WAPBL
966 sbagain:
967 #endif
968 /*
969 * Try reading the superblock in each of its possible locations.
970 */
971 for (i = 0; ; i++) {
972 if (bp != NULL) {
973 brelse(bp, BC_NOCACHE);
974 bp = NULL;
975 }
976 if (sblock_try[i] == -1) {
977 error = EINVAL;
978 fs = NULL;
979 goto out;
980 }
981 error = bread(devvp, sblock_try[i] / size, SBLOCKSIZE, cred,
982 0, &bp);
983 if (error) {
984 fs = NULL;
985 goto out;
986 }
987 fs = (struct fs*)bp->b_data;
988 fsblockloc = sblockloc = sblock_try[i];
989 if (fs->fs_magic == FS_UFS1_MAGIC) {
990 sbsize = fs->fs_sbsize;
991 fstype = UFS1;
992 #ifdef FFS_EI
993 needswap = 0;
994 } else if (fs->fs_magic == bswap32(FS_UFS1_MAGIC)) {
995 sbsize = bswap32(fs->fs_sbsize);
996 fstype = UFS1;
997 needswap = 1;
998 #endif
999 } else if (fs->fs_magic == FS_UFS2_MAGIC) {
1000 sbsize = fs->fs_sbsize;
1001 fstype = UFS2;
1002 #ifdef FFS_EI
1003 needswap = 0;
1004 } else if (fs->fs_magic == bswap32(FS_UFS2_MAGIC)) {
1005 sbsize = bswap32(fs->fs_sbsize);
1006 fstype = UFS2;
1007 needswap = 1;
1008 #endif
1009 } else
1010 continue;
1011
1012
1013 /* fs->fs_sblockloc isn't defined for old filesystems */
1014 if (fstype == UFS1 && !(fs->fs_old_flags & FS_FLAGS_UPDATED)) {
1015 if (sblockloc == SBLOCK_UFS2)
1016 /*
1017 * This is likely to be the first alternate
1018 * in a filesystem with 64k blocks.
1019 * Don't use it.
1020 */
1021 continue;
1022 fsblockloc = sblockloc;
1023 } else {
1024 fsblockloc = fs->fs_sblockloc;
1025 #ifdef FFS_EI
1026 if (needswap)
1027 fsblockloc = bswap64(fsblockloc);
1028 #endif
1029 }
1030
1031 /* Check we haven't found an alternate superblock */
1032 if (fsblockloc != sblockloc)
1033 continue;
1034
1035 /* Validate size of superblock */
1036 if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs))
1037 continue;
1038
1039 /* Ok seems to be a good superblock */
1040 break;
1041 }
1042
1043 fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
1044 memcpy(fs, bp->b_data, sbsize);
1045 ump->um_fs = fs;
1046
1047 #ifdef FFS_EI
1048 if (needswap) {
1049 ffs_sb_swap((struct fs*)bp->b_data, fs);
1050 fs->fs_flags |= FS_SWAPPED;
1051 } else
1052 #endif
1053 fs->fs_flags &= ~FS_SWAPPED;
1054
1055 #ifdef WAPBL
1056 if ((mp->mnt_wapbl_replay == 0) && (fs->fs_flags & FS_DOWAPBL)) {
1057 error = ffs_wapbl_replay_start(mp, fs, devvp);
1058 if (error)
1059 goto out;
1060
1061 if (!ronly) {
1062 /* XXX fsmnt may be stale. */
1063 printf("%s: replaying log to disk\n", fs->fs_fsmnt);
1064 error = wapbl_replay_write(mp->mnt_wapbl_replay, devvp);
1065 if (error)
1066 goto out;
1067 wapbl_replay_stop(mp->mnt_wapbl_replay);
1068 fs->fs_clean = FS_WASCLEAN;
1069 } else {
1070 /* XXX fsmnt may be stale */
1071 printf("%s: replaying log to memory\n", fs->fs_fsmnt);
1072 }
1073
1074 /* Force a re-read of the superblock */
1075 brelse(bp, BC_INVAL);
1076 bp = NULL;
1077 free(fs, M_UFSMNT);
1078 fs = NULL;
1079 goto sbagain;
1080 }
1081 #else /* !WAPBL */
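	/*
	 * Without WAPBL compiled in we cannot replay the log, so refuse
	 * to mount a file system with a pending journal unless forced.
	 */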
1082 if ((fs->fs_flags & FS_DOWAPBL) && (mp->mnt_flag & MNT_FORCE) == 0) {
1083 error = EPERM;
1084 goto out;
1085 }
1086 #endif /* !WAPBL */
1087
1088 ffs_oldfscompat_read(fs, ump, sblockloc);
1089 ump->um_maxfilesize = fs->fs_maxfilesize;
1090
1091 if (fs->fs_flags & ~(FS_KNOWN_FLAGS | FS_INTERNAL)) {
1092 uprintf("%s: unknown ufs flags: 0x%08"PRIx32"%s\n",
1093 mp->mnt_stat.f_mntonname, fs->fs_flags,
1094 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
1095 if ((mp->mnt_flag & MNT_FORCE) == 0) {
1096 error = EINVAL;
1097 goto out;
1098 }
1099 }
1100
1101 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1102 fs->fs_pendingblocks = 0;
1103 fs->fs_pendinginodes = 0;
1104 }
1105
1106 ump->um_fstype = fstype;
1107 if (fs->fs_sbsize < SBLOCKSIZE)
1108 brelse(bp, BC_INVAL);
1109 else
1110 brelse(bp, 0);
1111 bp = NULL;
1112
1113 /* First check to see if this is tagged as an Apple UFS filesystem
1114 * in the disklabel
1115 */
1116 if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) == 0) &&
1117 (dpart.part->p_fstype == FS_APPLEUFS)) {
1118 ump->um_flags |= UFS_ISAPPLEUFS;
1119 }
1120 #ifdef APPLE_UFS
1121 else {
1122 /* Manually look for an apple ufs label, and if a valid one
1123 * is found, then treat it like an Apple UFS filesystem anyway
1124 */
1125 error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
1126 APPLEUFS_LABEL_SIZE, cred, 0, &bp);
1127 if (error)
1128 goto out;
1129 error = ffs_appleufs_validate(fs->fs_fsmnt,
1130 (struct appleufslabel *)bp->b_data, NULL);
1131 if (error == 0) {
1132 ump->um_flags |= UFS_ISAPPLEUFS;
1133 }
1134 brelse(bp, 0);
1135 bp = NULL;
1136 }
1137 #else
1138 if (ump->um_flags & UFS_ISAPPLEUFS) {
1139 error = EINVAL;
1140 goto out;
1141 }
1142 #endif
1143
1144 #if 0
1145 /*
1146 * XXX This code changes the behaviour of mounting dirty filesystems, to
1147 * XXX require "mount -f ..." to mount them. This doesn't match what
1148 * XXX mount(8) describes and is disabled for now.
1149 */
1150 /*
1151 * If the file system is not clean, don't allow it to be mounted
1152 * unless MNT_FORCE is specified. (Note: MNT_FORCE is always set
1153 * for the root file system.)
1154 */
1155 if (fs->fs_flags & FS_DOWAPBL) {
1156 /*
1157 * wapbl normally expects to be FS_WASCLEAN when the FS_DOWAPBL
1158 * bit is set, although there's a window in unmount where it
1159 * could be FS_ISCLEAN
1160 */
1161 if ((mp->mnt_flag & MNT_FORCE) == 0 &&
1162 (fs->fs_clean & (FS_WASCLEAN | FS_ISCLEAN)) == 0) {
1163 error = EPERM;
1164 goto out;
1165 }
1166 } else
1167 if ((fs->fs_clean & FS_ISCLEAN) == 0 &&
1168 (mp->mnt_flag & MNT_FORCE) == 0) {
1169 error = EPERM;
1170 goto out;
1171 }
1172 #endif
1173
1174 /*
1175 * verify that we can access the last block in the fs
1176 * if we're mounting read/write.
1177 */
1178
1179 if (!ronly) {
1180 error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize,
1181 cred, 0, &bp);
1182 if (bp->b_bcount != fs->fs_fsize)
1183 error = EINVAL;
1184 if (error) {
1185 bset = BC_INVAL;
1186 goto out;
1187 }
1188 brelse(bp, BC_INVAL);
1189 bp = NULL;
1190 }
1191
1192 fs->fs_ronly = ronly;
1193 /* Don't bump fs_clean if we're replaying journal */
1194 if (!((fs->fs_flags & FS_DOWAPBL) && (fs->fs_clean & FS_WASCLEAN)))
1195 if (ronly == 0) {
1196 fs->fs_clean <<= 1;
1197 fs->fs_fmod = 1;
1198 }
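	/*
	 * One contiguous buffer holds the cylinder group summary info,
	 * the per-cg cluster summaries (when fs_contigsumsize > 0) and
	 * the per-cg directory counts.
	 */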
1199 size = fs->fs_cssize;
1200 blks = howmany(size, fs->fs_fsize);
1201 if (fs->fs_contigsumsize > 0)
1202 size += fs->fs_ncg * sizeof(int32_t);
1203 size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1204 space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
1205 fs->fs_csp = space;
1206 for (i = 0; i < blks; i += fs->fs_frag) {
1207 size = fs->fs_bsize;
1208 if (i + fs->fs_frag > blks)
1209 size = (blks - i) * fs->fs_fsize;
1210 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
1211 cred, 0, &bp);
1212 if (error) {
1213 free(fs->fs_csp, M_UFSMNT);
1214 goto out;
1215 }
1216 #ifdef FFS_EI
1217 if (needswap)
1218 ffs_csum_swap((struct csum *)bp->b_data,
1219 (struct csum *)space, size);
1220 else
1221 #endif
1222 memcpy(space, bp->b_data, (u_int)size);
1223
1224 space = (char *)space + size;
1225 brelse(bp, 0);
1226 bp = NULL;
1227 }
1228 if (fs->fs_contigsumsize > 0) {
1229 fs->fs_maxcluster = lp = space;
1230 for (i = 0; i < fs->fs_ncg; i++)
1231 *lp++ = fs->fs_contigsumsize;
1232 space = lp;
1233 }
1234 size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1235 fs->fs_contigdirs = space;
1236 space = (char *)space + size;
1237 memset(fs->fs_contigdirs, 0, size);
1238 /* Compatibility for old filesystems - XXX */
1239 if (fs->fs_avgfilesize <= 0)
1240 fs->fs_avgfilesize = AVFILESIZ;
1241 if (fs->fs_avgfpdir <= 0)
1242 fs->fs_avgfpdir = AFPDIR;
1243 fs->fs_active = NULL;
1244 mp->mnt_data = ump;
1245 mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
1246 mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_FFS);
1247 mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
1248 mp->mnt_stat.f_namemax = FFS_MAXNAMLEN;
1249 if (UFS_MPISAPPLEUFS(ump)) {
1250 /* NeXT used to keep short symlinks in the inode even
1251 * when using FS_42INODEFMT. In that case fs->fs_maxsymlinklen
1252 * is probably -1, but we still need to be able to identify
1253 * short symlinks.
1254 */
1255 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
1256 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
1257 mp->mnt_iflag |= IMNT_DTYPE;
1258 } else {
1259 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
1260 ump->um_dirblksiz = DIRBLKSIZ;
1261 if (ump->um_maxsymlinklen > 0)
1262 mp->mnt_iflag |= IMNT_DTYPE;
1263 else
1264 mp->mnt_iflag &= ~IMNT_DTYPE;
1265 }
1266 mp->mnt_fs_bshift = fs->fs_bshift;
1267 mp->mnt_dev_bshift = DEV_BSHIFT; /* XXX */
1268 mp->mnt_flag |= MNT_LOCAL;
1269 mp->mnt_iflag |= IMNT_MPSAFE;
1270 #ifdef FFS_EI
1271 if (needswap)
1272 ump->um_flags |= UFS_NEEDSWAP;
1273 #endif
1274 ump->um_mountp = mp;
1275 ump->um_dev = dev;
1276 ump->um_devvp = devvp;
1277 ump->um_nindir = fs->fs_nindir;
1278 ump->um_lognindir = ffs(fs->fs_nindir) - 1;
1279 ump->um_bptrtodb = fs->fs_fsbtodb;
1280 ump->um_seqinc = fs->fs_frag;
1281 for (i = 0; i < MAXQUOTAS; i++)
1282 ump->um_quotas[i] = NULLVP;
1283 devvp->v_specmountpoint = mp;
1284 if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
1285 error = softdep_mount(devvp, mp, fs, cred);
1286 if (error) {
1287 free(fs->fs_csp, M_UFSMNT);
1288 goto out;
1289 }
1290 }
1291 if (ronly == 0 && fs->fs_snapinum[0] != 0)
1292 ffs_snapshot_mount(mp);
1293
1294 #ifdef WAPBL
1295 if (!ronly) {
1296 KDASSERT(fs->fs_ronly == 0);
1297 /*
1298 * ffs_wapbl_start() needs mp->mnt_stat initialised if it
1299 * needs to create a new log file in-filesystem.
1300 */
1301 ffs_statvfs(mp, &mp->mnt_stat);
1302
1303 error = ffs_wapbl_start(mp);
1304 if (error) {
1305 free(fs->fs_csp, M_UFSMNT);
1306 goto out;
1307 }
1308 }
1309 #endif /* WAPBL */
1310 #ifdef UFS_EXTATTR
1311 /*
1312 * Initialize file-backed extended attributes on UFS1 file
1313 * systems.
1314 */
1315 if (ump->um_fstype == UFS1) {
1316 ufs_extattr_uepm_init(&ump->um_extattr);
1317 #ifdef UFS_EXTATTR_AUTOSTART
1318 /*
1319 * XXX Just ignore errors. Not clear that we should
1320 * XXX fail the mount in this case.
1321 */
1322 (void) ufs_extattr_autostart(mp, l);
1323 #endif
1324 }
1325 #endif /* UFS_EXTATTR */
1326 return (0);
1327 out:
1328 #ifdef WAPBL
1329 if (mp->mnt_wapbl_replay) {
1330 if (wapbl_replay_isopen(mp->mnt_wapbl_replay))
1331 wapbl_replay_stop(mp->mnt_wapbl_replay);
1332 wapbl_replay_free(mp->mnt_wapbl_replay);
1333 mp->mnt_wapbl_replay = 0;
1334 }
1335 #endif
1336
1337 fstrans_unmount(mp);
1338 if (fs)
1339 free(fs, M_UFSMNT);
1340 devvp->v_specmountpoint = NULL;
1341 if (bp)
1342 brelse(bp, bset);
1343 if (ump) {
1344 if (ump->um_oldfscompat)
1345 free(ump->um_oldfscompat, M_UFSMNT);
1346 mutex_destroy(&ump->um_lock);
1347 free(ump, M_UFSMNT);
1348 mp->mnt_data = NULL;
1349 }
1350 return (error);
1351 }
1352
1353 /*
1354 * Sanity checks for loading old filesystem superblocks.
1355 * See ffs_oldfscompat_write below for unwound actions.
1356 *
1357 * XXX - Parts get retired eventually.
1358 * Unfortunately new bits get added.
1359 */
1360 static void
1361 ffs_oldfscompat_read(struct fs *fs, struct ufsmount *ump, daddr_t sblockloc)
1362 {
1363 off_t maxfilesize;
1364 int32_t *extrasave;
1365
1366 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1367 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1368 return;
1369
1370 if (!ump->um_oldfscompat)
1371 ump->um_oldfscompat = malloc(512 + 3*sizeof(int32_t),
1372 M_UFSMNT, M_WAITOK);
1373
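	/*
	 * Save the 512-byte rotational layout area and the three geometry
	 * fields that the compatibility code below may overwrite, so that
	 * ffs_oldfscompat_write() can restore them.
	 */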
1374 memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
1375 extrasave = ump->um_oldfscompat;
1376 extrasave += 512/sizeof(int32_t);
1377 extrasave[0] = fs->fs_old_npsect;
1378 extrasave[1] = fs->fs_old_interleave;
1379 extrasave[2] = fs->fs_old_trackskew;
1380
1381 /* These fields will be overwritten by their
1382 * original values in fs_oldfscompat_write, so it is harmless
1383 * to modify them here.
1384 */
1385 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
1386 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
1387 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
1388 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
1389
1390 fs->fs_maxbsize = fs->fs_bsize;
1391 fs->fs_time = fs->fs_old_time;
1392 fs->fs_size = fs->fs_old_size;
1393 fs->fs_dsize = fs->fs_old_dsize;
1394 fs->fs_csaddr = fs->fs_old_csaddr;
1395 fs->fs_sblockloc = sblockloc;
1396
1397 fs->fs_flags = fs->fs_old_flags | (fs->fs_flags & FS_INTERNAL);
1398
1399 if (fs->fs_old_postblformat == FS_42POSTBLFMT) {
1400 fs->fs_old_nrpos = 8;
1401 fs->fs_old_npsect = fs->fs_old_nsect;
1402 fs->fs_old_interleave = 1;
1403 fs->fs_old_trackskew = 0;
1404 }
1405
1406 if (fs->fs_old_inodefmt < FS_44INODEFMT) {
1407 fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
1408 fs->fs_qbmask = ~fs->fs_bmask;
1409 fs->fs_qfmask = ~fs->fs_fmask;
1410 }
1411
1412 maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;
1413 if (fs->fs_maxfilesize > maxfilesize)
1414 fs->fs_maxfilesize = maxfilesize;
1415
1416 /* Compatibility for old filesystems */
1417 if (fs->fs_avgfilesize <= 0)
1418 fs->fs_avgfilesize = AVFILESIZ;
1419 if (fs->fs_avgfpdir <= 0)
1420 fs->fs_avgfpdir = AFPDIR;
1421
1422 #if 0
1423 if (bigcgs) {
1424 fs->fs_save_cgsize = fs->fs_cgsize;
1425 fs->fs_cgsize = fs->fs_bsize;
1426 }
1427 #endif
1428 }
1429
1430 /*
1431 * Unwinding superblock updates for old filesystems.
1432 * See ffs_oldfscompat_read above for details.
1433 *
1434 * XXX - Parts get retired eventually.
1435 * Unfortunately new bits get added.
1436 */
1437 static void
1438 ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
1439 {
1440 int32_t *extrasave;
1441
1442 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1443 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1444 return;
1445
1446 fs->fs_old_time = fs->fs_time;
1447 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
1448 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
1449 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
1450 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
1451 fs->fs_old_flags = fs->fs_flags;
1452
1453 #if 0
1454 if (bigcgs) {
1455 fs->fs_cgsize = fs->fs_save_cgsize;
1456 }
1457 #endif
1458
1459 memcpy(&fs->fs_old_postbl_start, ump->um_oldfscompat, 512);
1460 extrasave = ump->um_oldfscompat;
1461 extrasave += 512/sizeof(int32_t);
1462 fs->fs_old_npsect = extrasave[0];
1463 fs->fs_old_interleave = extrasave[1];
1464 fs->fs_old_trackskew = extrasave[2];
1465
1466 }
1467
1468 /*
1469 * unmount system call
1470 */
1471 int
1472 ffs_unmount(struct mount *mp, int mntflags)
1473 {
1474 struct lwp *l = curlwp;
1475 struct ufsmount *ump = VFSTOUFS(mp);
1476 struct fs *fs = ump->um_fs;
1477 int error, flags, penderr;
1478 #ifdef WAPBL
1479 extern int doforce;
1480 #endif
1481
1482 penderr = 0;
1483 flags = 0;
1484 if (mntflags & MNT_FORCE)
1485 flags |= FORCECLOSE;
1486 #ifdef UFS_EXTATTR
1487 if (ump->um_fstype == UFS1) {
1488 ufs_extattr_stop(mp, l);
1489 ufs_extattr_uepm_destroy(&ump->um_extattr);
1490 }
1491 #endif /* UFS_EXTATTR */
1492 if (mp->mnt_flag & MNT_SOFTDEP) {
1493 if ((error = softdep_flushfiles(mp, flags, l)) != 0)
1494 return (error);
1495 } else {
1496 if ((error = ffs_flushfiles(mp, flags, l)) != 0)
1497 return (error);
1498 }
1499 mutex_enter(&ump->um_lock);
1500 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1501 printf("%s: unmount pending error: blocks %" PRId64
1502 " files %d\n",
1503 fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
1504 fs->fs_pendingblocks = 0;
1505 fs->fs_pendinginodes = 0;
1506 penderr = 1;
1507 }
1508 mutex_exit(&ump->um_lock);
1509 error = UFS_WAPBL_BEGIN(mp);
1510 if (error == 0)
1511 if (fs->fs_ronly == 0 &&
1512 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
1513 fs->fs_clean & FS_WASCLEAN) {
1514 /*
1515 * XXXX don't mark fs clean in the case of softdep
1516 * pending block errors, until they are fixed.
1517 */
1518 if (penderr == 0) {
1519 if (mp->mnt_flag & MNT_SOFTDEP)
1520 fs->fs_flags &= ~FS_DOSOFTDEP;
1521 fs->fs_clean = FS_ISCLEAN;
1522 }
1523 fs->fs_fmod = 0;
1524 (void) ffs_sbupdate(ump, MNT_WAIT);
1525 }
1526 if (error == 0)
1527 UFS_WAPBL_END(mp);
1528 #ifdef WAPBL
1529 KASSERT(!(mp->mnt_wapbl_replay && mp->mnt_wapbl));
1530 if (mp->mnt_wapbl_replay) {
1531 KDASSERT(fs->fs_ronly);
1532 wapbl_replay_stop(mp->mnt_wapbl_replay);
1533 wapbl_replay_free(mp->mnt_wapbl_replay);
1534 mp->mnt_wapbl_replay = 0;
1535 }
1536 error = ffs_wapbl_stop(mp, doforce && (mntflags & MNT_FORCE));
1537 if (error) {
1538 return error;
1539 }
1540 #endif /* WAPBL */
1541 if (ump->um_devvp->v_type != VBAD)
1542 ump->um_devvp->v_specmountpoint = NULL;
1543 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1544 (void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD | FWRITE,
1545 NOCRED);
1546 vput(ump->um_devvp);
1547 free(fs->fs_csp, M_UFSMNT);
1548 free(fs, M_UFSMNT);
1549 if (ump->um_oldfscompat != NULL)
1550 free(ump->um_oldfscompat, M_UFSMNT);
1551 softdep_unmount(mp);
1552 mutex_destroy(&ump->um_lock);
1553 ffs_snapshot_fini(ump);
1554 free(ump, M_UFSMNT);
1555 mp->mnt_data = NULL;
1556 mp->mnt_flag &= ~MNT_LOCAL;
1557 fstrans_unmount(mp);
1558 return (0);
1559 }
1560
1561 /*
1562 * Flush out all the files in a filesystem.
1563 */
1564 int
1565 ffs_flushfiles(struct mount *mp, int flags, struct lwp *l)
1566 {
1567 extern int doforce;
1568 struct ufsmount *ump;
1569 int error;
1570
1571 if (!doforce)
1572 flags &= ~FORCECLOSE;
1573 ump = VFSTOUFS(mp);
1574 #ifdef QUOTA
1575 if (mp->mnt_flag & MNT_QUOTA) {
1576 int i;
1577 if ((error = vflush(mp, NULLVP, SKIPSYSTEM | flags)) != 0)
1578 return (error);
1579 for (i = 0; i < MAXQUOTAS; i++) {
1580 if (ump->um_quotas[i] == NULLVP)
1581 continue;
1582 quotaoff(l, mp, i);
1583 }
1584 /*
1585 * Here we fall through to vflush again to ensure
1586 * that we have gotten rid of all the system vnodes.
1587 */
1588 }
1589 #endif
1590 if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
1591 return (error);
1592 ffs_snapshot_unmount(mp);
1593 /*
1594 * Flush all the files.
1595 */
1596 error = vflush(mp, NULLVP, flags);
1597 if (error)
1598 return (error);
1599 /*
1600 * Flush filesystem metadata.
1601 */
1602 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1603 error = VOP_FSYNC(ump->um_devvp, l->l_cred, FSYNC_WAIT, 0, 0);
1604 VOP_UNLOCK(ump->um_devvp, 0);
1605 if (flags & FORCECLOSE) /* XXXDBJ */
1606 error = 0;
1607
1608 #ifdef WAPBL
1609 if (error)
1610 return error;
1611 if (mp->mnt_wapbl) {
1612 error = wapbl_flush(mp->mnt_wapbl, 1);
1613 if (flags & FORCECLOSE)
1614 error = 0;
1615 }
1616 #endif
1617
1618 return (error);
1619 }
1620
1621 /*
1622 * Get file system statistics.
1623 */
1624 int
1625 ffs_statvfs(struct mount *mp, struct statvfs *sbp)
1626 {
1627 struct ufsmount *ump;
1628 struct fs *fs;
1629
1630 ump = VFSTOUFS(mp);
1631 fs = ump->um_fs;
1632 mutex_enter(&ump->um_lock);
1633 sbp->f_bsize = fs->fs_bsize;
1634 sbp->f_frsize = fs->fs_fsize;
1635 sbp->f_iosize = fs->fs_bsize;
1636 sbp->f_blocks = fs->fs_dsize;
1637 sbp->f_bfree = blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
1638 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
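	/* Reserve fs_minfree percent of the data blocks for the superuser. */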
1639 sbp->f_bresvd = ((u_int64_t) fs->fs_dsize * (u_int64_t)
1640 fs->fs_minfree) / (u_int64_t) 100;
1641 if (sbp->f_bfree > sbp->f_bresvd)
1642 sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
1643 else
1644 sbp->f_bavail = 0;
1645 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
1646 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1647 sbp->f_favail = sbp->f_ffree;
1648 sbp->f_fresvd = 0;
1649 mutex_exit(&ump->um_lock);
1650 copy_statvfs_info(sbp, mp);
1651
1652 return (0);
1653 }
1654
1655 /*
1656 * Go through the disk queues to initiate sandbagged IO;
1657 * go through the inodes to write those that have been modified;
1658 * initiate the writing of the super block if it has been modified.
1659 *
1660 * Note: we are always called with the filesystem marked `MPBUSY'.
1661 */
1662 int
1663 ffs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
1664 {
1665 struct lwp *l = curlwp;
1666 struct vnode *vp, *mvp, *nvp;
1667 struct inode *ip;
1668 struct ufsmount *ump = VFSTOUFS(mp);
1669 struct fs *fs;
1670 int error, count, allerror = 0;
1671
1672 fs = ump->um_fs;
1673 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
1674 printf("fs = %s\n", fs->fs_fsmnt);
1675 panic("update: rofs mod");
1676 }
1677
1678 /* Allocate a marker vnode. */
1679 if ((mvp = vnalloc(mp)) == NULL)
1680 return (ENOMEM);
1681
1682 fstrans_start(mp, FSTRANS_SHARED);
1683 /*
1684 * Write back each (modified) inode.
1685 */
1686 mutex_enter(&mntvnode_lock);
1687 loop:
1688 /*
1689 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
1690 * and vclean() can be called indirectly
1691 */
1692 for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
1693 nvp = TAILQ_NEXT(vp, v_mntvnodes);
1694 /*
1695 * If the vnode that we are about to sync is no longer
1696 * associated with this mount point, start over.
1697 */
1698 if (vp->v_mount != mp)
1699 goto loop;
1700 /*
1701 * Don't interfere with concurrent scans of this FS.
1702 */
1703 if (vismarker(vp))
1704 continue;
1705 mutex_enter(&vp->v_interlock);
1706 ip = VTOI(vp);
1707
1708 /*
1709 * Skip the vnode/inode if inaccessible.
1710 */
1711 if (ip == NULL || (vp->v_iflag & (VI_XLOCK | VI_CLEAN)) != 0 ||
1712 vp->v_type == VNON) {
1713 mutex_exit(&vp->v_interlock);
1714 continue;
1715 }
1716
1717 /*
1718 * We deliberately update inode times here. This will
1719 * prevent a massive queue of updates accumulating, only
1720 * to be handled by a call to unmount.
1721 *
1722 * XXX It would be better to have the syncer trickle these
1723 * out. Adjustment needed to allow registering vnodes for
1724 * sync when the vnode is clean, but the inode dirty. Or
1725 * have ufs itself trickle out inode updates.
1726 *
1727 * If doing a lazy sync, we don't care about metadata or
1728 * data updates, because they are handled by each vnode's
1729 * synclist entry. In this case we are only interested in
1730 * writing back modified inodes.
1731 */
1732 if ((ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE |
1733 IN_MODIFY | IN_MODIFIED | IN_ACCESSED)) == 0 &&
1734 (waitfor == MNT_LAZY || (LIST_EMPTY(&vp->v_dirtyblkhd) &&
1735 UVM_OBJ_IS_CLEAN(&vp->v_uobj)))) {
1736 mutex_exit(&vp->v_interlock);
1737 continue;
1738 }
1739 if (vp->v_type == VBLK &&
1740 fstrans_getstate(mp) == FSTRANS_SUSPENDING) {
1741 mutex_exit(&vp->v_interlock);
1742 continue;
1743 }
1744 vmark(mvp, vp);
1745 mutex_exit(&mntvnode_lock);
1746 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
1747 if (error) {
1748 mutex_enter(&mntvnode_lock);
1749 nvp = vunmark(mvp);
1750 if (error == ENOENT) {
1751 goto loop;
1752 }
1753 continue;
1754 }
1755 if (waitfor == MNT_LAZY) {
1756 error = UFS_WAPBL_BEGIN(vp->v_mount);
1757 if (!error) {
1758 error = ffs_update(vp, NULL, NULL,
1759 UPDATE_CLOSE);
1760 UFS_WAPBL_END(vp->v_mount);
1761 }
1762 } else {
1763 error = VOP_FSYNC(vp, cred, FSYNC_NOLOG |
1764 (waitfor == MNT_WAIT ? FSYNC_WAIT : 0), 0, 0);
1765 }
1766 if (error)
1767 allerror = error;
1768 vput(vp);
1769 mutex_enter(&mntvnode_lock);
1770 nvp = vunmark(mvp);
1771 }
1772 mutex_exit(&mntvnode_lock);
1773 /*
1774 * Force stale file system control information to be flushed.
1775 */
1776 if (waitfor == MNT_WAIT && (ump->um_mountp->mnt_flag & MNT_SOFTDEP)) {
1777 if ((error = softdep_flushworklist(ump->um_mountp, &count, l)))
1778 allerror = error;
1779 /* Flushed work items may create new vnodes to clean */
1780 if (allerror == 0 && count) {
1781 mutex_enter(&mntvnode_lock);
1782 goto loop;
1783 }
1784 }
1785 if (waitfor != MNT_LAZY && (ump->um_devvp->v_numoutput > 0 ||
1786 !LIST_EMPTY(&ump->um_devvp->v_dirtyblkhd))) {
1787 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1788 if ((error = VOP_FSYNC(ump->um_devvp, cred,
1789 (waitfor == MNT_WAIT ? FSYNC_WAIT : 0) | FSYNC_NOLOG,
1790 0, 0)) != 0)
1791 allerror = error;
1792 VOP_UNLOCK(ump->um_devvp, 0);
1793 if (allerror == 0 && waitfor == MNT_WAIT && !mp->mnt_wapbl) {
1794 mutex_enter(&mntvnode_lock);
1795 goto loop;
1796 }
1797 }
1798 #ifdef QUOTA
1799 qsync(mp);
1800 #endif
1801 /*
1802 * Write back modified superblock.
1803 */
1804 if (fs->fs_fmod != 0) {
1805 fs->fs_fmod = 0;
1806 fs->fs_time = time_second;
1807 error = UFS_WAPBL_BEGIN(mp);
1808 if (error)
1809 allerror = error;
1810 else {
1811 if ((error = ffs_cgupdate(ump, waitfor)))
1812 allerror = error;
1813 UFS_WAPBL_END(mp);
1814 }
1815 }
1816
1817 #ifdef WAPBL
1818 if (mp->mnt_wapbl) {
1819 error = wapbl_flush(mp->mnt_wapbl, 0);
1820 if (error)
1821 allerror = error;
1822 }
1823 #endif
1824
1825 fstrans_done(mp);
1826 vnfree(mvp);
1827 return (allerror);
1828 }
1829
1830 /*
1831 * Look up a FFS dinode number to find its incore vnode, otherwise read it
1832 * in from disk. If it is in core, wait for the lock bit to clear, then
1833 * return the inode locked. Detection and handling of mount points must be
1834 * done by the calling routine.
1835 */
1836 int
1837 ffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
1838 {
1839 struct fs *fs;
1840 struct inode *ip;
1841 struct ufsmount *ump;
1842 struct buf *bp;
1843 struct vnode *vp;
1844 dev_t dev;
1845 int error;
1846
1847 ump = VFSTOUFS(mp);
1848 dev = ump->um_dev;
1849
1850 retry:
1851 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
1852 return (0);
1853
1854 /* Allocate a new vnode/inode. */
1855 if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
1856 *vpp = NULL;
1857 return (error);
1858 }
1859 ip = pool_cache_get(ffs_inode_cache, PR_WAITOK);
1860
1861 /*
1862 * If someone beat us to it, put back the freshly allocated
1863 * vnode/inode pair and retry.
1864 */
1865 mutex_enter(&ufs_hashlock);
1866 if (ufs_ihashget(dev, ino, 0) != NULL) {
1867 mutex_exit(&ufs_hashlock);
1868 ungetnewvnode(vp);
1869 pool_cache_put(ffs_inode_cache, ip);
1870 goto retry;
1871 }
1872
1873 vp->v_vflag |= VV_LOCKSWORK;
1874 if ((mp->mnt_flag & MNT_SOFTDEP) != 0)
1875 vp->v_uflag |= VU_SOFTDEP;
1876
1877 /*
1878 * XXX MFS ends up here, too, to allocate an inode. Should we
1879 * XXX create another pool for MFS inodes?
1880 */
1881
1882 memset(ip, 0, sizeof(struct inode));
1883 vp->v_data = ip;
1884 ip->i_vnode = vp;
1885 ip->i_ump = ump;
1886 ip->i_fs = fs = ump->um_fs;
1887 ip->i_dev = dev;
1888 ip->i_number = ino;
1889 LIST_INIT(&ip->i_pcbufhd);
1890 #ifdef QUOTA
1891 ufsquota_init(ip);
1892 #endif
1893
1894 /*
1895 * Initialize genfs node, we might proceed to destroy it in
1896 * error branches.
1897 */
1898 genfs_node_init(vp, &ffs_genfsops);
1899
1900 /*
1901 * Put it onto its hash chain and lock it so that other requests for
1902 * this inode will block if they arrive while we are sleeping waiting
1903 * for old data structures to be purged or for the contents of the
1904 * disk portion of this inode to be read.
1905 */
1906
1907 ufs_ihashins(ip);
1908 mutex_exit(&ufs_hashlock);
1909
1910 /* Read in the disk contents for the inode, copy into the inode. */
1911 error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1912 (int)fs->fs_bsize, NOCRED, 0, &bp);
1913 if (error) {
1914
1915 /*
1916 * The inode does not contain anything useful, so it would
1917 * be misleading to leave it on its hash chain. With mode
1918 * still zero, it will be unlinked and returned to the free
1919 * list by vput().
1920 */
1921
1922 vput(vp);
1923 brelse(bp, 0);
1924 *vpp = NULL;
1925 return (error);
1926 }
1927 if (ip->i_ump->um_fstype == UFS1)
1928 ip->i_din.ffs1_din = pool_cache_get(ffs_dinode1_cache,
1929 PR_WAITOK);
1930 else
1931 ip->i_din.ffs2_din = pool_cache_get(ffs_dinode2_cache,
1932 PR_WAITOK);
1933 ffs_load_inode(bp, ip, fs, ino);
1934 if (DOINGSOFTDEP(vp))
1935 softdep_load_inodeblock(ip);
1936 else
1937 ip->i_ffs_effnlink = ip->i_nlink;
1938 brelse(bp, 0);
1939
1940 /*
1941 * Initialize the vnode from the inode, check for aliases.
1942 * Note that the underlying vnode may have changed.
1943 */
1944
1945 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
1946
1947 /*
1948 * Finish inode initialization now that aliasing has been resolved.
1949 */
1950
1951 ip->i_devvp = ump->um_devvp;
1952 VREF(ip->i_devvp);
1953
1954 /*
1955 * Ensure that uid and gid are correct. This is a temporary
1956 * fix until fsck has been changed to do the update.
1957 */
1958
1959 if (fs->fs_old_inodefmt < FS_44INODEFMT) { /* XXX */
1960 ip->i_uid = ip->i_ffs1_ouid; /* XXX */
1961 ip->i_gid = ip->i_ffs1_ogid; /* XXX */
1962 } /* XXX */
1963 uvm_vnp_setsize(vp, ip->i_size);
1964 *vpp = vp;
1965 return (0);
1966 }
1967
1968 /*
1969 * File handle to vnode
1970 *
1971 * Have to be really careful about stale file handles:
1972 * - check that the inode number is valid
1973 * - call ffs_vget() to get the locked inode
1974 * - check for an unallocated inode (i_mode == 0)
1975 * - check that the given client host has export rights and return
1976  *	  those rights via exflagsp and credanonp
1977 */
1978 int
1979 ffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
1980 {
1981 struct ufid ufh;
1982 struct fs *fs;
1983
1984 if (fhp->fid_len != sizeof(struct ufid))
1985 return EINVAL;
1986
1987 memcpy(&ufh, fhp, sizeof(ufh));
1988 fs = VFSTOUFS(mp)->um_fs;
1989 if (ufh.ufid_ino < ROOTINO ||
1990 ufh.ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1991 return (ESTALE);
1992 return (ufs_fhtovp(mp, &ufh, vpp));
1993 }
1994
1995 /*
1996 * Vnode pointer to File handle
1997 */
1998 /* ARGSUSED */
1999 int
2000 ffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
2001 {
2002 struct inode *ip;
2003 struct ufid ufh;
2004
2005 if (*fh_size < sizeof(struct ufid)) {
2006 *fh_size = sizeof(struct ufid);
2007 return E2BIG;
2008 }
2009 ip = VTOI(vp);
2010 *fh_size = sizeof(struct ufid);
2011 memset(&ufh, 0, sizeof(ufh));
2012 ufh.ufid_len = sizeof(struct ufid);
2013 ufh.ufid_ino = ip->i_number;
2014 ufh.ufid_gen = ip->i_gen;
2015 memcpy(fhp, &ufh, sizeof(ufh));
2016 return (0);
2017 }
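
/*
 * Illustrative sketch, not compiled: how a hypothetical in-kernel caller
 * would use the *fh_size contract above -- probe with a too-small size,
 * get E2BIG plus the required size back, then retry with a buffer of that
 * size.  The VFS_VPTOFH() indirection and kmem_zalloc() are assumed here;
 * real callers go through vfs_composefh() and friends.
 */
#if 0
	struct fid *fhp;
	size_t fh_size = 0;
	int error;

	error = VFS_VPTOFH(vp, NULL, &fh_size);		/* sets fh_size */
	if (error == E2BIG) {
		fhp = kmem_zalloc(fh_size, KM_SLEEP);
		error = VFS_VPTOFH(vp, fhp, &fh_size);	/* fill the handle */
	}
#endif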
2018
2019 void
2020 ffs_init(void)
2021 {
2022 if (ffs_initcount++ > 0)
2023 return;
2024
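	/*
	 * Create the inode and dinode pool caches.  The NULL arguments
	 * mean no special allocator, constructor, destructor or argument;
	 * IPL_NONE because the caches are only used from thread context.
	 */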
2025 ffs_inode_cache = pool_cache_init(sizeof(struct inode), 0, 0, 0,
2026 "ffsino", NULL, IPL_NONE, NULL, NULL, NULL);
2027 ffs_dinode1_cache = pool_cache_init(sizeof(struct ufs1_dinode), 0, 0, 0,
2028 "ffsdino1", NULL, IPL_NONE, NULL, NULL, NULL);
2029 ffs_dinode2_cache = pool_cache_init(sizeof(struct ufs2_dinode), 0, 0, 0,
2030 "ffsdino2", NULL, IPL_NONE, NULL, NULL, NULL);
2031 softdep_initialize();
2032 ufs_init();
2033 }
2034
2035 void
2036 ffs_reinit(void)
2037 {
2038 softdep_reinitialize();
2039 ufs_reinit();
2040 }
2041
2042 void
2043 ffs_done(void)
2044 {
2045 if (--ffs_initcount > 0)
2046 return;
2047
2048 /* XXX softdep cleanup ? */
2049 ufs_done();
2050 pool_cache_destroy(ffs_dinode2_cache);
2051 pool_cache_destroy(ffs_dinode1_cache);
2052 pool_cache_destroy(ffs_inode_cache);
2053 }
2054
2055 /*
2056 * Write a superblock and associated information back to disk.
2057 */
2058 int
2059 ffs_sbupdate(struct ufsmount *mp, int waitfor)
2060 {
2061 struct fs *fs = mp->um_fs;
2062 struct buf *bp;
2063 int error = 0;
2064 u_int32_t saveflag;
2065
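	/*
	 * fs_sblockloc is a byte offset; shifting it right by
	 * (fs_fshift - fs_fsbtodb) == log2(DEV_BSIZE) converts it to the
	 * DEV_BSIZE device block number expected for the device vnode.
	 */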
2066 error = ffs_getblk(mp->um_devvp,
2067 fs->fs_sblockloc >> (fs->fs_fshift - fs->fs_fsbtodb), FFS_NOBLK,
2068 fs->fs_sbsize, false, &bp);
2069 if (error)
2070 return error;
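	/*
	 * Strip the in-core-only flags (FS_INTERNAL mask) before copying
	 * the superblock into the buffer so they are never written to
	 * disk, and restore them once the copy has been made.
	 */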
2071 saveflag = fs->fs_flags & FS_INTERNAL;
2072 fs->fs_flags &= ~FS_INTERNAL;
2073
2074 memcpy(bp->b_data, fs, fs->fs_sbsize);
2075
2076 ffs_oldfscompat_write((struct fs *)bp->b_data, mp);
2077 #ifdef FFS_EI
2078 if (mp->um_flags & UFS_NEEDSWAP)
2079 ffs_sb_swap((struct fs *)bp->b_data, (struct fs *)bp->b_data);
2080 #endif
2081 fs->fs_flags |= saveflag;
2082
2083 if (waitfor == MNT_WAIT)
2084 error = bwrite(bp);
2085 else
2086 bawrite(bp);
2087 return (error);
2088 }
2089
2090 int
2091 ffs_cgupdate(struct ufsmount *mp, int waitfor)
2092 {
2093 struct fs *fs = mp->um_fs;
2094 struct buf *bp;
2095 int blks;
2096 void *space;
2097 int i, size, error = 0, allerror = 0;
2098
2099 allerror = ffs_sbupdate(mp, waitfor);
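	/*
	 * Write the in-core cylinder group summary information (fs_csp)
	 * back to its disk area at fs_csaddr, one file system block at a
	 * time, with a short final transfer if fs_cssize is not a
	 * multiple of fs_bsize.
	 */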
2100 blks = howmany(fs->fs_cssize, fs->fs_fsize);
2101 space = fs->fs_csp;
2102 for (i = 0; i < blks; i += fs->fs_frag) {
2103 size = fs->fs_bsize;
2104 if (i + fs->fs_frag > blks)
2105 size = (blks - i) * fs->fs_fsize;
2106 error = ffs_getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
2107 FFS_NOBLK, size, false, &bp);
2108 if (error)
2109 break;
2110 #ifdef FFS_EI
2111 if (mp->um_flags & UFS_NEEDSWAP)
2112 ffs_csum_swap((struct csum*)space,
2113 (struct csum*)bp->b_data, size);
2114 else
2115 #endif
2116 memcpy(bp->b_data, space, (u_int)size);
2117 space = (char *)space + size;
2118 if (waitfor == MNT_WAIT)
2119 error = bwrite(bp);
2120 else
2121 bawrite(bp);
2122 }
2123 if (!allerror && error)
2124 allerror = error;
2125 return (allerror);
2126 }
2127
2128 int
2129 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
2130 int attrnamespace, const char *attrname)
2131 {
2132 #ifdef UFS_EXTATTR
2133 /*
2134 * File-backed extended attributes are only supported on UFS1.
2135 * UFS2 has native extended attributes.
2136 */
2137 if (VFSTOUFS(mp)->um_fstype == UFS1)
2138 return (ufs_extattrctl(mp, cmd, vp, attrnamespace, attrname));
2139 #endif
2140 return (vfs_stdextattrctl(mp, cmd, vp, attrnamespace, attrname));
2141 }
2142
2143 int
2144 ffs_suspendctl(struct mount *mp, int cmd)
2145 {
2146 int error;
2147 struct lwp *l = curlwp;
2148
2149 switch (cmd) {
2150 case SUSPEND_SUSPEND:
2151 if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
2152 return error;
2153 error = ffs_sync(mp, MNT_WAIT, l->l_proc->p_cred);
2154 if (error == 0)
2155 error = fstrans_setstate(mp, FSTRANS_SUSPENDED);
2156 #ifdef WAPBL
2157 if (error == 0 && mp->mnt_wapbl)
2158 error = wapbl_flush(mp->mnt_wapbl, 1);
2159 #endif
2160 if (error != 0) {
2161 (void) fstrans_setstate(mp, FSTRANS_NORMAL);
2162 return error;
2163 }
2164 return 0;
2165
2166 case SUSPEND_RESUME:
2167 return fstrans_setstate(mp, FSTRANS_NORMAL);
2168
2169 default:
2170 return EINVAL;
2171 }
2172 }
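
/*
 * Illustrative sketch, not compiled: a hypothetical caller suspending a
 * file system around some operation and resuming it afterwards, via the
 * VFS_SUSPENDCTL() entry point that dispatches to ffs_suspendctl() above.
 */
#if 0
	error = VFS_SUSPENDCTL(mp, SUSPEND_SUSPEND);
	if (error == 0) {
		/* ... operate on the now-quiescent file system ... */
		(void)VFS_SUSPENDCTL(mp, SUSPEND_RESUME);
	}
#endif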
2173
2174 /*
2175  * Sync a vnode for a mounted file system.  This is called for foreign
2176  * (non-ffs) vnodes, in practice the block device vnode backing the mount.
2177 */
2178 static int
2179 ffs_vfs_fsync(vnode_t *vp, int flags)
2180 {
2181 int error, passes, skipmeta, i, pflags;
2182 buf_t *bp, *nbp;
2183 struct mount *mp;
2184
2185 KASSERT(vp->v_type == VBLK);
2186 KASSERT(vp->v_specmountpoint != NULL);
2187
2188 mp = vp->v_specmountpoint;
2189 if ((mp->mnt_flag & MNT_SOFTDEP) != 0)
2190 softdep_fsync_mountdev(vp);
2191
2192 /*
2193 * Flush all dirty data associated with the vnode.
2194 */
2195 pflags = PGO_ALLPAGES | PGO_CLEANIT;
2196 if ((flags & FSYNC_WAIT) != 0)
2197 pflags |= PGO_SYNCIO;
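	/*
	 * VOP_PUTPAGES() must be entered with v_interlock held; it
	 * releases the interlock itself before returning.
	 */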
2198 mutex_enter(&vp->v_interlock);
2199 error = VOP_PUTPAGES(vp, 0, 0, pflags);
2200 if (error)
2201 return error;
2202
2203 #ifdef WAPBL
2204 if (mp && mp->mnt_wapbl) {
2205 /*
2206 * Don't bother writing out metadata if the syncer is
2207 * making the request. We will let the sync vnode
2208 * write it out in a single burst through a call to
2209 * VFS_SYNC().
2210 */
2211 if ((flags & (FSYNC_DATAONLY | FSYNC_LAZY | FSYNC_NOLOG)) != 0)
2212 return 0;
2213
2214 /*
2215 * Don't flush the log if the vnode being flushed
2216 * contains no dirty buffers that could be in the log.
2217 */
2218 if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
2219 error = wapbl_flush(mp->mnt_wapbl, 0);
2220 if (error)
2221 return error;
2222 }
2223
2224 if ((flags & FSYNC_WAIT) != 0) {
2225 mutex_enter(&vp->v_interlock);
2226 while (vp->v_numoutput)
2227 cv_wait(&vp->v_cv, &vp->v_interlock);
2228 mutex_exit(&vp->v_interlock);
2229 }
2230
2231 return 0;
2232 }
2233 #endif /* WAPBL */
2234
2235 /*
2236 	 * Write out metadata for non-logging file systems.  This block can
2237 	 * be simplified once softdep is removed.
2238 */
2239 passes = NIADDR + 1;
2240 skipmeta = 0;
2241 if (flags & FSYNC_WAIT)
2242 skipmeta = 1;
2243
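	/*
	 * When waiting, the first pass writes data buffers only
	 * (skipmeta) and the second also includes metadata (buffers with
	 * negative logical block numbers).  Up to NIADDR + 1 further
	 * passes allow indirect blocks dirtied by earlier writes to be
	 * flushed as well.
	 */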
2244 loop:
2245 mutex_enter(&bufcache_lock);
2246 LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
2247 bp->b_cflags &= ~BC_SCANNED;
2248 }
2249 for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2250 nbp = LIST_NEXT(bp, b_vnbufs);
2251 if (bp->b_cflags & (BC_BUSY | BC_SCANNED))
2252 continue;
2253 if ((bp->b_oflags & BO_DELWRI) == 0)
2254 panic("ffs_fsync: not dirty");
2255 if (skipmeta && bp->b_lblkno < 0)
2256 continue;
2257 bp->b_cflags |= BC_BUSY | BC_VFLUSH | BC_SCANNED;
2258 mutex_exit(&bufcache_lock);
2259 /*
2260 * On our final pass through, do all I/O synchronously
2261 * so that we can find out if our flush is failing
2262 * because of write errors.
2263 */
2264 if (passes > 0 || !(flags & FSYNC_WAIT))
2265 (void) bawrite(bp);
2266 else if ((error = bwrite(bp)) != 0)
2267 return (error);
2268 /*
2269 * Since we unlocked during the I/O, we need
2270 * to start from a known point.
2271 */
2272 mutex_enter(&bufcache_lock);
2273 nbp = LIST_FIRST(&vp->v_dirtyblkhd);
2274 }
2275 mutex_exit(&bufcache_lock);
2276 if (skipmeta) {
2277 skipmeta = 0;
2278 goto loop;
2279 }
2280
2281 if ((flags & FSYNC_WAIT) != 0) {
2282 mutex_enter(&vp->v_interlock);
2283 while (vp->v_numoutput) {
2284 cv_wait(&vp->v_cv, &vp->v_interlock);
2285 }
2286 mutex_exit(&vp->v_interlock);
2287
2288 /*
2289 * Ensure that any filesystem metadata associated
2290 * with the vnode has been written.
2291 */
2292 if ((error = softdep_sync_metadata(vp)) != 0)
2293 return (error);
2294
2295 if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
2296 /*
2297 * Block devices associated with filesystems may
2298 * have new I/O requests posted for them even if
2299 * the vnode is locked, so no amount of trying will
2300 * get them clean. Thus we give block devices a
2301 * good effort, then just give up. For all other file
2302 * types, go around and try again until it is clean.
2303 */
2304 if (passes > 0) {
2305 passes--;
2306 goto loop;
2307 }
2308 #ifdef DIAGNOSTIC
2309 if (vp->v_type != VBLK)
2310 vprint("ffs_fsync: dirty", vp);
2311 #endif
2312 }
2313 }
2314
2315 if (error == 0 && (flags & FSYNC_CACHE) != 0) {
2316 (void)VOP_IOCTL(vp, DIOCCACHESYNC, &i, FWRITE,
2317 kauth_cred_get());
2318 }
2319
2320 return error;
2321 }
2322