ffs_vfsops.c revision 1.242
    1 /* $NetBSD: ffs_vfsops.c,v 1.242 2009/02/22 20:10:25 ad Exp $ */
2
3 /*-
4 * Copyright (c) 2008 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Wasabi Systems, Inc.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 /*
33 * Copyright (c) 1989, 1991, 1993, 1994
34 * The Regents of the University of California. All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
61 */
62
63 #include <sys/cdefs.h>
64 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.242 2009/02/22 20:10:25 ad Exp $");
65
66 #if defined(_KERNEL_OPT)
67 #include "opt_ffs.h"
68 #include "opt_quota.h"
69 #include "opt_softdep.h"
70 #include "opt_wapbl.h"
71 #endif
72
73 #include <sys/param.h>
74 #include <sys/systm.h>
75 #include <sys/namei.h>
76 #include <sys/proc.h>
77 #include <sys/kernel.h>
78 #include <sys/vnode.h>
79 #include <sys/socket.h>
80 #include <sys/mount.h>
81 #include <sys/buf.h>
82 #include <sys/device.h>
83 #include <sys/mbuf.h>
84 #include <sys/file.h>
85 #include <sys/disklabel.h>
86 #include <sys/ioctl.h>
87 #include <sys/errno.h>
88 #include <sys/malloc.h>
89 #include <sys/pool.h>
90 #include <sys/lock.h>
91 #include <sys/sysctl.h>
92 #include <sys/conf.h>
93 #include <sys/kauth.h>
94 #include <sys/wapbl.h>
95 #include <sys/fstrans.h>
96 #include <sys/module.h>
97
98 #include <miscfs/genfs/genfs.h>
99 #include <miscfs/specfs/specdev.h>
100
101 #include <ufs/ufs/quota.h>
102 #include <ufs/ufs/ufsmount.h>
103 #include <ufs/ufs/inode.h>
104 #include <ufs/ufs/dir.h>
105 #include <ufs/ufs/ufs_extern.h>
106 #include <ufs/ufs/ufs_bswap.h>
107 #include <ufs/ufs/ufs_wapbl.h>
108
109 #include <ufs/ffs/fs.h>
110 #include <ufs/ffs/ffs_extern.h>
111
112 MODULE(MODULE_CLASS_VFS, ffs, NULL);
113
114 static int ffs_vfs_fsync(vnode_t *, int);
115
116 static struct sysctllog *ffs_sysctl_log;
117
118 /* how many times ffs_init() was called */
119 int ffs_initcount = 0;
120
121 extern kmutex_t ufs_hashlock;
122
123 extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
124 extern const struct vnodeopv_desc ffs_specop_opv_desc;
125 extern const struct vnodeopv_desc ffs_fifoop_opv_desc;
126
127 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
128 &ffs_vnodeop_opv_desc,
129 &ffs_specop_opv_desc,
130 &ffs_fifoop_opv_desc,
131 NULL,
132 };
133
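/*
 * The vfsops table below is what ffs_modcmd() registers with
 * vfs_attach().  It pairs the FFS entry points (ffs_mount, ffs_unmount,
 * ffs_sync, ffs_snapshot, ...) with the generic ufs_* operations
 * (ufs_start, ufs_root, ufs_quotactl) that need no FFS-specific
 * behaviour, plus the genfs rename-lock hooks.
 */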
134 struct vfsops ffs_vfsops = {
135 MOUNT_FFS,
136 sizeof (struct ufs_args),
137 ffs_mount,
138 ufs_start,
139 ffs_unmount,
140 ufs_root,
141 ufs_quotactl,
142 ffs_statvfs,
143 ffs_sync,
144 ffs_vget,
145 ffs_fhtovp,
146 ffs_vptofh,
147 ffs_init,
148 ffs_reinit,
149 ffs_done,
150 ffs_mountroot,
151 ffs_snapshot,
152 ffs_extattrctl,
153 ffs_suspendctl,
154 genfs_renamelock_enter,
155 genfs_renamelock_exit,
156 ffs_vfs_fsync,
157 ffs_vnodeopv_descs,
158 0,
159 { NULL, NULL },
160 };
161
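/*
 * Dispatch tables handed to the generic layers: genfs_node_init() in
 * ffs_vget() attaches ffs_genfsops to every FFS vnode for the gop_*
 * operations (size, alloc, write, markupdate), and ump->um_ops is
 * pointed at ffs_ufsops in ffs_mountfs() so the shared UFS code can
 * call back into FFS for inode updates, truncation, allocation and
 * block mapping.
 */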
162 static const struct genfs_ops ffs_genfsops = {
163 .gop_size = ffs_gop_size,
164 .gop_alloc = ufs_gop_alloc,
165 .gop_write = genfs_gop_write,
166 .gop_markupdate = ufs_gop_markupdate,
167 };
168
169 static const struct ufs_ops ffs_ufsops = {
170 .uo_itimes = ffs_itimes,
171 .uo_update = ffs_update,
172 .uo_truncate = ffs_truncate,
173 .uo_valloc = ffs_valloc,
174 .uo_vfree = ffs_vfree,
175 .uo_balloc = ffs_balloc,
176 .uo_unmark_vnode = (void (*)(vnode_t *))nullop,
177 };
178
179 static int
180 ffs_modcmd(modcmd_t cmd, void *arg)
181 {
182 int error;
183
184 #if 0
185 extern int doasyncfree;
186 #endif
187 extern int ffs_log_changeopt;
188
189 switch (cmd) {
190 case MODULE_CMD_INIT:
191 error = vfs_attach(&ffs_vfsops);
192 if (error != 0)
193 break;
194
195 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
196 CTLFLAG_PERMANENT,
197 CTLTYPE_NODE, "vfs", NULL,
198 NULL, 0, NULL, 0,
199 CTL_VFS, CTL_EOL);
200 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
201 CTLFLAG_PERMANENT,
202 CTLTYPE_NODE, "ffs",
203 SYSCTL_DESCR("Berkeley Fast File System"),
204 NULL, 0, NULL, 0,
205 CTL_VFS, 1, CTL_EOL);
206
207 /*
208 * @@@ should we even bother with these first three?
209 */
210 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
211 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
212 CTLTYPE_INT, "doclusterread", NULL,
213 sysctl_notavail, 0, NULL, 0,
214 CTL_VFS, 1, FFS_CLUSTERREAD, CTL_EOL);
215 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
216 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
217 CTLTYPE_INT, "doclusterwrite", NULL,
218 sysctl_notavail, 0, NULL, 0,
219 CTL_VFS, 1, FFS_CLUSTERWRITE, CTL_EOL);
220 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
221 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
222 CTLTYPE_INT, "doreallocblks", NULL,
223 sysctl_notavail, 0, NULL, 0,
224 CTL_VFS, 1, FFS_REALLOCBLKS, CTL_EOL);
225 #if 0
226 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
227 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
228 CTLTYPE_INT, "doasyncfree",
229 SYSCTL_DESCR("Release dirty blocks asynchronously"),
230 NULL, 0, &doasyncfree, 0,
231 CTL_VFS, 1, FFS_ASYNCFREE, CTL_EOL);
232 #endif
233 sysctl_createv(&ffs_sysctl_log, 0, NULL, NULL,
234 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
235 CTLTYPE_INT, "log_changeopt",
236 SYSCTL_DESCR("Log changes in optimization strategy"),
237 NULL, 0, &ffs_log_changeopt, 0,
238 CTL_VFS, 1, FFS_LOG_CHANGEOPT, CTL_EOL);
239 break;
240 case MODULE_CMD_FINI:
241 error = vfs_detach(&ffs_vfsops);
242 if (error != 0)
243 break;
244 sysctl_teardown(&ffs_sysctl_log);
245 break;
246 default:
247 error = ENOTTY;
248 break;
249 }
250
251 return (error);
252 }
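/*
 * The sysctl nodes created above surface to userland under vfs.ffs.
 * A rough usage sketch (node names are taken from the sysctl_createv()
 * calls; the value printed depends on the running kernel):
 *
 *	$ sysctl vfs.ffs.log_changeopt
 *	vfs.ffs.log_changeopt = ...
 *
 * doclusterread, doclusterwrite and doreallocblks are wired to
 * sysctl_notavail, so they exist only as compatibility placeholders.
 */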
253
254 pool_cache_t ffs_inode_cache;
255 pool_cache_t ffs_dinode1_cache;
256 pool_cache_t ffs_dinode2_cache;
257
258 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, daddr_t);
259 static void ffs_oldfscompat_write(struct fs *, struct ufsmount *);
260
261 /*
262 * Called by main() when ffs is going to be mounted as root.
263 */
264
265 int
266 ffs_mountroot(void)
267 {
268 struct fs *fs;
269 struct mount *mp;
270 struct lwp *l = curlwp; /* XXX */
271 struct ufsmount *ump;
272 int error;
273
274 if (device_class(root_device) != DV_DISK)
275 return (ENODEV);
276
277 if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
278 vrele(rootvp);
279 return (error);
280 }
281
282 /*
283 * We always need to be able to mount the root file system.
284 */
285 mp->mnt_flag |= MNT_FORCE;
286 if ((error = ffs_mountfs(rootvp, mp, l)) != 0) {
287 vfs_unbusy(mp, false, NULL);
288 vfs_destroy(mp);
289 return (error);
290 }
291 mp->mnt_flag &= ~MNT_FORCE;
292 mutex_enter(&mountlist_lock);
293 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
294 mutex_exit(&mountlist_lock);
295 ump = VFSTOUFS(mp);
296 fs = ump->um_fs;
297 memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
298 (void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
299 (void)ffs_statvfs(mp, &mp->mnt_stat);
300 vfs_unbusy(mp, false, NULL);
301 setrootfstime((time_t)fs->fs_time);
302 return (0);
303 }
304
305 /*
306 * VFS Operations.
307 *
308 * mount system call
309 */
310 int
311 ffs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
312 {
313 struct lwp *l = curlwp;
314 struct nameidata nd;
315 struct vnode *vp, *devvp = NULL;
316 struct ufs_args *args = data;
317 struct ufsmount *ump = NULL;
318 struct fs *fs;
319 int error = 0, flags, update;
320 mode_t accessmode;
321
322 if (*data_len < sizeof *args)
323 return EINVAL;
324
325 if (mp->mnt_flag & MNT_GETARGS) {
326 ump = VFSTOUFS(mp);
327 if (ump == NULL)
328 return EIO;
329 args->fspec = NULL;
330 *data_len = sizeof *args;
331 return 0;
332 }
333
334 #if !defined(SOFTDEP)
335 mp->mnt_flag &= ~MNT_SOFTDEP;
336 #endif
337
338 update = mp->mnt_flag & MNT_UPDATE;
339
340 /* Check arguments */
341 if (args->fspec != NULL) {
342 /*
343 * Look up the name and verify that it's sane.
344 */
345 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, args->fspec);
346 if ((error = namei(&nd)) != 0)
347 return (error);
348 devvp = nd.ni_vp;
349
350 if (!update) {
351 /*
352 * Be sure this is a valid block device
353 */
354 if (devvp->v_type != VBLK)
355 error = ENOTBLK;
356 else if (bdevsw_lookup(devvp->v_rdev) == NULL)
357 error = ENXIO;
358 } else {
359 /*
360 * Be sure we're still naming the same device
361 * used for our initial mount
362 */
363 ump = VFSTOUFS(mp);
364 if (devvp != ump->um_devvp) {
365 if (devvp->v_rdev != ump->um_devvp->v_rdev)
366 error = EINVAL;
367 else {
368 vrele(devvp);
369 devvp = ump->um_devvp;
370 vref(devvp);
371 }
372 }
373 }
374 } else {
375 if (!update) {
376 /* New mounts must have a filename for the device */
377 return (EINVAL);
378 } else {
379 /* Use the extant mount */
380 ump = VFSTOUFS(mp);
381 devvp = ump->um_devvp;
382 vref(devvp);
383 }
384 }
385
386 /*
387 * Mark the device and any existing vnodes as involved in
388 * softdep processing.
389 */
390 if ((mp->mnt_flag & MNT_SOFTDEP) != 0) {
391 devvp->v_uflag |= VU_SOFTDEP;
392 mutex_enter(&mntvnode_lock);
393 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
394 if (vp->v_mount != mp || vismarker(vp))
395 continue;
396 vp->v_uflag |= VU_SOFTDEP;
397 }
398 mutex_exit(&mntvnode_lock);
399 }
400
401 /*
402 * If mount by non-root, then verify that user has necessary
403 * permissions on the device.
404 */
405 if (error == 0 && kauth_authorize_generic(l->l_cred,
406 KAUTH_GENERIC_ISSUSER, NULL) != 0) {
407 accessmode = VREAD;
408 if (update ?
409 (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
410 (mp->mnt_flag & MNT_RDONLY) == 0)
411 accessmode |= VWRITE;
412 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
413 error = VOP_ACCESS(devvp, accessmode, l->l_cred);
414 VOP_UNLOCK(devvp, 0);
415 }
416
417 if (error) {
418 vrele(devvp);
419 return (error);
420 }
421
422 #ifdef WAPBL
423 /*
424 * WAPBL can only be enabled on a r/w mount
425 * that does not use softdep.
426 */
427 if ((mp->mnt_flag & MNT_RDONLY) && !(mp->mnt_iflag & IMNT_WANTRDWR)) {
428 mp->mnt_flag &= ~MNT_LOG;
429 }
430 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_LOG)) ==
431 (MNT_SOFTDEP | MNT_LOG)) {
432 printf("%s fs is journalled, ignoring soft update mode\n",
433 VFSTOUFS(mp)->um_fs->fs_fsmnt);
434 mp->mnt_flag &= ~MNT_SOFTDEP;
435 }
436 #else /* !WAPBL */
437 mp->mnt_flag &= ~MNT_LOG;
438 #endif /* !WAPBL */
439
440 if (!update) {
441 int xflags;
442
443 if (mp->mnt_flag & MNT_RDONLY)
444 xflags = FREAD;
445 else
446 xflags = FREAD | FWRITE;
447 error = VOP_OPEN(devvp, xflags, FSCRED);
448 if (error)
449 goto fail;
450 error = ffs_mountfs(devvp, mp, l);
451 if (error) {
452 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
453 (void)VOP_CLOSE(devvp, xflags, NOCRED);
454 VOP_UNLOCK(devvp, 0);
455 goto fail;
456 }
457
458 ump = VFSTOUFS(mp);
459 fs = ump->um_fs;
460 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
461 (MNT_SOFTDEP | MNT_ASYNC)) {
462 printf("%s fs uses soft updates, "
463 "ignoring async mode\n",
464 fs->fs_fsmnt);
465 mp->mnt_flag &= ~MNT_ASYNC;
466 }
467 } else {
468 /*
469 * Update the mount.
470 */
471
472 /*
473 * The initial mount got a reference on this
474 * device, so drop the one obtained via
475 * namei(), above.
476 */
477 vrele(devvp);
478
479 ump = VFSTOUFS(mp);
480 fs = ump->um_fs;
481 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
482 /*
483 * Changing from r/w to r/o
484 */
485 flags = WRITECLOSE;
486 if (mp->mnt_flag & MNT_FORCE)
487 flags |= FORCECLOSE;
488 if (mp->mnt_flag & MNT_SOFTDEP)
489 error = softdep_flushfiles(mp, flags, l);
490 else
491 error = ffs_flushfiles(mp, flags, l);
492 if (fs->fs_pendingblocks != 0 ||
493 fs->fs_pendinginodes != 0) {
494 printf("%s: update error: blocks %" PRId64
495 " files %d\n",
496 fs->fs_fsmnt, fs->fs_pendingblocks,
497 fs->fs_pendinginodes);
498 fs->fs_pendingblocks = 0;
499 fs->fs_pendinginodes = 0;
500 }
501 if (error == 0)
502 error = UFS_WAPBL_BEGIN(mp);
503 if (error == 0 &&
504 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
505 fs->fs_clean & FS_WASCLEAN) {
506 if (mp->mnt_flag & MNT_SOFTDEP)
507 fs->fs_flags &= ~FS_DOSOFTDEP;
508 fs->fs_clean = FS_ISCLEAN;
509 (void) ffs_sbupdate(ump, MNT_WAIT);
510 }
511 if (error == 0)
512 UFS_WAPBL_END(mp);
513 if (error)
514 return (error);
515 }
516
517 #ifdef WAPBL
518 if ((mp->mnt_flag & MNT_LOG) == 0) {
519 error = ffs_wapbl_stop(mp, mp->mnt_flag & MNT_FORCE);
520 if (error)
521 return error;
522 }
523 #endif /* WAPBL */
524
525 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
526 /*
527 * Finish change from r/w to r/o
528 */
529 fs->fs_ronly = 1;
530 fs->fs_fmod = 0;
531 }
532
533 /*
534 * Flush soft dependencies if disabling it via an update
535 * mount. This may leave some items to be processed,
536 * so don't do this yet XXX.
537 */
538 if ((fs->fs_flags & FS_DOSOFTDEP) &&
539 !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
540 #ifdef notyet
541 flags = WRITECLOSE;
542 if (mp->mnt_flag & MNT_FORCE)
543 flags |= FORCECLOSE;
544 error = softdep_flushfiles(mp, flags, l);
545 if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
546 fs->fs_flags &= ~FS_DOSOFTDEP;
547 (void) ffs_sbupdate(ump, MNT_WAIT);
548 #elif defined(SOFTDEP)
549 mp->mnt_flag |= MNT_SOFTDEP;
550 #endif
551 }
552
553 /*
554 * When upgrading to a softdep mount, we must first flush
555 * all vnodes. (not done yet -- see above)
556 */
557 if (!(fs->fs_flags & FS_DOSOFTDEP) &&
558 (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
559 #ifdef notyet
560 flags = WRITECLOSE;
561 if (mp->mnt_flag & MNT_FORCE)
562 flags |= FORCECLOSE;
563 error = ffs_flushfiles(mp, flags, l);
564 #else
565 mp->mnt_flag &= ~MNT_SOFTDEP;
566 #endif
567 }
568
569 if (mp->mnt_flag & MNT_RELOAD) {
570 error = ffs_reload(mp, l->l_cred, l);
571 if (error)
572 return (error);
573 }
574
575 if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
576 /*
577 * Changing from read-only to read/write
578 */
579 fs->fs_ronly = 0;
580 fs->fs_clean <<= 1;
581 fs->fs_fmod = 1;
582 if ((fs->fs_flags & FS_DOSOFTDEP)) {
583 error = softdep_mount(devvp, mp, fs,
584 l->l_cred);
585 if (error)
586 return (error);
587 }
588 #ifdef WAPBL
589 if (fs->fs_flags & FS_DOWAPBL) {
590 printf("%s: replaying log to disk\n",
591 fs->fs_fsmnt);
592 KDASSERT(mp->mnt_wapbl_replay);
593 error = wapbl_replay_write(mp->mnt_wapbl_replay,
594 devvp);
595 if (error) {
596 return error;
597 }
598 wapbl_replay_stop(mp->mnt_wapbl_replay);
599 fs->fs_clean = FS_WASCLEAN;
600 }
601 #endif /* WAPBL */
602 if (fs->fs_snapinum[0] != 0)
603 ffs_snapshot_mount(mp);
604 }
605
606 #ifdef WAPBL
607 error = ffs_wapbl_start(mp);
608 if (error)
609 return error;
610 #endif /* WAPBL */
611
612 if (args->fspec == NULL)
613 return EINVAL;
614 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
615 (MNT_SOFTDEP | MNT_ASYNC)) {
616 printf("%s fs uses soft updates, ignoring async mode\n",
617 fs->fs_fsmnt);
618 mp->mnt_flag &= ~MNT_ASYNC;
619 }
620 }
621
622 error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
623 UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
624 if (error == 0)
625 (void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
626 sizeof(fs->fs_fsmnt));
627 if (mp->mnt_flag & MNT_SOFTDEP)
628 fs->fs_flags |= FS_DOSOFTDEP;
629 else
630 fs->fs_flags &= ~FS_DOSOFTDEP;
631 if (fs->fs_fmod != 0) { /* XXX */
632 int err;
633
634 fs->fs_fmod = 0;
635 if (fs->fs_clean & FS_WASCLEAN)
636 fs->fs_time = time_second;
637 else {
638 printf("%s: file system not clean (fs_clean=%#x); "
639 "please fsck(8)\n", mp->mnt_stat.f_mntfromname,
640 fs->fs_clean);
641 printf("%s: lost blocks %" PRId64 " files %d\n",
642 mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
643 fs->fs_pendinginodes);
644 }
645 err = UFS_WAPBL_BEGIN(mp);
646 if (err == 0) {
647 (void) ffs_cgupdate(ump, MNT_WAIT);
648 UFS_WAPBL_END(mp);
649 }
650 }
651 return (error);
652
653 fail:
654 vrele(devvp);
655 return (error);
656 }
657
658 /*
659 * Reload all incore data for a filesystem (used after running fsck on
660 * the root filesystem and finding things to fix). The filesystem must
661 * be mounted read-only.
662 *
663 * Things to do to update the mount:
664 * 1) invalidate all cached meta-data.
665 * 2) re-read superblock from disk.
666 * 3) re-read summary information from disk.
667 * 4) invalidate all inactive vnodes.
668 * 5) invalidate all cached file data.
669 * 6) re-read inode data for all active vnodes.
670 */
671 int
672 ffs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
673 {
674 struct vnode *vp, *mvp, *devvp;
675 struct inode *ip;
676 void *space;
677 struct buf *bp;
678 struct fs *fs, *newfs;
679 struct partinfo dpart;
680 int i, blks, size, error;
681 int32_t *lp;
682 struct ufsmount *ump;
683 daddr_t sblockloc;
684
685 if ((mp->mnt_flag & MNT_RDONLY) == 0)
686 return (EINVAL);
687
688 ump = VFSTOUFS(mp);
689 /*
690 * Step 1: invalidate all cached meta-data.
691 */
692 devvp = ump->um_devvp;
693 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
694 error = vinvalbuf(devvp, 0, cred, l, 0, 0);
695 VOP_UNLOCK(devvp, 0);
696 if (error)
697 panic("ffs_reload: dirty1");
698 /*
699 * Step 2: re-read superblock from disk.
700 */
701 fs = ump->um_fs;
702 if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, NOCRED) != 0)
703 size = DEV_BSIZE;
704 else
705 size = dpart.disklab->d_secsize;
706 /* XXX we don't handle possibility that superblock moved. */
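	/*
	 * Illustrative arithmetic: fs_sblockloc is a byte offset, so the
	 * bread() below asks for block fs_sblockloc / secsize; e.g. with
	 * the common UFS1 location of 8192 bytes and 512-byte sectors
	 * that is device block 16.
	 */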
707 error = bread(devvp, fs->fs_sblockloc / size, fs->fs_sbsize,
708 NOCRED, 0, &bp);
709 if (error) {
710 brelse(bp, 0);
711 return (error);
712 }
713 newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
714 memcpy(newfs, bp->b_data, fs->fs_sbsize);
715 #ifdef FFS_EI
716 if (ump->um_flags & UFS_NEEDSWAP) {
717 ffs_sb_swap((struct fs*)bp->b_data, newfs);
718 fs->fs_flags |= FS_SWAPPED;
719 } else
720 #endif
721 fs->fs_flags &= ~FS_SWAPPED;
722 if ((newfs->fs_magic != FS_UFS1_MAGIC &&
723 newfs->fs_magic != FS_UFS2_MAGIC)||
724 newfs->fs_bsize > MAXBSIZE ||
725 newfs->fs_bsize < sizeof(struct fs)) {
726 brelse(bp, 0);
727 free(newfs, M_UFSMNT);
728 return (EIO); /* XXX needs translation */
729 }
  730 	/* Store off old fs_sblockloc for ffs_oldfscompat_read. */
731 sblockloc = fs->fs_sblockloc;
732 /*
733 * Copy pointer fields back into superblock before copying in XXX
734 * new superblock. These should really be in the ufsmount. XXX
735 * Note that important parameters (eg fs_ncg) are unchanged.
736 */
737 newfs->fs_csp = fs->fs_csp;
738 newfs->fs_maxcluster = fs->fs_maxcluster;
739 newfs->fs_contigdirs = fs->fs_contigdirs;
740 newfs->fs_ronly = fs->fs_ronly;
741 newfs->fs_active = fs->fs_active;
742 memcpy(fs, newfs, (u_int)fs->fs_sbsize);
743 brelse(bp, 0);
744 free(newfs, M_UFSMNT);
745
746 /* Recheck for apple UFS filesystem */
747 ump->um_flags &= ~UFS_ISAPPLEUFS;
748 /* First check to see if this is tagged as an Apple UFS filesystem
749 * in the disklabel
750 */
751 if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) == 0) &&
752 (dpart.part->p_fstype == FS_APPLEUFS)) {
753 ump->um_flags |= UFS_ISAPPLEUFS;
754 }
755 #ifdef APPLE_UFS
756 else {
757 /* Manually look for an apple ufs label, and if a valid one
758 * is found, then treat it like an Apple UFS filesystem anyway
759 */
760 error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
761 APPLEUFS_LABEL_SIZE, cred, 0, &bp);
762 if (error) {
763 brelse(bp, 0);
764 return (error);
765 }
766 error = ffs_appleufs_validate(fs->fs_fsmnt,
767 (struct appleufslabel *)bp->b_data, NULL);
768 if (error == 0)
769 ump->um_flags |= UFS_ISAPPLEUFS;
770 brelse(bp, 0);
771 bp = NULL;
772 }
773 #else
774 if (ump->um_flags & UFS_ISAPPLEUFS)
775 return (EIO);
776 #endif
777
778 if (UFS_MPISAPPLEUFS(ump)) {
779 /* see comment about NeXT below */
780 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
781 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
782 mp->mnt_iflag |= IMNT_DTYPE;
783 } else {
784 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
785 ump->um_dirblksiz = DIRBLKSIZ;
786 if (ump->um_maxsymlinklen > 0)
787 mp->mnt_iflag |= IMNT_DTYPE;
788 else
789 mp->mnt_iflag &= ~IMNT_DTYPE;
790 }
791 ffs_oldfscompat_read(fs, ump, sblockloc);
792 mutex_enter(&ump->um_lock);
793 ump->um_maxfilesize = fs->fs_maxfilesize;
794
795 if (fs->fs_flags & ~(FS_KNOWN_FLAGS | FS_INTERNAL)) {
796 uprintf("%s: unknown ufs flags: 0x%08"PRIx32"%s\n",
797 mp->mnt_stat.f_mntonname, fs->fs_flags,
798 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
799 if ((mp->mnt_flag & MNT_FORCE) == 0) {
800 mutex_exit(&ump->um_lock);
801 return (EINVAL);
802 }
803 }
804
805 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
806 fs->fs_pendingblocks = 0;
807 fs->fs_pendinginodes = 0;
808 }
809 mutex_exit(&ump->um_lock);
810
811 ffs_statvfs(mp, &mp->mnt_stat);
812 /*
813 * Step 3: re-read summary information from disk.
814 */
815 blks = howmany(fs->fs_cssize, fs->fs_fsize);
816 space = fs->fs_csp;
817 for (i = 0; i < blks; i += fs->fs_frag) {
818 size = fs->fs_bsize;
819 if (i + fs->fs_frag > blks)
820 size = (blks - i) * fs->fs_fsize;
821 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
822 NOCRED, 0, &bp);
823 if (error) {
824 brelse(bp, 0);
825 return (error);
826 }
827 #ifdef FFS_EI
828 if (UFS_FSNEEDSWAP(fs))
829 ffs_csum_swap((struct csum *)bp->b_data,
830 (struct csum *)space, size);
831 else
832 #endif
833 memcpy(space, bp->b_data, (size_t)size);
834 space = (char *)space + size;
835 brelse(bp, 0);
836 }
837 if ((fs->fs_flags & FS_DOSOFTDEP))
838 softdep_mount(devvp, mp, fs, cred);
839 if (fs->fs_snapinum[0] != 0)
840 ffs_snapshot_mount(mp);
841 /*
842 * We no longer know anything about clusters per cylinder group.
843 */
844 if (fs->fs_contigsumsize > 0) {
845 lp = fs->fs_maxcluster;
846 for (i = 0; i < fs->fs_ncg; i++)
847 *lp++ = fs->fs_contigsumsize;
848 }
849
850 /* Allocate a marker vnode. */
851 if ((mvp = vnalloc(mp)) == NULL)
852 return ENOMEM;
853 /*
854 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
855 * and vclean() can be called indirectly
856 */
857 mutex_enter(&mntvnode_lock);
858 loop:
859 for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
860 vmark(mvp, vp);
861 if (vp->v_mount != mp || vismarker(vp))
862 continue;
863 /*
864 * Step 4: invalidate all inactive vnodes.
865 */
866 if (vrecycle(vp, &mntvnode_lock, l)) {
867 mutex_enter(&mntvnode_lock);
868 (void)vunmark(mvp);
869 goto loop;
870 }
871 /*
872 * Step 5: invalidate all cached file data.
873 */
874 mutex_enter(&vp->v_interlock);
875 mutex_exit(&mntvnode_lock);
876 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
877 (void)vunmark(mvp);
878 goto loop;
879 }
880 if (vinvalbuf(vp, 0, cred, l, 0, 0))
881 panic("ffs_reload: dirty2");
882 /*
883 * Step 6: re-read inode data for all active vnodes.
884 */
885 ip = VTOI(vp);
886 error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
887 (int)fs->fs_bsize, NOCRED, 0, &bp);
888 if (error) {
889 brelse(bp, 0);
890 vput(vp);
891 (void)vunmark(mvp);
892 break;
893 }
894 ffs_load_inode(bp, ip, fs, ip->i_number);
895 ip->i_ffs_effnlink = ip->i_nlink;
896 brelse(bp, 0);
897 vput(vp);
898 mutex_enter(&mntvnode_lock);
899 }
900 mutex_exit(&mntvnode_lock);
901 vnfree(mvp);
902 return (error);
903 }
904
905 /*
906 * Possible superblock locations ordered from most to least likely.
907 */
908 static const int sblock_try[] = SBLOCKSEARCH;
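/*
 * At this revision SBLOCKSEARCH (from <ufs/ffs/fs.h>) is expected to
 * list the standard byte offsets (SBLOCK_UFS2 at 64 KB, SBLOCK_UFS1 at
 * 8 KB, SBLOCK_FLOPPY, SBLOCK_PIGGY) terminated by -1; that sentinel
 * is what ends the probe loop in ffs_mountfs() below.
 */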
909
910 /*
911 * Common code for mount and mountroot
912 */
913 int
914 ffs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
915 {
916 struct ufsmount *ump;
917 struct buf *bp;
918 struct fs *fs;
919 dev_t dev;
920 struct partinfo dpart;
921 void *space;
922 daddr_t sblockloc, fsblockloc;
923 int blks, fstype;
924 int error, i, size, ronly, bset = 0;
925 #ifdef FFS_EI
926 int needswap = 0; /* keep gcc happy */
927 #endif
928 int32_t *lp;
929 kauth_cred_t cred;
  930 	u_int32_t sbsize = 8192; /* keep gcc happy */
931
932 dev = devvp->v_rdev;
933 cred = l ? l->l_cred : NOCRED;
934
935 /* Flush out any old buffers remaining from a previous use. */
936 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
937 error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
938 VOP_UNLOCK(devvp, 0);
939 if (error)
940 return (error);
941
942 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
943 if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) != 0)
944 size = DEV_BSIZE;
945 else
946 size = dpart.disklab->d_secsize;
947
948 bp = NULL;
949 ump = NULL;
950 fs = NULL;
951 sblockloc = 0;
952 fstype = 0;
953
954 error = fstrans_mount(mp);
955 if (error)
956 return error;
957
958 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
959 memset(ump, 0, sizeof *ump);
960 mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);
961 error = ffs_snapshot_init(ump);
962 if (error)
963 goto out;
964 ump->um_ops = &ffs_ufsops;
965
966 #ifdef WAPBL
967 sbagain:
968 #endif
969 /*
970 * Try reading the superblock in each of its possible locations.
971 */
972 for (i = 0; ; i++) {
973 if (bp != NULL) {
974 brelse(bp, BC_NOCACHE);
975 bp = NULL;
976 }
977 if (sblock_try[i] == -1) {
978 error = EINVAL;
979 fs = NULL;
980 goto out;
981 }
982 error = bread(devvp, sblock_try[i] / size, SBLOCKSIZE, cred,
983 0, &bp);
984 if (error) {
985 fs = NULL;
986 goto out;
987 }
988 fs = (struct fs*)bp->b_data;
989 fsblockloc = sblockloc = sblock_try[i];
990 if (fs->fs_magic == FS_UFS1_MAGIC) {
991 sbsize = fs->fs_sbsize;
992 fstype = UFS1;
993 #ifdef FFS_EI
994 needswap = 0;
995 } else if (fs->fs_magic == bswap32(FS_UFS1_MAGIC)) {
996 sbsize = bswap32(fs->fs_sbsize);
997 fstype = UFS1;
998 needswap = 1;
999 #endif
1000 } else if (fs->fs_magic == FS_UFS2_MAGIC) {
1001 sbsize = fs->fs_sbsize;
1002 fstype = UFS2;
1003 #ifdef FFS_EI
1004 needswap = 0;
1005 } else if (fs->fs_magic == bswap32(FS_UFS2_MAGIC)) {
1006 sbsize = bswap32(fs->fs_sbsize);
1007 fstype = UFS2;
1008 needswap = 1;
1009 #endif
1010 } else
1011 continue;
1012
1013
1014 /* fs->fs_sblockloc isn't defined for old filesystems */
1015 if (fstype == UFS1 && !(fs->fs_old_flags & FS_FLAGS_UPDATED)) {
1016 if (sblockloc == SBLOCK_UFS2)
1017 /*
1018 * This is likely to be the first alternate
1019 * in a filesystem with 64k blocks.
1020 * Don't use it.
1021 */
1022 continue;
1023 fsblockloc = sblockloc;
1024 } else {
1025 fsblockloc = fs->fs_sblockloc;
1026 #ifdef FFS_EI
1027 if (needswap)
1028 fsblockloc = bswap64(fsblockloc);
1029 #endif
1030 }
1031
1032 /* Check we haven't found an alternate superblock */
1033 if (fsblockloc != sblockloc)
1034 continue;
1035
1036 /* Validate size of superblock */
1037 if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs))
1038 continue;
1039
1040 /* Ok seems to be a good superblock */
1041 break;
1042 }
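	/*
	 * At this point bp->b_data holds a superblock whose magic number
	 * was recognised (possibly byte-swapped under FFS_EI), whose
	 * recorded fs_sblockloc matched the offset it was read from (so
	 * stale alternate superblocks are skipped) and whose fs_sbsize
	 * passed the sanity check.
	 */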
1043
1044 fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
1045 memcpy(fs, bp->b_data, sbsize);
1046 ump->um_fs = fs;
1047
1048 #ifdef FFS_EI
1049 if (needswap) {
1050 ffs_sb_swap((struct fs*)bp->b_data, fs);
1051 fs->fs_flags |= FS_SWAPPED;
1052 } else
1053 #endif
1054 fs->fs_flags &= ~FS_SWAPPED;
1055
1056 #ifdef WAPBL
1057 if ((mp->mnt_wapbl_replay == 0) && (fs->fs_flags & FS_DOWAPBL)) {
1058 error = ffs_wapbl_replay_start(mp, fs, devvp);
1059 if (error)
1060 goto out;
1061
1062 if (!ronly) {
1063 /* XXX fsmnt may be stale. */
1064 printf("%s: replaying log to disk\n", fs->fs_fsmnt);
1065 error = wapbl_replay_write(mp->mnt_wapbl_replay, devvp);
1066 if (error)
1067 goto out;
1068 wapbl_replay_stop(mp->mnt_wapbl_replay);
1069 fs->fs_clean = FS_WASCLEAN;
1070 } else {
1071 /* XXX fsmnt may be stale */
1072 printf("%s: replaying log to memory\n", fs->fs_fsmnt);
1073 }
1074
1075 /* Force a re-read of the superblock */
1076 brelse(bp, BC_INVAL);
1077 bp = NULL;
1078 free(fs, M_UFSMNT);
1079 fs = NULL;
1080 goto sbagain;
1081 }
1082 #else /* !WAPBL */
1083 if ((fs->fs_flags & FS_DOWAPBL) && (mp->mnt_flag & MNT_FORCE) == 0) {
1084 error = EPERM;
1085 goto out;
1086 }
1087 #endif /* !WAPBL */
1088
1089 ffs_oldfscompat_read(fs, ump, sblockloc);
1090 ump->um_maxfilesize = fs->fs_maxfilesize;
1091
1092 if (fs->fs_flags & ~(FS_KNOWN_FLAGS | FS_INTERNAL)) {
1093 uprintf("%s: unknown ufs flags: 0x%08"PRIx32"%s\n",
1094 mp->mnt_stat.f_mntonname, fs->fs_flags,
1095 (mp->mnt_flag & MNT_FORCE) ? "" : ", not mounting");
1096 if ((mp->mnt_flag & MNT_FORCE) == 0) {
1097 error = EINVAL;
1098 goto out;
1099 }
1100 }
1101
1102 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1103 fs->fs_pendingblocks = 0;
1104 fs->fs_pendinginodes = 0;
1105 }
1106
1107 ump->um_fstype = fstype;
1108 if (fs->fs_sbsize < SBLOCKSIZE)
1109 brelse(bp, BC_INVAL);
1110 else
1111 brelse(bp, 0);
1112 bp = NULL;
1113
1114 /* First check to see if this is tagged as an Apple UFS filesystem
1115 * in the disklabel
1116 */
1117 if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) == 0) &&
1118 (dpart.part->p_fstype == FS_APPLEUFS)) {
1119 ump->um_flags |= UFS_ISAPPLEUFS;
1120 }
1121 #ifdef APPLE_UFS
1122 else {
1123 /* Manually look for an apple ufs label, and if a valid one
1124 * is found, then treat it like an Apple UFS filesystem anyway
1125 */
1126 error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
1127 APPLEUFS_LABEL_SIZE, cred, 0, &bp);
1128 if (error)
1129 goto out;
1130 error = ffs_appleufs_validate(fs->fs_fsmnt,
1131 (struct appleufslabel *)bp->b_data, NULL);
1132 if (error == 0) {
1133 ump->um_flags |= UFS_ISAPPLEUFS;
1134 }
1135 brelse(bp, 0);
1136 bp = NULL;
1137 }
1138 #else
1139 if (ump->um_flags & UFS_ISAPPLEUFS) {
1140 error = EINVAL;
1141 goto out;
1142 }
1143 #endif
1144
1145 #if 0
1146 /*
1147 * XXX This code changes the behaviour of mounting dirty filesystems, to
1148 * XXX require "mount -f ..." to mount them. This doesn't match what
1149 * XXX mount(8) describes and is disabled for now.
1150 */
1151 /*
1152 * If the file system is not clean, don't allow it to be mounted
1153 * unless MNT_FORCE is specified. (Note: MNT_FORCE is always set
1154 * for the root file system.)
1155 */
1156 if (fs->fs_flags & FS_DOWAPBL) {
1157 /*
1158 * wapbl normally expects to be FS_WASCLEAN when the FS_DOWAPBL
1159 * bit is set, although there's a window in unmount where it
1160 * could be FS_ISCLEAN
1161 */
1162 if ((mp->mnt_flag & MNT_FORCE) == 0 &&
1163 (fs->fs_clean & (FS_WASCLEAN | FS_ISCLEAN)) == 0) {
1164 error = EPERM;
1165 goto out;
1166 }
1167 } else
1168 if ((fs->fs_clean & FS_ISCLEAN) == 0 &&
1169 (mp->mnt_flag & MNT_FORCE) == 0) {
1170 error = EPERM;
1171 goto out;
1172 }
1173 #endif
1174
1175 /*
1176 * verify that we can access the last block in the fs
1177 * if we're mounting read/write.
1178 */
1179
1180 if (!ronly) {
1181 error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize,
1182 cred, 0, &bp);
1183 if (bp->b_bcount != fs->fs_fsize)
1184 error = EINVAL;
1185 if (error) {
1186 bset = BC_INVAL;
1187 goto out;
1188 }
1189 brelse(bp, BC_INVAL);
1190 bp = NULL;
1191 }
1192
1193 fs->fs_ronly = ronly;
1194 /* Don't bump fs_clean if we're replaying journal */
1195 if (!((fs->fs_flags & FS_DOWAPBL) && (fs->fs_clean & FS_WASCLEAN)))
1196 if (ronly == 0) {
1197 fs->fs_clean <<= 1;
1198 fs->fs_fmod = 1;
1199 }
1200 size = fs->fs_cssize;
1201 blks = howmany(size, fs->fs_fsize);
1202 if (fs->fs_contigsumsize > 0)
1203 size += fs->fs_ncg * sizeof(int32_t);
1204 size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1205 space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
1206 fs->fs_csp = space;
1207 for (i = 0; i < blks; i += fs->fs_frag) {
1208 size = fs->fs_bsize;
1209 if (i + fs->fs_frag > blks)
1210 size = (blks - i) * fs->fs_fsize;
1211 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
1212 cred, 0, &bp);
1213 if (error) {
1214 free(fs->fs_csp, M_UFSMNT);
1215 goto out;
1216 }
1217 #ifdef FFS_EI
1218 if (needswap)
1219 ffs_csum_swap((struct csum *)bp->b_data,
1220 (struct csum *)space, size);
1221 else
1222 #endif
1223 memcpy(space, bp->b_data, (u_int)size);
1224
1225 space = (char *)space + size;
1226 brelse(bp, 0);
1227 bp = NULL;
1228 }
1229 if (fs->fs_contigsumsize > 0) {
1230 fs->fs_maxcluster = lp = space;
1231 for (i = 0; i < fs->fs_ncg; i++)
1232 *lp++ = fs->fs_contigsumsize;
1233 space = lp;
1234 }
1235 size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
1236 fs->fs_contigdirs = space;
1237 space = (char *)space + size;
1238 memset(fs->fs_contigdirs, 0, size);
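	/*
	 * Layout of the single allocation above: the cylinder-group
	 * summary array (fs_cssize, read in fs_bsize chunks), then, when
	 * clustering is enabled, fs_ncg int32_t cluster-summary entries,
	 * then fs_ncg one-byte fs_contigdirs counters.
	 */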
1239 /* Compatibility for old filesystems - XXX */
1240 if (fs->fs_avgfilesize <= 0)
1241 fs->fs_avgfilesize = AVFILESIZ;
1242 if (fs->fs_avgfpdir <= 0)
1243 fs->fs_avgfpdir = AFPDIR;
1244 fs->fs_active = NULL;
1245 mp->mnt_data = ump;
1246 mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
1247 mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_FFS);
1248 mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
1249 mp->mnt_stat.f_namemax = FFS_MAXNAMLEN;
1250 if (UFS_MPISAPPLEUFS(ump)) {
1251 /* NeXT used to keep short symlinks in the inode even
1252 * when using FS_42INODEFMT. In that case fs->fs_maxsymlinklen
1253 * is probably -1, but we still need to be able to identify
1254 * short symlinks.
1255 */
1256 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
1257 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
1258 mp->mnt_iflag |= IMNT_DTYPE;
1259 } else {
1260 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
1261 ump->um_dirblksiz = DIRBLKSIZ;
1262 if (ump->um_maxsymlinklen > 0)
1263 mp->mnt_iflag |= IMNT_DTYPE;
1264 else
1265 mp->mnt_iflag &= ~IMNT_DTYPE;
1266 }
1267 mp->mnt_fs_bshift = fs->fs_bshift;
1268 mp->mnt_dev_bshift = DEV_BSHIFT; /* XXX */
1269 mp->mnt_flag |= MNT_LOCAL;
1270 mp->mnt_iflag |= IMNT_MPSAFE;
1271 #ifdef FFS_EI
1272 if (needswap)
1273 ump->um_flags |= UFS_NEEDSWAP;
1274 #endif
1275 ump->um_mountp = mp;
1276 ump->um_dev = dev;
1277 ump->um_devvp = devvp;
1278 ump->um_nindir = fs->fs_nindir;
1279 ump->um_lognindir = ffs(fs->fs_nindir) - 1;
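	/*
	 * fs_nindir (block pointers per indirect block) is a power of
	 * two, so ffs() on it returns log2(fs_nindir) + 1; e.g. a UFS1
	 * file system with 8 KB blocks has nindir 2048 and um_lognindir
	 * 11.
	 */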
1280 ump->um_bptrtodb = fs->fs_fsbtodb;
1281 ump->um_seqinc = fs->fs_frag;
1282 for (i = 0; i < MAXQUOTAS; i++)
1283 ump->um_quotas[i] = NULLVP;
1284 devvp->v_specmountpoint = mp;
1285 if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
1286 error = softdep_mount(devvp, mp, fs, cred);
1287 if (error) {
1288 free(fs->fs_csp, M_UFSMNT);
1289 goto out;
1290 }
1291 }
1292 if (ronly == 0 && fs->fs_snapinum[0] != 0)
1293 ffs_snapshot_mount(mp);
1294
1295 #ifdef WAPBL
1296 if (!ronly) {
1297 KDASSERT(fs->fs_ronly == 0);
1298 /*
1299 * ffs_wapbl_start() needs mp->mnt_stat initialised if it
1300 * needs to create a new log file in-filesystem.
1301 */
1302 ffs_statvfs(mp, &mp->mnt_stat);
1303
1304 error = ffs_wapbl_start(mp);
1305 if (error) {
1306 free(fs->fs_csp, M_UFSMNT);
1307 goto out;
1308 }
1309 }
1310 #endif /* WAPBL */
1311 #ifdef UFS_EXTATTR
1312 /*
1313 * Initialize file-backed extended attributes on UFS1 file
1314 * systems.
1315 */
1316 if (ump->um_fstype == UFS1) {
1317 ufs_extattr_uepm_init(&ump->um_extattr);
1318 #ifdef UFS_EXTATTR_AUTOSTART
1319 /*
1320 * XXX Just ignore errors. Not clear that we should
1321 * XXX fail the mount in this case.
1322 */
1323 (void) ufs_extattr_autostart(mp, l);
1324 #endif
1325 }
1326 #endif /* UFS_EXTATTR */
1327 return (0);
1328 out:
1329 #ifdef WAPBL
1330 if (mp->mnt_wapbl_replay) {
1331 wapbl_replay_stop(mp->mnt_wapbl_replay);
1332 wapbl_replay_free(mp->mnt_wapbl_replay);
1333 mp->mnt_wapbl_replay = 0;
1334 }
1335 #endif
1336
1337 fstrans_unmount(mp);
1338 if (fs)
1339 free(fs, M_UFSMNT);
1340 devvp->v_specmountpoint = NULL;
1341 if (bp)
1342 brelse(bp, bset);
1343 if (ump) {
1344 if (ump->um_oldfscompat)
1345 free(ump->um_oldfscompat, M_UFSMNT);
1346 mutex_destroy(&ump->um_lock);
1347 free(ump, M_UFSMNT);
1348 mp->mnt_data = NULL;
1349 }
1350 return (error);
1351 }
1352
1353 /*
1354 * Sanity checks for loading old filesystem superblocks.
1355 * See ffs_oldfscompat_write below for unwound actions.
1356 *
1357 * XXX - Parts get retired eventually.
1358 * Unfortunately new bits get added.
1359 */
1360 static void
1361 ffs_oldfscompat_read(struct fs *fs, struct ufsmount *ump, daddr_t sblockloc)
1362 {
1363 off_t maxfilesize;
1364 int32_t *extrasave;
1365
1366 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1367 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1368 return;
1369
1370 if (!ump->um_oldfscompat)
1371 ump->um_oldfscompat = malloc(512 + 3*sizeof(int32_t),
1372 M_UFSMNT, M_WAITOK);
1373
1374 memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
1375 extrasave = ump->um_oldfscompat;
1376 extrasave += 512/sizeof(int32_t);
1377 extrasave[0] = fs->fs_old_npsect;
1378 extrasave[1] = fs->fs_old_interleave;
1379 extrasave[2] = fs->fs_old_trackskew;
1380
1381 /* These fields will be overwritten by their
 1382 	 * original values in ffs_oldfscompat_write, so it is harmless
1383 * to modify them here.
1384 */
1385 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
1386 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
1387 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
1388 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
1389
1390 fs->fs_maxbsize = fs->fs_bsize;
1391 fs->fs_time = fs->fs_old_time;
1392 fs->fs_size = fs->fs_old_size;
1393 fs->fs_dsize = fs->fs_old_dsize;
1394 fs->fs_csaddr = fs->fs_old_csaddr;
1395 fs->fs_sblockloc = sblockloc;
1396
1397 fs->fs_flags = fs->fs_old_flags | (fs->fs_flags & FS_INTERNAL);
1398
1399 if (fs->fs_old_postblformat == FS_42POSTBLFMT) {
1400 fs->fs_old_nrpos = 8;
1401 fs->fs_old_npsect = fs->fs_old_nsect;
1402 fs->fs_old_interleave = 1;
1403 fs->fs_old_trackskew = 0;
1404 }
1405
1406 if (fs->fs_old_inodefmt < FS_44INODEFMT) {
1407 fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
1408 fs->fs_qbmask = ~fs->fs_bmask;
1409 fs->fs_qfmask = ~fs->fs_fmask;
1410 }
1411
1412 maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;
1413 if (fs->fs_maxfilesize > maxfilesize)
1414 fs->fs_maxfilesize = maxfilesize;
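	/*
	 * The clamp above is 2^31 * fs_bsize - 1 (16 TB - 1 with 8 KB
	 * blocks), which keeps logical block numbers for the largest
	 * permitted file within a signed 32-bit range.
	 */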
1415
1416 /* Compatibility for old filesystems */
1417 if (fs->fs_avgfilesize <= 0)
1418 fs->fs_avgfilesize = AVFILESIZ;
1419 if (fs->fs_avgfpdir <= 0)
1420 fs->fs_avgfpdir = AFPDIR;
1421
1422 #if 0
1423 if (bigcgs) {
1424 fs->fs_save_cgsize = fs->fs_cgsize;
1425 fs->fs_cgsize = fs->fs_bsize;
1426 }
1427 #endif
1428 }
1429
1430 /*
1431 * Unwinding superblock updates for old filesystems.
1432 * See ffs_oldfscompat_read above for details.
1433 *
1434 * XXX - Parts get retired eventually.
1435 * Unfortunately new bits get added.
1436 */
1437 static void
1438 ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
1439 {
1440 int32_t *extrasave;
1441
1442 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1443 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1444 return;
1445
1446 fs->fs_old_time = fs->fs_time;
1447 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
1448 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
1449 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
1450 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
1451 fs->fs_old_flags = fs->fs_flags;
1452
1453 #if 0
1454 if (bigcgs) {
1455 fs->fs_cgsize = fs->fs_save_cgsize;
1456 }
1457 #endif
1458
1459 memcpy(&fs->fs_old_postbl_start, ump->um_oldfscompat, 512);
1460 extrasave = ump->um_oldfscompat;
1461 extrasave += 512/sizeof(int32_t);
1462 fs->fs_old_npsect = extrasave[0];
1463 fs->fs_old_interleave = extrasave[1];
1464 fs->fs_old_trackskew = extrasave[2];
1465
1466 }
1467
1468 /*
1469 * unmount system call
1470 */
1471 int
1472 ffs_unmount(struct mount *mp, int mntflags)
1473 {
1474 struct lwp *l = curlwp;
1475 struct ufsmount *ump = VFSTOUFS(mp);
1476 struct fs *fs = ump->um_fs;
1477 int error, flags, penderr;
1478 #ifdef WAPBL
1479 extern int doforce;
1480 #endif
1481
1482 penderr = 0;
1483 flags = 0;
1484 if (mntflags & MNT_FORCE)
1485 flags |= FORCECLOSE;
1486 #ifdef UFS_EXTATTR
1487 if (ump->um_fstype == UFS1) {
1488 ufs_extattr_stop(mp, l);
1489 ufs_extattr_uepm_destroy(&ump->um_extattr);
1490 }
1491 #endif /* UFS_EXTATTR */
1492 if (mp->mnt_flag & MNT_SOFTDEP) {
1493 if ((error = softdep_flushfiles(mp, flags, l)) != 0)
1494 return (error);
1495 } else {
1496 if ((error = ffs_flushfiles(mp, flags, l)) != 0)
1497 return (error);
1498 }
1499 mutex_enter(&ump->um_lock);
1500 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1501 printf("%s: unmount pending error: blocks %" PRId64
1502 " files %d\n",
1503 fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
1504 fs->fs_pendingblocks = 0;
1505 fs->fs_pendinginodes = 0;
1506 penderr = 1;
1507 }
1508 mutex_exit(&ump->um_lock);
1509 error = UFS_WAPBL_BEGIN(mp);
1510 if (error == 0)
1511 if (fs->fs_ronly == 0 &&
1512 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
1513 fs->fs_clean & FS_WASCLEAN) {
1514 /*
1515 * XXXX don't mark fs clean in the case of softdep
1516 * pending block errors, until they are fixed.
1517 */
1518 if (penderr == 0) {
1519 if (mp->mnt_flag & MNT_SOFTDEP)
1520 fs->fs_flags &= ~FS_DOSOFTDEP;
1521 fs->fs_clean = FS_ISCLEAN;
1522 }
1523 fs->fs_fmod = 0;
1524 (void) ffs_sbupdate(ump, MNT_WAIT);
1525 }
1526 if (error == 0)
1527 UFS_WAPBL_END(mp);
1528 #ifdef WAPBL
1529 KASSERT(!(mp->mnt_wapbl_replay && mp->mnt_wapbl));
1530 if (mp->mnt_wapbl_replay) {
1531 KDASSERT(fs->fs_ronly);
1532 wapbl_replay_stop(mp->mnt_wapbl_replay);
1533 wapbl_replay_free(mp->mnt_wapbl_replay);
1534 mp->mnt_wapbl_replay = 0;
1535 }
1536 error = ffs_wapbl_stop(mp, doforce && (mntflags & MNT_FORCE));
1537 if (error) {
1538 return error;
1539 }
1540 #endif /* WAPBL */
1541 if (ump->um_devvp->v_type != VBAD)
1542 ump->um_devvp->v_specmountpoint = NULL;
1543 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1544 (void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD | FWRITE,
1545 NOCRED);
1546 vput(ump->um_devvp);
1547 free(fs->fs_csp, M_UFSMNT);
1548 free(fs, M_UFSMNT);
1549 if (ump->um_oldfscompat != NULL)
1550 free(ump->um_oldfscompat, M_UFSMNT);
1551 softdep_unmount(mp);
1552 mutex_destroy(&ump->um_lock);
1553 ffs_snapshot_fini(ump);
1554 free(ump, M_UFSMNT);
1555 mp->mnt_data = NULL;
1556 mp->mnt_flag &= ~MNT_LOCAL;
1557 fstrans_unmount(mp);
1558 return (0);
1559 }
1560
1561 /*
1562 * Flush out all the files in a filesystem.
1563 */
1564 int
1565 ffs_flushfiles(struct mount *mp, int flags, struct lwp *l)
1566 {
1567 extern int doforce;
1568 struct ufsmount *ump;
1569 int error;
1570
1571 if (!doforce)
1572 flags &= ~FORCECLOSE;
1573 ump = VFSTOUFS(mp);
1574 #ifdef QUOTA
1575 if (mp->mnt_flag & MNT_QUOTA) {
1576 int i;
1577 if ((error = vflush(mp, NULLVP, SKIPSYSTEM | flags)) != 0)
1578 return (error);
1579 for (i = 0; i < MAXQUOTAS; i++) {
1580 if (ump->um_quotas[i] == NULLVP)
1581 continue;
1582 quotaoff(l, mp, i);
1583 }
1584 /*
1585 * Here we fall through to vflush again to ensure
1586 * that we have gotten rid of all the system vnodes.
1587 */
1588 }
1589 #endif
1590 if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
1591 return (error);
1592 ffs_snapshot_unmount(mp);
1593 /*
1594 * Flush all the files.
1595 */
1596 error = vflush(mp, NULLVP, flags);
1597 if (error)
1598 return (error);
1599 /*
1600 * Flush filesystem metadata.
1601 */
1602 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1603 error = VOP_FSYNC(ump->um_devvp, l->l_cred, FSYNC_WAIT, 0, 0);
1604 VOP_UNLOCK(ump->um_devvp, 0);
1605 if (flags & FORCECLOSE) /* XXXDBJ */
1606 error = 0;
1607
1608 #ifdef WAPBL
1609 if (error)
1610 return error;
1611 if (mp->mnt_wapbl) {
1612 error = wapbl_flush(mp->mnt_wapbl, 1);
1613 if (flags & FORCECLOSE)
1614 error = 0;
1615 }
1616 #endif
1617
1618 return (error);
1619 }
1620
1621 /*
1622 * Get file system statistics.
1623 */
1624 int
1625 ffs_statvfs(struct mount *mp, struct statvfs *sbp)
1626 {
1627 struct ufsmount *ump;
1628 struct fs *fs;
1629
1630 ump = VFSTOUFS(mp);
1631 fs = ump->um_fs;
1632 mutex_enter(&ump->um_lock);
1633 sbp->f_bsize = fs->fs_bsize;
1634 sbp->f_frsize = fs->fs_fsize;
1635 sbp->f_iosize = fs->fs_bsize;
1636 sbp->f_blocks = fs->fs_dsize;
1637 sbp->f_bfree = blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
1638 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
1639 sbp->f_bresvd = ((u_int64_t) fs->fs_dsize * (u_int64_t)
1640 fs->fs_minfree) / (u_int64_t) 100;
1641 if (sbp->f_bfree > sbp->f_bresvd)
1642 sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
1643 else
1644 sbp->f_bavail = 0;
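	/*
	 * f_bresvd is the minfree reserve expressed in fragments: for
	 * example, with fs_dsize of 1000000 fragments and fs_minfree of
	 * 5 the reserve is 50000 fragments, so f_bavail reports
	 * f_bfree - 50000.
	 */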
1645 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
1646 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1647 sbp->f_favail = sbp->f_ffree;
1648 sbp->f_fresvd = 0;
1649 mutex_exit(&ump->um_lock);
1650 copy_statvfs_info(sbp, mp);
1651
1652 return (0);
1653 }
1654
1655 /*
1656 * Go through the disk queues to initiate sandbagged IO;
1657 * go through the inodes to write those that have been modified;
1658 * initiate the writing of the super block if it has been modified.
1659 *
1660 * Note: we are always called with the filesystem marked `MPBUSY'.
1661 */
1662 int
1663 ffs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
1664 {
1665 struct lwp *l = curlwp;
1666 struct vnode *vp, *mvp;
1667 struct inode *ip;
1668 struct ufsmount *ump = VFSTOUFS(mp);
1669 struct fs *fs;
1670 int error, count, allerror = 0;
1671
1672 fs = ump->um_fs;
1673 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
1674 printf("fs = %s\n", fs->fs_fsmnt);
1675 panic("update: rofs mod");
1676 }
1677
1678 /* Allocate a marker vnode. */
1679 if ((mvp = vnalloc(mp)) == NULL)
1680 return (ENOMEM);
1681
1682 fstrans_start(mp, FSTRANS_SHARED);
1683 /*
1684 * Write back each (modified) inode.
1685 */
1686 mutex_enter(&mntvnode_lock);
1687 loop:
1688 /*
1689 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
1690 * and vclean() can be called indirectly
1691 */
1692 for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
1693 vmark(mvp, vp);
1694 /*
1695 * If the vnode that we are about to sync is no longer
1696 * associated with this mount point, start over.
1697 */
1698 if (vp->v_mount != mp || vismarker(vp))
1699 continue;
1700 mutex_enter(&vp->v_interlock);
1701 ip = VTOI(vp);
1702
1703 /*
1704 * We deliberately update inode times here. This will
1705 * prevent a massive queue of updates accumulating, only
1706 * to be handled by a call to unmount.
1707 *
1708 * XXX It would be better to have the syncer trickle these
1709 * out. Adjustment needed to allow registering vnodes for
1710 * sync when the vnode is clean, but the inode dirty. Or
1711 * have ufs itself trickle out inode updates.
1712 */
1713 if (ip == NULL || (vp->v_iflag & (VI_XLOCK | VI_CLEAN)) != 0 ||
1714 vp->v_type == VNON || ((ip->i_flag &
1715 (IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFY |
1716 IN_MODIFIED | IN_ACCESSED)) == 0 &&
1717 LIST_EMPTY(&vp->v_dirtyblkhd) &&
1718 UVM_OBJ_IS_CLEAN(&vp->v_uobj)))
1719 {
1720 mutex_exit(&vp->v_interlock);
1721 continue;
1722 }
1723 if (vp->v_type == VBLK &&
1724 fstrans_getstate(mp) == FSTRANS_SUSPENDING) {
1725 mutex_exit(&vp->v_interlock);
1726 continue;
1727 }
1728 mutex_exit(&mntvnode_lock);
1729 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
1730 if (error) {
1731 mutex_enter(&mntvnode_lock);
1732 if (error == ENOENT) {
1733 (void)vunmark(mvp);
1734 goto loop;
1735 }
1736 continue;
1737 }
1738 if (vp->v_type == VREG && waitfor == MNT_LAZY) {
1739 error = UFS_WAPBL_BEGIN(vp->v_mount);
1740 if (!error) {
1741 error = ffs_update(vp, NULL, NULL, 0);
1742 UFS_WAPBL_END(vp->v_mount);
1743 }
1744 } else {
1745 error = VOP_FSYNC(vp, cred, FSYNC_NOLOG |
1746 (waitfor == MNT_WAIT ? FSYNC_WAIT : 0), 0, 0);
1747 }
1748 if (error)
1749 allerror = error;
1750 vput(vp);
1751 mutex_enter(&mntvnode_lock);
1752 }
1753 mutex_exit(&mntvnode_lock);
1754 /*
1755 * Force stale file system control information to be flushed.
1756 */
1757 if (waitfor == MNT_WAIT && (ump->um_mountp->mnt_flag & MNT_SOFTDEP)) {
1758 if ((error = softdep_flushworklist(ump->um_mountp, &count, l)))
1759 allerror = error;
1760 /* Flushed work items may create new vnodes to clean */
1761 if (allerror == 0 && count) {
1762 mutex_enter(&mntvnode_lock);
1763 goto loop;
1764 }
1765 }
1766 if (waitfor != MNT_LAZY && (ump->um_devvp->v_numoutput > 0 ||
1767 !LIST_EMPTY(&ump->um_devvp->v_dirtyblkhd))) {
1768 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1769 if ((error = VOP_FSYNC(ump->um_devvp, cred,
1770 (waitfor == MNT_WAIT ? FSYNC_WAIT : 0) | FSYNC_NOLOG,
1771 0, 0)) != 0)
1772 allerror = error;
1773 VOP_UNLOCK(ump->um_devvp, 0);
1774 if (allerror == 0 && waitfor == MNT_WAIT && !mp->mnt_wapbl) {
1775 mutex_enter(&mntvnode_lock);
1776 goto loop;
1777 }
1778 }
1779 #ifdef QUOTA
1780 qsync(mp);
1781 #endif
1782 /*
1783 * Write back modified superblock.
1784 */
1785 if (fs->fs_fmod != 0) {
1786 fs->fs_fmod = 0;
1787 fs->fs_time = time_second;
1788 error = UFS_WAPBL_BEGIN(mp);
1789 if (error)
1790 allerror = error;
1791 else {
1792 if ((error = ffs_cgupdate(ump, waitfor)))
1793 allerror = error;
1794 UFS_WAPBL_END(mp);
1795 }
1796 }
1797
1798 #ifdef WAPBL
1799 if (mp->mnt_wapbl) {
1800 error = wapbl_flush(mp->mnt_wapbl, 0);
1801 if (error)
1802 allerror = error;
1803 }
1804 #endif
1805
1806 fstrans_done(mp);
1807 vnfree(mvp);
1808 return (allerror);
1809 }
1810
1811 /*
1812 * Look up a FFS dinode number to find its incore vnode, otherwise read it
1813 * in from disk. If it is in core, wait for the lock bit to clear, then
1814 * return the inode locked. Detection and handling of mount points must be
1815 * done by the calling routine.
1816 */
1817 int
1818 ffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
1819 {
1820 struct fs *fs;
1821 struct inode *ip;
1822 struct ufsmount *ump;
1823 struct buf *bp;
1824 struct vnode *vp;
1825 dev_t dev;
1826 int error;
1827
1828 ump = VFSTOUFS(mp);
1829 dev = ump->um_dev;
1830
1831 retry:
1832 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
1833 return (0);
1834
1835 /* Allocate a new vnode/inode. */
1836 if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
1837 *vpp = NULL;
1838 return (error);
1839 }
1840 ip = pool_cache_get(ffs_inode_cache, PR_WAITOK);
1841
1842 /*
1843 * If someone beat us to it, put back the freshly allocated
1844 * vnode/inode pair and retry.
1845 */
1846 mutex_enter(&ufs_hashlock);
1847 if (ufs_ihashget(dev, ino, 0) != NULL) {
1848 mutex_exit(&ufs_hashlock);
1849 ungetnewvnode(vp);
1850 pool_cache_put(ffs_inode_cache, ip);
1851 goto retry;
1852 }
1853
1854 vp->v_vflag |= VV_LOCKSWORK;
1855 if ((mp->mnt_flag & MNT_SOFTDEP) != 0)
1856 vp->v_uflag |= VU_SOFTDEP;
1857
1858 /*
1859 * XXX MFS ends up here, too, to allocate an inode. Should we
1860 * XXX create another pool for MFS inodes?
1861 */
1862
1863 memset(ip, 0, sizeof(struct inode));
1864 vp->v_data = ip;
1865 ip->i_vnode = vp;
1866 ip->i_ump = ump;
1867 ip->i_fs = fs = ump->um_fs;
1868 ip->i_dev = dev;
1869 ip->i_number = ino;
1870 LIST_INIT(&ip->i_pcbufhd);
1871 #ifdef QUOTA
1872 ufsquota_init(ip);
1873 #endif
1874
1875 /*
1876 * Initialize genfs node, we might proceed to destroy it in
1877 * error branches.
1878 */
1879 genfs_node_init(vp, &ffs_genfsops);
1880
1881 /*
1882 * Put it onto its hash chain and lock it so that other requests for
1883 * this inode will block if they arrive while we are sleeping waiting
1884 * for old data structures to be purged or for the contents of the
1885 * disk portion of this inode to be read.
1886 */
1887
1888 ufs_ihashins(ip);
1889 mutex_exit(&ufs_hashlock);
1890
1891 /* Read in the disk contents for the inode, copy into the inode. */
1892 error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1893 (int)fs->fs_bsize, NOCRED, 0, &bp);
1894 if (error) {
1895
1896 /*
1897 * The inode does not contain anything useful, so it would
1898 * be misleading to leave it on its hash chain. With mode
1899 * still zero, it will be unlinked and returned to the free
1900 * list by vput().
1901 */
1902
1903 vput(vp);
1904 brelse(bp, 0);
1905 *vpp = NULL;
1906 return (error);
1907 }
1908 if (ip->i_ump->um_fstype == UFS1)
1909 ip->i_din.ffs1_din = pool_cache_get(ffs_dinode1_cache,
1910 PR_WAITOK);
1911 else
1912 ip->i_din.ffs2_din = pool_cache_get(ffs_dinode2_cache,
1913 PR_WAITOK);
1914 ffs_load_inode(bp, ip, fs, ino);
1915 if (DOINGSOFTDEP(vp))
1916 softdep_load_inodeblock(ip);
1917 else
1918 ip->i_ffs_effnlink = ip->i_nlink;
1919 brelse(bp, 0);
1920
1921 /*
1922 * Initialize the vnode from the inode, check for aliases.
1923 * Note that the underlying vnode may have changed.
1924 */
1925
1926 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
1927
1928 /*
1929 * Finish inode initialization now that aliasing has been resolved.
1930 */
1931
1932 ip->i_devvp = ump->um_devvp;
1933 VREF(ip->i_devvp);
1934
1935 /*
1936 * Ensure that uid and gid are correct. This is a temporary
1937 * fix until fsck has been changed to do the update.
1938 */
1939
1940 if (fs->fs_old_inodefmt < FS_44INODEFMT) { /* XXX */
1941 ip->i_uid = ip->i_ffs1_ouid; /* XXX */
1942 ip->i_gid = ip->i_ffs1_ogid; /* XXX */
1943 } /* XXX */
1944 uvm_vnp_setsize(vp, ip->i_size);
1945 *vpp = vp;
1946 return (0);
1947 }
1948
1949 /*
1950 * File handle to vnode
1951 *
1952 * Have to be really careful about stale file handles:
1953 * - check that the inode number is valid
1954 * - call ffs_vget() to get the locked inode
1955 * - check for an unallocated inode (i_mode == 0)
1956 * - check that the given client host has export rights and return
 1957  * those rights via exflagsp and credanonp
1958 */
1959 int
1960 ffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
1961 {
1962 struct ufid ufh;
1963 struct fs *fs;
1964
1965 if (fhp->fid_len != sizeof(struct ufid))
1966 return EINVAL;
1967
1968 memcpy(&ufh, fhp, sizeof(ufh));
1969 fs = VFSTOUFS(mp)->um_fs;
1970 if (ufh.ufid_ino < ROOTINO ||
1971 ufh.ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1972 return (ESTALE);
1973 return (ufs_fhtovp(mp, &ufh, vpp));
1974 }
1975
1976 /*
1977 * Vnode pointer to File handle
1978 */
1979 /* ARGSUSED */
1980 int
1981 ffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
1982 {
1983 struct inode *ip;
1984 struct ufid ufh;
1985
1986 if (*fh_size < sizeof(struct ufid)) {
1987 *fh_size = sizeof(struct ufid);
1988 return E2BIG;
1989 }
1990 ip = VTOI(vp);
1991 *fh_size = sizeof(struct ufid);
1992 memset(&ufh, 0, sizeof(ufh));
1993 ufh.ufid_len = sizeof(struct ufid);
1994 ufh.ufid_ino = ip->i_number;
1995 ufh.ufid_gen = ip->i_gen;
1996 memcpy(fhp, &ufh, sizeof(ufh));
1997 return (0);
1998 }
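/*
 * The exported handle is just the struct ufid filled in above
 * (ufid_len, ufid_ino, ufid_gen).  ffs_fhtovp() rejects handles whose
 * length or inode number is impossible, and the generation number is
 * there so the ufs layer can turn handles for since-recycled inodes
 * into ESTALE instead of silently resolving them to a new file.
 */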
1999
2000 void
2001 ffs_init(void)
2002 {
2003 if (ffs_initcount++ > 0)
2004 return;
2005
2006 ffs_inode_cache = pool_cache_init(sizeof(struct inode), 0, 0, 0,
2007 "ffsino", NULL, IPL_NONE, NULL, NULL, NULL);
2008 ffs_dinode1_cache = pool_cache_init(sizeof(struct ufs1_dinode), 0, 0, 0,
2009 "ffsdino1", NULL, IPL_NONE, NULL, NULL, NULL);
2010 ffs_dinode2_cache = pool_cache_init(sizeof(struct ufs2_dinode), 0, 0, 0,
2011 "ffsdino2", NULL, IPL_NONE, NULL, NULL, NULL);
2012 softdep_initialize();
2013 ufs_init();
2014 }
2015
2016 void
2017 ffs_reinit(void)
2018 {
2019 softdep_reinitialize();
2020 ufs_reinit();
2021 }
2022
2023 void
2024 ffs_done(void)
2025 {
2026 if (--ffs_initcount > 0)
2027 return;
2028
2029 /* XXX softdep cleanup ? */
2030 ufs_done();
2031 pool_cache_destroy(ffs_dinode2_cache);
2032 pool_cache_destroy(ffs_dinode1_cache);
2033 pool_cache_destroy(ffs_inode_cache);
2034 }
2035
2036 /*
2037 * Write a superblock and associated information back to disk.
2038 */
2039 int
2040 ffs_sbupdate(struct ufsmount *mp, int waitfor)
2041 {
2042 struct fs *fs = mp->um_fs;
2043 struct buf *bp;
2044 int error = 0;
2045 u_int32_t saveflag;
2046
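	/*
	 * fs_sblockloc is a byte offset.  Since fsbtodb() is a shift by
	 * fs_fsbtodb on a fragment number and a fragment is 1 << fs_fshift
	 * bytes, shifting the byte offset right by (fs_fshift - fs_fsbtodb)
	 * yields the superblock location in the same device-block units
	 * fsbtodb() produces; e.g. with 2 kB fragments and 512-byte device
	 * blocks (fs_fshift 11, fs_fsbtodb 2) this is a shift by 9.
	 */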
2047 error = ffs_getblk(mp->um_devvp,
2048 fs->fs_sblockloc >> (fs->fs_fshift - fs->fs_fsbtodb), FFS_NOBLK,
2049 fs->fs_sbsize, false, &bp);
2050 if (error)
2051 return error;
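	/*
	 * FS_INTERNAL covers flags that only have meaning in core;
	 * strip them from the copy that goes to disk and put them back
	 * once the buffer has been filled in.
	 */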
2052 saveflag = fs->fs_flags & FS_INTERNAL;
2053 fs->fs_flags &= ~FS_INTERNAL;
2054
2055 memcpy(bp->b_data, fs, fs->fs_sbsize);
2056
2057 ffs_oldfscompat_write((struct fs *)bp->b_data, mp);
2058 #ifdef FFS_EI
2059 if (mp->um_flags & UFS_NEEDSWAP)
2060 ffs_sb_swap((struct fs *)bp->b_data, (struct fs *)bp->b_data);
2061 #endif
2062 fs->fs_flags |= saveflag;
2063
2064 if (waitfor == MNT_WAIT)
2065 error = bwrite(bp);
2066 else
2067 bawrite(bp);
2068 return (error);
2069 }
2070
2071 int
2072 ffs_cgupdate(struct ufsmount *mp, int waitfor)
2073 {
2074 struct fs *fs = mp->um_fs;
2075 struct buf *bp;
2076 int blks;
2077 void *space;
2078 int i, size, error = 0, allerror = 0;
2079
2080 allerror = ffs_sbupdate(mp, waitfor);
2081 blks = howmany(fs->fs_cssize, fs->fs_fsize);
2082 space = fs->fs_csp;
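	/*
	 * Write the in-core cylinder group summary area (fs_cssize
	 * bytes at fs_csaddr, pointed to by fs_csp) back to disk one
	 * file system block at a time, byte-swapping each chunk when
	 * the on-disk format is in the other byte order.
	 */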
2083 for (i = 0; i < blks; i += fs->fs_frag) {
2084 size = fs->fs_bsize;
2085 if (i + fs->fs_frag > blks)
2086 size = (blks - i) * fs->fs_fsize;
2087 error = ffs_getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
2088 FFS_NOBLK, size, false, &bp);
2089 if (error)
2090 break;
2091 #ifdef FFS_EI
2092 if (mp->um_flags & UFS_NEEDSWAP)
2093 ffs_csum_swap((struct csum*)space,
2094 (struct csum*)bp->b_data, size);
2095 else
2096 #endif
2097 memcpy(bp->b_data, space, (u_int)size);
2098 space = (char *)space + size;
2099 if (waitfor == MNT_WAIT)
2100 error = bwrite(bp);
2101 else
2102 bawrite(bp);
2103 }
2104 if (!allerror && error)
2105 allerror = error;
2106 return (allerror);
2107 }
2108
2109 int
2110 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
2111 int attrnamespace, const char *attrname)
2112 {
2113 #ifdef UFS_EXTATTR
2114 /*
2115 * File-backed extended attributes are only supported on UFS1.
2116 * UFS2 has native extended attributes.
2117 */
2118 if (VFSTOUFS(mp)->um_fstype == UFS1)
2119 return (ufs_extattrctl(mp, cmd, vp, attrnamespace, attrname));
2120 #endif
2121 return (vfs_stdextattrctl(mp, cmd, vp, attrnamespace, attrname));
2122 }
2123
2124 int
2125 ffs_suspendctl(struct mount *mp, int cmd)
2126 {
2127 int error;
2128 struct lwp *l = curlwp;
2129
2130 switch (cmd) {
2131 case SUSPEND_SUSPEND:
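		/*
		 * Start holding off new write transactions
		 * (FSTRANS_SUSPENDING), flush everything with ffs_sync(),
		 * then move to FSTRANS_SUSPENDED and force the WAPBL log
		 * out.  On any failure drop back to FSTRANS_NORMAL so the
		 * file system keeps running.
		 */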
2132 if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
2133 return error;
2134 error = ffs_sync(mp, MNT_WAIT, l->l_proc->p_cred);
2135 if (error == 0)
2136 error = fstrans_setstate(mp, FSTRANS_SUSPENDED);
2137 #ifdef WAPBL
2138 if (error == 0 && mp->mnt_wapbl)
2139 error = wapbl_flush(mp->mnt_wapbl, 1);
2140 #endif
2141 if (error != 0) {
2142 (void) fstrans_setstate(mp, FSTRANS_NORMAL);
2143 return error;
2144 }
2145 return 0;
2146
2147 case SUSPEND_RESUME:
2148 return fstrans_setstate(mp, FSTRANS_NORMAL);
2149
2150 default:
2151 return EINVAL;
2152 }
2153 }
2154
2155 /*
2156  * Sync a vnode for a mounted file system.  This is called for foreign
2157  * vnodes, i.e. non-ffs vnodes, in practice the block device backing the mount.
2158 */
2159 static int
2160 ffs_vfs_fsync(vnode_t *vp, int flags)
2161 {
2162 int error, passes, skipmeta, i, pflags;
2163 buf_t *bp, *nbp;
2164 struct mount *mp;
2165
2166 KASSERT(vp->v_type == VBLK);
2167 KASSERT(vp->v_specmountpoint != NULL);
2168
2169 mp = vp->v_specmountpoint;
2170 if ((mp->mnt_flag & MNT_SOFTDEP) != 0)
2171 softdep_fsync_mountdev(vp);
2172
2173 /*
2174 * Flush all dirty data associated with the vnode.
2175 */
2176 pflags = PGO_ALLPAGES | PGO_CLEANIT;
2177 if ((flags & FSYNC_WAIT) != 0)
2178 pflags |= PGO_SYNCIO;
2179 mutex_enter(&vp->v_interlock);
2180 error = VOP_PUTPAGES(vp, 0, 0, pflags);
2181 if (error)
2182 return error;
2183
2184 #ifdef WAPBL
2185 if (mp && mp->mnt_wapbl) {
2186 /*
2187 * Don't bother writing out metadata if the syncer is
2188 * making the request. We will let the sync vnode
2189 * write it out in a single burst through a call to
2190 * VFS_SYNC().
2191 */
2192 if ((flags & (FSYNC_DATAONLY | FSYNC_LAZY | FSYNC_NOLOG)) != 0)
2193 return 0;
2194
2195 /*
2196 * Don't flush the log if the vnode being flushed
2197 * contains no dirty buffers that could be in the log.
2198 */
2199 if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
2200 error = wapbl_flush(mp->mnt_wapbl, 0);
2201 if (error)
2202 return error;
2203 }
2204
2205 if ((flags & FSYNC_WAIT) != 0) {
2206 mutex_enter(&vp->v_interlock);
2207 while (vp->v_numoutput)
2208 cv_wait(&vp->v_cv, &vp->v_interlock);
2209 mutex_exit(&vp->v_interlock);
2210 }
2211
2212 return 0;
2213 }
2214 #endif /* WAPBL */
2215
2216 /*
2217 	 * Write out metadata for non-logging file systems.  This block can
2218 	 * be simplified once softdep goes away.
2219 */
2220 passes = NIADDR + 1;
2221 skipmeta = 0;
2222 if (flags & FSYNC_WAIT)
2223 skipmeta = 1;
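	/*
	 * When waiting, sweep the data buffers first (while skipmeta is
	 * set, metadata such as indirect blocks, which have negative
	 * logical block numbers, is skipped) and pick the metadata up on
	 * a second sweep.  Up to NIADDR + 1 passes are made over a
	 * still-dirty list; the final pass writes synchronously so write
	 * errors are detected.
	 */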
2224
2225 loop:
2226 mutex_enter(&bufcache_lock);
2227 LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
2228 bp->b_cflags &= ~BC_SCANNED;
2229 }
2230 for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
2231 nbp = LIST_NEXT(bp, b_vnbufs);
2232 if (bp->b_cflags & (BC_BUSY | BC_SCANNED))
2233 continue;
2234 if ((bp->b_oflags & BO_DELWRI) == 0)
2235 panic("ffs_fsync: not dirty");
2236 if (skipmeta && bp->b_lblkno < 0)
2237 continue;
2238 bp->b_cflags |= BC_BUSY | BC_VFLUSH | BC_SCANNED;
2239 mutex_exit(&bufcache_lock);
2240 /*
2241 * On our final pass through, do all I/O synchronously
2242 * so that we can find out if our flush is failing
2243 * because of write errors.
2244 */
2245 if (passes > 0 || !(flags & FSYNC_WAIT))
2246 (void) bawrite(bp);
2247 else if ((error = bwrite(bp)) != 0)
2248 return (error);
2249 /*
2250 * Since we unlocked during the I/O, we need
2251 * to start from a known point.
2252 */
2253 mutex_enter(&bufcache_lock);
2254 nbp = LIST_FIRST(&vp->v_dirtyblkhd);
2255 }
2256 mutex_exit(&bufcache_lock);
2257 if (skipmeta) {
2258 skipmeta = 0;
2259 goto loop;
2260 }
2261
2262 if ((flags & FSYNC_WAIT) != 0) {
2263 mutex_enter(&vp->v_interlock);
2264 while (vp->v_numoutput) {
2265 cv_wait(&vp->v_cv, &vp->v_interlock);
2266 }
2267 mutex_exit(&vp->v_interlock);
2268
2269 /*
2270 * Ensure that any filesystem metadata associated
2271 * with the vnode has been written.
2272 */
2273 if ((error = softdep_sync_metadata(vp)) != 0)
2274 return (error);
2275
2276 if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
2277 /*
2278 			 * This vnode is a block device, and new I/O requests
2279 			 * may be posted against a block device even while the
2280 			 * vnode is locked, so no amount of retrying is
2281 			 * guaranteed to leave it clean.  Make a bounded
2282 			 * number of passes over the dirty list, then just
2283 			 * give up.
2284 */
2285 if (passes > 0) {
2286 passes--;
2287 goto loop;
2288 }
2289 #ifdef DIAGNOSTIC
2290 if (vp->v_type != VBLK)
2291 vprint("ffs_fsync: dirty", vp);
2292 #endif
2293 }
2294 }
2295
2296 if (error == 0 && (flags & FSYNC_CACHE) != 0) {
2297 (void)VOP_IOCTL(vp, DIOCCACHESYNC, &i, FWRITE,
2298 kauth_cred_get());
2299 }
2300
2301 return error;
2302 }
2303