1 /* $NetBSD: ffs_vfsops.c,v 1.220 2008/01/25 10:49:32 pooka Exp $ */
2
3 /*
4 * Copyright (c) 1989, 1991, 1993, 1994
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
32 */
33
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.220 2008/01/25 10:49:32 pooka Exp $");
36
37 #if defined(_KERNEL_OPT)
38 #include "opt_ffs.h"
39 #include "opt_quota.h"
40 #include "opt_softdep.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/namei.h>
46 #include <sys/proc.h>
47 #include <sys/kernel.h>
48 #include <sys/vnode.h>
49 #include <sys/socket.h>
50 #include <sys/mount.h>
51 #include <sys/buf.h>
52 #include <sys/device.h>
53 #include <sys/mbuf.h>
54 #include <sys/file.h>
55 #include <sys/disklabel.h>
56 #include <sys/ioctl.h>
57 #include <sys/errno.h>
58 #include <sys/malloc.h>
59 #include <sys/pool.h>
60 #include <sys/lock.h>
61 #include <sys/sysctl.h>
62 #include <sys/conf.h>
63 #include <sys/kauth.h>
64 #include <sys/fstrans.h>
65
66 #include <miscfs/specfs/specdev.h>
67
68 #include <ufs/ufs/quota.h>
69 #include <ufs/ufs/ufsmount.h>
70 #include <ufs/ufs/inode.h>
71 #include <ufs/ufs/dir.h>
72 #include <ufs/ufs/ufs_extern.h>
73 #include <ufs/ufs/ufs_bswap.h>
74
75 #include <ufs/ffs/fs.h>
76 #include <ufs/ffs/ffs_extern.h>
77
78 /* how many times ffs_init() was called */
79 int ffs_initcount = 0;
80
81 extern kmutex_t ufs_hashlock;
82
83 extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
84 extern const struct vnodeopv_desc ffs_specop_opv_desc;
85 extern const struct vnodeopv_desc ffs_fifoop_opv_desc;
86
87 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
88 &ffs_vnodeop_opv_desc,
89 &ffs_specop_opv_desc,
90 &ffs_fifoop_opv_desc,
91 NULL,
92 };
93
94 struct vfsops ffs_vfsops = {
95 MOUNT_FFS,
96 sizeof (struct ufs_args),
97 ffs_mount,
98 ufs_start,
99 ffs_unmount,
100 ufs_root,
101 ufs_quotactl,
102 ffs_statvfs,
103 ffs_sync,
104 ffs_vget,
105 ffs_fhtovp,
106 ffs_vptofh,
107 ffs_init,
108 ffs_reinit,
109 ffs_done,
110 ffs_mountroot,
111 ffs_snapshot,
112 ffs_extattrctl,
113 ffs_suspendctl,
114 ffs_vnodeopv_descs,
115 0,
116 { NULL, NULL },
117 };
118 VFS_ATTACH(ffs_vfsops);
119
120 static const struct genfs_ops ffs_genfsops = {
121 .gop_size = ffs_gop_size,
122 .gop_alloc = ufs_gop_alloc,
123 .gop_write = genfs_gop_write,
124 .gop_markupdate = ufs_gop_markupdate,
125 };
126
127 static const struct ufs_ops ffs_ufsops = {
128 .uo_itimes = ffs_itimes,
129 .uo_update = ffs_update,
130 .uo_truncate = ffs_truncate,
131 .uo_valloc = ffs_valloc,
132 .uo_vfree = ffs_vfree,
133 .uo_balloc = ffs_balloc,
134 };
135
136 pool_cache_t ffs_inode_cache;
137 pool_cache_t ffs_dinode1_cache;
138 pool_cache_t ffs_dinode2_cache;
139
140 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, daddr_t);
141 static void ffs_oldfscompat_write(struct fs *, struct ufsmount *);
142
143 /*
144 * Called by main() when ffs is going to be mounted as root.
145 */
146
147 int
148 ffs_mountroot(void)
149 {
150 struct fs *fs;
151 struct mount *mp;
152 struct lwp *l = curlwp; /* XXX */
153 struct ufsmount *ump;
154 int error;
155
156 if (device_class(root_device) != DV_DISK)
157 return (ENODEV);
158
159 if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
160 vrele(rootvp);
161 return (error);
162 }
163 if ((error = ffs_mountfs(rootvp, mp, l)) != 0) {
164 mp->mnt_op->vfs_refcount--;
165 vfs_unbusy(mp);
166 vfs_destroy(mp);
167 return (error);
168 }
169 mutex_enter(&mountlist_lock);
170 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
171 mutex_exit(&mountlist_lock);
172 ump = VFSTOUFS(mp);
173 fs = ump->um_fs;
174 memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
175 (void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
176 (void)ffs_statvfs(mp, &mp->mnt_stat);
177 vfs_unbusy(mp);
178 setrootfstime((time_t)fs->fs_time);
179 return (0);
180 }
181
182 /*
183 * VFS Operations.
184 *
185 * mount system call
186 */
187 int
188 ffs_mount(struct mount *mp, const char *path, void *data, size_t *data_len)
189 {
190 struct lwp *l = curlwp;
191 struct nameidata nd;
192 struct vnode *vp, *devvp = NULL;
193 struct ufs_args *args = data;
194 struct ufsmount *ump = NULL;
195 struct fs *fs;
196 int error = 0, flags, update;
197 mode_t accessmode;
198
199 if (*data_len < sizeof *args)
200 return EINVAL;
201
202 if (mp->mnt_flag & MNT_GETARGS) {
203 ump = VFSTOUFS(mp);
204 if (ump == NULL)
205 return EIO;
206 args->fspec = NULL;
207 *data_len = sizeof *args;
208 return 0;
209 }
210
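/* If soft dependencies are not compiled into this kernel, quietly drop the option. */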
211 #if !defined(SOFTDEP)
212 mp->mnt_flag &= ~MNT_SOFTDEP;
213 #endif
214
215 update = mp->mnt_flag & MNT_UPDATE;
216
217 /* Check arguments */
218 if (args->fspec != NULL) {
219 /*
220 * Look up the name and verify that it's sane.
221 */
222 NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, args->fspec);
223 if ((error = namei(&nd)) != 0)
224 return (error);
225 devvp = nd.ni_vp;
226
227 if (!update) {
228 /*
229 * Be sure this is a valid block device
230 */
231 if (devvp->v_type != VBLK)
232 error = ENOTBLK;
233 else if (bdevsw_lookup(devvp->v_rdev) == NULL)
234 error = ENXIO;
235 } else {
236 /*
237 * Be sure we're still naming the same device
238 * used for our initial mount
239 */
240 ump = VFSTOUFS(mp);
241 if (devvp != ump->um_devvp) {
242 if (devvp->v_rdev != ump->um_devvp->v_rdev)
243 error = EINVAL;
244 else {
245 vrele(devvp);
246 devvp = ump->um_devvp;
247 vref(devvp);
248 }
249 }
250 }
251 } else {
252 if (!update) {
253 /* New mounts must have a filename for the device */
254 return (EINVAL);
255 } else {
256 /* Use the extant mount */
257 ump = VFSTOUFS(mp);
258 devvp = ump->um_devvp;
259 vref(devvp);
260 }
261 }
262
263 /*
264 * Mark the device and any existing vnodes as involved in
265 * softdep processing.
266 */
267 if ((mp->mnt_flag & MNT_SOFTDEP) != 0) {
268 devvp->v_uflag |= VU_SOFTDEP;
269 mutex_enter(&mntvnode_lock);
270 TAILQ_FOREACH(vp, &mp->mnt_vnodelist, v_mntvnodes) {
271 if (vp->v_mount != mp || vismarker(vp))
272 continue;
273 vp->v_uflag |= VU_SOFTDEP;
274 }
275 mutex_exit(&mntvnode_lock);
276 }
277
278 /*
279 * If mount by non-root, then verify that user has necessary
280 * permissions on the device.
281 */
282 if (error == 0 && kauth_authorize_generic(l->l_cred,
283 KAUTH_GENERIC_ISSUSER, NULL) != 0) {
284 accessmode = VREAD;
285 if (update ?
286 (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
287 (mp->mnt_flag & MNT_RDONLY) == 0)
288 accessmode |= VWRITE;
289 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
290 error = VOP_ACCESS(devvp, accessmode, l->l_cred);
291 VOP_UNLOCK(devvp, 0);
292 }
293
294 if (error) {
295 vrele(devvp);
296 return (error);
297 }
298
299 if (!update) {
300 int xflags;
301
302 if (mp->mnt_flag & MNT_RDONLY)
303 xflags = FREAD;
304 else
305 xflags = FREAD|FWRITE;
306 error = VOP_OPEN(devvp, xflags, FSCRED);
307 if (error)
308 goto fail;
309 error = ffs_mountfs(devvp, mp, l);
310 if (error) {
311 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
312 (void)VOP_CLOSE(devvp, xflags, NOCRED);
313 VOP_UNLOCK(devvp, 0);
314 goto fail;
315 }
316
317 ump = VFSTOUFS(mp);
318 fs = ump->um_fs;
319 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
320 (MNT_SOFTDEP | MNT_ASYNC)) {
321 printf("%s fs uses soft updates, "
322 "ignoring async mode\n",
323 fs->fs_fsmnt);
324 mp->mnt_flag &= ~MNT_ASYNC;
325 }
326 } else {
327 /*
328 * Update the mount.
329 */
330
331 /*
332 * The initial mount got a reference on this
333 * device, so drop the one obtained via
334 * namei(), above.
335 */
336 vrele(devvp);
337
338 ump = VFSTOUFS(mp);
339 fs = ump->um_fs;
340 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
341 /*
342 * Changing from r/w to r/o
343 */
344 flags = WRITECLOSE;
345 if (mp->mnt_flag & MNT_FORCE)
346 flags |= FORCECLOSE;
347 if (mp->mnt_flag & MNT_SOFTDEP)
348 error = softdep_flushfiles(mp, flags, l);
349 else
350 error = ffs_flushfiles(mp, flags, l);
351 if (fs->fs_pendingblocks != 0 ||
352 fs->fs_pendinginodes != 0) {
353 printf("%s: update error: blocks %" PRId64
354 " files %d\n",
355 fs->fs_fsmnt, fs->fs_pendingblocks,
356 fs->fs_pendinginodes);
357 fs->fs_pendingblocks = 0;
358 fs->fs_pendinginodes = 0;
359 }
360 if (error == 0 &&
361 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
362 fs->fs_clean & FS_WASCLEAN) {
363 if (mp->mnt_flag & MNT_SOFTDEP)
364 fs->fs_flags &= ~FS_DOSOFTDEP;
365 fs->fs_clean = FS_ISCLEAN;
366 (void) ffs_sbupdate(ump, MNT_WAIT);
367 }
368 if (error)
369 return (error);
370 fs->fs_ronly = 1;
371 fs->fs_fmod = 0;
372 }
373
374 /*
375 * Flush soft dependencies if disabling them via an update
376 * mount. This may leave some items to be processed,
377 * so don't do this yet XXX.
378 */
379 if ((fs->fs_flags & FS_DOSOFTDEP) &&
380 !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
381 #ifdef notyet
382 flags = WRITECLOSE;
383 if (mp->mnt_flag & MNT_FORCE)
384 flags |= FORCECLOSE;
385 error = softdep_flushfiles(mp, flags, l);
386 if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
387 fs->fs_flags &= ~FS_DOSOFTDEP;
388 (void) ffs_sbupdate(ump, MNT_WAIT);
389 #elif defined(SOFTDEP)
390 mp->mnt_flag |= MNT_SOFTDEP;
391 #endif
392 }
393
394 /*
395 * When upgrading to a softdep mount, we must first flush
396 * all vnodes. (not done yet -- see above)
397 */
398 if (!(fs->fs_flags & FS_DOSOFTDEP) &&
399 (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
400 #ifdef notyet
401 flags = WRITECLOSE;
402 if (mp->mnt_flag & MNT_FORCE)
403 flags |= FORCECLOSE;
404 error = ffs_flushfiles(mp, flags, l);
405 #else
406 mp->mnt_flag &= ~MNT_SOFTDEP;
407 #endif
408 }
409
410 if (mp->mnt_flag & MNT_RELOAD) {
411 error = ffs_reload(mp, l->l_cred, l);
412 if (error)
413 return (error);
414 }
415
416 if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
417 /*
418 * Changing from read-only to read/write
419 */
420 fs->fs_ronly = 0;
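/*
 * Going read/write: shift the clean flag (FS_ISCLEAN becomes
 * FS_WASCLEAN) and mark the superblock modified, so we still know
 * at downgrade/unmount time that the filesystem was clean.
 */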
421 fs->fs_clean <<= 1;
422 fs->fs_fmod = 1;
423 if ((fs->fs_flags & FS_DOSOFTDEP)) {
424 error = softdep_mount(devvp, mp, fs,
425 l->l_cred);
426 if (error)
427 return (error);
428 }
429 if (fs->fs_snapinum[0] != 0)
430 ffs_snapshot_mount(mp);
431 }
432 if (args->fspec == NULL)
433 return EINVAL;
434 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
435 (MNT_SOFTDEP | MNT_ASYNC)) {
436 printf("%s fs uses soft updates, ignoring async mode\n",
437 fs->fs_fsmnt);
438 mp->mnt_flag &= ~MNT_ASYNC;
439 }
440 }
441
442 error = set_statvfs_info(path, UIO_USERSPACE, args->fspec,
443 UIO_USERSPACE, mp->mnt_op->vfs_name, mp, l);
444 if (error == 0)
445 (void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
446 sizeof(fs->fs_fsmnt));
447 if (mp->mnt_flag & MNT_SOFTDEP)
448 fs->fs_flags |= FS_DOSOFTDEP;
449 else
450 fs->fs_flags &= ~FS_DOSOFTDEP;
451 if (fs->fs_fmod != 0) { /* XXX */
452 fs->fs_fmod = 0;
453 if (fs->fs_clean & FS_WASCLEAN)
454 fs->fs_time = time_second;
455 else {
456 printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
457 mp->mnt_stat.f_mntfromname, fs->fs_clean);
458 printf("%s: lost blocks %" PRId64 " files %d\n",
459 mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
460 fs->fs_pendinginodes);
461 }
462 (void) ffs_cgupdate(ump, MNT_WAIT);
463 }
464 return (error);
465
466 fail:
467 vrele(devvp);
468 return (error);
469 }
470
471 /*
472 * Reload all incore data for a filesystem (used after running fsck on
473 * the root filesystem and finding things to fix). The filesystem must
474 * be mounted read-only.
475 *
476 * Things to do to update the mount:
477 * 1) invalidate all cached meta-data.
478 * 2) re-read superblock from disk.
479 * 3) re-read summary information from disk.
480 * 4) invalidate all inactive vnodes.
481 * 5) invalidate all cached file data.
482 * 6) re-read inode data for all active vnodes.
483 */
484 int
485 ffs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
486 {
487 struct vnode *vp, *mvp, *devvp;
488 struct inode *ip;
489 void *space;
490 struct buf *bp;
491 struct fs *fs, *newfs;
492 struct partinfo dpart;
493 int i, blks, size, error;
494 int32_t *lp;
495 struct ufsmount *ump;
496 daddr_t sblockloc;
497
498 if ((mp->mnt_flag & MNT_RDONLY) == 0)
499 return (EINVAL);
500
501 ump = VFSTOUFS(mp);
502 /*
503 * Step 1: invalidate all cached meta-data.
504 */
505 devvp = ump->um_devvp;
506 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
507 error = vinvalbuf(devvp, 0, cred, l, 0, 0);
508 VOP_UNLOCK(devvp, 0);
509 if (error)
510 panic("ffs_reload: dirty1");
511 /*
512 * Step 2: re-read superblock from disk.
513 */
514 fs = ump->um_fs;
515 if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, NOCRED) != 0)
516 size = DEV_BSIZE;
517 else
518 size = dpart.disklab->d_secsize;
519 /* XXX we don't handle possibility that superblock moved. */
520 error = bread(devvp, fs->fs_sblockloc / size, fs->fs_sbsize,
521 NOCRED, &bp);
522 if (error) {
523 brelse(bp, 0);
524 return (error);
525 }
526 newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
527 memcpy(newfs, bp->b_data, fs->fs_sbsize);
528 #ifdef FFS_EI
529 if (ump->um_flags & UFS_NEEDSWAP) {
530 ffs_sb_swap((struct fs*)bp->b_data, newfs);
531 fs->fs_flags |= FS_SWAPPED;
532 } else
533 #endif
534 fs->fs_flags &= ~FS_SWAPPED;
535 if ((newfs->fs_magic != FS_UFS1_MAGIC &&
536 newfs->fs_magic != FS_UFS2_MAGIC)||
537 newfs->fs_bsize > MAXBSIZE ||
538 newfs->fs_bsize < sizeof(struct fs)) {
539 brelse(bp, 0);
540 free(newfs, M_UFSMNT);
541 return (EIO); /* XXX needs translation */
542 }
543 /* Store off old fs_sblockloc for ffs_oldfscompat_read. */
544 sblockloc = fs->fs_sblockloc;
545 /*
546 * Copy pointer fields back into superblock before copying in XXX
547 * new superblock. These should really be in the ufsmount. XXX
548 * Note that important parameters (eg fs_ncg) are unchanged.
549 */
550 newfs->fs_csp = fs->fs_csp;
551 newfs->fs_maxcluster = fs->fs_maxcluster;
552 newfs->fs_contigdirs = fs->fs_contigdirs;
553 newfs->fs_ronly = fs->fs_ronly;
554 newfs->fs_active = fs->fs_active;
555 memcpy(fs, newfs, (u_int)fs->fs_sbsize);
556 brelse(bp, 0);
557 free(newfs, M_UFSMNT);
558
559 /* Recheck for apple UFS filesystem */
560 ump->um_flags &= ~UFS_ISAPPLEUFS;
561 /* First check to see if this is tagged as an Apple UFS filesystem
562 * in the disklabel
563 */
564 if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) == 0) &&
565 (dpart.part->p_fstype == FS_APPLEUFS)) {
566 ump->um_flags |= UFS_ISAPPLEUFS;
567 }
568 #ifdef APPLE_UFS
569 else {
570 /* Manually look for an apple ufs label, and if a valid one
571 * is found, then treat it like an Apple UFS filesystem anyway
572 */
573 error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
574 APPLEUFS_LABEL_SIZE, cred, &bp);
575 if (error) {
576 brelse(bp, 0);
577 return (error);
578 }
579 error = ffs_appleufs_validate(fs->fs_fsmnt,
580 (struct appleufslabel *)bp->b_data,NULL);
581 if (error == 0)
582 ump->um_flags |= UFS_ISAPPLEUFS;
583 brelse(bp, 0);
584 bp = NULL;
585 }
586 #else
587 if (ump->um_flags & UFS_ISAPPLEUFS)
588 return (EIO);
589 #endif
590
591 if (UFS_MPISAPPLEUFS(ump)) {
592 /* see comment about NeXT below */
593 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
594 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
595 mp->mnt_iflag |= IMNT_DTYPE;
596 } else {
597 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
598 ump->um_dirblksiz = DIRBLKSIZ;
599 if (ump->um_maxsymlinklen > 0)
600 mp->mnt_iflag |= IMNT_DTYPE;
601 else
602 mp->mnt_iflag &= ~IMNT_DTYPE;
603 }
604 ffs_oldfscompat_read(fs, ump, sblockloc);
605 mutex_enter(&ump->um_lock);
606 ump->um_maxfilesize = fs->fs_maxfilesize;
607 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
608 fs->fs_pendingblocks = 0;
609 fs->fs_pendinginodes = 0;
610 }
611 mutex_exit(&ump->um_lock);
612
613 ffs_statvfs(mp, &mp->mnt_stat);
614 /*
615 * Step 3: re-read summary information from disk.
616 */
617 blks = howmany(fs->fs_cssize, fs->fs_fsize);
618 space = fs->fs_csp;
619 for (i = 0; i < blks; i += fs->fs_frag) {
620 size = fs->fs_bsize;
621 if (i + fs->fs_frag > blks)
622 size = (blks - i) * fs->fs_fsize;
623 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
624 NOCRED, &bp);
625 if (error) {
626 brelse(bp, 0);
627 return (error);
628 }
629 #ifdef FFS_EI
630 if (UFS_FSNEEDSWAP(fs))
631 ffs_csum_swap((struct csum *)bp->b_data,
632 (struct csum *)space, size);
633 else
634 #endif
635 memcpy(space, bp->b_data, (size_t)size);
636 space = (char *)space + size;
637 brelse(bp, 0);
638 }
639 if ((fs->fs_flags & FS_DOSOFTDEP))
640 softdep_mount(devvp, mp, fs, cred);
641 if (fs->fs_snapinum[0] != 0)
642 ffs_snapshot_mount(mp);
643 /*
644 * We no longer know anything about clusters per cylinder group.
645 */
646 if (fs->fs_contigsumsize > 0) {
647 lp = fs->fs_maxcluster;
648 for (i = 0; i < fs->fs_ncg; i++)
649 *lp++ = fs->fs_contigsumsize;
650 }
651
652 /* Allocate a marker vnode. */
653 if ((mvp = vnalloc(mp)) == NULL)
654 return ENOMEM;
655 /*
656 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
657 * and vclean() can be called indirectly
658 */
659 mutex_enter(&mntvnode_lock);
660 loop:
661 for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
662 vmark(mvp, vp);
663 if (vp->v_mount != mp || vismarker(vp))
664 continue;
665 /*
666 * Step 4: invalidate all inactive vnodes.
667 */
668 if (vrecycle(vp, &mntvnode_lock, l)) {
669 mutex_enter(&mntvnode_lock);
670 (void)vunmark(mvp);
671 goto loop;
672 }
673 /*
674 * Step 5: invalidate all cached file data.
675 */
676 mutex_enter(&vp->v_interlock);
677 mutex_exit(&mntvnode_lock);
678 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
679 (void)vunmark(mvp);
680 goto loop;
681 }
682 if (vinvalbuf(vp, 0, cred, l, 0, 0))
683 panic("ffs_reload: dirty2");
684 /*
685 * Step 6: re-read inode data for all active vnodes.
686 */
687 ip = VTOI(vp);
688 error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
689 (int)fs->fs_bsize, NOCRED, &bp);
690 if (error) {
691 brelse(bp, 0);
692 vput(vp);
693 (void)vunmark(mvp);
694 break;
695 }
696 ffs_load_inode(bp, ip, fs, ip->i_number);
697 ip->i_ffs_effnlink = ip->i_nlink;
698 brelse(bp, 0);
699 vput(vp);
700 mutex_enter(&mntvnode_lock);
701 }
702 mutex_exit(&mntvnode_lock);
703 vnfree(mvp);
704 return (error);
705 }
706
707 /*
708 * Possible superblock locations ordered from most to least likely.
709 */
710 static const int sblock_try[] = SBLOCKSEARCH;
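/* The list ends with a -1 sentinel, which terminates the probe loop in ffs_mountfs(). */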
711
712 /*
713 * Common code for mount and mountroot
714 */
715 int
716 ffs_mountfs(struct vnode *devvp, struct mount *mp, struct lwp *l)
717 {
718 struct ufsmount *ump;
719 struct buf *bp;
720 struct fs *fs;
721 dev_t dev;
722 struct partinfo dpart;
723 void *space;
724 daddr_t sblockloc, fsblockloc;
725 int blks, fstype;
726 int error, i, size, ronly, bset = 0;
727 #ifdef FFS_EI
728 int needswap = 0; /* keep gcc happy */
729 #endif
730 int32_t *lp;
731 kauth_cred_t cred;
732 u_int32_t sbsize = 8192; /* keep gcc happy */
733
734 dev = devvp->v_rdev;
735 cred = l ? l->l_cred : NOCRED;
736
737 /* Flush out any old buffers remaining from a previous use. */
738 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
739 error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
740 VOP_UNLOCK(devvp, 0);
741 if (error)
742 return (error);
743
744 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
745 if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) != 0)
746 size = DEV_BSIZE;
747 else
748 size = dpart.disklab->d_secsize;
749
750 bp = NULL;
751 ump = NULL;
752 fs = NULL;
753 sblockloc = 0;
754 fstype = 0;
755
756 error = fstrans_mount(mp);
757 if (error)
758 return error;
759
760 /*
761 * Try reading the superblock in each of its possible locations.
762 */
763 for (i = 0; ; i++) {
764 if (bp != NULL) {
765 brelse(bp, BC_NOCACHE);
766 bp = NULL;
767 }
768 if (sblock_try[i] == -1) {
769 error = EINVAL;
770 fs = NULL;
771 goto out;
772 }
773 error = bread(devvp, sblock_try[i] / size, SBLOCKSIZE, cred,
774 &bp);
775 if (error) {
776 fs = NULL;
777 goto out;
778 }
779 fs = (struct fs*)bp->b_data;
780 fsblockloc = sblockloc = sblock_try[i];
781 if (fs->fs_magic == FS_UFS1_MAGIC) {
782 sbsize = fs->fs_sbsize;
783 fstype = UFS1;
784 #ifdef FFS_EI
785 needswap = 0;
786 } else if (fs->fs_magic == bswap32(FS_UFS1_MAGIC)) {
787 sbsize = bswap32(fs->fs_sbsize);
788 fstype = UFS1;
789 needswap = 1;
790 #endif
791 } else if (fs->fs_magic == FS_UFS2_MAGIC) {
792 sbsize = fs->fs_sbsize;
793 fstype = UFS2;
794 #ifdef FFS_EI
795 needswap = 0;
796 } else if (fs->fs_magic == bswap32(FS_UFS2_MAGIC)) {
797 sbsize = bswap32(fs->fs_sbsize);
798 fstype = UFS2;
799 needswap = 1;
800 #endif
801 } else
802 continue;
803
804
805 /* fs->fs_sblockloc isn't defined for old filesystems */
806 if (fstype == UFS1 && !(fs->fs_old_flags & FS_FLAGS_UPDATED)) {
807 if (sblockloc == SBLOCK_UFS2)
808 /*
809 * This is likely to be the first alternate
810 * in a filesystem with 64k blocks.
811 * Don't use it.
812 */
813 continue;
814 fsblockloc = sblockloc;
815 } else {
816 fsblockloc = fs->fs_sblockloc;
817 #ifdef FFS_EI
818 if (needswap)
819 fsblockloc = bswap64(fsblockloc);
820 #endif
821 }
822
823 /* Check we haven't found an alternate superblock */
824 if (fsblockloc != sblockloc)
825 continue;
826
827 /* Validate size of superblock */
828 if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs))
829 continue;
830
831 /* Ok seems to be a good superblock */
832 break;
833 }
834
835 fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
836 memcpy(fs, bp->b_data, sbsize);
837
838 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
839 memset(ump, 0, sizeof *ump);
840 mutex_init(&ump->um_lock, MUTEX_DEFAULT, IPL_NONE);
841 ump->um_fs = fs;
842 ump->um_ops = &ffs_ufsops;
843
844 #ifdef FFS_EI
845 if (needswap) {
846 ffs_sb_swap((struct fs*)bp->b_data, fs);
847 fs->fs_flags |= FS_SWAPPED;
848 } else
849 #endif
850 fs->fs_flags &= ~FS_SWAPPED;
851
852 ffs_oldfscompat_read(fs, ump, sblockloc);
853 ump->um_maxfilesize = fs->fs_maxfilesize;
854
855 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
856 fs->fs_pendingblocks = 0;
857 fs->fs_pendinginodes = 0;
858 }
859
860 ump->um_fstype = fstype;
861 if (fs->fs_sbsize < SBLOCKSIZE)
862 brelse(bp, BC_INVAL);
863 else
864 brelse(bp, 0);
865 bp = NULL;
866
867 /* First check to see if this is tagged as an Apple UFS filesystem
868 * in the disklabel
869 */
870 if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred) == 0) &&
871 (dpart.part->p_fstype == FS_APPLEUFS)) {
872 ump->um_flags |= UFS_ISAPPLEUFS;
873 }
874 #ifdef APPLE_UFS
875 else {
876 /* Manually look for an apple ufs label, and if a valid one
877 * is found, then treat it like an Apple UFS filesystem anyway
878 */
879 error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
880 APPLEUFS_LABEL_SIZE, cred, &bp);
881 if (error)
882 goto out;
883 error = ffs_appleufs_validate(fs->fs_fsmnt,
884 (struct appleufslabel *)bp->b_data,NULL);
885 if (error == 0) {
886 ump->um_flags |= UFS_ISAPPLEUFS;
887 }
888 brelse(bp, 0);
889 bp = NULL;
890 }
891 #else
892 if (ump->um_flags & UFS_ISAPPLEUFS) {
893 error = EINVAL;
894 goto out;
895 }
896 #endif
897
898 /*
899 * verify that we can access the last block in the fs
900 * if we're mounting read/write.
901 */
902
903 if (!ronly) {
904 error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize,
905 cred, &bp);
906 if (bp->b_bcount != fs->fs_fsize)
907 error = EINVAL;
908 if (error) {
909 bset = BC_INVAL;
910 goto out;
911 }
912 brelse(bp, BC_INVAL);
913 bp = NULL;
914 }
915
916 fs->fs_ronly = ronly;
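/*
 * For a read/write mount the clean flag is shifted left, turning
 * FS_ISCLEAN into FS_WASCLEAN: the on-disk superblock is marked
 * "dirty", but ffs_unmount() can still tell that the filesystem was
 * clean when it was mounted.
 */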
917 if (ronly == 0) {
918 fs->fs_clean <<= 1;
919 fs->fs_fmod = 1;
920 }
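/*
 * The cylinder group summary array, the optional per-cg cluster
 * summary (fs_maxcluster) and the fs_contigdirs array are carved out
 * of a single allocation anchored at fs_csp; it is released with one
 * free(fs->fs_csp) on failure here and at unmount time.
 */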
921 size = fs->fs_cssize;
922 blks = howmany(size, fs->fs_fsize);
923 if (fs->fs_contigsumsize > 0)
924 size += fs->fs_ncg * sizeof(int32_t);
925 size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
926 space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
927 fs->fs_csp = space;
928 for (i = 0; i < blks; i += fs->fs_frag) {
929 size = fs->fs_bsize;
930 if (i + fs->fs_frag > blks)
931 size = (blks - i) * fs->fs_fsize;
932 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
933 cred, &bp);
934 if (error) {
935 free(fs->fs_csp, M_UFSMNT);
936 goto out;
937 }
938 #ifdef FFS_EI
939 if (needswap)
940 ffs_csum_swap((struct csum *)bp->b_data,
941 (struct csum *)space, size);
942 else
943 #endif
944 memcpy(space, bp->b_data, (u_int)size);
945
946 space = (char *)space + size;
947 brelse(bp, 0);
948 bp = NULL;
949 }
950 if (fs->fs_contigsumsize > 0) {
951 fs->fs_maxcluster = lp = space;
952 for (i = 0; i < fs->fs_ncg; i++)
953 *lp++ = fs->fs_contigsumsize;
954 space = lp;
955 }
956 size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
957 fs->fs_contigdirs = space;
958 space = (char *)space + size;
959 memset(fs->fs_contigdirs, 0, size);
960 /* Compatibility for old filesystems - XXX */
961 if (fs->fs_avgfilesize <= 0)
962 fs->fs_avgfilesize = AVFILESIZ;
963 if (fs->fs_avgfpdir <= 0)
964 fs->fs_avgfpdir = AFPDIR;
965 fs->fs_active = NULL;
966 mp->mnt_data = ump;
967 mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
968 mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_FFS);
969 mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
970 mp->mnt_stat.f_namemax = FFS_MAXNAMLEN;
971 if (UFS_MPISAPPLEUFS(ump)) {
972 /* NeXT used to keep short symlinks in the inode even
973 * when using FS_42INODEFMT. In that case fs->fs_maxsymlinklen
974 * is probably -1, but we still need to be able to identify
975 * short symlinks.
976 */
977 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
978 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
979 mp->mnt_iflag |= IMNT_DTYPE;
980 } else {
981 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
982 ump->um_dirblksiz = DIRBLKSIZ;
983 if (ump->um_maxsymlinklen > 0)
984 mp->mnt_iflag |= IMNT_DTYPE;
985 else
986 mp->mnt_iflag &= ~IMNT_DTYPE;
987 }
988 mp->mnt_fs_bshift = fs->fs_bshift;
989 mp->mnt_dev_bshift = DEV_BSHIFT; /* XXX */
990 mp->mnt_flag |= MNT_LOCAL;
991 mp->mnt_iflag |= IMNT_MPSAFE;
992 #ifdef FFS_EI
993 if (needswap)
994 ump->um_flags |= UFS_NEEDSWAP;
995 #endif
996 ump->um_mountp = mp;
997 ump->um_dev = dev;
998 ump->um_devvp = devvp;
999 ump->um_nindir = fs->fs_nindir;
1000 ump->um_lognindir = ffs(fs->fs_nindir) - 1;
1001 ump->um_bptrtodb = fs->fs_fsbtodb;
1002 ump->um_seqinc = fs->fs_frag;
1003 for (i = 0; i < MAXQUOTAS; i++)
1004 ump->um_quotas[i] = NULLVP;
1005 devvp->v_specmountpoint = mp;
1006 if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
1007 error = softdep_mount(devvp, mp, fs, cred);
1008 if (error) {
1009 free(fs->fs_csp, M_UFSMNT);
1010 goto out;
1011 }
1012 }
1013 if (ronly == 0 && fs->fs_snapinum[0] != 0)
1014 ffs_snapshot_mount(mp);
1015 #ifdef UFS_EXTATTR
1016 /*
1017 * Initialize file-backed extended attributes on UFS1 file
1018 * systems.
1019 */
1020 if (ump->um_fstype == UFS1) {
1021 ufs_extattr_uepm_init(&ump->um_extattr);
1022 #ifdef UFS_EXTATTR_AUTOSTART
1023 /*
1024 * XXX Just ignore errors. Not clear that we should
1025 * XXX fail the mount in this case.
1026 */
1027 (void) ufs_extattr_autostart(mp, l);
1028 #endif
1029 }
1030 #endif /* UFS_EXTATTR */
1031 return (0);
1032 out:
1033 fstrans_unmount(mp);
1034 if (fs)
1035 free(fs, M_UFSMNT);
1036 devvp->v_specmountpoint = NULL;
1037 if (bp)
1038 brelse(bp, bset);
1039 if (ump) {
1040 if (ump->um_oldfscompat)
1041 free(ump->um_oldfscompat, M_UFSMNT);
1042 mutex_destroy(&ump->um_lock);
1043 free(ump, M_UFSMNT);
1044 mp->mnt_data = NULL;
1045 }
1046 return (error);
1047 }
1048
1049 /*
1050 * Sanity checks for loading old filesystem superblocks.
1051 * See ffs_oldfscompat_write below for unwound actions.
1052 *
1053 * XXX - Parts get retired eventually.
1054 * Unfortunately new bits get added.
1055 */
1056 static void
1057 ffs_oldfscompat_read(struct fs *fs, struct ufsmount *ump, daddr_t sblockloc)
1058 {
1059 off_t maxfilesize;
1060 int32_t *extrasave;
1061
1062 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1063 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1064 return;
1065
1066 if (!ump->um_oldfscompat)
1067 ump->um_oldfscompat = malloc(512 + 3*sizeof(int32_t),
1068 M_UFSMNT, M_WAITOK);
1069
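/*
 * The save area holds the 512 bytes starting at fs_old_postbl_start,
 * followed by three int32_t slots for fs_old_npsect,
 * fs_old_interleave and fs_old_trackskew; ffs_oldfscompat_write()
 * copies it all back before the superblock goes to disk.
 */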
1070 memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
1071 extrasave = ump->um_oldfscompat;
1072 extrasave += 512/sizeof(int32_t);
1073 extrasave[0] = fs->fs_old_npsect;
1074 extrasave[1] = fs->fs_old_interleave;
1075 extrasave[2] = fs->fs_old_trackskew;
1076
1077 /* These fields will be overwritten by their
1078 * original values in ffs_oldfscompat_write, so it is harmless
1079 * to modify them here.
1080 */
1081 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
1082 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
1083 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
1084 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
1085
1086 fs->fs_maxbsize = fs->fs_bsize;
1087 fs->fs_time = fs->fs_old_time;
1088 fs->fs_size = fs->fs_old_size;
1089 fs->fs_dsize = fs->fs_old_dsize;
1090 fs->fs_csaddr = fs->fs_old_csaddr;
1091 fs->fs_sblockloc = sblockloc;
1092
1093 fs->fs_flags = fs->fs_old_flags | (fs->fs_flags & FS_INTERNAL);
1094
1095 if (fs->fs_old_postblformat == FS_42POSTBLFMT) {
1096 fs->fs_old_nrpos = 8;
1097 fs->fs_old_npsect = fs->fs_old_nsect;
1098 fs->fs_old_interleave = 1;
1099 fs->fs_old_trackskew = 0;
1100 }
1101
1102 if (fs->fs_old_inodefmt < FS_44INODEFMT) {
1103 fs->fs_maxfilesize = (u_quad_t) 1LL << 39;
1104 fs->fs_qbmask = ~fs->fs_bmask;
1105 fs->fs_qfmask = ~fs->fs_fmask;
1106 }
1107
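/*
 * Cap fs_maxfilesize at the largest byte offset reachable through a
 * 32-bit signed logical block number: 2^31 blocks of fs_bsize bytes,
 * minus one.
 */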
1108 maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;
1109 if (fs->fs_maxfilesize > maxfilesize)
1110 fs->fs_maxfilesize = maxfilesize;
1111
1112 /* Compatibility for old filesystems */
1113 if (fs->fs_avgfilesize <= 0)
1114 fs->fs_avgfilesize = AVFILESIZ;
1115 if (fs->fs_avgfpdir <= 0)
1116 fs->fs_avgfpdir = AFPDIR;
1117
1118 #if 0
1119 if (bigcgs) {
1120 fs->fs_save_cgsize = fs->fs_cgsize;
1121 fs->fs_cgsize = fs->fs_bsize;
1122 }
1123 #endif
1124 }
1125
1126 /*
1127 * Unwinding superblock updates for old filesystems.
1128 * See ffs_oldfscompat_read above for details.
1129 *
1130 * XXX - Parts get retired eventually.
1131 * Unfortunately new bits get added.
1132 */
1133 static void
1134 ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
1135 {
1136 int32_t *extrasave;
1137
1138 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1139 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1140 return;
1141
1142 fs->fs_old_time = fs->fs_time;
1143 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
1144 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
1145 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
1146 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
1147 fs->fs_old_flags = fs->fs_flags;
1148
1149 #if 0
1150 if (bigcgs) {
1151 fs->fs_cgsize = fs->fs_save_cgsize;
1152 }
1153 #endif
1154
1155 memcpy(&fs->fs_old_postbl_start, ump->um_oldfscompat, 512);
1156 extrasave = ump->um_oldfscompat;
1157 extrasave += 512/sizeof(int32_t);
1158 fs->fs_old_npsect = extrasave[0];
1159 fs->fs_old_interleave = extrasave[1];
1160 fs->fs_old_trackskew = extrasave[2];
1161
1162 }
1163
1164 /*
1165 * unmount system call
1166 */
1167 int
1168 ffs_unmount(struct mount *mp, int mntflags)
1169 {
1170 struct lwp *l = curlwp;
1171 struct ufsmount *ump = VFSTOUFS(mp);
1172 struct fs *fs = ump->um_fs;
1173 int error, flags, penderr;
1174
1175 penderr = 0;
1176 flags = 0;
1177 if (mntflags & MNT_FORCE)
1178 flags |= FORCECLOSE;
1179 #ifdef UFS_EXTATTR
1180 if (ump->um_fstype == UFS1) {
1181 ufs_extattr_stop(mp, l);
1182 ufs_extattr_uepm_destroy(&ump->um_extattr);
1183 }
1184 #endif /* UFS_EXTATTR */
1185 if (mp->mnt_flag & MNT_SOFTDEP) {
1186 if ((error = softdep_flushfiles(mp, flags, l)) != 0)
1187 return (error);
1188 } else {
1189 if ((error = ffs_flushfiles(mp, flags, l)) != 0)
1190 return (error);
1191 }
1192 mutex_enter(&ump->um_lock);
1193 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1194 printf("%s: unmount pending error: blocks %" PRId64
1195 " files %d\n",
1196 fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
1197 fs->fs_pendingblocks = 0;
1198 fs->fs_pendinginodes = 0;
1199 penderr = 1;
1200 }
1201 mutex_exit(&ump->um_lock);
1202 if (fs->fs_ronly == 0 &&
1203 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
1204 fs->fs_clean & FS_WASCLEAN) {
1205 /*
1206 * XXXX don't mark fs clean in the case of softdep
1207 * pending block errors, until they are fixed.
1208 */
1209 if (penderr == 0) {
1210 if (mp->mnt_flag & MNT_SOFTDEP)
1211 fs->fs_flags &= ~FS_DOSOFTDEP;
1212 fs->fs_clean = FS_ISCLEAN;
1213 }
1214 fs->fs_fmod = 0;
1215 (void) ffs_sbupdate(ump, MNT_WAIT);
1216 }
1217 if (ump->um_devvp->v_type != VBAD)
1218 ump->um_devvp->v_specmountpoint = NULL;
1219 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1220 (void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
1221 NOCRED);
1222 vput(ump->um_devvp);
1223 free(fs->fs_csp, M_UFSMNT);
1224 free(fs, M_UFSMNT);
1225 if (ump->um_oldfscompat != NULL)
1226 free(ump->um_oldfscompat, M_UFSMNT);
1227 softdep_unmount(mp);
1228 mutex_destroy(&ump->um_lock);
1229 free(ump, M_UFSMNT);
1230 mp->mnt_data = NULL;
1231 mp->mnt_flag &= ~MNT_LOCAL;
1232 fstrans_unmount(mp);
1233 return (0);
1234 }
1235
1236 /*
1237 * Flush out all the files in a filesystem.
1238 */
1239 int
1240 ffs_flushfiles(struct mount *mp, int flags, struct lwp *l)
1241 {
1242 extern int doforce;
1243 struct ufsmount *ump;
1244 int error;
1245
1246 if (!doforce)
1247 flags &= ~FORCECLOSE;
1248 ump = VFSTOUFS(mp);
1249 #ifdef QUOTA
1250 if (mp->mnt_flag & MNT_QUOTA) {
1251 int i;
1252 if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
1253 return (error);
1254 for (i = 0; i < MAXQUOTAS; i++) {
1255 if (ump->um_quotas[i] == NULLVP)
1256 continue;
1257 quotaoff(l, mp, i);
1258 }
1259 /*
1260 * Here we fall through to vflush again to ensure
1261 * that we have gotten rid of all the system vnodes.
1262 */
1263 }
1264 #endif
1265 if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
1266 return (error);
1267 ffs_snapshot_unmount(mp);
1268 /*
1269 * Flush all the files.
1270 */
1271 error = vflush(mp, NULLVP, flags);
1272 if (error)
1273 return (error);
1274 /*
1275 * Flush filesystem metadata.
1276 */
1277 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1278 error = VOP_FSYNC(ump->um_devvp, l->l_cred, FSYNC_WAIT, 0, 0);
1279 VOP_UNLOCK(ump->um_devvp, 0);
1280 return (error);
1281 }
1282
1283 /*
1284 * Get file system statistics.
1285 */
1286 int
1287 ffs_statvfs(struct mount *mp, struct statvfs *sbp)
1288 {
1289 struct ufsmount *ump;
1290 struct fs *fs;
1291
1292 ump = VFSTOUFS(mp);
1293 fs = ump->um_fs;
1294 mutex_enter(&ump->um_lock);
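/*
 * Free space is whole free blocks (converted to fragments) plus free
 * fragments plus blocks still pending release; fs_minfree percent of
 * the data area is held back as f_bresvd, and f_bavail is what
 * remains for unprivileged users.
 */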
1295 sbp->f_bsize = fs->fs_bsize;
1296 sbp->f_frsize = fs->fs_fsize;
1297 sbp->f_iosize = fs->fs_bsize;
1298 sbp->f_blocks = fs->fs_dsize;
1299 sbp->f_bfree = blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
1300 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
1301 sbp->f_bresvd = ((u_int64_t) fs->fs_dsize * (u_int64_t)
1302 fs->fs_minfree) / (u_int64_t) 100;
1303 if (sbp->f_bfree > sbp->f_bresvd)
1304 sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
1305 else
1306 sbp->f_bavail = 0;
1307 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
1308 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1309 sbp->f_favail = sbp->f_ffree;
1310 sbp->f_fresvd = 0;
1311 mutex_exit(&ump->um_lock);
1312 copy_statvfs_info(sbp, mp);
1313
1314 return (0);
1315 }
1316
1317 /*
1318 * Go through the disk queues to initiate sandbagged IO;
1319 * go through the inodes to write those that have been modified;
1320 * initiate the writing of the super block if it has been modified.
1321 *
1322 * Note: we are always called with the filesystem marked `MPBUSY'.
1323 */
1324 int
1325 ffs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
1326 {
1327 struct lwp *l = curlwp;
1328 struct vnode *vp, *mvp;
1329 struct inode *ip;
1330 struct ufsmount *ump = VFSTOUFS(mp);
1331 struct fs *fs;
1332 int error, count, allerror = 0;
1333
1334 fs = ump->um_fs;
1335 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
1336 printf("fs = %s\n", fs->fs_fsmnt);
1337 panic("update: rofs mod");
1338 }
1339
1340 /* Allocate a marker vnode. */
1341 if ((mvp = vnalloc(mp)) == NULL)
1342 return (ENOMEM);
1343
1344 fstrans_start(mp, FSTRANS_SHARED);
1345 /*
1346 * Write back each (modified) inode.
1347 */
1348 mutex_enter(&mntvnode_lock);
1349 loop:
1350 /*
1351 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
1352 * and vclean() can be called indirectly
1353 */
1354 for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
1355 vmark(mvp, vp);
1356 /*
1357 * If the vnode that we are about to sync is no longer
1358 * associated with this mount point, start over.
1359 */
1360 if (vp->v_mount != mp || vismarker(vp))
1361 continue;
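/*
 * Skip vnodes that are being cleaned as well as vnodes that are
 * entirely clean: no IN_CHANGE/IN_UPDATE/IN_MODIFIED flags, no
 * dirty buffers and no dirty pages.
 */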
1362 mutex_enter(&vp->v_interlock);
1363 ip = VTOI(vp);
1364 if (ip == NULL || (vp->v_iflag & (VI_XLOCK|VI_CLEAN)) != 0 ||
1365 vp->v_type == VNON || ((ip->i_flag &
1366 (IN_CHANGE | IN_UPDATE | IN_MODIFIED)) == 0 &&
1367 LIST_EMPTY(&vp->v_dirtyblkhd) &&
1368 UVM_OBJ_IS_CLEAN(&vp->v_uobj)))
1369 {
1370 mutex_exit(&vp->v_interlock);
1371 continue;
1372 }
1373 if (vp->v_type == VBLK &&
1374 fstrans_getstate(mp) == FSTRANS_SUSPENDING) {
1375 mutex_exit(&vp->v_interlock);
1376 continue;
1377 }
1378 mutex_exit(&mntvnode_lock);
1379 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
1380 if (error) {
1381 mutex_enter(&mntvnode_lock);
1382 if (error == ENOENT) {
1383 (void)vunmark(mvp);
1384 goto loop;
1385 }
1386 continue;
1387 }
1388 if (vp->v_type == VREG && waitfor == MNT_LAZY)
1389 error = ffs_update(vp, NULL, NULL, 0);
1390 else
1391 error = VOP_FSYNC(vp, cred,
1392 waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0);
1393 if (error)
1394 allerror = error;
1395 vput(vp);
1396 mutex_enter(&mntvnode_lock);
1397 }
1398 mutex_exit(&mntvnode_lock);
1399 /*
1400 * Force stale file system control information to be flushed.
1401 */
1402 if (waitfor == MNT_WAIT && (ump->um_mountp->mnt_flag & MNT_SOFTDEP)) {
1403 if ((error = softdep_flushworklist(ump->um_mountp, &count, l)))
1404 allerror = error;
1405 /* Flushed work items may create new vnodes to clean */
1406 if (allerror == 0 && count) {
1407 mutex_enter(&mntvnode_lock);
1408 goto loop;
1409 }
1410 }
1411 if (waitfor != MNT_LAZY && (ump->um_devvp->v_numoutput > 0 ||
1412 !LIST_EMPTY(&ump->um_devvp->v_dirtyblkhd))) {
1413 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1414 if ((error = VOP_FSYNC(ump->um_devvp, cred,
1415 waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0)) != 0)
1416 allerror = error;
1417 VOP_UNLOCK(ump->um_devvp, 0);
1418 if (allerror == 0 && waitfor == MNT_WAIT) {
1419 mutex_enter(&mntvnode_lock);
1420 goto loop;
1421 }
1422 }
1423 #ifdef QUOTA
1424 qsync(mp);
1425 #endif
1426 /*
1427 * Write back modified superblock.
1428 */
1429 if (fs->fs_fmod != 0) {
1430 fs->fs_fmod = 0;
1431 fs->fs_time = time_second;
1432 if ((error = ffs_cgupdate(ump, waitfor)))
1433 allerror = error;
1434 }
1435 fstrans_done(mp);
1436 vnfree(mvp);
1437 return (allerror);
1438 }
1439
1440 /*
1441 * Look up an FFS dinode number to find its incore vnode, otherwise read it
1442 * in from disk. If it is in core, wait for the lock bit to clear, then
1443 * return the inode locked. Detection and handling of mount points must be
1444 * done by the calling routine.
1445 */
1446 int
1447 ffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
1448 {
1449 struct fs *fs;
1450 struct inode *ip;
1451 struct ufsmount *ump;
1452 struct buf *bp;
1453 struct vnode *vp;
1454 dev_t dev;
1455 int error;
1456
1457 ump = VFSTOUFS(mp);
1458 dev = ump->um_dev;
1459
1460 retry:
1461 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
1462 return (0);
1463
1464 /* Allocate a new vnode/inode. */
1465 if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
1466 *vpp = NULL;
1467 return (error);
1468 }
1469 ip = pool_cache_get(ffs_inode_cache, PR_WAITOK);
1470
1471 /*
1472 * If someone beat us to it, put back the freshly allocated
1473 * vnode/inode pair and retry.
1474 */
1475 mutex_enter(&ufs_hashlock);
1476 if (ufs_ihashget(dev, ino, 0) != NULL) {
1477 mutex_exit(&ufs_hashlock);
1478 ungetnewvnode(vp);
1479 pool_cache_put(ffs_inode_cache, ip);
1480 goto retry;
1481 }
1482
1483 vp->v_vflag |= VV_LOCKSWORK;
1484 if ((mp->mnt_flag & MNT_SOFTDEP) != 0)
1485 vp->v_uflag |= VU_SOFTDEP;
1486
1487 /*
1488 * XXX MFS ends up here, too, to allocate an inode. Should we
1489 * XXX create another pool for MFS inodes?
1490 */
1491
1492 memset(ip, 0, sizeof(struct inode));
1493 vp->v_data = ip;
1494 ip->i_vnode = vp;
1495 ip->i_ump = ump;
1496 ip->i_fs = fs = ump->um_fs;
1497 ip->i_dev = dev;
1498 ip->i_number = ino;
1499 LIST_INIT(&ip->i_pcbufhd);
1500 #ifdef QUOTA
1501 ufsquota_init(ip);
1502 #endif
1503
1504 /*
1505 * Initialize genfs node; we might proceed to destroy it in
1506 * error branches.
1507 */
1508 genfs_node_init(vp, &ffs_genfsops);
1509
1510 /*
1511 * Put it onto its hash chain and lock it so that other requests for
1512 * this inode will block if they arrive while we are sleeping waiting
1513 * for old data structures to be purged or for the contents of the
1514 * disk portion of this inode to be read.
1515 */
1516
1517 ufs_ihashins(ip);
1518 mutex_exit(&ufs_hashlock);
1519
1520 /* Read in the disk contents for the inode, copy into the inode. */
1521 error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1522 (int)fs->fs_bsize, NOCRED, &bp);
1523 if (error) {
1524
1525 /*
1526 * The inode does not contain anything useful, so it would
1527 * be misleading to leave it on its hash chain. With mode
1528 * still zero, it will be unlinked and returned to the free
1529 * list by vput().
1530 */
1531
1532 vput(vp);
1533 brelse(bp, 0);
1534 *vpp = NULL;
1535 return (error);
1536 }
1537 if (ip->i_ump->um_fstype == UFS1)
1538 ip->i_din.ffs1_din = pool_cache_get(ffs_dinode1_cache,
1539 PR_WAITOK);
1540 else
1541 ip->i_din.ffs2_din = pool_cache_get(ffs_dinode2_cache,
1542 PR_WAITOK);
1543 ffs_load_inode(bp, ip, fs, ino);
1544 if (DOINGSOFTDEP(vp))
1545 softdep_load_inodeblock(ip);
1546 else
1547 ip->i_ffs_effnlink = ip->i_nlink;
1548 brelse(bp, 0);
1549
1550 /*
1551 * Initialize the vnode from the inode, check for aliases.
1552 * Note that the underlying vnode may have changed.
1553 */
1554
1555 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
1556
1557 /*
1558 * Finish inode initialization now that aliasing has been resolved.
1559 */
1560
1561 ip->i_devvp = ump->um_devvp;
1562 VREF(ip->i_devvp);
1563
1564 /*
1565 * Ensure that uid and gid are correct. This is a temporary
1566 * fix until fsck has been changed to do the update.
1567 */
1568
1569 if (fs->fs_old_inodefmt < FS_44INODEFMT) { /* XXX */
1570 ip->i_uid = ip->i_ffs1_ouid; /* XXX */
1571 ip->i_gid = ip->i_ffs1_ogid; /* XXX */
1572 } /* XXX */
1573 uvm_vnp_setsize(vp, ip->i_size);
1574 *vpp = vp;
1575 return (0);
1576 }
1577
1578 /*
1579 * File handle to vnode
1580 *
1581 * Have to be really careful about stale file handles:
1582 * - check that the inode number is valid
1583 * - call ffs_vget() to get the locked inode
1584 * - check for an unallocated inode (i_mode == 0)
1585 * - check that the given client host has export rights and return
1586 * those rights via exflagsp and credanonp
1587 */
1588 int
1589 ffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
1590 {
1591 struct ufid ufh;
1592 struct fs *fs;
1593
1594 if (fhp->fid_len != sizeof(struct ufid))
1595 return EINVAL;
1596
1597 memcpy(&ufh, fhp, sizeof(ufh));
1598 fs = VFSTOUFS(mp)->um_fs;
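/*
 * fs_ncg * fs_ipg is the total number of inodes in the filesystem;
 * anything outside [ROOTINO, fs_ncg * fs_ipg) cannot name a valid
 * inode, so the handle must be stale.
 */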
1599 if (ufh.ufid_ino < ROOTINO ||
1600 ufh.ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1601 return (ESTALE);
1602 return (ufs_fhtovp(mp, &ufh, vpp));
1603 }
1604
1605 /*
1606 * Vnode pointer to File handle
1607 */
1608 /* ARGSUSED */
1609 int
1610 ffs_vptofh(struct vnode *vp, struct fid *fhp, size_t *fh_size)
1611 {
1612 struct inode *ip;
1613 struct ufid ufh;
1614
1615 if (*fh_size < sizeof(struct ufid)) {
1616 *fh_size = sizeof(struct ufid);
1617 return E2BIG;
1618 }
1619 ip = VTOI(vp);
1620 *fh_size = sizeof(struct ufid);
1621 memset(&ufh, 0, sizeof(ufh));
1622 ufh.ufid_len = sizeof(struct ufid);
1623 ufh.ufid_ino = ip->i_number;
1624 ufh.ufid_gen = ip->i_gen;
1625 memcpy(fhp, &ufh, sizeof(ufh));
1626 return (0);
1627 }
1628
1629 void
1630 ffs_init(void)
1631 {
1632 if (ffs_initcount++ > 0)
1633 return;
1634
1635 ffs_inode_cache = pool_cache_init(sizeof(struct inode), 0, 0, 0,
1636 "ffsino", NULL, IPL_NONE, NULL, NULL, NULL);
1637 ffs_dinode1_cache = pool_cache_init(sizeof(struct ufs1_dinode), 0, 0, 0,
1638 "ffsdino1", NULL, IPL_NONE, NULL, NULL, NULL);
1639 ffs_dinode2_cache = pool_cache_init(sizeof(struct ufs2_dinode), 0, 0, 0,
1640 "ffsdino2", NULL, IPL_NONE, NULL, NULL, NULL);
1641 softdep_initialize();
1642 ffs_snapshot_init();
1643 ufs_init();
1644 }
1645
1646 void
1647 ffs_reinit(void)
1648 {
1649 softdep_reinitialize();
1650 ufs_reinit();
1651 }
1652
1653 void
1654 ffs_done(void)
1655 {
1656 if (--ffs_initcount > 0)
1657 return;
1658
1659 /* XXX softdep cleanup ? */
1660 ffs_snapshot_fini();
1661 ufs_done();
1662 pool_cache_destroy(ffs_dinode2_cache);
1663 pool_cache_destroy(ffs_dinode1_cache);
1664 pool_cache_destroy(ffs_inode_cache);
1665 }
1666
1667 SYSCTL_SETUP(sysctl_vfs_ffs_setup, "sysctl vfs.ffs subtree setup")
1668 {
1669 #if 0
1670 extern int doasyncfree;
1671 #endif
1672 extern int ffs_log_changeopt;
1673
1674 sysctl_createv(clog, 0, NULL, NULL,
1675 CTLFLAG_PERMANENT,
1676 CTLTYPE_NODE, "vfs", NULL,
1677 NULL, 0, NULL, 0,
1678 CTL_VFS, CTL_EOL);
1679 sysctl_createv(clog, 0, NULL, NULL,
1680 CTLFLAG_PERMANENT,
1681 CTLTYPE_NODE, "ffs",
1682 SYSCTL_DESCR("Berkeley Fast File System"),
1683 NULL, 0, NULL, 0,
1684 CTL_VFS, 1, CTL_EOL);
1685
1686 /*
1687 * @@@ should we even bother with these first three?
1688 */
1689 sysctl_createv(clog, 0, NULL, NULL,
1690 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1691 CTLTYPE_INT, "doclusterread", NULL,
1692 sysctl_notavail, 0, NULL, 0,
1693 CTL_VFS, 1, FFS_CLUSTERREAD, CTL_EOL);
1694 sysctl_createv(clog, 0, NULL, NULL,
1695 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1696 CTLTYPE_INT, "doclusterwrite", NULL,
1697 sysctl_notavail, 0, NULL, 0,
1698 CTL_VFS, 1, FFS_CLUSTERWRITE, CTL_EOL);
1699 sysctl_createv(clog, 0, NULL, NULL,
1700 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1701 CTLTYPE_INT, "doreallocblks", NULL,
1702 sysctl_notavail, 0, NULL, 0,
1703 CTL_VFS, 1, FFS_REALLOCBLKS, CTL_EOL);
1704 #if 0
1705 sysctl_createv(clog, 0, NULL, NULL,
1706 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1707 CTLTYPE_INT, "doasyncfree",
1708 SYSCTL_DESCR("Release dirty blocks asynchronously"),
1709 NULL, 0, &doasyncfree, 0,
1710 CTL_VFS, 1, FFS_ASYNCFREE, CTL_EOL);
1711 #endif
1712 sysctl_createv(clog, 0, NULL, NULL,
1713 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1714 CTLTYPE_INT, "log_changeopt",
1715 SYSCTL_DESCR("Log changes in optimization strategy"),
1716 NULL, 0, &ffs_log_changeopt, 0,
1717 CTL_VFS, 1, FFS_LOG_CHANGEOPT, CTL_EOL);
1718 }
1719
1720 /*
1721 * Write a superblock and associated information back to disk.
1722 */
1723 int
1724 ffs_sbupdate(struct ufsmount *mp, int waitfor)
1725 {
1726 struct fs *fs = mp->um_fs;
1727 struct buf *bp;
1728 int error = 0;
1729 u_int32_t saveflag;
1730
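/*
 * fs_sblockloc is a byte offset; shifting it right by
 * (fs_fshift - fs_fsbtodb) should divide by DEV_BSIZE, giving the
 * device block number that getblk() expects here.
 */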
1731 bp = getblk(mp->um_devvp,
1732 fs->fs_sblockloc >> (fs->fs_fshift - fs->fs_fsbtodb),
1733 (int)fs->fs_sbsize, 0, 0);
1734 saveflag = fs->fs_flags & FS_INTERNAL;
1735 fs->fs_flags &= ~FS_INTERNAL;
1736
1737 memcpy(bp->b_data, fs, fs->fs_sbsize);
1738
1739 ffs_oldfscompat_write((struct fs *)bp->b_data, mp);
1740 #ifdef FFS_EI
1741 if (mp->um_flags & UFS_NEEDSWAP)
1742 ffs_sb_swap((struct fs *)bp->b_data, (struct fs *)bp->b_data);
1743 #endif
1744 fs->fs_flags |= saveflag;
1745
1746 if (waitfor == MNT_WAIT)
1747 error = bwrite(bp);
1748 else
1749 bawrite(bp);
1750 return (error);
1751 }
1752
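/*
 * Write the superblock and then the in-core cylinder group summary
 * blocks (fs_csp) back to their on-disk home at fs_csaddr.
 */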
1753 int
1754 ffs_cgupdate(struct ufsmount *mp, int waitfor)
1755 {
1756 struct fs *fs = mp->um_fs;
1757 struct buf *bp;
1758 int blks;
1759 void *space;
1760 int i, size, error = 0, allerror = 0;
1761
1762 allerror = ffs_sbupdate(mp, waitfor);
1763 blks = howmany(fs->fs_cssize, fs->fs_fsize);
1764 space = fs->fs_csp;
1765 for (i = 0; i < blks; i += fs->fs_frag) {
1766 size = fs->fs_bsize;
1767 if (i + fs->fs_frag > blks)
1768 size = (blks - i) * fs->fs_fsize;
1769 bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1770 size, 0, 0);
1771 #ifdef FFS_EI
1772 if (mp->um_flags & UFS_NEEDSWAP)
1773 ffs_csum_swap((struct csum*)space,
1774 (struct csum*)bp->b_data, size);
1775 else
1776 #endif
1777 memcpy(bp->b_data, space, (u_int)size);
1778 space = (char *)space + size;
1779 if (waitfor == MNT_WAIT)
1780 error = bwrite(bp);
1781 else
1782 bawrite(bp);
1783 }
1784 if (!allerror && error)
1785 allerror = error;
1786 return (allerror);
1787 }
1788
1789 int
1790 ffs_extattrctl(struct mount *mp, int cmd, struct vnode *vp,
1791 int attrnamespace, const char *attrname)
1792 {
1793 #ifdef UFS_EXTATTR
1794 /*
1795 * File-backed extended attributes are only supported on UFS1.
1796 * UFS2 has native extended attributes.
1797 */
1798 if (VFSTOUFS(mp)->um_fstype == UFS1)
1799 return (ufs_extattrctl(mp, cmd, vp, attrnamespace, attrname));
1800 #endif
1801 return (vfs_stdextattrctl(mp, cmd, vp, attrnamespace, attrname));
1802 }
1803
1804 int
1805 ffs_suspendctl(struct mount *mp, int cmd)
1806 {
1807 int error;
1808 struct lwp *l = curlwp;
1809
1810 switch (cmd) {
1811 case SUSPEND_SUSPEND:
1812 if ((error = fstrans_setstate(mp, FSTRANS_SUSPENDING)) != 0)
1813 return error;
1814 error = ffs_sync(mp, MNT_WAIT, l->l_proc->p_cred);
1815 if (error == 0)
1816 error = fstrans_setstate(mp, FSTRANS_SUSPENDED);
1817 if (error != 0) {
1818 (void) fstrans_setstate(mp, FSTRANS_NORMAL);
1819 return error;
1820 }
1821 return 0;
1822
1823 case SUSPEND_RESUME:
1824 return fstrans_setstate(mp, FSTRANS_NORMAL);
1825
1826 default:
1827 return EINVAL;
1828 }
1829 }
1830