1 /*	$NetBSD: ffs_vfsops.c,v 1.118.2.7 2004/09/24 10:53:58 skrll Exp $	*/
2
3 /*
4 * Copyright (c) 1989, 1991, 1993, 1994
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
32 */
33
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.118.2.7 2004/09/24 10:53:58 skrll Exp $");
36
37 #if defined(_KERNEL_OPT)
38 #include "opt_ffs.h"
39 #include "opt_quota.h"
40 #include "opt_compat_netbsd.h"
41 #include "opt_softdep.h"
42 #endif
43
44 #include <sys/param.h>
45 #include <sys/systm.h>
46 #include <sys/namei.h>
47 #include <sys/proc.h>
48 #include <sys/kernel.h>
49 #include <sys/vnode.h>
50 #include <sys/socket.h>
51 #include <sys/mount.h>
52 #include <sys/buf.h>
53 #include <sys/device.h>
54 #include <sys/mbuf.h>
55 #include <sys/file.h>
56 #include <sys/disklabel.h>
57 #include <sys/ioctl.h>
58 #include <sys/errno.h>
59 #include <sys/malloc.h>
60 #include <sys/pool.h>
61 #include <sys/lock.h>
62 #include <sys/sysctl.h>
63 #include <sys/conf.h>
64
65 #include <miscfs/specfs/specdev.h>
66
67 #include <ufs/ufs/quota.h>
68 #include <ufs/ufs/ufsmount.h>
69 #include <ufs/ufs/inode.h>
70 #include <ufs/ufs/dir.h>
71 #include <ufs/ufs/ufs_extern.h>
72 #include <ufs/ufs/ufs_bswap.h>
73
74 #include <ufs/ffs/fs.h>
75 #include <ufs/ffs/ffs_extern.h>
76
77 /* how many times ffs_init() was called */
78 int ffs_initcount = 0;
79
80 extern struct lock ufs_hashlock;
81
82 extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
83 extern const struct vnodeopv_desc ffs_specop_opv_desc;
84 extern const struct vnodeopv_desc ffs_fifoop_opv_desc;
85
86 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
87 &ffs_vnodeop_opv_desc,
88 &ffs_specop_opv_desc,
89 &ffs_fifoop_opv_desc,
90 NULL,
91 };
92
93 struct vfsops ffs_vfsops = {
94 MOUNT_FFS,
95 ffs_mount,
96 ufs_start,
97 ffs_unmount,
98 ufs_root,
99 ufs_quotactl,
100 ffs_statvfs,
101 ffs_sync,
102 ffs_vget,
103 ffs_fhtovp,
104 ffs_vptofh,
105 ffs_init,
106 ffs_reinit,
107 ffs_done,
108 NULL,
109 ffs_mountroot,
110 ufs_check_export,
111 ffs_snapshot,
112 ffs_vnodeopv_descs,
113 };
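/*
 * The initializers above fill the members of struct vfsops positionally;
 * as the ffs_/ufs_ prefixes suggest, they cover the filesystem name and
 * the mount, start, unmount, root, quotactl, statvfs, sync, vget, fhtovp,
 * vptofh, init, reinit, done, mountroot, export-check and snapshot
 * operations, followed by the vnode operation descriptions.  The single
 * NULL entry is a slot for which ffs supplies no handler.
 */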
114
115 struct genfs_ops ffs_genfsops = {
116 ffs_gop_size,
117 ufs_gop_alloc,
118 genfs_gop_write,
119 };
120
121 POOL_INIT(ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
122 &pool_allocator_nointr);
123 POOL_INIT(ffs_dinode1_pool, sizeof(struct ufs1_dinode), 0, 0, 0, "dino1pl",
124 &pool_allocator_nointr);
125 POOL_INIT(ffs_dinode2_pool, sizeof(struct ufs2_dinode), 0, 0, 0, "dino2pl",
126 &pool_allocator_nointr);
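/*
 * POOL_INIT() declares these pools statically and, in a statically linked
 * kernel, arranges for them to be set up during startup; when ffs is built
 * as an LKM the same pools are instead created explicitly with pool_init()
 * in ffs_init() below (see the #ifdef _LKM block there) and destroyed
 * again in ffs_done().
 */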
127
128 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, daddr_t);
129 static void ffs_oldfscompat_write(struct fs *, struct ufsmount *);
130
131 /*
132 * Called by main() when ffs is going to be mounted as root.
133 */
134
135 int
136 ffs_mountroot()
137 {
138 struct fs *fs;
139 struct mount *mp;
140 struct lwp *l = curlwp; /* XXX */
141 struct ufsmount *ump;
142 int error;
143
144 if (root_device->dv_class != DV_DISK)
145 return (ENODEV);
146
147 /*
148 * Get vnodes for rootdev.
149 */
150 if (bdevvp(rootdev, &rootvp))
151 panic("ffs_mountroot: can't setup bdevvp's");
152
153 if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
154 vrele(rootvp);
155 return (error);
156 }
157 if ((error = ffs_mountfs(rootvp, mp, l)) != 0) {
158 mp->mnt_op->vfs_refcount--;
159 vfs_unbusy(mp);
160 free(mp, M_MOUNT);
161 vrele(rootvp);
162 return (error);
163 }
164 simple_lock(&mountlist_slock);
165 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
166 simple_unlock(&mountlist_slock);
167 ump = VFSTOUFS(mp);
168 fs = ump->um_fs;
169 memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
170 (void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
171 (void)ffs_statvfs(mp, &mp->mnt_stat, l);
172 vfs_unbusy(mp);
173 setrootfstime((time_t)fs->fs_time);
174 return (0);
175 }
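/*
 * Note that the error path above undoes by hand what vfs_rootmountalloc()
 * set up: it drops the reference on the vfsops, unbusies and frees the
 * mount structure, and releases the rootvp reference obtained from
 * bdevvp(), since there is no higher-level caller to clean up after a
 * failed root mount.
 */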
176
177 /*
178 * VFS Operations.
179 *
180 * mount system call
181 */
182 int
183 ffs_mount(mp, path, data, ndp, l)
184 struct mount *mp;
185 const char *path;
186 void *data;
187 struct nameidata *ndp;
188 struct lwp *l;
189 {
190 struct vnode *devvp = NULL;
191 struct ufs_args args;
192 struct ufsmount *ump = NULL;
193 struct proc *p;
194 struct fs *fs;
195 int error, flags, update;
196 mode_t accessmode;
197
198 p = l->l_proc;
199 if (mp->mnt_flag & MNT_GETARGS) {
200 ump = VFSTOUFS(mp);
201 if (ump == NULL)
202 return EIO;
203 args.fspec = NULL;
204 vfs_showexport(mp, &args.export, &ump->um_export);
205 return copyout(&args, data, sizeof(args));
206 }
207 error = copyin(data, &args, sizeof (struct ufs_args));
208 if (error)
209 return (error);
210
211 #if !defined(SOFTDEP)
212 mp->mnt_flag &= ~MNT_SOFTDEP;
213 #endif
214
215 update = mp->mnt_flag & MNT_UPDATE;
216
217 /* Check arguments */
218 if (update) {
219 /* Use the extant mount */
220 ump = VFSTOUFS(mp);
221 devvp = ump->um_devvp;
222 if (args.fspec == NULL)
223 vref(devvp);
224 } else {
225 /* New mounts must have a filename for the device */
226 if (args.fspec == NULL)
227 return (EINVAL);
228 }
229
230 if (args.fspec != NULL) {
231 /*
232 * Look up the name and verify that it's sane.
233 */
234 NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, l);
235 if ((error = namei(ndp)) != 0)
236 return (error);
237 devvp = ndp->ni_vp;
238
239 if (!update) {
240 /*
241 * Be sure this is a valid block device
242 */
243 if (devvp->v_type != VBLK)
244 error = ENOTBLK;
245 else if (bdevsw_lookup(devvp->v_rdev) == NULL)
246 error = ENXIO;
247 } else {
248 /*
249 * Be sure we're still naming the same device
250 * used for our initial mount
251 */
252 if (devvp != ump->um_devvp)
253 error = EINVAL;
254 }
255 }
256
257 /*
258 * If mount by non-root, then verify that user has necessary
259 * permissions on the device.
260 */
261 if (error == 0 && p->p_ucred->cr_uid != 0) {
262 accessmode = VREAD;
263 if (update ?
264 (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
265 (mp->mnt_flag & MNT_RDONLY) == 0)
266 accessmode |= VWRITE;
267 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
268 error = VOP_ACCESS(devvp, accessmode, p->p_ucred, l);
269 VOP_UNLOCK(devvp, 0);
270 }
271
272 if (error) {
273 vrele(devvp);
274 return (error);
275 }
276
277 if (!update) {
278 error = ffs_mountfs(devvp, mp, l);
279 if (error) {
280 vrele(devvp);
281 return (error);
282 }
283
284 ump = VFSTOUFS(mp);
285 fs = ump->um_fs;
286 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
287 (MNT_SOFTDEP | MNT_ASYNC)) {
288 printf("%s fs uses soft updates, "
289 "ignoring async mode\n",
290 fs->fs_fsmnt);
291 mp->mnt_flag &= ~MNT_ASYNC;
292 }
293 } else {
294 /*
295 * Update the mount.
296 */
297
298 /*
299 * The initial mount got a reference on this
300 * device, so drop the one obtained via
301 * namei(), above.
302 */
303 vrele(devvp);
304
305 fs = ump->um_fs;
306 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
307 /*
308 * Changing from r/w to r/o
309 */
310 vn_start_write(NULL, &mp, V_WAIT);
311 flags = WRITECLOSE;
312 if (mp->mnt_flag & MNT_FORCE)
313 flags |= FORCECLOSE;
314 if (mp->mnt_flag & MNT_SOFTDEP)
315 error = softdep_flushfiles(mp, flags, l);
316 else
317 error = ffs_flushfiles(mp, flags, l);
318 if (fs->fs_pendingblocks != 0 ||
319 fs->fs_pendinginodes != 0) {
320 printf("%s: update error: blocks %" PRId64
321 " files %d\n",
322 fs->fs_fsmnt, fs->fs_pendingblocks,
323 fs->fs_pendinginodes);
324 fs->fs_pendingblocks = 0;
325 fs->fs_pendinginodes = 0;
326 }
327 if (error == 0 &&
328 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
329 fs->fs_clean & FS_WASCLEAN) {
330 if (mp->mnt_flag & MNT_SOFTDEP)
331 fs->fs_flags &= ~FS_DOSOFTDEP;
332 fs->fs_clean = FS_ISCLEAN;
333 (void) ffs_sbupdate(ump, MNT_WAIT);
334 }
335 vn_finished_write(mp, 0);
336 if (error)
337 return (error);
338 fs->fs_ronly = 1;
339 fs->fs_fmod = 0;
340 }
341
342 /*
343 * Flush soft dependencies if disabling it via an update
344 * mount. This may leave some items to be processed,
345 * so don't do this yet XXX.
346 */
347 if ((fs->fs_flags & FS_DOSOFTDEP) &&
348 !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
349 #ifdef notyet
350 vn_start_write(NULL, &mp, V_WAIT);
351 flags = WRITECLOSE;
352 if (mp->mnt_flag & MNT_FORCE)
353 flags |= FORCECLOSE;
354 error = softdep_flushfiles(mp, flags, l);
355 if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
356 fs->fs_flags &= ~FS_DOSOFTDEP;
357 (void) ffs_sbupdate(ump, MNT_WAIT);
358 vn_finished_write(mp);
359 #elif defined(SOFTDEP)
360 mp->mnt_flag |= MNT_SOFTDEP;
361 #endif
362 }
363
364 /*
365 * When upgrading to a softdep mount, we must first flush
366 * all vnodes. (not done yet -- see above)
367 */
368 if (!(fs->fs_flags & FS_DOSOFTDEP) &&
369 (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
370 #ifdef notyet
371 vn_start_write(NULL, &mp, V_WAIT);
372 flags = WRITECLOSE;
373 if (mp->mnt_flag & MNT_FORCE)
374 flags |= FORCECLOSE;
375 error = ffs_flushfiles(mp, flags, l);
376 vn_finished_write(mp);
377 #else
378 mp->mnt_flag &= ~MNT_SOFTDEP;
379 #endif
380 }
381
382 if (mp->mnt_flag & MNT_RELOAD) {
383 error = ffs_reload(mp, p->p_ucred, l);
384 if (error)
385 return (error);
386 }
387
388 if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
389 /*
390 * Changing from read-only to read/write
391 */
392 fs->fs_ronly = 0;
393 fs->fs_clean <<= 1;
394 fs->fs_fmod = 1;
395 if ((fs->fs_flags & FS_DOSOFTDEP)) {
396 error = softdep_mount(devvp, mp, fs,
397 p->p_ucred);
398 if (error)
399 return (error);
400 }
401 if (fs->fs_snapinum[0] != 0)
402 ffs_snapshot_mount(mp);
403 }
404 if (args.fspec == 0) {
405 /*
406 * Process export requests.
407 */
408 return (vfs_export(mp, &ump->um_export, &args.export));
409 }
410 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
411 (MNT_SOFTDEP | MNT_ASYNC)) {
412 printf("%s fs uses soft updates, ignoring async mode\n",
413 fs->fs_fsmnt);
414 mp->mnt_flag &= ~MNT_ASYNC;
415 }
416 }
417
418 error = set_statvfs_info(path, UIO_USERSPACE, args.fspec,
419 UIO_USERSPACE, mp, l);
420 if (error == 0)
421 (void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
422 sizeof(fs->fs_fsmnt));
423 if (mp->mnt_flag & MNT_SOFTDEP)
424 fs->fs_flags |= FS_DOSOFTDEP;
425 else
426 fs->fs_flags &= ~FS_DOSOFTDEP;
427 if (fs->fs_fmod != 0) { /* XXX */
428 fs->fs_fmod = 0;
429 if (fs->fs_clean & FS_WASCLEAN)
430 fs->fs_time = time.tv_sec;
431 else {
432 printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
433 mp->mnt_stat.f_mntfromname, fs->fs_clean);
434 printf("%s: lost blocks %" PRId64 " files %d\n",
435 mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
436 fs->fs_pendinginodes);
437 }
438 (void) ffs_cgupdate(ump, MNT_WAIT);
439 }
440 return error;
441 }
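/*
 * Summary of the cases handled above: MNT_GETARGS simply copies the
 * current arguments back out; MNT_UPDATE covers the read/write to
 * read-only transition, turning soft dependencies on or off (partly
 * deferred, see the "notyet" blocks), MNT_RELOAD, going back to
 * read/write, and export-list changes; everything else is a fresh mount
 * handled by ffs_mountfs().  Except for the early returns, the tail of
 * the function records the mount point name in fs_fsmnt and brings the
 * FS_DOSOFTDEP flag in line with MNT_SOFTDEP.
 */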
442
443 /*
444 * Reload all incore data for a filesystem (used after running fsck on
445 * the root filesystem and finding things to fix). The filesystem must
446 * be mounted read-only.
447 *
448 * Things to do to update the mount:
449 * 1) invalidate all cached meta-data.
450 * 2) re-read superblock from disk.
451 * 3) re-read summary information from disk.
452 * 4) invalidate all inactive vnodes.
453 * 5) invalidate all cached file data.
454 * 6) re-read inode data for all active vnodes.
455 */
456 int
457 ffs_reload(mp, cred, l)
458 struct mount *mp;
459 struct ucred *cred;
460 struct lwp *l;
461 {
462 struct vnode *vp, *nvp, *devvp;
463 struct inode *ip;
464 void *space;
465 struct buf *bp;
466 struct fs *fs, *newfs;
467 struct partinfo dpart;
468 int i, blks, size, error;
469 int32_t *lp;
470 struct ufsmount *ump;
471 daddr_t sblockloc;
472
473 if ((mp->mnt_flag & MNT_RDONLY) == 0)
474 return (EINVAL);
475
476 ump = VFSTOUFS(mp);
477 /*
478 * Step 1: invalidate all cached meta-data.
479 */
480 devvp = ump->um_devvp;
481 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
482 error = vinvalbuf(devvp, 0, cred, l, 0, 0);
483 VOP_UNLOCK(devvp, 0);
484 if (error)
485 panic("ffs_reload: dirty1");
486 /*
487 * Step 2: re-read superblock from disk.
488 */
489 fs = ump->um_fs;
490 if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, NOCRED, l) != 0)
491 size = DEV_BSIZE;
492 else
493 size = dpart.disklab->d_secsize;
494 /* XXX we don't handle possibility that superblock moved. */
495 error = bread(devvp, fs->fs_sblockloc / size, fs->fs_sbsize,
496 NOCRED, &bp);
497 if (error) {
498 brelse(bp);
499 return (error);
500 }
501 newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
502 memcpy(newfs, bp->b_data, fs->fs_sbsize);
503 #ifdef FFS_EI
504 if (ump->um_flags & UFS_NEEDSWAP) {
505 ffs_sb_swap((struct fs*)bp->b_data, newfs);
506 fs->fs_flags |= FS_SWAPPED;
507 } else
508 #endif
509 fs->fs_flags &= ~FS_SWAPPED;
510 if ((newfs->fs_magic != FS_UFS1_MAGIC &&
511 newfs->fs_magic != FS_UFS2_MAGIC)||
512 newfs->fs_bsize > MAXBSIZE ||
513 newfs->fs_bsize < sizeof(struct fs)) {
514 brelse(bp);
515 free(newfs, M_UFSMNT);
516 return (EIO); /* XXX needs translation */
517 }
518 /* Store off old fs_sblockloc for fs_oldfscompat_read. */
519 sblockloc = fs->fs_sblockloc;
520 /*
521 * Copy pointer fields back into superblock before copying in XXX
522 * new superblock. These should really be in the ufsmount. XXX
523 * Note that important parameters (eg fs_ncg) are unchanged.
524 */
525 newfs->fs_csp = fs->fs_csp;
526 newfs->fs_maxcluster = fs->fs_maxcluster;
527 newfs->fs_contigdirs = fs->fs_contigdirs;
528 newfs->fs_ronly = fs->fs_ronly;
529 newfs->fs_active = fs->fs_active;
530 memcpy(fs, newfs, (u_int)fs->fs_sbsize);
531 brelse(bp);
532 free(newfs, M_UFSMNT);
533
534 /* Recheck for apple UFS filesystem */
535 ump->um_flags &= ~UFS_ISAPPLEUFS;
536 /* First check to see if this is tagged as an Apple UFS filesystem
537 * in the disklabel
538 */
539 if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred, l) == 0) &&
540 (dpart.part->p_fstype == FS_APPLEUFS)) {
541 ump->um_flags |= UFS_ISAPPLEUFS;
542 }
543 #ifdef APPLE_UFS
544 else {
545 /* Manually look for an apple ufs label, and if a valid one
546 * is found, then treat it like an Apple UFS filesystem anyway
547 */
548 error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
549 APPLEUFS_LABEL_SIZE, cred, &bp);
550 if (error) {
551 brelse(bp);
552 return (error);
553 }
554 error = ffs_appleufs_validate(fs->fs_fsmnt,
555 (struct appleufslabel *)bp->b_data,NULL);
556 if (error == 0)
557 ump->um_flags |= UFS_ISAPPLEUFS;
558 brelse(bp);
559 bp = NULL;
560 }
561 #else
562 if (ump->um_flags & UFS_ISAPPLEUFS)
563 return (EIO);
564 #endif
565
566 if (UFS_MPISAPPLEUFS(ump)) {
567 /* see comment about NeXT below */
568 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
569 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
570 mp->mnt_iflag |= IMNT_DTYPE;
571 } else {
572 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
573 ump->um_dirblksiz = DIRBLKSIZ;
574 if (ump->um_maxsymlinklen > 0)
575 mp->mnt_iflag |= IMNT_DTYPE;
576 else
577 mp->mnt_iflag &= ~IMNT_DTYPE;
578 }
579 ffs_oldfscompat_read(fs, ump, sblockloc);
580 ump->um_maxfilesize = fs->fs_maxfilesize;
581 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
582 fs->fs_pendingblocks = 0;
583 fs->fs_pendinginodes = 0;
584 }
585
586 ffs_statvfs(mp, &mp->mnt_stat, l);
587 /*
588 * Step 3: re-read summary information from disk.
589 */
590 blks = howmany(fs->fs_cssize, fs->fs_fsize);
591 space = fs->fs_csp;
592 for (i = 0; i < blks; i += fs->fs_frag) {
593 size = fs->fs_bsize;
594 if (i + fs->fs_frag > blks)
595 size = (blks - i) * fs->fs_fsize;
596 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
597 NOCRED, &bp);
598 if (error) {
599 brelse(bp);
600 return (error);
601 }
602 #ifdef FFS_EI
603 if (UFS_FSNEEDSWAP(fs))
604 ffs_csum_swap((struct csum *)bp->b_data,
605 (struct csum *)space, size);
606 else
607 #endif
608 memcpy(space, bp->b_data, (size_t)size);
609 space = (char *)space + size;
610 brelse(bp);
611 }
612 if ((fs->fs_flags & FS_DOSOFTDEP))
613 softdep_mount(devvp, mp, fs, cred);
614 if (fs->fs_snapinum[0] != 0)
615 ffs_snapshot_mount(mp);
616 /*
617 * We no longer know anything about clusters per cylinder group.
618 */
619 if (fs->fs_contigsumsize > 0) {
620 lp = fs->fs_maxcluster;
621 for (i = 0; i < fs->fs_ncg; i++)
622 *lp++ = fs->fs_contigsumsize;
623 }
624
625 loop:
626 simple_lock(&mntvnode_slock);
627 for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
628 if (vp->v_mount != mp) {
629 simple_unlock(&mntvnode_slock);
630 goto loop;
631 }
632 nvp = vp->v_mntvnodes.le_next;
633 /*
634 * Step 4: invalidate all inactive vnodes.
635 */
636 if (vrecycle(vp, &mntvnode_slock, l))
637 goto loop;
638 /*
639 * Step 5: invalidate all cached file data.
640 */
641 simple_lock(&vp->v_interlock);
642 simple_unlock(&mntvnode_slock);
643 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
644 goto loop;
645 if (vinvalbuf(vp, 0, cred, l, 0, 0))
646 panic("ffs_reload: dirty2");
647 /*
648 * Step 6: re-read inode data for all active vnodes.
649 */
650 ip = VTOI(vp);
651 error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
652 (int)fs->fs_bsize, NOCRED, &bp);
653 if (error) {
654 brelse(bp);
655 vput(vp);
656 return (error);
657 }
658 ffs_load_inode(bp, ip, fs, ip->i_number);
659 ip->i_ffs_effnlink = ip->i_nlink;
660 brelse(bp);
661 vput(vp);
662 simple_lock(&mntvnode_slock);
663 }
664 simple_unlock(&mntvnode_slock);
665 return (0);
666 }
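/*
 * The "goto loop" restarts above are needed because vrecycle() and vget()
 * may sleep and release mntvnode_slock, after which the mount's vnode list
 * can change underneath us; restarting the scan from the head is the
 * conventional way to keep the traversal safe.
 */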
667
668 /*
669 * Possible superblock locations ordered from most to least likely.
670 */
671 static const int sblock_try[] = SBLOCKSEARCH;
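/*
 * SBLOCKSEARCH comes from <ufs/ffs/fs.h> and is a list of byte offsets
 * terminated by -1 -- typically the UFS2 location (64KB) first, then the
 * traditional UFS1 location (8KB), plus a couple of historic alternates.
 * The search loop in ffs_mountfs() below relies on the -1 terminator.
 */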
672
673 /*
674 * Common code for mount and mountroot
675 */
676 int
677 ffs_mountfs(devvp, mp, l)
678 struct vnode *devvp;
679 struct mount *mp;
680 struct lwp *l;
681 {
682 struct ufsmount *ump;
683 struct buf *bp;
684 struct fs *fs;
685 dev_t dev;
686 struct partinfo dpart;
687 void *space;
688 struct proc *p;
689 daddr_t sblockloc, fsblockloc;
690 int blks, fstype;
691 int error, i, size, ronly;
692 #ifdef FFS_EI
693 int needswap = 0; /* keep gcc happy */
694 #endif
695 int32_t *lp;
696 struct ucred *cred;
697 u_int32_t sbsize = 8192; /* keep gcc happy*/
698
699 dev = devvp->v_rdev;
700 p = l ? l->l_proc : NULL;
701 cred = p ? p->p_ucred : NOCRED;
702 /*
703 * Disallow multiple mounts of the same device.
704 * Disallow mounting of a device that is currently in use
705 * (except for root, which might share swap device for miniroot).
706 * Flush out any old buffers remaining from a previous use.
707 */
708 if ((error = vfs_mountedon(devvp)) != 0)
709 return (error);
710 if (vcount(devvp) > 1 && devvp != rootvp)
711 return (EBUSY);
712 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
713 error = vinvalbuf(devvp, V_SAVE, cred, l, 0, 0);
714 VOP_UNLOCK(devvp, 0);
715 if (error)
716 return (error);
717
718 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
719 error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, l);
720 if (error)
721 return (error);
722 if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred, l) != 0)
723 size = DEV_BSIZE;
724 else
725 size = dpart.disklab->d_secsize;
726
727 bp = NULL;
728 ump = NULL;
729 fs = NULL;
730 sblockloc = 0;
731 fstype = 0;
732
733 /*
734 * Try reading the superblock in each of its possible locations. */
735 for (i = 0; ; i++) {
736 if (bp != NULL) {
737 bp->b_flags |= B_NOCACHE;
738 brelse(bp);
739 bp = NULL;
740 }
741 if (sblock_try[i] == -1) {
742 error = EINVAL;
743 fs = NULL;
744 goto out;
745 }
746 error = bread(devvp, sblock_try[i] / size, SBLOCKSIZE, cred,
747 &bp);
748 if (error)
749 goto out;
750 fs = (struct fs*)bp->b_data;
751 fsblockloc = sblockloc = sblock_try[i];
752 if (fs->fs_magic == FS_UFS1_MAGIC) {
753 sbsize = fs->fs_sbsize;
754 fstype = UFS1;
755 #ifdef FFS_EI
756 needswap = 0;
757 } else if (fs->fs_magic == bswap32(FS_UFS1_MAGIC)) {
758 sbsize = bswap32(fs->fs_sbsize);
759 fstype = UFS1;
760 needswap = 1;
761 #endif
762 } else if (fs->fs_magic == FS_UFS2_MAGIC) {
763 sbsize = fs->fs_sbsize;
764 fstype = UFS2;
765 #ifdef FFS_EI
766 needswap = 0;
767 } else if (fs->fs_magic == bswap32(FS_UFS2_MAGIC)) {
768 sbsize = bswap32(fs->fs_sbsize);
769 fstype = UFS2;
770 needswap = 1;
771 #endif
772 } else
773 continue;
774
775
776 /* fs->fs_sblockloc isn't defined for old filesystems */
777 if (fstype == UFS1 && !(fs->fs_old_flags & FS_FLAGS_UPDATED)) {
778 if (sblockloc == SBLOCK_UFS2)
779 /*
780 * This is likely to be the first alternate
781 * in a filesystem with 64k blocks.
782 * Don't use it.
783 */
784 continue;
785 fsblockloc = sblockloc;
786 } else {
787 fsblockloc = fs->fs_sblockloc;
788 #ifdef FFS_EI
789 if (needswap)
790 fsblockloc = bswap64(fsblockloc);
791 #endif
792 }
793
794 /* Check we haven't found an alternate superblock */
795 if (fsblockloc != sblockloc)
796 continue;
797
798 /* Validate size of superblock */
799 if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs))
800 continue;
801
802 /* Ok seems to be a good superblock */
803 break;
804 }
805
806 fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
807 memcpy(fs, bp->b_data, sbsize);
808
809 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
810 memset(ump, 0, sizeof *ump);
811 TAILQ_INIT(&ump->um_snapshots);
812 ump->um_fs = fs;
813
814 #ifdef FFS_EI
815 if (needswap) {
816 ffs_sb_swap((struct fs*)bp->b_data, fs);
817 fs->fs_flags |= FS_SWAPPED;
818 } else
819 #endif
820 fs->fs_flags &= ~FS_SWAPPED;
821
822 ffs_oldfscompat_read(fs, ump, sblockloc);
823 ump->um_maxfilesize = fs->fs_maxfilesize;
824
825 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
826 fs->fs_pendingblocks = 0;
827 fs->fs_pendinginodes = 0;
828 }
829
830 ump->um_fstype = fstype;
831 if (fs->fs_sbsize < SBLOCKSIZE)
832 bp->b_flags |= B_INVAL;
833 brelse(bp);
834 bp = NULL;
835
836 /* First check to see if this is tagged as an Apple UFS filesystem
837 * in the disklabel
838 */
839 if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred, l) == 0) &&
840 (dpart.part->p_fstype == FS_APPLEUFS)) {
841 ump->um_flags |= UFS_ISAPPLEUFS;
842 }
843 #ifdef APPLE_UFS
844 else {
845 /* Manually look for an apple ufs label, and if a valid one
846 * is found, then treat it like an Apple UFS filesystem anyway
847 */
848 error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
849 APPLEUFS_LABEL_SIZE, cred, &bp);
850 if (error)
851 goto out;
852 error = ffs_appleufs_validate(fs->fs_fsmnt,
853 (struct appleufslabel *)bp->b_data,NULL);
854 if (error == 0) {
855 ump->um_flags |= UFS_ISAPPLEUFS;
856 }
857 brelse(bp);
858 bp = NULL;
859 }
860 #else
861 if (ump->um_flags & UFS_ISAPPLEUFS) {
862 error = EINVAL;
863 goto out;
864 }
865 #endif
866
867 /*
868 * verify that we can access the last block in the fs
869 * if we're mounting read/write.
870 */
871
872 if (!ronly) {
873 error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize,
874 cred, &bp);
875 if (bp->b_bcount != fs->fs_fsize)
876 error = EINVAL;
877 bp->b_flags |= B_INVAL;
878 if (error)
879 goto out;
880 brelse(bp);
881 bp = NULL;
882 }
883
884 fs->fs_ronly = ronly;
885 if (ronly == 0) {
886 fs->fs_clean <<= 1;
887 fs->fs_fmod = 1;
888 }
889 size = fs->fs_cssize;
890 blks = howmany(size, fs->fs_fsize);
891 if (fs->fs_contigsumsize > 0)
892 size += fs->fs_ncg * sizeof(int32_t);
893 size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
894 space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
895 fs->fs_csp = space;
896 for (i = 0; i < blks; i += fs->fs_frag) {
897 size = fs->fs_bsize;
898 if (i + fs->fs_frag > blks)
899 size = (blks - i) * fs->fs_fsize;
900 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
901 cred, &bp);
902 if (error) {
903 free(fs->fs_csp, M_UFSMNT);
904 goto out;
905 }
906 #ifdef FFS_EI
907 if (needswap)
908 ffs_csum_swap((struct csum *)bp->b_data,
909 (struct csum *)space, size);
910 else
911 #endif
912 memcpy(space, bp->b_data, (u_int)size);
913
914 space = (char *)space + size;
915 brelse(bp);
916 bp = NULL;
917 }
918 if (fs->fs_contigsumsize > 0) {
919 fs->fs_maxcluster = lp = space;
920 for (i = 0; i < fs->fs_ncg; i++)
921 *lp++ = fs->fs_contigsumsize;
922 space = lp;
923 }
924 size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
925 fs->fs_contigdirs = space;
926 space = (char *)space + size;
927 memset(fs->fs_contigdirs, 0, size);
928 /* Compatibility for old filesystems - XXX */
929 if (fs->fs_avgfilesize <= 0)
930 fs->fs_avgfilesize = AVFILESIZ;
931 if (fs->fs_avgfpdir <= 0)
932 fs->fs_avgfpdir = AFPDIR;
933 fs->fs_active = NULL;
934 mp->mnt_data = ump;
935 mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
936 mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_FFS);
937 mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
938 mp->mnt_stat.f_namemax = MAXNAMLEN;
939 if (UFS_MPISAPPLEUFS(ump)) {
940 /* NeXT used to keep short symlinks in the inode even
941 * when using FS_42INODEFMT. In that case fs->fs_maxsymlinklen
942 * is probably -1, but we still need to be able to identify
943 * short symlinks.
944 */
945 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
946 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
947 mp->mnt_iflag |= IMNT_DTYPE;
948 } else {
949 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
950 ump->um_dirblksiz = DIRBLKSIZ;
951 if (ump->um_maxsymlinklen > 0)
952 mp->mnt_iflag |= IMNT_DTYPE;
953 else
954 mp->mnt_iflag &= ~IMNT_DTYPE;
955 }
956 mp->mnt_fs_bshift = fs->fs_bshift;
957 mp->mnt_dev_bshift = DEV_BSHIFT; /* XXX */
958 mp->mnt_flag |= MNT_LOCAL;
959 #ifdef FFS_EI
960 if (needswap)
961 ump->um_flags |= UFS_NEEDSWAP;
962 #endif
963 ump->um_mountp = mp;
964 ump->um_dev = dev;
965 ump->um_devvp = devvp;
966 ump->um_nindir = fs->fs_nindir;
967 ump->um_lognindir = ffs(fs->fs_nindir) - 1;
968 ump->um_bptrtodb = fs->fs_fsbtodb;
969 ump->um_seqinc = fs->fs_frag;
970 for (i = 0; i < MAXQUOTAS; i++)
971 ump->um_quotas[i] = NULLVP;
972 devvp->v_specmountpoint = mp;
973 if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
974 error = softdep_mount(devvp, mp, fs, cred);
975 if (error) {
976 free(fs->fs_csp, M_UFSMNT);
977 goto out;
978 }
979 }
980 if (ronly == 0 && fs->fs_snapinum[0] != 0)
981 ffs_snapshot_mount(mp);
982 return (0);
983 out:
984 if (fs)
985 free(fs, M_UFSMNT);
986 devvp->v_specmountpoint = NULL;
987 if (bp)
988 brelse(bp);
989 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
990 (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, l);
991 VOP_UNLOCK(devvp, 0);
992 if (ump) {
993 if (ump->um_oldfscompat)
994 free(ump->um_oldfscompat, M_UFSMNT);
995 free(ump, M_UFSMNT);
996 mp->mnt_data = NULL;
997 }
998 return (error);
999 }
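/*
 * The "out" label above unwinds a partial mount: it frees the in-core
 * superblock copy, releases any buffer still held, closes the device that
 * was opened with VOP_OPEN(), and frees the half-constructed ufsmount
 * (including any um_oldfscompat save area) before propagating the error.
 */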
1000
1001 /*
1002 * Sanity checks for loading old filesystem superblocks.
1003 * See ffs_oldfscompat_write below for unwound actions.
1004 *
1005 * XXX - Parts get retired eventually.
1006 * Unfortunately new bits get added.
1007 */
1008 static void
1009 ffs_oldfscompat_read(fs, ump, sblockloc)
1010 struct fs *fs;
1011 struct ufsmount *ump;
1012 daddr_t sblockloc;
1013 {
1014 off_t maxfilesize;
1015 int32_t *extrasave;
1016
1017 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1018 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1019 return;
1020
1021 if (!ump->um_oldfscompat)
1022 ump->um_oldfscompat = malloc(512 + 3*sizeof(int32_t),
1023 M_UFSMNT, M_WAITOK);
1024
1025 memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
1026 extrasave = ump->um_oldfscompat;
1027 extrasave += 512/sizeof(int32_t);
1028 extrasave[0] = fs->fs_old_npsect;
1029 extrasave[1] = fs->fs_old_interleave;
1030 extrasave[2] = fs->fs_old_trackskew;
1031
1032 /* These fields will be overwritten by their
1033 * original values in fs_oldfscompat_write, so it is harmless
1034 * to modify them here.
1035 */
1036 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
1037 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
1038 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
1039 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
1040
1041 fs->fs_maxbsize = fs->fs_bsize;
1042 fs->fs_time = fs->fs_old_time;
1043 fs->fs_size = fs->fs_old_size;
1044 fs->fs_dsize = fs->fs_old_dsize;
1045 fs->fs_csaddr = fs->fs_old_csaddr;
1046 fs->fs_sblockloc = sblockloc;
1047
1048 fs->fs_flags = fs->fs_old_flags;
1049
1050 if (fs->fs_old_postblformat == FS_42POSTBLFMT) {
1051 fs->fs_old_nrpos = 8;
1052 fs->fs_old_npsect = fs->fs_old_nsect;
1053 fs->fs_old_interleave = 1;
1054 fs->fs_old_trackskew = 0;
1055 }
1056
1057 if (fs->fs_old_inodefmt < FS_44INODEFMT) {
1058 ump->um_maxfilesize = (u_quad_t) 1LL << 39;
1059 fs->fs_qbmask = ~fs->fs_bmask;
1060 fs->fs_qfmask = ~fs->fs_fmask;
1061 }
1062
1063 maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;
1064 if (ump->um_maxfilesize > maxfilesize)
1065 ump->um_maxfilesize = maxfilesize;
1066
1067 /* Compatibility for old filesystems */
1068 if (fs->fs_avgfilesize <= 0)
1069 fs->fs_avgfilesize = AVFILESIZ;
1070 if (fs->fs_avgfpdir <= 0)
1071 fs->fs_avgfpdir = AFPDIR;
1072
1073 #if 0
1074 if (bigcgs) {
1075 fs->fs_save_cgsize = fs->fs_cgsize;
1076 fs->fs_cgsize = fs->fs_bsize;
1077 }
1078 #endif
1079 }
1080
1081 /*
1082 * Unwinding superblock updates for old filesystems.
1083 * See ffs_oldfscompat_read above for details.
1084 *
1085 * XXX - Parts get retired eventually.
1086 * Unfortunately new bits get added.
1087 */
1088 static void
1089 ffs_oldfscompat_write(fs, ump)
1090 struct fs *fs;
1091 struct ufsmount *ump;
1092 {
1093 int32_t *extrasave;
1094
1095 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1096 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1097 return;
1098
1099 fs->fs_old_time = fs->fs_time;
1100 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
1101 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
1102 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
1103 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
1104 fs->fs_old_flags = fs->fs_flags;
1105
1106 #if 0
1107 if (bigcgs) {
1108 fs->fs_cgsize = fs->fs_save_cgsize;
1109 }
1110 #endif
1111
1112 memcpy(&fs->fs_old_postbl_start, ump->um_oldfscompat, 512);
1113 extrasave = ump->um_oldfscompat;
1114 extrasave += 512/sizeof(int32_t);
1115 fs->fs_old_npsect = extrasave[0];
1116 fs->fs_old_interleave = extrasave[1];
1117 fs->fs_old_trackskew = extrasave[2];
1118
1119 }
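/*
 * ffs_oldfscompat_read() and ffs_oldfscompat_write() are symmetric: the
 * read side saves the old rotational-layout area (512 bytes starting at
 * fs_old_postbl_start) plus npsect/interleave/trackskew in um_oldfscompat
 * and synthesizes the newer summary fields from their fs_old_* versions,
 * while the write side copies the saved data back so the superblock goes
 * to disk with its original pre-UFS2-format fields intact.
 */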
1120
1121 /*
1122 * unmount system call
1123 */
1124 int
1125 ffs_unmount(mp, mntflags, l)
1126 struct mount *mp;
1127 int mntflags;
1128 struct lwp *l;
1129 {
1130 struct ufsmount *ump;
1131 struct fs *fs;
1132 int error, flags, penderr;
1133
1134 penderr = 0;
1135 flags = 0;
1136 if (mntflags & MNT_FORCE)
1137 flags |= FORCECLOSE;
1138 if (mp->mnt_flag & MNT_SOFTDEP) {
1139 if ((error = softdep_flushfiles(mp, flags, l)) != 0)
1140 return (error);
1141 } else {
1142 if ((error = ffs_flushfiles(mp, flags, l)) != 0)
1143 return (error);
1144 }
1145 ump = VFSTOUFS(mp);
1146 fs = ump->um_fs;
1147 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1148 printf("%s: unmount pending error: blocks %" PRId64
1149 " files %d\n",
1150 fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
1151 fs->fs_pendingblocks = 0;
1152 fs->fs_pendinginodes = 0;
1153 penderr = 1;
1154 }
1155 if (fs->fs_ronly == 0 &&
1156 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
1157 fs->fs_clean & FS_WASCLEAN) {
1158 /*
1159 * XXXX don't mark fs clean in the case of softdep
1160 * pending block errors, until they are fixed.
1161 */
1162 if (penderr == 0) {
1163 if (mp->mnt_flag & MNT_SOFTDEP)
1164 fs->fs_flags &= ~FS_DOSOFTDEP;
1165 fs->fs_clean = FS_ISCLEAN;
1166 }
1167 fs->fs_fmod = 0;
1168 (void) ffs_sbupdate(ump, MNT_WAIT);
1169 }
1170 if (ump->um_devvp->v_type != VBAD)
1171 ump->um_devvp->v_specmountpoint = NULL;
1172 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1173 (void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
1174 NOCRED, l);
1175 vput(ump->um_devvp);
1176 free(fs->fs_csp, M_UFSMNT);
1177 free(fs, M_UFSMNT);
1178 if (ump->um_oldfscompat != NULL)
1179 free(ump->um_oldfscompat, M_UFSMNT);
1180 free(ump, M_UFSMNT);
1181 mp->mnt_data = NULL;
1182 mp->mnt_flag &= ~MNT_LOCAL;
1183 return (0);
1184 }
1185
1186 /*
1187 * Flush out all the files in a filesystem.
1188 */
1189 int
1190 ffs_flushfiles(mp, flags, l)
1191 struct mount *mp;
1192 int flags;
1193 struct lwp *l;
1194 {
1195 extern int doforce;
1196 struct ufsmount *ump;
1197 int error;
1198
1199 if (!doforce)
1200 flags &= ~FORCECLOSE;
1201 ump = VFSTOUFS(mp);
1202 #ifdef QUOTA
1203 if (mp->mnt_flag & MNT_QUOTA) {
1204 int i;
1205 if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
1206 return (error);
1207 for (i = 0; i < MAXQUOTAS; i++) {
1208 if (ump->um_quotas[i] == NULLVP)
1209 continue;
1210 quotaoff(l, mp, i);
1211 }
1212 /*
1213 * Here we fall through to vflush again to ensure
1214 * that we have gotten rid of all the system vnodes.
1215 */
1216 }
1217 #endif
1218 if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
1219 return (error);
1220 ffs_snapshot_unmount(mp);
1221 /*
1222 * Flush all the files.
1223 */
1224 error = vflush(mp, NULLVP, flags);
1225 if (error)
1226 return (error);
1227 /*
1228 * Flush filesystem metadata.
1229 */
1230 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1231 error = VOP_FSYNC(ump->um_devvp, l->l_proc->p_ucred, FSYNC_WAIT, 0, 0, l);
1232 VOP_UNLOCK(ump->um_devvp, 0);
1233 return (error);
1234 }
1235
1236 /*
1237 * Get file system statistics.
1238 */
1239 int
1240 ffs_statvfs(mp, sbp, l)
1241 struct mount *mp;
1242 struct statvfs *sbp;
1243 struct lwp *l;
1244 {
1245 struct ufsmount *ump;
1246 struct fs *fs;
1247
1248 ump = VFSTOUFS(mp);
1249 fs = ump->um_fs;
1250 sbp->f_bsize = fs->fs_bsize;
1251 sbp->f_frsize = fs->fs_fsize;
1252 sbp->f_iosize = fs->fs_bsize;
1253 sbp->f_blocks = fs->fs_dsize;
1254 sbp->f_bfree = blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
1255 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
1256 sbp->f_bresvd = ((u_int64_t) fs->fs_dsize * (u_int64_t)
1257 fs->fs_minfree) / (u_int64_t) 100;
1258 if (sbp->f_bfree > sbp->f_bresvd)
1259 sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
1260 else
1261 sbp->f_bavail = 0;
1262 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
1263 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1264 sbp->f_favail = sbp->f_ffree;
1265 sbp->f_fresvd = 0;
1266 copy_statvfs_info(sbp, mp);
1267 return (0);
1268 }
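/*
 * Units in ffs_statvfs(): the block counts are reported in f_frsize
 * (fragment) units.  Whole free blocks are converted with blkstofrags(),
 * free fragments are added directly, and blocks pending release are
 * converted from DEV_BSIZE units with dbtofsb().  The reserve is
 * fs_minfree percent of fs_dsize; with the common 5% minfree roughly a
 * twentieth of the data area is withheld from f_bavail.
 */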
1269
1270 /*
1271 * Go through the disk queues to initiate sandbagged IO;
1272 * go through the inodes to write those that have been modified;
1273 * initiate the writing of the super block if it has been modified.
1274 *
1275 * Note: we are always called with the filesystem marked `MPBUSY'.
1276 */
1277 int
1278 ffs_sync(mp, waitfor, cred, l)
1279 struct mount *mp;
1280 int waitfor;
1281 struct ucred *cred;
1282 struct lwp *l;
1283 {
1284 struct vnode *vp, *nvp;
1285 struct inode *ip;
1286 struct ufsmount *ump = VFSTOUFS(mp);
1287 struct fs *fs;
1288 int error, count, allerror = 0;
1289
1290 fs = ump->um_fs;
1291 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
1292 printf("fs = %s\n", fs->fs_fsmnt);
1293 panic("update: rofs mod");
1294 }
1295 /*
1296 * Write back each (modified) inode.
1297 */
1298 simple_lock(&mntvnode_slock);
1299 loop:
1300 for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
1301 /*
1302 * If the vnode that we are about to sync is no longer
1303 * associated with this mount point, start over.
1304 */
1305 if (vp->v_mount != mp)
1306 goto loop;
1307 simple_lock(&vp->v_interlock);
1308 nvp = LIST_NEXT(vp, v_mntvnodes);
1309 ip = VTOI(vp);
1310 if (vp->v_type == VNON ||
1311 ((ip->i_flag &
1312 (IN_CHANGE | IN_UPDATE | IN_MODIFIED)) == 0 &&
1313 LIST_EMPTY(&vp->v_dirtyblkhd) &&
1314 vp->v_uobj.uo_npages == 0))
1315 {
1316 simple_unlock(&vp->v_interlock);
1317 continue;
1318 }
1319 simple_unlock(&mntvnode_slock);
1320 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
1321 if (error) {
1322 simple_lock(&mntvnode_slock);
1323 if (error == ENOENT)
1324 goto loop;
1325 continue;
1326 }
1327 if (vp->v_type == VREG && waitfor == MNT_LAZY)
1328 error = VOP_UPDATE(vp, NULL, NULL, 0);
1329 else
1330 error = VOP_FSYNC(vp, cred,
1331 waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, l);
1332 if (error)
1333 allerror = error;
1334 vput(vp);
1335 simple_lock(&mntvnode_slock);
1336 }
1337 simple_unlock(&mntvnode_slock);
1338 /*
1339 * Force stale file system control information to be flushed.
1340 */
1341 if (waitfor == MNT_WAIT && (ump->um_mountp->mnt_flag & MNT_SOFTDEP)) {
1342 if ((error = softdep_flushworklist(ump->um_mountp, &count, l)))
1343 allerror = error;
1344 /* Flushed work items may create new vnodes to clean */
1345 if (allerror == 0 && count) {
1346 simple_lock(&mntvnode_slock);
1347 goto loop;
1348 }
1349 }
1350 if (waitfor != MNT_LAZY && (ump->um_devvp->v_numoutput > 0 ||
1351 !LIST_EMPTY(&ump->um_devvp->v_dirtyblkhd))) {
1352 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1353 if ((error = VOP_FSYNC(ump->um_devvp, cred,
1354 waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, l)) != 0)
1355 allerror = error;
1356 VOP_UNLOCK(ump->um_devvp, 0);
1357 if (allerror == 0 && waitfor == MNT_WAIT) {
1358 simple_lock(&mntvnode_slock);
1359 goto loop;
1360 }
1361 }
1362 #ifdef QUOTA
1363 qsync(l, mp);
1364 #endif
1365 /*
1366 * Write back modified superblock.
1367 */
1368 if (fs->fs_fmod != 0) {
1369 fs->fs_fmod = 0;
1370 fs->fs_time = time.tv_sec;
1371 if ((error = ffs_cgupdate(ump, waitfor)))
1372 allerror = error;
1373 }
1374 return (allerror);
1375 }
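/*
 * The waitfor argument above selects how much work ffs_sync() does:
 * MNT_LAZY only pushes inode times for regular files via VOP_UPDATE(),
 * while the other modes fsync each dirty vnode, with MNT_WAIT additionally
 * waiting for the writes and rescanning until the softdep work list and
 * the device vnode are clean.
 */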
1376
1377 /*
1378 * Look up a FFS dinode number to find its incore vnode, otherwise read it
1379 * in from disk. If it is in core, wait for the lock bit to clear, then
1380 * return the inode locked. Detection and handling of mount points must be
1381 * done by the calling routine.
1382 */
1383 int
1384 ffs_vget(mp, ino, vpp)
1385 struct mount *mp;
1386 ino_t ino;
1387 struct vnode **vpp;
1388 {
1389 struct fs *fs;
1390 struct inode *ip;
1391 struct ufsmount *ump;
1392 struct buf *bp;
1393 struct vnode *vp;
1394 dev_t dev;
1395 int error;
1396
1397 ump = VFSTOUFS(mp);
1398 dev = ump->um_dev;
1399
1400 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
1401 return (0);
1402
1403 /* Allocate a new vnode/inode. */
1404 if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
1405 *vpp = NULL;
1406 return (error);
1407 }
1408
1409 /*
1410 * If someone beat us to it while sleeping in getnewvnode(),
1411 * push back the freshly allocated vnode we don't need, and return.
1412 */
1413
1414 do {
1415 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
1416 ungetnewvnode(vp);
1417 return (0);
1418 }
1419 } while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));
1420
1421 vp->v_flag |= VLOCKSWORK;
1422
1423 /*
1424 * XXX MFS ends up here, too, to allocate an inode. Should we
1425 * XXX create another pool for MFS inodes?
1426 */
1427
1428 ip = pool_get(&ffs_inode_pool, PR_WAITOK);
1429 memset(ip, 0, sizeof(struct inode));
1430 vp->v_data = ip;
1431 ip->i_vnode = vp;
1432 ip->i_ump = ump;
1433 ip->i_fs = fs = ump->um_fs;
1434 ip->i_dev = dev;
1435 ip->i_number = ino;
1436 LIST_INIT(&ip->i_pcbufhd);
1437 #ifdef QUOTA
1438 {
1439 int i;
1440
1441 for (i = 0; i < MAXQUOTAS; i++)
1442 ip->i_dquot[i] = NODQUOT;
1443 }
1444 #endif
1445
1446 /*
1447 * Put it onto its hash chain and lock it so that other requests for
1448 * this inode will block if they arrive while we are sleeping waiting
1449 * for old data structures to be purged or for the contents of the
1450 * disk portion of this inode to be read.
1451 */
1452
1453 ufs_ihashins(ip);
1454 lockmgr(&ufs_hashlock, LK_RELEASE, 0);
1455
1456 /* Read in the disk contents for the inode, copy into the inode. */
1457 error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1458 (int)fs->fs_bsize, NOCRED, &bp);
1459 if (error) {
1460
1461 /*
1462 * The inode does not contain anything useful, so it would
1463 * be misleading to leave it on its hash chain. With mode
1464 * still zero, it will be unlinked and returned to the free
1465 * list by vput().
1466 */
1467
1468 vput(vp);
1469 brelse(bp);
1470 *vpp = NULL;
1471 return (error);
1472 }
1473 if (ip->i_ump->um_fstype == UFS1)
1474 ip->i_din.ffs1_din = pool_get(&ffs_dinode1_pool, PR_WAITOK);
1475 else
1476 ip->i_din.ffs2_din = pool_get(&ffs_dinode2_pool, PR_WAITOK);
1477 ffs_load_inode(bp, ip, fs, ino);
1478 if (DOINGSOFTDEP(vp))
1479 softdep_load_inodeblock(ip);
1480 else
1481 ip->i_ffs_effnlink = ip->i_nlink;
1482 brelse(bp);
1483
1484 /*
1485 * Initialize the vnode from the inode, check for aliases.
1486 * Note that the underlying vnode may have changed.
1487 */
1488
1489 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
1490
1491 /*
1492 * Finish inode initialization now that aliasing has been resolved.
1493 */
1494
1495 genfs_node_init(vp, &ffs_genfsops);
1496 ip->i_devvp = ump->um_devvp;
1497 VREF(ip->i_devvp);
1498
1499 /*
1500 * Ensure that uid and gid are correct. This is a temporary
1501 * fix until fsck has been changed to do the update.
1502 */
1503
1504 if (fs->fs_old_inodefmt < FS_44INODEFMT) { /* XXX */
1505 ip->i_uid = ip->i_ffs1_ouid; /* XXX */
1506 ip->i_gid = ip->i_ffs1_ogid; /* XXX */
1507 } /* XXX */
1508 uvm_vnp_setsize(vp, ip->i_size);
1509 *vpp = vp;
1510 return (0);
1511 }
1512
1513 /*
1514 * File handle to vnode
1515 *
1516 * Have to be really careful about stale file handles:
1517 * - check that the inode number is valid
1518 * - call ffs_vget() to get the locked inode
1519 * - check for an unallocated inode (i_mode == 0)
1520 * - check that the given client host has export rights and return
1521  *	  those rights via exflagsp and credanonp
1522 */
1523 int
1524 ffs_fhtovp(mp, fhp, vpp)
1525 struct mount *mp;
1526 struct fid *fhp;
1527 struct vnode **vpp;
1528 {
1529 struct ufid *ufhp;
1530 struct fs *fs;
1531
1532 ufhp = (struct ufid *)fhp;
1533 fs = VFSTOUFS(mp)->um_fs;
1534 if (ufhp->ufid_ino < ROOTINO ||
1535 ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1536 return (ESTALE);
1537 return (ufs_fhtovp(mp, ufhp, vpp));
1538 }
1539
1540 /*
1541 * Vnode pointer to File handle
1542 */
1543 /* ARGSUSED */
1544 int
1545 ffs_vptofh(vp, fhp)
1546 struct vnode *vp;
1547 struct fid *fhp;
1548 {
1549 struct inode *ip;
1550 struct ufid *ufhp;
1551
1552 ip = VTOI(vp);
1553 ufhp = (struct ufid *)fhp;
1554 ufhp->ufid_len = sizeof(struct ufid);
1555 ufhp->ufid_ino = ip->i_number;
1556 ufhp->ufid_gen = ip->i_gen;
1557 return (0);
1558 }
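/*
 * ffs_vptofh() and ffs_fhtovp() are the two halves of NFS file handle
 * support: vptofh packs the inode number and generation count into a
 * struct ufid, and fhtovp checks that the inode number is in range for
 * this filesystem before handing the handle to ufs_fhtovp(), which is
 * expected to reject a stale handle whose generation no longer matches.
 */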
1559
1560 void
1561 ffs_init()
1562 {
1563 if (ffs_initcount++ > 0)
1564 return;
1565
1566 #ifdef _LKM
1567 pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0,
1568 "ffsinopl", &pool_allocator_nointr);
1569 pool_init(&ffs_dinode1_pool, sizeof(struct ufs1_dinode), 0, 0, 0,
1570 "dino1pl", &pool_allocator_nointr);
1571 pool_init(&ffs_dinode2_pool, sizeof(struct ufs2_dinode), 0, 0, 0,
1572 "dino2pl", &pool_allocator_nointr);
1573 #endif
1574 softdep_initialize();
1575 ufs_init();
1576 }
1577
1578 void
1579 ffs_reinit()
1580 {
1581 softdep_reinitialize();
1582 ufs_reinit();
1583 }
1584
1585 void
1586 ffs_done()
1587 {
1588 if (--ffs_initcount > 0)
1589 return;
1590
1591 /* XXX softdep cleanup ? */
1592 ufs_done();
1593 #ifdef _LKM
1594 pool_destroy(&ffs_dinode2_pool);
1595 pool_destroy(&ffs_dinode1_pool);
1596 pool_destroy(&ffs_inode_pool);
1597 #endif
1598 }
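/*
 * ffs_init() and ffs_done() are reference counted through ffs_initcount,
 * so the shared state (the inode/dinode pools in the LKM case, the soft
 * dependency code and the common UFS code) is set up on the first
 * initialization and torn down only when the last user calls ffs_done().
 */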
1599
1600 SYSCTL_SETUP(sysctl_vfs_ffs_setup, "sysctl vfs.ffs subtree setup")
1601 {
1602 extern int doasyncfree;
1603 extern int ffs_log_changeopt;
1604
1605 sysctl_createv(clog, 0, NULL, NULL,
1606 CTLFLAG_PERMANENT,
1607 CTLTYPE_NODE, "vfs", NULL,
1608 NULL, 0, NULL, 0,
1609 CTL_VFS, CTL_EOL);
1610 sysctl_createv(clog, 0, NULL, NULL,
1611 CTLFLAG_PERMANENT,
1612 CTLTYPE_NODE, "ffs",
1613 SYSCTL_DESCR("Berkeley Fast File System"),
1614 NULL, 0, NULL, 0,
1615 CTL_VFS, 1, CTL_EOL);
1616
1617 /*
1618 * @@@ should we even bother with these first three?
1619 */
1620 sysctl_createv(clog, 0, NULL, NULL,
1621 CTLFLAG_PERMANENT,
1622 CTLTYPE_INT, "doclusterread", NULL,
1623 sysctl_notavail, 0, NULL, 0,
1624 CTL_VFS, 1, FFS_CLUSTERREAD, CTL_EOL);
1625 sysctl_createv(clog, 0, NULL, NULL,
1626 CTLFLAG_PERMANENT,
1627 CTLTYPE_INT, "doclusterwrite", NULL,
1628 sysctl_notavail, 0, NULL, 0,
1629 CTL_VFS, 1, FFS_CLUSTERWRITE, CTL_EOL);
1630 sysctl_createv(clog, 0, NULL, NULL,
1631 CTLFLAG_PERMANENT,
1632 CTLTYPE_INT, "doreallocblks", NULL,
1633 sysctl_notavail, 0, NULL, 0,
1634 CTL_VFS, 1, FFS_REALLOCBLKS, CTL_EOL);
1635 sysctl_createv(clog, 0, NULL, NULL,
1636 CTLFLAG_PERMANENT,
1637 CTLTYPE_INT, "doasyncfree",
1638 SYSCTL_DESCR("Release dirty blocks asynchronously"),
1639 NULL, 0, &doasyncfree, 0,
1640 CTL_VFS, 1, FFS_ASYNCFREE, CTL_EOL);
1641 sysctl_createv(clog, 0, NULL, NULL,
1642 CTLFLAG_PERMANENT,
1643 CTLTYPE_INT, "log_changeopt",
1644 SYSCTL_DESCR("Log changes in optimization strategy"),
1645 NULL, 0, &ffs_log_changeopt, 0,
1646 CTL_VFS, 1, FFS_LOG_CHANGEOPT, CTL_EOL);
1647 }
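/*
 * This sets up the vfs.ffs sysctl subtree.  The first three nodes
 * (doclusterread, doclusterwrite, doreallocblks) are kept only as
 * placeholders wired to sysctl_notavail; doasyncfree and log_changeopt
 * are live knobs backed by the kernel variables declared at the top of
 * the function.
 */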
1648
1649 /*
1650 * Write a superblock and associated information back to disk.
1651 */
1652 int
1653 ffs_sbupdate(mp, waitfor)
1654 struct ufsmount *mp;
1655 int waitfor;
1656 {
1657 struct fs *fs = mp->um_fs;
1658 struct buf *bp;
1659 int error = 0;
1660 u_int32_t saveflag;
1661
1662 bp = getblk(mp->um_devvp,
1663 fs->fs_sblockloc >> (fs->fs_fshift - fs->fs_fsbtodb),
1664 (int)fs->fs_sbsize, 0, 0);
1665 saveflag = fs->fs_flags & FS_INTERNAL;
1666 fs->fs_flags &= ~FS_INTERNAL;
1667
1668 memcpy(bp->b_data, fs, fs->fs_sbsize);
1669
1670 ffs_oldfscompat_write((struct fs *)bp->b_data, mp);
1671 #ifdef FFS_EI
1672 if (mp->um_flags & UFS_NEEDSWAP)
1673 ffs_sb_swap((struct fs *)bp->b_data, (struct fs *)bp->b_data);
1674 #endif
1675 fs->fs_flags |= saveflag;
1676
1677 if (waitfor == MNT_WAIT)
1678 error = bwrite(bp);
1679 else
1680 bawrite(bp);
1681 return (error);
1682 }
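/*
 * In ffs_sbupdate() above, the getblk() block number converts the
 * superblock's byte offset into a DEV_BSIZE block number with a single
 * shift: ">> fs_fshift" would give the fragment number and "<< fs_fsbtodb"
 * would turn fragments into device blocks, so the combined shift is
 * (fs_fshift - fs_fsbtodb).  FS_INTERNAL masks flag bits that are only
 * meaningful in core, which is why they are cleared before the copy into
 * the buffer and restored afterwards.
 */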
1683
1684 int
1685 ffs_cgupdate(mp, waitfor)
1686 struct ufsmount *mp;
1687 int waitfor;
1688 {
1689 struct fs *fs = mp->um_fs;
1690 struct buf *bp;
1691 int blks;
1692 void *space;
1693 int i, size, error = 0, allerror = 0;
1694
1695 allerror = ffs_sbupdate(mp, waitfor);
1696 blks = howmany(fs->fs_cssize, fs->fs_fsize);
1697 space = fs->fs_csp;
1698 for (i = 0; i < blks; i += fs->fs_frag) {
1699 size = fs->fs_bsize;
1700 if (i + fs->fs_frag > blks)
1701 size = (blks - i) * fs->fs_fsize;
1702 bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1703 size, 0, 0);
1704 #ifdef FFS_EI
1705 if (mp->um_flags & UFS_NEEDSWAP)
1706 ffs_csum_swap((struct csum*)space,
1707 (struct csum*)bp->b_data, size);
1708 else
1709 #endif
1710 memcpy(bp->b_data, space, (u_int)size);
1711 space = (char *)space + size;
1712 if (waitfor == MNT_WAIT)
1713 error = bwrite(bp);
1714 else
1715 bawrite(bp);
1716 }
1717 if (!allerror && error)
1718 allerror = error;
1719 return (allerror);
1720 }
1721