1 /* $NetBSD: ffs_vfsops.c,v 1.166 2005/07/15 05:01:16 thorpej Exp $ */
2
3 /*
4 * Copyright (c) 1989, 1991, 1993, 1994
5 * The Regents of the University of California. All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. Neither the name of the University nor the names of its contributors
16 * may be used to endorse or promote products derived from this software
17 * without specific prior written permission.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * SUCH DAMAGE.
30 *
31 * @(#)ffs_vfsops.c 8.31 (Berkeley) 5/20/95
32 */
33
34 #include <sys/cdefs.h>
35 __KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.166 2005/07/15 05:01:16 thorpej Exp $");
36
37 #if defined(_KERNEL_OPT)
38 #include "opt_ffs.h"
39 #include "opt_quota.h"
40 #include "opt_softdep.h"
41 #endif
42
43 #include <sys/param.h>
44 #include <sys/systm.h>
45 #include <sys/namei.h>
46 #include <sys/proc.h>
47 #include <sys/kernel.h>
48 #include <sys/vnode.h>
49 #include <sys/socket.h>
50 #include <sys/mount.h>
51 #include <sys/buf.h>
52 #include <sys/device.h>
53 #include <sys/mbuf.h>
54 #include <sys/file.h>
55 #include <sys/disklabel.h>
56 #include <sys/ioctl.h>
57 #include <sys/errno.h>
58 #include <sys/malloc.h>
59 #include <sys/pool.h>
60 #include <sys/lock.h>
61 #include <sys/sysctl.h>
62 #include <sys/conf.h>
63
64 #include <miscfs/specfs/specdev.h>
65
66 #include <ufs/ufs/quota.h>
67 #include <ufs/ufs/ufsmount.h>
68 #include <ufs/ufs/inode.h>
69 #include <ufs/ufs/dir.h>
70 #include <ufs/ufs/ufs_extern.h>
71 #include <ufs/ufs/ufs_bswap.h>
72
73 #include <ufs/ffs/fs.h>
74 #include <ufs/ffs/ffs_extern.h>
75
76 /* how many times ffs_init() was called */
77 int ffs_initcount = 0;
78
79 extern struct lock ufs_hashlock;
80
81 extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
82 extern const struct vnodeopv_desc ffs_specop_opv_desc;
83 extern const struct vnodeopv_desc ffs_fifoop_opv_desc;
84
85 const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
86 &ffs_vnodeop_opv_desc,
87 &ffs_specop_opv_desc,
88 &ffs_fifoop_opv_desc,
89 NULL,
90 };
91
92 struct vfsops ffs_vfsops = {
93 MOUNT_FFS,
94 ffs_mount,
95 ufs_start,
96 ffs_unmount,
97 ufs_root,
98 ufs_quotactl,
99 ffs_statvfs,
100 ffs_sync,
101 ffs_vget,
102 ffs_fhtovp,
103 ffs_vptofh,
104 ffs_init,
105 ffs_reinit,
106 ffs_done,
107 NULL,
108 ffs_mountroot,
109 ufs_check_export,
110 ffs_snapshot,
111 vfs_stdextattrctl,
112 ffs_vnodeopv_descs,
113 };
114 VFS_ATTACH(ffs_vfsops);
115
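/*
 * Hooks for the genfs/UVM page I/O layer; attached to each FFS vnode
 * by the genfs_node_init() call in ffs_vget() below.
 */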
116 static const struct genfs_ops ffs_genfsops = {
117 .gop_size = ffs_gop_size,
118 .gop_alloc = ufs_gop_alloc,
119 .gop_write = genfs_gop_write,
120 };
121
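/*
 * Pools backing in-core inodes and the UFS1/UFS2 dinode copies.  For
 * LKM builds the matching pool_init()/pool_destroy() calls are made
 * in ffs_init() and ffs_done() instead.
 */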
122 POOL_INIT(ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
123 &pool_allocator_nointr);
124 POOL_INIT(ffs_dinode1_pool, sizeof(struct ufs1_dinode), 0, 0, 0, "dino1pl",
125 &pool_allocator_nointr);
126 POOL_INIT(ffs_dinode2_pool, sizeof(struct ufs2_dinode), 0, 0, 0, "dino2pl",
127 &pool_allocator_nointr);
128
129 static void ffs_oldfscompat_read(struct fs *, struct ufsmount *, daddr_t);
130 static void ffs_oldfscompat_write(struct fs *, struct ufsmount *);
131
132 /*
133 * Called by main() when ffs is going to be mounted as root.
134 */
135
136 int
137 ffs_mountroot(void)
138 {
139 struct fs *fs;
140 struct mount *mp;
141 struct proc *p = curproc; /* XXX */
142 struct ufsmount *ump;
143 int error;
144
145 if (root_device->dv_class != DV_DISK)
146 return (ENODEV);
147
148 if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
149 vrele(rootvp);
150 return (error);
151 }
152 if ((error = ffs_mountfs(rootvp, mp, p)) != 0) {
153 mp->mnt_op->vfs_refcount--;
154 vfs_unbusy(mp);
155 free(mp, M_MOUNT);
156 return (error);
157 }
158 simple_lock(&mountlist_slock);
159 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
160 simple_unlock(&mountlist_slock);
161 ump = VFSTOUFS(mp);
162 fs = ump->um_fs;
163 memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
164 (void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
165 (void)ffs_statvfs(mp, &mp->mnt_stat, p);
166 vfs_unbusy(mp);
167 setrootfstime((time_t)fs->fs_time);
168 return (0);
169 }
170
171 /*
172 * VFS Operations.
173 *
174 * mount system call
175 */
176 int
177 ffs_mount(struct mount *mp, const char *path, void *data,
178 struct nameidata *ndp, struct proc *p)
179 {
180 struct vnode *devvp = NULL;
181 struct ufs_args args;
182 struct ufsmount *ump = NULL;
183 struct fs *fs;
184 int error, flags, update;
185 mode_t accessmode;
186
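/* MNT_GETARGS: report the current mount arguments back to the caller. */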
187 if (mp->mnt_flag & MNT_GETARGS) {
188 ump = VFSTOUFS(mp);
189 if (ump == NULL)
190 return EIO;
191 args.fspec = NULL;
192 vfs_showexport(mp, &args.export, &ump->um_export);
193 return copyout(&args, data, sizeof(args));
194 }
195 error = copyin(data, &args, sizeof (struct ufs_args));
196 if (error)
197 return (error);
198
199 #if !defined(SOFTDEP)
200 mp->mnt_flag &= ~MNT_SOFTDEP;
201 #endif
202
203 update = mp->mnt_flag & MNT_UPDATE;
204
205 /* Check arguments */
206 if (args.fspec != NULL) {
207 /*
208 * Look up the name and verify that it's sane.
209 */
210 NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
211 if ((error = namei(ndp)) != 0)
212 return (error);
213 devvp = ndp->ni_vp;
214
215 if (!update) {
216 /*
217 * Be sure this is a valid block device
218 */
219 if (devvp->v_type != VBLK)
220 error = ENOTBLK;
221 else if (bdevsw_lookup(devvp->v_rdev) == NULL)
222 error = ENXIO;
223 } else {
224 /*
225 * Be sure we're still naming the same device
226 * used for our initial mount
227 */
228 ump = VFSTOUFS(mp);
229 if (devvp != ump->um_devvp)
230 error = EINVAL;
231 }
232 } else {
233 if (!update) {
234 /* New mounts must have a filename for the device */
235 return (EINVAL);
236 } else {
237 /* Use the extant mount */
238 ump = VFSTOUFS(mp);
239 devvp = ump->um_devvp;
240 vref(devvp);
241 }
242 }
243
244 /*
245 * If mount by non-root, then verify that user has necessary
246 * permissions on the device.
247 */
248 if (error == 0 && p->p_ucred->cr_uid != 0) {
249 accessmode = VREAD;
250 if (update ?
251 (mp->mnt_iflag & IMNT_WANTRDWR) != 0 :
252 (mp->mnt_flag & MNT_RDONLY) == 0)
253 accessmode |= VWRITE;
254 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
255 error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
256 VOP_UNLOCK(devvp, 0);
257 }
258
259 if (error) {
260 vrele(devvp);
261 return (error);
262 }
263
264 if (!update) {
265 int xflags;
266
267 /*
268 * Disallow multiple mounts of the same device.
269 * Disallow mounting of a device that is currently in use
270 * (except for root, which might share swap device for
271 * miniroot).
272 */
273 error = vfs_mountedon(devvp);
274 if (error)
275 goto fail;
276 if (vcount(devvp) > 1 && devvp != rootvp) {
277 error = EBUSY;
278 goto fail;
279 }
280 if (mp->mnt_flag & MNT_RDONLY)
281 xflags = FREAD;
282 else
283 xflags = FREAD|FWRITE;
284 error = VOP_OPEN(devvp, xflags, FSCRED, p);
285 if (error)
286 goto fail;
287 error = ffs_mountfs(devvp, mp, p);
288 if (error) {
289 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
290 (void)VOP_CLOSE(devvp, xflags, NOCRED, p);
291 VOP_UNLOCK(devvp, 0);
292 goto fail;
293 }
294
295 ump = VFSTOUFS(mp);
296 fs = ump->um_fs;
297 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
298 (MNT_SOFTDEP | MNT_ASYNC)) {
299 printf("%s fs uses soft updates, "
300 "ignoring async mode\n",
301 fs->fs_fsmnt);
302 mp->mnt_flag &= ~MNT_ASYNC;
303 }
304 } else {
305 /*
306 * Update the mount.
307 */
308
309 /*
310 * The initial mount got a reference on this
311 * device, so drop the one obtained via
312 * namei(), above.
313 */
314 vrele(devvp);
315
316 ump = VFSTOUFS(mp);
317 fs = ump->um_fs;
318 if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
319 /*
320 * Changing from r/w to r/o
321 */
322 vn_start_write(NULL, &mp, V_WAIT);
323 flags = WRITECLOSE;
324 if (mp->mnt_flag & MNT_FORCE)
325 flags |= FORCECLOSE;
326 if (mp->mnt_flag & MNT_SOFTDEP)
327 error = softdep_flushfiles(mp, flags, p);
328 else
329 error = ffs_flushfiles(mp, flags, p);
330 if (fs->fs_pendingblocks != 0 ||
331 fs->fs_pendinginodes != 0) {
332 printf("%s: update error: blocks %" PRId64
333 " files %d\n",
334 fs->fs_fsmnt, fs->fs_pendingblocks,
335 fs->fs_pendinginodes);
336 fs->fs_pendingblocks = 0;
337 fs->fs_pendinginodes = 0;
338 }
339 if (error == 0 &&
340 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
341 fs->fs_clean & FS_WASCLEAN) {
342 if (mp->mnt_flag & MNT_SOFTDEP)
343 fs->fs_flags &= ~FS_DOSOFTDEP;
344 fs->fs_clean = FS_ISCLEAN;
345 (void) ffs_sbupdate(ump, MNT_WAIT);
346 }
347 vn_finished_write(mp, 0);
348 if (error)
349 return (error);
350 fs->fs_ronly = 1;
351 fs->fs_fmod = 0;
352 }
353
354 /*
355 * Flush soft dependencies if disabling it via an update
356 * mount. This may leave some items to be processed,
357 * so don't do this yet XXX.
358 */
359 if ((fs->fs_flags & FS_DOSOFTDEP) &&
360 !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
361 #ifdef notyet
362 vn_start_write(NULL, &mp, V_WAIT);
363 flags = WRITECLOSE;
364 if (mp->mnt_flag & MNT_FORCE)
365 flags |= FORCECLOSE;
366 error = softdep_flushfiles(mp, flags, p);
367 if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
368 fs->fs_flags &= ~FS_DOSOFTDEP;
369 (void) ffs_sbupdate(ump, MNT_WAIT);
370 vn_finished_write(mp);
371 #elif defined(SOFTDEP)
372 mp->mnt_flag |= MNT_SOFTDEP;
373 #endif
374 }
375
376 /*
377 * When upgrading to a softdep mount, we must first flush
378 * all vnodes. (not done yet -- see above)
379 */
380 if (!(fs->fs_flags & FS_DOSOFTDEP) &&
381 (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
382 #ifdef notyet
383 vn_start_write(NULL, &mp, V_WAIT);
384 flags = WRITECLOSE;
385 if (mp->mnt_flag & MNT_FORCE)
386 flags |= FORCECLOSE;
387 error = ffs_flushfiles(mp, flags, p);
388 vn_finished_write(mp);
389 #else
390 mp->mnt_flag &= ~MNT_SOFTDEP;
391 #endif
392 }
393
394 if (mp->mnt_flag & MNT_RELOAD) {
395 error = ffs_reload(mp, p->p_ucred, p);
396 if (error)
397 return (error);
398 }
399
400 if (fs->fs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
401 /*
402 * Changing from read-only to read/write
403 */
404 fs->fs_ronly = 0;
405 fs->fs_clean <<= 1;
406 fs->fs_fmod = 1;
407 if ((fs->fs_flags & FS_DOSOFTDEP)) {
408 error = softdep_mount(devvp, mp, fs,
409 p->p_ucred);
410 if (error)
411 return (error);
412 }
413 if (fs->fs_snapinum[0] != 0)
414 ffs_snapshot_mount(mp);
415 }
416 if (args.fspec == 0) {
417 /*
418 * Process export requests.
419 */
420 return (vfs_export(mp, &ump->um_export, &args.export));
421 }
422 if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
423 (MNT_SOFTDEP | MNT_ASYNC)) {
424 printf("%s fs uses soft updates, ignoring async mode\n",
425 fs->fs_fsmnt);
426 mp->mnt_flag &= ~MNT_ASYNC;
427 }
428 }
429
430 error = set_statvfs_info(path, UIO_USERSPACE, args.fspec,
431 UIO_USERSPACE, mp, p);
432 if (error == 0)
433 (void)strncpy(fs->fs_fsmnt, mp->mnt_stat.f_mntonname,
434 sizeof(fs->fs_fsmnt));
435 if (mp->mnt_flag & MNT_SOFTDEP)
436 fs->fs_flags |= FS_DOSOFTDEP;
437 else
438 fs->fs_flags &= ~FS_DOSOFTDEP;
439 if (fs->fs_fmod != 0) { /* XXX */
440 fs->fs_fmod = 0;
441 if (fs->fs_clean & FS_WASCLEAN)
442 fs->fs_time = time.tv_sec;
443 else {
444 printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
445 mp->mnt_stat.f_mntfromname, fs->fs_clean);
446 printf("%s: lost blocks %" PRId64 " files %d\n",
447 mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
448 fs->fs_pendinginodes);
449 }
450 (void) ffs_cgupdate(ump, MNT_WAIT);
451 }
452 return (error);
453
454 fail:
455 vrele(devvp);
456 return (error);
457 }
458
459 /*
460 * Reload all incore data for a filesystem (used after running fsck on
461 * the root filesystem and finding things to fix). The filesystem must
462 * be mounted read-only.
463 *
464 * Things to do to update the mount:
465 * 1) invalidate all cached meta-data.
466 * 2) re-read superblock from disk.
467 * 3) re-read summary information from disk.
468 * 4) invalidate all inactive vnodes.
469 * 5) invalidate all cached file data.
470 * 6) re-read inode data for all active vnodes.
471 */
472 int
473 ffs_reload(struct mount *mp, struct ucred *cred, struct proc *p)
474 {
475 struct vnode *vp, *nvp, *devvp;
476 struct inode *ip;
477 void *space;
478 struct buf *bp;
479 struct fs *fs, *newfs;
480 struct partinfo dpart;
481 int i, blks, size, error;
482 int32_t *lp;
483 struct ufsmount *ump;
484 daddr_t sblockloc;
485
486 if ((mp->mnt_flag & MNT_RDONLY) == 0)
487 return (EINVAL);
488
489 ump = VFSTOUFS(mp);
490 /*
491 * Step 1: invalidate all cached meta-data.
492 */
493 devvp = ump->um_devvp;
494 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
495 error = vinvalbuf(devvp, 0, cred, p, 0, 0);
496 VOP_UNLOCK(devvp, 0);
497 if (error)
498 panic("ffs_reload: dirty1");
499 /*
500 * Step 2: re-read superblock from disk.
501 */
502 fs = ump->um_fs;
503 if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, NOCRED, p) != 0)
504 size = DEV_BSIZE;
505 else
506 size = dpart.disklab->d_secsize;
507 /* XXX we don't handle possibility that superblock moved. */
508 error = bread(devvp, fs->fs_sblockloc / size, fs->fs_sbsize,
509 NOCRED, &bp);
510 if (error) {
511 brelse(bp);
512 return (error);
513 }
514 newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
515 memcpy(newfs, bp->b_data, fs->fs_sbsize);
516 #ifdef FFS_EI
517 if (ump->um_flags & UFS_NEEDSWAP) {
518 ffs_sb_swap((struct fs*)bp->b_data, newfs);
519 fs->fs_flags |= FS_SWAPPED;
520 } else
521 #endif
522 fs->fs_flags &= ~FS_SWAPPED;
523 if ((newfs->fs_magic != FS_UFS1_MAGIC &&
524 newfs->fs_magic != FS_UFS2_MAGIC)||
525 newfs->fs_bsize > MAXBSIZE ||
526 newfs->fs_bsize < sizeof(struct fs)) {
527 brelse(bp);
528 free(newfs, M_UFSMNT);
529 return (EIO); /* XXX needs translation */
530 }
531 	/* Store off old fs_sblockloc for ffs_oldfscompat_read. */
532 sblockloc = fs->fs_sblockloc;
533 /*
534 * Copy pointer fields back into superblock before copying in XXX
535 * new superblock. These should really be in the ufsmount. XXX
536 * Note that important parameters (eg fs_ncg) are unchanged.
537 */
538 newfs->fs_csp = fs->fs_csp;
539 newfs->fs_maxcluster = fs->fs_maxcluster;
540 newfs->fs_contigdirs = fs->fs_contigdirs;
541 newfs->fs_ronly = fs->fs_ronly;
542 newfs->fs_active = fs->fs_active;
543 memcpy(fs, newfs, (u_int)fs->fs_sbsize);
544 brelse(bp);
545 free(newfs, M_UFSMNT);
546
547 /* Recheck for apple UFS filesystem */
548 ump->um_flags &= ~UFS_ISAPPLEUFS;
549 /* First check to see if this is tagged as an Apple UFS filesystem
550 * in the disklabel
551 */
552 if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred, p) == 0) &&
553 (dpart.part->p_fstype == FS_APPLEUFS)) {
554 ump->um_flags |= UFS_ISAPPLEUFS;
555 }
556 #ifdef APPLE_UFS
557 else {
558 /* Manually look for an apple ufs label, and if a valid one
559 * is found, then treat it like an Apple UFS filesystem anyway
560 */
561 error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
562 APPLEUFS_LABEL_SIZE, cred, &bp);
563 if (error) {
564 brelse(bp);
565 return (error);
566 }
567 error = ffs_appleufs_validate(fs->fs_fsmnt,
568 (struct appleufslabel *)bp->b_data,NULL);
569 if (error == 0)
570 ump->um_flags |= UFS_ISAPPLEUFS;
571 brelse(bp);
572 bp = NULL;
573 }
574 #else
575 if (ump->um_flags & UFS_ISAPPLEUFS)
576 return (EIO);
577 #endif
578
579 if (UFS_MPISAPPLEUFS(ump)) {
580 /* see comment about NeXT below */
581 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
582 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
583 mp->mnt_iflag |= IMNT_DTYPE;
584 } else {
585 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
586 ump->um_dirblksiz = DIRBLKSIZ;
587 if (ump->um_maxsymlinklen > 0)
588 mp->mnt_iflag |= IMNT_DTYPE;
589 else
590 mp->mnt_iflag &= ~IMNT_DTYPE;
591 }
592 ffs_oldfscompat_read(fs, ump, sblockloc);
593 ump->um_maxfilesize = fs->fs_maxfilesize;
594 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
595 fs->fs_pendingblocks = 0;
596 fs->fs_pendinginodes = 0;
597 }
598
599 ffs_statvfs(mp, &mp->mnt_stat, p);
600 /*
601 * Step 3: re-read summary information from disk.
602 */
603 blks = howmany(fs->fs_cssize, fs->fs_fsize);
604 space = fs->fs_csp;
605 for (i = 0; i < blks; i += fs->fs_frag) {
606 size = fs->fs_bsize;
607 if (i + fs->fs_frag > blks)
608 size = (blks - i) * fs->fs_fsize;
609 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
610 NOCRED, &bp);
611 if (error) {
612 brelse(bp);
613 return (error);
614 }
615 #ifdef FFS_EI
616 if (UFS_FSNEEDSWAP(fs))
617 ffs_csum_swap((struct csum *)bp->b_data,
618 (struct csum *)space, size);
619 else
620 #endif
621 memcpy(space, bp->b_data, (size_t)size);
622 space = (char *)space + size;
623 brelse(bp);
624 }
625 if ((fs->fs_flags & FS_DOSOFTDEP))
626 softdep_mount(devvp, mp, fs, cred);
627 if (fs->fs_snapinum[0] != 0)
628 ffs_snapshot_mount(mp);
629 /*
630 * We no longer know anything about clusters per cylinder group.
631 */
632 if (fs->fs_contigsumsize > 0) {
633 lp = fs->fs_maxcluster;
634 for (i = 0; i < fs->fs_ncg; i++)
635 *lp++ = fs->fs_contigsumsize;
636 }
637
638 loop:
639 simple_lock(&mntvnode_slock);
640 for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
641 if (vp->v_mount != mp) {
642 simple_unlock(&mntvnode_slock);
643 goto loop;
644 }
645 nvp = vp->v_mntvnodes.le_next;
646 /*
647 * Step 4: invalidate all inactive vnodes.
648 */
649 if (vrecycle(vp, &mntvnode_slock, p))
650 goto loop;
651 /*
652 * Step 5: invalidate all cached file data.
653 */
654 simple_lock(&vp->v_interlock);
655 simple_unlock(&mntvnode_slock);
656 if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
657 goto loop;
658 if (vinvalbuf(vp, 0, cred, p, 0, 0))
659 panic("ffs_reload: dirty2");
660 /*
661 * Step 6: re-read inode data for all active vnodes.
662 */
663 ip = VTOI(vp);
664 error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
665 (int)fs->fs_bsize, NOCRED, &bp);
666 if (error) {
667 brelse(bp);
668 vput(vp);
669 return (error);
670 }
671 ffs_load_inode(bp, ip, fs, ip->i_number);
672 ip->i_ffs_effnlink = ip->i_nlink;
673 brelse(bp);
674 vput(vp);
675 simple_lock(&mntvnode_slock);
676 }
677 simple_unlock(&mntvnode_slock);
678 return (0);
679 }
680
681 /*
682 * Possible superblock locations ordered from most to least likely.
683 */
684 static const int sblock_try[] = SBLOCKSEARCH;
685
686 /*
687 * Common code for mount and mountroot
688 */
689 int
690 ffs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p)
691 {
692 struct ufsmount *ump;
693 struct buf *bp;
694 struct fs *fs;
695 dev_t dev;
696 struct partinfo dpart;
697 void *space;
698 daddr_t sblockloc, fsblockloc;
699 int blks, fstype;
700 int error, i, size, ronly;
701 #ifdef FFS_EI
702 int needswap = 0; /* keep gcc happy */
703 #endif
704 int32_t *lp;
705 struct ucred *cred;
706 u_int32_t sbsize = 8192; /* keep gcc happy*/
707
708 dev = devvp->v_rdev;
709 cred = p ? p->p_ucred : NOCRED;
710
711 /* Flush out any old buffers remaining from a previous use. */
712 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
713 error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
714 VOP_UNLOCK(devvp, 0);
715 if (error)
716 return (error);
717
718 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
719 if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred, p) != 0)
720 size = DEV_BSIZE;
721 else
722 size = dpart.disklab->d_secsize;
723
724 bp = NULL;
725 ump = NULL;
726 fs = NULL;
727 sblockloc = 0;
728 fstype = 0;
729
730 /*
731 * Try reading the superblock in each of its possible locations. */
732 for (i = 0; ; i++) {
733 if (bp != NULL) {
734 bp->b_flags |= B_NOCACHE;
735 brelse(bp);
736 bp = NULL;
737 }
738 if (sblock_try[i] == -1) {
739 error = EINVAL;
740 fs = NULL;
741 goto out;
742 }
743 error = bread(devvp, sblock_try[i] / size, SBLOCKSIZE, cred,
744 &bp);
745 if (error)
746 goto out;
747 fs = (struct fs*)bp->b_data;
748 fsblockloc = sblockloc = sblock_try[i];
749 if (fs->fs_magic == FS_UFS1_MAGIC) {
750 sbsize = fs->fs_sbsize;
751 fstype = UFS1;
752 #ifdef FFS_EI
753 needswap = 0;
754 } else if (fs->fs_magic == bswap32(FS_UFS1_MAGIC)) {
755 sbsize = bswap32(fs->fs_sbsize);
756 fstype = UFS1;
757 needswap = 1;
758 #endif
759 } else if (fs->fs_magic == FS_UFS2_MAGIC) {
760 sbsize = fs->fs_sbsize;
761 fstype = UFS2;
762 #ifdef FFS_EI
763 needswap = 0;
764 } else if (fs->fs_magic == bswap32(FS_UFS2_MAGIC)) {
765 sbsize = bswap32(fs->fs_sbsize);
766 fstype = UFS2;
767 needswap = 1;
768 #endif
769 } else
770 continue;
771
772
773 /* fs->fs_sblockloc isn't defined for old filesystems */
774 if (fstype == UFS1 && !(fs->fs_old_flags & FS_FLAGS_UPDATED)) {
775 if (sblockloc == SBLOCK_UFS2)
776 /*
777 * This is likely to be the first alternate
778 * in a filesystem with 64k blocks.
779 * Don't use it.
780 */
781 continue;
782 fsblockloc = sblockloc;
783 } else {
784 fsblockloc = fs->fs_sblockloc;
785 #ifdef FFS_EI
786 if (needswap)
787 fsblockloc = bswap64(fsblockloc);
788 #endif
789 }
790
791 /* Check we haven't found an alternate superblock */
792 if (fsblockloc != sblockloc)
793 continue;
794
795 /* Validate size of superblock */
796 if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs))
797 continue;
798
799 /* Ok seems to be a good superblock */
800 break;
801 }
802
803 fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
804 memcpy(fs, bp->b_data, sbsize);
805
806 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
807 memset(ump, 0, sizeof *ump);
808 TAILQ_INIT(&ump->um_snapshots);
809 ump->um_fs = fs;
810
811 #ifdef FFS_EI
812 if (needswap) {
813 ffs_sb_swap((struct fs*)bp->b_data, fs);
814 fs->fs_flags |= FS_SWAPPED;
815 } else
816 #endif
817 fs->fs_flags &= ~FS_SWAPPED;
818
819 ffs_oldfscompat_read(fs, ump, sblockloc);
820 ump->um_maxfilesize = fs->fs_maxfilesize;
821
822 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
823 fs->fs_pendingblocks = 0;
824 fs->fs_pendinginodes = 0;
825 }
826
827 ump->um_fstype = fstype;
828 if (fs->fs_sbsize < SBLOCKSIZE)
829 bp->b_flags |= B_INVAL;
830 brelse(bp);
831 bp = NULL;
832
833 /* First check to see if this is tagged as an Apple UFS filesystem
834 * in the disklabel
835 */
836 if ((VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred, p) == 0) &&
837 (dpart.part->p_fstype == FS_APPLEUFS)) {
838 ump->um_flags |= UFS_ISAPPLEUFS;
839 }
840 #ifdef APPLE_UFS
841 else {
842 /* Manually look for an apple ufs label, and if a valid one
843 * is found, then treat it like an Apple UFS filesystem anyway
844 */
845 error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
846 APPLEUFS_LABEL_SIZE, cred, &bp);
847 if (error)
848 goto out;
849 error = ffs_appleufs_validate(fs->fs_fsmnt,
850 (struct appleufslabel *)bp->b_data,NULL);
851 if (error == 0) {
852 ump->um_flags |= UFS_ISAPPLEUFS;
853 }
854 brelse(bp);
855 bp = NULL;
856 }
857 #else
858 if (ump->um_flags & UFS_ISAPPLEUFS) {
859 error = EINVAL;
860 goto out;
861 }
862 #endif
863
864 /*
865 * verify that we can access the last block in the fs
866 * if we're mounting read/write.
867 */
868
869 if (!ronly) {
870 error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize,
871 cred, &bp);
872 if (bp->b_bcount != fs->fs_fsize)
873 error = EINVAL;
874 bp->b_flags |= B_INVAL;
875 if (error)
876 goto out;
877 brelse(bp);
878 bp = NULL;
879 }
880
881 fs->fs_ronly = ronly;
882 if (ronly == 0) {
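/*
 * Shift fs_clean so the old FS_ISCLEAN state is remembered as
 * FS_WASCLEAN; it is checked again before the fs is marked clean
 * at downgrade or unmount time.
 */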
883 fs->fs_clean <<= 1;
884 fs->fs_fmod = 1;
885 }
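/*
 * A single allocation holds the cylinder group summary area (fs_csp),
 * the per-cg cluster summaries (fs_maxcluster, if used) and the
 * fs_contigdirs array; they are carved out of it in that order below.
 */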
886 size = fs->fs_cssize;
887 blks = howmany(size, fs->fs_fsize);
888 if (fs->fs_contigsumsize > 0)
889 size += fs->fs_ncg * sizeof(int32_t);
890 size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
891 space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
892 fs->fs_csp = space;
893 for (i = 0; i < blks; i += fs->fs_frag) {
894 size = fs->fs_bsize;
895 if (i + fs->fs_frag > blks)
896 size = (blks - i) * fs->fs_fsize;
897 error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
898 cred, &bp);
899 if (error) {
900 free(fs->fs_csp, M_UFSMNT);
901 goto out;
902 }
903 #ifdef FFS_EI
904 if (needswap)
905 ffs_csum_swap((struct csum *)bp->b_data,
906 (struct csum *)space, size);
907 else
908 #endif
909 memcpy(space, bp->b_data, (u_int)size);
910
911 space = (char *)space + size;
912 brelse(bp);
913 bp = NULL;
914 }
915 if (fs->fs_contigsumsize > 0) {
916 fs->fs_maxcluster = lp = space;
917 for (i = 0; i < fs->fs_ncg; i++)
918 *lp++ = fs->fs_contigsumsize;
919 space = lp;
920 }
921 size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
922 fs->fs_contigdirs = space;
923 space = (char *)space + size;
924 memset(fs->fs_contigdirs, 0, size);
925 /* Compatibility for old filesystems - XXX */
926 if (fs->fs_avgfilesize <= 0)
927 fs->fs_avgfilesize = AVFILESIZ;
928 if (fs->fs_avgfpdir <= 0)
929 fs->fs_avgfpdir = AFPDIR;
930 fs->fs_active = NULL;
931 mp->mnt_data = ump;
932 mp->mnt_stat.f_fsidx.__fsid_val[0] = (long)dev;
933 mp->mnt_stat.f_fsidx.__fsid_val[1] = makefstype(MOUNT_FFS);
934 mp->mnt_stat.f_fsid = mp->mnt_stat.f_fsidx.__fsid_val[0];
935 mp->mnt_stat.f_namemax = MAXNAMLEN;
936 if (UFS_MPISAPPLEUFS(ump)) {
937 /* NeXT used to keep short symlinks in the inode even
938 * when using FS_42INODEFMT. In that case fs->fs_maxsymlinklen
939 * is probably -1, but we still need to be able to identify
940 * short symlinks.
941 */
942 ump->um_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
943 ump->um_dirblksiz = APPLEUFS_DIRBLKSIZ;
944 mp->mnt_iflag |= IMNT_DTYPE;
945 } else {
946 ump->um_maxsymlinklen = fs->fs_maxsymlinklen;
947 ump->um_dirblksiz = DIRBLKSIZ;
948 if (ump->um_maxsymlinklen > 0)
949 mp->mnt_iflag |= IMNT_DTYPE;
950 else
951 mp->mnt_iflag &= ~IMNT_DTYPE;
952 }
953 mp->mnt_fs_bshift = fs->fs_bshift;
954 mp->mnt_dev_bshift = DEV_BSHIFT; /* XXX */
955 mp->mnt_flag |= MNT_LOCAL;
956 #ifdef FFS_EI
957 if (needswap)
958 ump->um_flags |= UFS_NEEDSWAP;
959 #endif
960 ump->um_mountp = mp;
961 ump->um_dev = dev;
962 ump->um_devvp = devvp;
963 ump->um_nindir = fs->fs_nindir;
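/* ffs() is find-first-set, so for the power-of-two fs_nindir this is log2(fs_nindir). */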
964 ump->um_lognindir = ffs(fs->fs_nindir) - 1;
965 ump->um_bptrtodb = fs->fs_fsbtodb;
966 ump->um_seqinc = fs->fs_frag;
967 for (i = 0; i < MAXQUOTAS; i++)
968 ump->um_quotas[i] = NULLVP;
969 devvp->v_specmountpoint = mp;
970 if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
971 error = softdep_mount(devvp, mp, fs, cred);
972 if (error) {
973 free(fs->fs_csp, M_UFSMNT);
974 goto out;
975 }
976 }
977 if (ronly == 0 && fs->fs_snapinum[0] != 0)
978 ffs_snapshot_mount(mp);
979 return (0);
980 out:
981 if (fs)
982 free(fs, M_UFSMNT);
983 devvp->v_specmountpoint = NULL;
984 if (bp)
985 brelse(bp);
986 if (ump) {
987 if (ump->um_oldfscompat)
988 free(ump->um_oldfscompat, M_UFSMNT);
989 free(ump, M_UFSMNT);
990 mp->mnt_data = NULL;
991 }
992 return (error);
993 }
994
995 /*
996 * Sanity checks for loading old filesystem superblocks.
997 * See ffs_oldfscompat_write below for unwound actions.
998 *
999 * XXX - Parts get retired eventually.
1000 * Unfortunately new bits get added.
1001 */
1002 static void
1003 ffs_oldfscompat_read(struct fs *fs, struct ufsmount *ump, daddr_t sblockloc)
1004 {
1005 off_t maxfilesize;
1006 int32_t *extrasave;
1007
1008 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1009 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1010 return;
1011
1012 if (!ump->um_oldfscompat)
1013 ump->um_oldfscompat = malloc(512 + 3*sizeof(int32_t),
1014 M_UFSMNT, M_WAITOK);
1015
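/*
 * Save the 512 bytes starting at fs_old_postbl_start plus three old
 * geometry fields so ffs_oldfscompat_write() can restore them before
 * the superblock is written back to disk.
 */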
1016 memcpy(ump->um_oldfscompat, &fs->fs_old_postbl_start, 512);
1017 extrasave = ump->um_oldfscompat;
1018 extrasave += 512/sizeof(int32_t);
1019 extrasave[0] = fs->fs_old_npsect;
1020 extrasave[1] = fs->fs_old_interleave;
1021 extrasave[2] = fs->fs_old_trackskew;
1022
1023 /* These fields will be overwritten by their
1024 	 * original values in ffs_oldfscompat_write, so it is harmless
1025 * to modify them here.
1026 */
1027 fs->fs_cstotal.cs_ndir = fs->fs_old_cstotal.cs_ndir;
1028 fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
1029 fs->fs_cstotal.cs_nifree = fs->fs_old_cstotal.cs_nifree;
1030 fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
1031
1032 fs->fs_maxbsize = fs->fs_bsize;
1033 fs->fs_time = fs->fs_old_time;
1034 fs->fs_size = fs->fs_old_size;
1035 fs->fs_dsize = fs->fs_old_dsize;
1036 fs->fs_csaddr = fs->fs_old_csaddr;
1037 fs->fs_sblockloc = sblockloc;
1038
1039 fs->fs_flags = fs->fs_old_flags | (fs->fs_flags & FS_INTERNAL);
1040
1041 if (fs->fs_old_postblformat == FS_42POSTBLFMT) {
1042 fs->fs_old_nrpos = 8;
1043 fs->fs_old_npsect = fs->fs_old_nsect;
1044 fs->fs_old_interleave = 1;
1045 fs->fs_old_trackskew = 0;
1046 }
1047
1048 if (fs->fs_old_inodefmt < FS_44INODEFMT) {
1049 ump->um_maxfilesize = (u_quad_t) 1LL << 39;
1050 fs->fs_qbmask = ~fs->fs_bmask;
1051 fs->fs_qfmask = ~fs->fs_fmask;
1052 }
1053
1054 maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;
1055 if (ump->um_maxfilesize > maxfilesize)
1056 ump->um_maxfilesize = maxfilesize;
1057
1058 /* Compatibility for old filesystems */
1059 if (fs->fs_avgfilesize <= 0)
1060 fs->fs_avgfilesize = AVFILESIZ;
1061 if (fs->fs_avgfpdir <= 0)
1062 fs->fs_avgfpdir = AFPDIR;
1063
1064 #if 0
1065 if (bigcgs) {
1066 fs->fs_save_cgsize = fs->fs_cgsize;
1067 fs->fs_cgsize = fs->fs_bsize;
1068 }
1069 #endif
1070 }
1071
1072 /*
1073 * Unwinding superblock updates for old filesystems.
1074 * See ffs_oldfscompat_read above for details.
1075 *
1076 * XXX - Parts get retired eventually.
1077 * Unfortunately new bits get added.
1078 */
1079 static void
1080 ffs_oldfscompat_write(struct fs *fs, struct ufsmount *ump)
1081 {
1082 int32_t *extrasave;
1083
1084 if ((fs->fs_magic != FS_UFS1_MAGIC) ||
1085 (fs->fs_old_flags & FS_FLAGS_UPDATED))
1086 return;
1087
1088 fs->fs_old_time = fs->fs_time;
1089 fs->fs_old_cstotal.cs_ndir = fs->fs_cstotal.cs_ndir;
1090 fs->fs_old_cstotal.cs_nbfree = fs->fs_cstotal.cs_nbfree;
1091 fs->fs_old_cstotal.cs_nifree = fs->fs_cstotal.cs_nifree;
1092 fs->fs_old_cstotal.cs_nffree = fs->fs_cstotal.cs_nffree;
1093 fs->fs_old_flags = fs->fs_flags;
1094
1095 #if 0
1096 if (bigcgs) {
1097 fs->fs_cgsize = fs->fs_save_cgsize;
1098 }
1099 #endif
1100
1101 memcpy(&fs->fs_old_postbl_start, ump->um_oldfscompat, 512);
1102 extrasave = ump->um_oldfscompat;
1103 extrasave += 512/sizeof(int32_t);
1104 fs->fs_old_npsect = extrasave[0];
1105 fs->fs_old_interleave = extrasave[1];
1106 fs->fs_old_trackskew = extrasave[2];
1107
1108 }
1109
1110 /*
1111 * unmount system call
1112 */
1113 int
1114 ffs_unmount(struct mount *mp, int mntflags, struct proc *p)
1115 {
1116 struct ufsmount *ump;
1117 struct fs *fs;
1118 int error, flags, penderr;
1119
1120 penderr = 0;
1121 flags = 0;
1122 if (mntflags & MNT_FORCE)
1123 flags |= FORCECLOSE;
1124 if (mp->mnt_flag & MNT_SOFTDEP) {
1125 if ((error = softdep_flushfiles(mp, flags, p)) != 0)
1126 return (error);
1127 } else {
1128 if ((error = ffs_flushfiles(mp, flags, p)) != 0)
1129 return (error);
1130 }
1131 ump = VFSTOUFS(mp);
1132 fs = ump->um_fs;
1133 if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
1134 printf("%s: unmount pending error: blocks %" PRId64
1135 " files %d\n",
1136 fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
1137 fs->fs_pendingblocks = 0;
1138 fs->fs_pendinginodes = 0;
1139 penderr = 1;
1140 }
1141 if (fs->fs_ronly == 0 &&
1142 ffs_cgupdate(ump, MNT_WAIT) == 0 &&
1143 fs->fs_clean & FS_WASCLEAN) {
1144 /*
1145 * XXXX don't mark fs clean in the case of softdep
1146 * pending block errors, until they are fixed.
1147 */
1148 if (penderr == 0) {
1149 if (mp->mnt_flag & MNT_SOFTDEP)
1150 fs->fs_flags &= ~FS_DOSOFTDEP;
1151 fs->fs_clean = FS_ISCLEAN;
1152 }
1153 fs->fs_fmod = 0;
1154 (void) ffs_sbupdate(ump, MNT_WAIT);
1155 }
1156 if (ump->um_devvp->v_type != VBAD)
1157 ump->um_devvp->v_specmountpoint = NULL;
1158 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1159 (void)VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
1160 NOCRED, p);
1161 vput(ump->um_devvp);
1162 free(fs->fs_csp, M_UFSMNT);
1163 free(fs, M_UFSMNT);
1164 if (ump->um_oldfscompat != NULL)
1165 free(ump->um_oldfscompat, M_UFSMNT);
1166 free(ump, M_UFSMNT);
1167 mp->mnt_data = NULL;
1168 mp->mnt_flag &= ~MNT_LOCAL;
1169 return (0);
1170 }
1171
1172 /*
1173 * Flush out all the files in a filesystem.
1174 */
1175 int
1176 ffs_flushfiles(struct mount *mp, int flags, struct proc *p)
1177 {
1178 extern int doforce;
1179 struct ufsmount *ump;
1180 int error;
1181
1182 if (!doforce)
1183 flags &= ~FORCECLOSE;
1184 ump = VFSTOUFS(mp);
1185 #ifdef QUOTA
1186 if (mp->mnt_flag & MNT_QUOTA) {
1187 int i;
1188 if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
1189 return (error);
1190 for (i = 0; i < MAXQUOTAS; i++) {
1191 if (ump->um_quotas[i] == NULLVP)
1192 continue;
1193 quotaoff(p, mp, i);
1194 }
1195 /*
1196 * Here we fall through to vflush again to ensure
1197 * that we have gotten rid of all the system vnodes.
1198 */
1199 }
1200 #endif
1201 if ((error = vflush(mp, 0, SKIPSYSTEM | flags)) != 0)
1202 return (error);
1203 ffs_snapshot_unmount(mp);
1204 /*
1205 * Flush all the files.
1206 */
1207 error = vflush(mp, NULLVP, flags);
1208 if (error)
1209 return (error);
1210 /*
1211 * Flush filesystem metadata.
1212 */
1213 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1214 error = VOP_FSYNC(ump->um_devvp, p->p_ucred, FSYNC_WAIT, 0, 0, p);
1215 VOP_UNLOCK(ump->um_devvp, 0);
1216 return (error);
1217 }
1218
1219 /*
1220 * Get file system statistics.
1221 */
1222 int
1223 ffs_statvfs(struct mount *mp, struct statvfs *sbp, struct proc *p)
1224 {
1225 struct ufsmount *ump;
1226 struct fs *fs;
1227
1228 ump = VFSTOUFS(mp);
1229 fs = ump->um_fs;
1230 sbp->f_bsize = fs->fs_bsize;
1231 sbp->f_frsize = fs->fs_fsize;
1232 sbp->f_iosize = fs->fs_bsize;
1233 sbp->f_blocks = fs->fs_dsize;
1234 sbp->f_bfree = blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
1235 fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
1236 sbp->f_bresvd = ((u_int64_t) fs->fs_dsize * (u_int64_t)
1237 fs->fs_minfree) / (u_int64_t) 100;
1238 if (sbp->f_bfree > sbp->f_bresvd)
1239 sbp->f_bavail = sbp->f_bfree - sbp->f_bresvd;
1240 else
1241 sbp->f_bavail = 0;
1242 sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
1243 sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
1244 sbp->f_favail = sbp->f_ffree;
1245 sbp->f_fresvd = 0;
1246 copy_statvfs_info(sbp, mp);
1247 return (0);
1248 }
1249
1250 /*
1251 * Go through the disk queues to initiate sandbagged IO;
1252 * go through the inodes to write those that have been modified;
1253 * initiate the writing of the super block if it has been modified.
1254 *
1255 * Note: we are always called with the filesystem marked `MPBUSY'.
1256 */
1257 int
1258 ffs_sync(struct mount *mp, int waitfor, struct ucred *cred, struct proc *p)
1259 {
1260 struct vnode *vp, *nvp;
1261 struct inode *ip;
1262 struct ufsmount *ump = VFSTOUFS(mp);
1263 struct fs *fs;
1264 int error, count, allerror = 0;
1265
1266 fs = ump->um_fs;
1267 if (fs->fs_fmod != 0 && fs->fs_ronly != 0) { /* XXX */
1268 printf("fs = %s\n", fs->fs_fsmnt);
1269 panic("update: rofs mod");
1270 }
1271 /*
1272 * Write back each (modified) inode.
1273 */
1274 simple_lock(&mntvnode_slock);
1275 loop:
1276 for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
1277 /*
1278 * If the vnode that we are about to sync is no longer
1279 * associated with this mount point, start over.
1280 */
1281 if (vp->v_mount != mp)
1282 goto loop;
1283 simple_lock(&vp->v_interlock);
1284 nvp = LIST_NEXT(vp, v_mntvnodes);
1285 ip = VTOI(vp);
1286 if (vp->v_type == VNON ||
1287 ((ip->i_flag &
1288 (IN_CHANGE | IN_UPDATE | IN_MODIFIED)) == 0 &&
1289 LIST_EMPTY(&vp->v_dirtyblkhd) &&
1290 vp->v_uobj.uo_npages == 0))
1291 {
1292 simple_unlock(&vp->v_interlock);
1293 continue;
1294 }
1295 simple_unlock(&mntvnode_slock);
1296 error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
1297 if (error) {
1298 simple_lock(&mntvnode_slock);
1299 if (error == ENOENT)
1300 goto loop;
1301 continue;
1302 }
1303 if (vp->v_type == VREG && waitfor == MNT_LAZY)
1304 error = VOP_UPDATE(vp, NULL, NULL, 0);
1305 else
1306 error = VOP_FSYNC(vp, cred,
1307 waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p);
1308 if (error)
1309 allerror = error;
1310 vput(vp);
1311 simple_lock(&mntvnode_slock);
1312 }
1313 simple_unlock(&mntvnode_slock);
1314 /*
1315 * Force stale file system control information to be flushed.
1316 */
1317 if (waitfor == MNT_WAIT && (ump->um_mountp->mnt_flag & MNT_SOFTDEP)) {
1318 if ((error = softdep_flushworklist(ump->um_mountp, &count, p)))
1319 allerror = error;
1320 /* Flushed work items may create new vnodes to clean */
1321 if (allerror == 0 && count) {
1322 simple_lock(&mntvnode_slock);
1323 goto loop;
1324 }
1325 }
1326 if (waitfor != MNT_LAZY && (ump->um_devvp->v_numoutput > 0 ||
1327 !LIST_EMPTY(&ump->um_devvp->v_dirtyblkhd))) {
1328 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1329 if ((error = VOP_FSYNC(ump->um_devvp, cred,
1330 waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p)) != 0)
1331 allerror = error;
1332 VOP_UNLOCK(ump->um_devvp, 0);
1333 if (allerror == 0 && waitfor == MNT_WAIT) {
1334 simple_lock(&mntvnode_slock);
1335 goto loop;
1336 }
1337 }
1338 #ifdef QUOTA
1339 qsync(mp);
1340 #endif
1341 /*
1342 * Write back modified superblock.
1343 */
1344 if (fs->fs_fmod != 0) {
1345 fs->fs_fmod = 0;
1346 fs->fs_time = time.tv_sec;
1347 if ((error = ffs_cgupdate(ump, waitfor)))
1348 allerror = error;
1349 }
1350 return (allerror);
1351 }
1352
1353 /*
1354 * Look up a FFS dinode number to find its incore vnode, otherwise read it
1355 * in from disk. If it is in core, wait for the lock bit to clear, then
1356 * return the inode locked. Detection and handling of mount points must be
1357 * done by the calling routine.
1358 */
1359 int
1360 ffs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
1361 {
1362 struct fs *fs;
1363 struct inode *ip;
1364 struct ufsmount *ump;
1365 struct buf *bp;
1366 struct vnode *vp;
1367 dev_t dev;
1368 int error;
1369
1370 ump = VFSTOUFS(mp);
1371 dev = ump->um_dev;
1372
1373 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
1374 return (0);
1375
1376 /* Allocate a new vnode/inode. */
1377 if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
1378 *vpp = NULL;
1379 return (error);
1380 }
1381
1382 /*
1383 * If someone beat us to it while sleeping in getnewvnode(),
1384 * push back the freshly allocated vnode we don't need, and return.
1385 */
1386
1387 do {
1388 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
1389 ungetnewvnode(vp);
1390 return (0);
1391 }
1392 } while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));
1393
1394 vp->v_flag |= VLOCKSWORK;
1395
1396 /*
1397 * XXX MFS ends up here, too, to allocate an inode. Should we
1398 * XXX create another pool for MFS inodes?
1399 */
1400
1401 ip = pool_get(&ffs_inode_pool, PR_WAITOK);
1402 memset(ip, 0, sizeof(struct inode));
1403 vp->v_data = ip;
1404 ip->i_vnode = vp;
1405 ip->i_ump = ump;
1406 ip->i_fs = fs = ump->um_fs;
1407 ip->i_dev = dev;
1408 ip->i_number = ino;
1409 LIST_INIT(&ip->i_pcbufhd);
1410 #ifdef QUOTA
1411 {
1412 int i;
1413
1414 for (i = 0; i < MAXQUOTAS; i++)
1415 ip->i_dquot[i] = NODQUOT;
1416 }
1417 #endif
1418
1419 /*
1420 * Put it onto its hash chain and lock it so that other requests for
1421 * this inode will block if they arrive while we are sleeping waiting
1422 * for old data structures to be purged or for the contents of the
1423 * disk portion of this inode to be read.
1424 */
1425
1426 ufs_ihashins(ip);
1427 lockmgr(&ufs_hashlock, LK_RELEASE, 0);
1428
1429 /* Read in the disk contents for the inode, copy into the inode. */
1430 error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
1431 (int)fs->fs_bsize, NOCRED, &bp);
1432 if (error) {
1433
1434 /*
1435 * The inode does not contain anything useful, so it would
1436 * be misleading to leave it on its hash chain. With mode
1437 * still zero, it will be unlinked and returned to the free
1438 * list by vput().
1439 */
1440
1441 vput(vp);
1442 brelse(bp);
1443 *vpp = NULL;
1444 return (error);
1445 }
1446 if (ip->i_ump->um_fstype == UFS1)
1447 ip->i_din.ffs1_din = pool_get(&ffs_dinode1_pool, PR_WAITOK);
1448 else
1449 ip->i_din.ffs2_din = pool_get(&ffs_dinode2_pool, PR_WAITOK);
1450 ffs_load_inode(bp, ip, fs, ino);
1451 if (DOINGSOFTDEP(vp))
1452 softdep_load_inodeblock(ip);
1453 else
1454 ip->i_ffs_effnlink = ip->i_nlink;
1455 brelse(bp);
1456
1457 /*
1458 * Initialize the vnode from the inode, check for aliases.
1459 * Note that the underlying vnode may have changed.
1460 */
1461
1462 ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
1463
1464 /*
1465 * Finish inode initialization now that aliasing has been resolved.
1466 */
1467
1468 genfs_node_init(vp, &ffs_genfsops);
1469 ip->i_devvp = ump->um_devvp;
1470 VREF(ip->i_devvp);
1471
1472 /*
1473 * Ensure that uid and gid are correct. This is a temporary
1474 * fix until fsck has been changed to do the update.
1475 */
1476
1477 if (fs->fs_old_inodefmt < FS_44INODEFMT) { /* XXX */
1478 ip->i_uid = ip->i_ffs1_ouid; /* XXX */
1479 ip->i_gid = ip->i_ffs1_ogid; /* XXX */
1480 } /* XXX */
1481 uvm_vnp_setsize(vp, ip->i_size);
1482 *vpp = vp;
1483 return (0);
1484 }
1485
1486 /*
1487 * File handle to vnode
1488 *
1489 * Have to be really careful about stale file handles:
1490 * - check that the inode number is valid
1491 * - call ffs_vget() to get the locked inode
1492 * - check for an unallocated inode (i_mode == 0)
1493 * - check that the given client host has export rights and return
1494  *	  those rights via exflagsp and credanonp
1495 */
1496 int
1497 ffs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
1498 {
1499 struct ufid *ufhp;
1500 struct fs *fs;
1501
1502 ufhp = (struct ufid *)fhp;
1503 fs = VFSTOUFS(mp)->um_fs;
1504 if (ufhp->ufid_ino < ROOTINO ||
1505 ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
1506 return (ESTALE);
1507 return (ufs_fhtovp(mp, ufhp, vpp));
1508 }
1509
1510 /*
1511 * Vnode pointer to File handle
1512 */
1513 /* ARGSUSED */
1514 int
1515 ffs_vptofh(struct vnode *vp, struct fid *fhp)
1516 {
1517 struct inode *ip;
1518 struct ufid *ufhp;
1519
1520 ip = VTOI(vp);
1521 ufhp = (struct ufid *)fhp;
1522 ufhp->ufid_len = sizeof(struct ufid);
1523 ufhp->ufid_ino = ip->i_number;
1524 ufhp->ufid_gen = ip->i_gen;
1525 return (0);
1526 }
1527
1528 void
1529 ffs_init(void)
1530 {
1531 if (ffs_initcount++ > 0)
1532 return;
1533
1534 #ifdef _LKM
1535 pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0,
1536 "ffsinopl", &pool_allocator_nointr);
1537 pool_init(&ffs_dinode1_pool, sizeof(struct ufs1_dinode), 0, 0, 0,
1538 "dino1pl", &pool_allocator_nointr);
1539 pool_init(&ffs_dinode2_pool, sizeof(struct ufs2_dinode), 0, 0, 0,
1540 "dino2pl", &pool_allocator_nointr);
1541 #endif
1542 softdep_initialize();
1543 ufs_init();
1544 }
1545
1546 void
1547 ffs_reinit(void)
1548 {
1549 softdep_reinitialize();
1550 ufs_reinit();
1551 }
1552
1553 void
1554 ffs_done(void)
1555 {
1556 if (--ffs_initcount > 0)
1557 return;
1558
1559 /* XXX softdep cleanup ? */
1560 ufs_done();
1561 #ifdef _LKM
1562 pool_destroy(&ffs_dinode2_pool);
1563 pool_destroy(&ffs_dinode1_pool);
1564 pool_destroy(&ffs_inode_pool);
1565 #endif
1566 }
1567
1568 SYSCTL_SETUP(sysctl_vfs_ffs_setup, "sysctl vfs.ffs subtree setup")
1569 {
1570 extern int doasyncfree;
1571 extern int ffs_log_changeopt;
1572
1573 sysctl_createv(clog, 0, NULL, NULL,
1574 CTLFLAG_PERMANENT,
1575 CTLTYPE_NODE, "vfs", NULL,
1576 NULL, 0, NULL, 0,
1577 CTL_VFS, CTL_EOL);
1578 sysctl_createv(clog, 0, NULL, NULL,
1579 CTLFLAG_PERMANENT,
1580 CTLTYPE_NODE, "ffs",
1581 SYSCTL_DESCR("Berkeley Fast File System"),
1582 NULL, 0, NULL, 0,
1583 CTL_VFS, 1, CTL_EOL);
1584
1585 /*
1586 * @@@ should we even bother with these first three?
1587 */
1588 sysctl_createv(clog, 0, NULL, NULL,
1589 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1590 CTLTYPE_INT, "doclusterread", NULL,
1591 sysctl_notavail, 0, NULL, 0,
1592 CTL_VFS, 1, FFS_CLUSTERREAD, CTL_EOL);
1593 sysctl_createv(clog, 0, NULL, NULL,
1594 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1595 CTLTYPE_INT, "doclusterwrite", NULL,
1596 sysctl_notavail, 0, NULL, 0,
1597 CTL_VFS, 1, FFS_CLUSTERWRITE, CTL_EOL);
1598 sysctl_createv(clog, 0, NULL, NULL,
1599 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1600 CTLTYPE_INT, "doreallocblks", NULL,
1601 sysctl_notavail, 0, NULL, 0,
1602 CTL_VFS, 1, FFS_REALLOCBLKS, CTL_EOL);
1603 sysctl_createv(clog, 0, NULL, NULL,
1604 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1605 CTLTYPE_INT, "doasyncfree",
1606 SYSCTL_DESCR("Release dirty blocks asynchronously"),
1607 NULL, 0, &doasyncfree, 0,
1608 CTL_VFS, 1, FFS_ASYNCFREE, CTL_EOL);
1609 sysctl_createv(clog, 0, NULL, NULL,
1610 CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
1611 CTLTYPE_INT, "log_changeopt",
1612 SYSCTL_DESCR("Log changes in optimization strategy"),
1613 NULL, 0, &ffs_log_changeopt, 0,
1614 CTL_VFS, 1, FFS_LOG_CHANGEOPT, CTL_EOL);
1615 }
1616
1617 /*
1618 * Write a superblock and associated information back to disk.
1619 */
1620 int
1621 ffs_sbupdate(struct ufsmount *mp, int waitfor)
1622 {
1623 struct fs *fs = mp->um_fs;
1624 struct buf *bp;
1625 int error = 0;
1626 u_int32_t saveflag;
1627
1628 bp = getblk(mp->um_devvp,
1629 fs->fs_sblockloc >> (fs->fs_fshift - fs->fs_fsbtodb),
1630 (int)fs->fs_sbsize, 0, 0);
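/*
 * Flags in FS_INTERNAL live only in memory: mask them out of the copy
 * written to disk and restore them after the copy has been made.
 */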
1631 saveflag = fs->fs_flags & FS_INTERNAL;
1632 fs->fs_flags &= ~FS_INTERNAL;
1633
1634 memcpy(bp->b_data, fs, fs->fs_sbsize);
1635
1636 ffs_oldfscompat_write((struct fs *)bp->b_data, mp);
1637 #ifdef FFS_EI
1638 if (mp->um_flags & UFS_NEEDSWAP)
1639 ffs_sb_swap((struct fs *)bp->b_data, (struct fs *)bp->b_data);
1640 #endif
1641 fs->fs_flags |= saveflag;
1642
1643 if (waitfor == MNT_WAIT)
1644 error = bwrite(bp);
1645 else
1646 bawrite(bp);
1647 return (error);
1648 }
1649
1650 int
1651 ffs_cgupdate(struct ufsmount *mp, int waitfor)
1652 {
1653 struct fs *fs = mp->um_fs;
1654 struct buf *bp;
1655 int blks;
1656 void *space;
1657 int i, size, error = 0, allerror = 0;
1658
1659 allerror = ffs_sbupdate(mp, waitfor);
1660 blks = howmany(fs->fs_cssize, fs->fs_fsize);
1661 space = fs->fs_csp;
1662 for (i = 0; i < blks; i += fs->fs_frag) {
1663 size = fs->fs_bsize;
1664 if (i + fs->fs_frag > blks)
1665 size = (blks - i) * fs->fs_fsize;
1666 bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
1667 size, 0, 0);
1668 #ifdef FFS_EI
1669 if (mp->um_flags & UFS_NEEDSWAP)
1670 ffs_csum_swap((struct csum*)space,
1671 (struct csum*)bp->b_data, size);
1672 else
1673 #endif
1674 memcpy(bp->b_data, space, (u_int)size);
1675 space = (char *)space + size;
1676 if (waitfor == MNT_WAIT)
1677 error = bwrite(bp);
1678 else
1679 bawrite(bp);
1680 }
1681 if (!allerror && error)
1682 allerror = error;
1683 return (allerror);
1684 }
1685