/*	$NetBSD: ffs_vfsops.c,v 1.47 1999/02/10 13:14:09 bouyer Exp $	*/

/*
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#if defined(_KERNEL) && !defined(_LKM)
#include "opt_ffs.h"
#include "opt_quota.h"
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <vm/vm.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufs_bswap.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

extern struct lock ufs_hashlock;

int ffs_sbupdate __P((struct ufsmount *, int));

extern struct vnodeopv_desc ffs_vnodeop_opv_desc;
extern struct vnodeopv_desc ffs_specop_opv_desc;
extern struct vnodeopv_desc ffs_fifoop_opv_desc;

struct vnodeopv_desc *ffs_vnodeopv_descs[] = {
	&ffs_vnodeop_opv_desc,
	&ffs_specop_opv_desc,
	&ffs_fifoop_opv_desc,
	NULL,
};

struct vfsops ffs_vfsops = {
	MOUNT_FFS,
	ffs_mount,
	ufs_start,
	ffs_unmount,
	ufs_root,
	ufs_quotactl,
	ffs_statfs,
	ffs_sync,
	ffs_vget,
	ffs_fhtovp,
	ffs_vptofh,
	ffs_init,
	ffs_sysctl,
	ffs_mountroot,
	ffs_vnodeopv_descs,
};

struct pool ffs_inode_pool;

/*
 * Called by main() when ffs is going to be mounted as root.
 */

int
ffs_mountroot()
{
	extern struct vnode *rootvp;
	struct fs *fs;
	struct mount *mp;
	struct proc *p = curproc;	/* XXX */
	struct ufsmount *ump;
	int error;

	if (root_device->dv_class != DV_DISK)
		return (ENODEV);

	/*
	 * Get vnodes for rootdev.
	 */
	if (bdevvp(rootdev, &rootvp))
		panic("ffs_mountroot: can't setup bdevvp's");

	if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp)))
		return (error);
	if ((error = ffs_mountfs(rootvp, mp, p)) != 0) {
		mp->mnt_op->vfs_refcount--;
		vfs_unbusy(mp);
		free(mp, M_MOUNT);
		return (error);
	}
	simple_lock(&mountlist_slock);
	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	simple_unlock(&mountlist_slock);
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
	(void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
	(void)ffs_statfs(mp, &mp->mnt_stat, p);
	vfs_unbusy(mp);
	inittodr(fs->fs_time);
	return (0);
}

/*
 * VFS Operations.
 *
 * mount system call
 */
int
ffs_mount(mp, path, data, ndp, p)
	register struct mount *mp;
	const char *path;
	void *data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump = NULL;
	register struct fs *fs;
	size_t size;
	int error, flags;
	mode_t accessmode;

	error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args));
	if (error)
		return (error);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ffs_flushfiles(mp, flags, p);
			if (error == 0 &&
			    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
			    fs->fs_clean & FS_WASCLEAN) {
				fs->fs_clean = FS_ISCLEAN;
				(void) ffs_sbupdate(ump, MNT_WAIT);
			}
			if (error)
				return (error);
			fs->fs_ronly = 1;
		}
		if (mp->mnt_flag & MNT_RELOAD) {
			error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
			if (error)
				return (error);
		}
		if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			if (p->p_ucred->cr_uid != 0) {
				devvp = ump->um_devvp;
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
				error = VOP_ACCESS(devvp, VREAD | VWRITE,
				    p->p_ucred, p);
				VOP_UNLOCK(devvp, 0);
				if (error)
					return (error);
			}
			fs->fs_ronly = 0;
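			/*
			 * Going read-write: shift the clean flag so that
			 * FS_ISCLEAN becomes FS_WASCLEAN, recording that the
			 * file system was clean when writing was enabled;
			 * downgrade and unmount restore FS_ISCLEAN only if
			 * FS_WASCLEAN is still set.
			 */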
			fs->fs_clean <<= 1;
			fs->fs_fmod = 1;
		}
		if (args.fspec == 0) {
			/*
			 * Process export requests.
			 */
			return (vfs_export(mp, &ump->um_export, &args.export));
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	if ((error = namei(ndp)) != 0)
		return (error);
	devvp = ndp->ni_vp;

	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}
	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (p->p_ucred->cr_uid != 0) {
		accessmode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
		VOP_UNLOCK(devvp, 0);
		if (error) {
			vrele(devvp);
			return (error);
		}
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0)
		error = ffs_mountfs(devvp, mp, p);
	else {
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	memset(fs->fs_fsmnt + size, 0, sizeof(fs->fs_fsmnt) - size);
	memcpy(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	memset(mp->mnt_stat.f_mntfromname + size, 0, MNAMELEN - size);
	if (fs->fs_fmod != 0) {	/* XXX */
		fs->fs_fmod = 0;
		if (fs->fs_clean & FS_WASCLEAN)
			fs->fs_time = time.tv_sec;
		else
			printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
			    mp->mnt_stat.f_mntfromname, fs->fs_clean);
		(void) ffs_cgupdate(ump, MNT_WAIT);
	}
	return (0);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 * 1) invalidate all cached meta-data.
 * 2) re-read superblock from disk.
 * 3) re-read summary information from disk.
 * 4) invalidate all inactive vnodes.
 * 5) invalidate all cached file data.
 * 6) re-read inode data for all active vnodes.
 */
int
ffs_reload(mountp, cred, p)
	register struct mount *mountp;
	struct ucred *cred;
	struct proc *p;
{
	register struct vnode *vp, *nvp, *devvp;
	struct inode *ip;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct partinfo dpart;
	int i, blks, size, error;
	int32_t *lp;
	caddr_t cp;

	if ((mountp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mountp)->um_devvp;
	if (vinvalbuf(devvp, 0, cred, p, 0, 0))
		panic("ffs_reload: dirty1");
	/*
	 * Step 2: re-read superblock from disk.
	 */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;
	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	fs = VFSTOUFS(mountp)->um_fs;
	newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
	memcpy(newfs, bp->b_data, fs->fs_sbsize);
#ifdef FFS_EI
	if (VFSTOUFS(mountp)->um_flags & UFS_NEEDSWAP)
		ffs_sb_swap((struct fs*)bp->b_data, newfs, 0);
#endif
	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		free(newfs, M_UFSMNT);
		return (EIO);	/* XXX needs translation */
	}
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	memcpy(&newfs->fs_csp[0], &fs->fs_csp[0], sizeof(fs->fs_csp));
	newfs->fs_maxcluster = fs->fs_maxcluster;
	memcpy(fs, newfs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	free(newfs, M_UFSMNT);
	mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat(fs);
	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error) {
			brelse(bp);
			return (error);
		}
#ifdef FFS_EI
		if (UFS_MPNEEDSWAP(mountp))
			ffs_csum_swap((struct csum*)bp->b_data,
			    (struct csum*)fs->fs_csp[fragstoblks(fs, i)], size);
		else
#endif
			memcpy(fs->fs_csp[fragstoblks(fs, i)], bp->b_data,
			    (size_t)size);
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		lp = fs->fs_maxcluster;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}

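	/*
	 * Rescan the vnode list for this mount.  The list can change while
	 * we sleep in vrecycle()/vget(); whenever that happens, or a vnode
	 * turns out to belong to another mount, restart the scan.
	 */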
loop:
	simple_lock(&mntvnode_slock);
	for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		if (vp->v_mount != mountp) {
			simple_unlock(&mntvnode_slock);
			goto loop;
		}
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		if (vrecycle(vp, &mntvnode_slock, p))
			goto loop;
		/*
		 * Step 5: invalidate all cached file data.
		 */
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
			goto loop;
		if (vinvalbuf(vp, 0, cred, p, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			vput(vp);
			return (error);
		}
		cp = (caddr_t)bp->b_data +
		    (ino_to_fsbo(fs, ip->i_number) * DINODE_SIZE);
#ifdef FFS_EI
		if (UFS_MPNEEDSWAP(mountp))
			ffs_dinode_swap((struct dinode *)cp,
			    &ip->i_din.ffs_din);
		else
#endif
			memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
		brelse(bp);
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}

/*
 * Common code for mount and mountroot
 */
int
ffs_mountfs(devvp, mp, p)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	dev_t dev;
	struct partinfo dpart;
	caddr_t base, space;
	int blks;
	int error, i, size, ronly, needswap;
	int32_t *lp;
	struct ucred *cred;
	extern struct vnode *rootvp;
	u_int64_t maxfilesize;	/* XXX */
	u_int32_t sbsize;

	dev = devvp->v_rdev;
	cred = p ? p->p_ucred : NOCRED;
	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)) != 0)
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	if ((error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0)) != 0)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
	if (error)
		return (error);
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;

	bp = NULL;
	ump = NULL;
	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, cred, &bp);
	if (error)
		goto out;

	fs = (struct fs*)bp->b_data;
	if (fs->fs_magic == FS_MAGIC) {
		needswap = 0;
		sbsize = fs->fs_sbsize;
#ifdef FFS_EI
	} else if (fs->fs_magic == bswap32(FS_MAGIC)) {
		needswap = 1;
		sbsize = bswap32(fs->fs_sbsize);
#endif
	} else {
		error = EINVAL;
		goto out;
	}
	if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < sizeof(struct fs)) {
		error = EINVAL;
		goto out;
	}

	fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
	memcpy(fs, bp->b_data, sbsize);
#ifdef FFS_EI
	if (needswap)
		ffs_sb_swap((struct fs*)bp->b_data, fs, 0);
#endif

	/* make sure cylinder group summary area is a reasonable size. */
	if (fs->fs_cgsize == 0 || fs->fs_cpg == 0 ||
	    fs->fs_ncg > fs->fs_ncyl / fs->fs_cpg + 1 ||
	    fs->fs_cssize >
	    fragroundup(fs, fs->fs_ncg * sizeof(struct csum))) {
		error = EINVAL;	/* XXX needs translation */
		goto out2;
	}
	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
		error = EROFS;	/* XXX what should be returned? */
		goto out2;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	memset((caddr_t)ump, 0, sizeof *ump);
	ump->um_fs = fs;
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;
	fs->fs_ronly = ronly;
	if (ronly == 0) {
		fs->fs_clean <<= 1;
		fs->fs_fmod = 1;
	}
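	/*
	 * Read the cylinder group summary information into one contiguous
	 * buffer.  When clustering is enabled, the same allocation also
	 * holds one int32_t per cylinder group for the cluster summary
	 * (fs_maxcluster), set up just below.
	 */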
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	base = space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    cred, &bp);
		if (error) {
			free(base, M_UFSMNT);
			goto out2;
		}
#ifdef FFS_EI
		if (needswap)
			ffs_csum_swap((struct csum*)bp->b_data,
			    (struct csum*)space, size);
		else
#endif
			memcpy(space, bp->b_data, (u_int)size);

		fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space;
		space += size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = (int32_t *)space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_FFS);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	mp->mnt_flag |= MNT_LOCAL;
#ifdef FFS_EI
	if (needswap)
		ump->um_flags |= UFS_NEEDSWAP;
#endif
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specflags |= SI_MOUNTEDON;
	ffs_oldfscompat(fs);
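	/*
	 * Clamp fs_maxfilesize to the largest offset this code can
	 * currently address; the on-disk value is preserved in
	 * um_savedmaxfilesize so that ffs_sbupdate() can write it back
	 * unchanged.
	 */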
	ump->um_savedmaxfilesize = fs->fs_maxfilesize;	/* XXX */
	maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;	/* XXX */
	if (fs->fs_maxfilesize > maxfilesize)	/* XXX */
		fs->fs_maxfilesize = maxfilesize;	/* XXX */
	return (0);
out2:
	free(fs, M_UFSMNT);
out:
	if (bp)
		brelse(bp);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
	if (ump) {
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * Sanity checks for old file systems.
 *
 * XXX - goes away some day.
 */
int
ffs_oldfscompat(fs)
	struct fs *fs;
{
	int i;

	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
	fs->fs_interleave = max(fs->fs_interleave, 1);	/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)	/* XXX */
		fs->fs_nrpos = 8;	/* XXX */
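	/*
	 * Old inode formats did not maintain fs_maxfilesize; recompute it
	 * as NDADDR direct blocks plus the capacity of each level of
	 * indirection, and derive the quad-sized block/frag offset masks.
	 */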
	if (fs->fs_inodefmt < FS_44INODEFMT) {	/* XXX */
		u_int64_t sizepb = fs->fs_bsize;	/* XXX */
						/* XXX */
		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
		for (i = 0; i < NIADDR; i++) {	/* XXX */
			sizepb *= NINDIR(fs);	/* XXX */
			fs->fs_maxfilesize += sizepb;	/* XXX */
		}	/* XXX */
		fs->fs_qbmask = ~fs->fs_bmask;	/* XXX */
		fs->fs_qfmask = ~fs->fs_fmask;	/* XXX */
	}	/* XXX */
	return (0);
}

/*
 * unmount system call
 */
int
ffs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;
	int error, flags;

	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = ffs_flushfiles(mp, flags, p)) != 0)
		return (error);
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_ronly == 0 &&
	    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
	    fs->fs_clean & FS_WASCLEAN) {
		fs->fs_clean = FS_ISCLEAN;
		(void) ffs_sbupdate(ump, MNT_WAIT);
	}
	ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
	    NOCRED, p);
	vrele(ump->um_devvp);
	free(fs->fs_csp[0], M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, p)
	register struct mount *mp;
	int flags;
	struct proc *p;
{
	extern int doforce;
	register struct ufsmount *ump;
	int error;

	if (!doforce)
		flags &= ~FORCECLOSE;
	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	error = vflush(mp, NULLVP, flags);
	return (error);
}

/*
 * Get file system statistics.
 */
int
ffs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ffs_statfs");
#ifdef COMPAT_09
	sbp->f_type = 1;
#else
	sbp->f_type = 0;
#endif
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree;
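	/*
	 * f_bavail is the space available to unprivileged users: the data
	 * area scaled down by the minfree reserve, less the blocks already
	 * in use.
	 */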
	sbp->f_bavail = (long) (((u_int64_t) fs->fs_dsize * (u_int64_t)
	    (100 - fs->fs_minfree) / (u_int64_t) 100) -
	    (u_int64_t) (fs->fs_dsize - sbp->f_bfree));
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
	if (sbp != &mp->mnt_stat) {
		memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
		memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
	}
	strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
ffs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	struct vnode *vp, *nvp;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, allerror = 0;

	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {	/* XXX */
		printf("fs = %s\n", fs->fs_fsmnt);
		panic("update: rofs mod");
	}
	/*
	 * Write back each (modified) inode.
	 */
	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		simple_lock(&vp->v_interlock);
		nvp = vp->v_mntvnodes.le_next;
		ip = VTOI(vp);
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    vp->v_dirtyblkhd.lh_first == NULL) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		simple_unlock(&mntvnode_slock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			simple_lock(&mntvnode_slock);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		if ((error = VOP_FSYNC(vp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, p)) != 0)
			allerror = error;
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	/*
	 * Force stale file system control information to be flushed.
	 */
	if ((error = VOP_FSYNC(ump->um_devvp, cred,
	    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, p)) != 0)
		allerror = error;
#ifdef QUOTA
	qsync(mp);
#endif
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0) {
		fs->fs_fmod = 0;
		fs->fs_time = time.tv_sec;
		allerror = ffs_cgupdate(ump, waitfor);
	}
	return (allerror);
}

/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk. If it is in core, wait for the lock bit to clear, then
 * return the inode locked. Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ffs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;
	caddr_t cp;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
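	/*
	 * Look for the inode in the hash table first.  If it is not there,
	 * take ufs_hashlock so that only one thread allocates the new
	 * inode; LK_SLEEPFAIL makes the lock attempt fail after sleeping,
	 * which forces a recheck of the hash table.
	 */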
	do {
		if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
			return (0);
	} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
		*vpp = NULL;
		lockmgr(&ufs_hashlock, LK_RELEASE, 0);
		return (error);
	}
	/*
	 * XXX MFS ends up here, too, to allocate an inode. Should we
	 * XXX create another pool for MFS inodes?
	 */
	ip = pool_get(&ffs_inode_pool, PR_WAITOK);
	memset((caddr_t)ip, 0, sizeof(struct inode));
	lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
#ifdef QUOTA
	{
		int i;

		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);
	lockmgr(&ufs_hashlock, LK_RELEASE, 0);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
	cp = (caddr_t)bp->b_data + (ino_to_fsbo(fs, ino) * DINODE_SIZE);
#ifdef FFS_EI
	if (UFS_MPNEEDSWAP(mp))
		ffs_dinode_swap((struct dinode *)cp, &ip->i_din.ffs_din);
	else
#endif
		memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {	/* XXX */
		ip->i_ffs_uid = ip->i_din.ffs_din.di_ouid;	/* XXX */
		ip->i_ffs_gid = ip->i_din.ffs_din.di_ogid;	/* XXX */
	}	/* XXX */

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
 */
int
ffs_fhtovp(mp, fhp, nam, vpp, exflagsp, credanonp)
	register struct mount *mp;
	struct fid *fhp;
	struct mbuf *nam;
	struct vnode **vpp;
	int *exflagsp;
	struct ucred **credanonp;
{
	register struct ufid *ufhp;
	struct fs *fs;

	ufhp = (struct ufid *)fhp;
	fs = VFSTOUFS(mp)->um_fs;
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	return (ufs_check_export(mp, ufhp, nam, vpp, exflagsp, credanonp));
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
int
ffs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip;
	register struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_ffs_gen;
	return (0);
}

void
ffs_init()
{
	ufs_init();

	pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_FFSNODE);
}

int
ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	extern int doclusterread, doclusterwrite, doreallocblks, doasyncfree;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);	/* overloaded */

	switch (name[0]) {
	case FFS_CLUSTERREAD:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doclusterread));
	case FFS_CLUSTERWRITE:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doclusterwrite));
	case FFS_REALLOCBLKS:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doreallocblks));
	case FFS_ASYNCFREE:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *fs = mp->um_fs;
	register struct buf *bp;
	int i, error = 0;
	int32_t saved_nrpos = fs->fs_nrpos;
	int64_t saved_qbmask = fs->fs_qbmask;
	int64_t saved_qfmask = fs->fs_qfmask;
	u_int64_t saved_maxfilesize = fs->fs_maxfilesize;

	/* Restore compatibility to old file systems.		   XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)	/* XXX */
		fs->fs_nrpos = -1;	/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {	/* XXX */
		int32_t *lp, tmp;	/* XXX */
					/* XXX */
		lp = (int32_t *)&fs->fs_qbmask;	/* XXX nuke qfmask too */
		tmp = lp[4];	/* XXX */
		for (i = 4; i > 0; i--)	/* XXX */
			lp[i] = lp[i-1];	/* XXX */
		lp[0] = tmp;	/* XXX */
	}	/* XXX */
	fs->fs_maxfilesize = mp->um_savedmaxfilesize;	/* XXX */

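	/*
	 * SBOFF is a byte offset; fs_fshift - fs_fsbtodb is log2(DEV_BSIZE),
	 * so the shift converts it to the DEV_BSIZE block number that
	 * getblk() expects for a device vnode.
	 */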
	bp = getblk(mp->um_devvp, SBOFF >> (fs->fs_fshift - fs->fs_fsbtodb),
	    (int)fs->fs_sbsize, 0, 0);
	memcpy(bp->b_data, fs, fs->fs_sbsize);
#ifdef FFS_EI
	if (mp->um_flags & UFS_NEEDSWAP)
		ffs_sb_swap(fs, (struct fs*)bp->b_data, 1);
#endif

	fs->fs_nrpos = saved_nrpos;	/* XXX */
	fs->fs_qbmask = saved_qbmask;	/* XXX */
	fs->fs_qfmask = saved_qfmask;	/* XXX */
	fs->fs_maxfilesize = saved_maxfilesize;	/* XXX */

	if (waitfor == MNT_WAIT)
		error = bwrite(bp);
	else
		bawrite(bp);
	return (error);
}

int
ffs_cgupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *fs = mp->um_fs;
	register struct buf *bp;
	int blks;
	caddr_t space;
	int i, size, error = 0, allerror = 0;

	allerror = ffs_sbupdate(mp, waitfor);
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = (caddr_t)fs->fs_csp[0];
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
		    size, 0, 0);
#ifdef FFS_EI
		if (mp->um_flags & UFS_NEEDSWAP)
			ffs_csum_swap((struct csum*)space,
			    (struct csum*)bp->b_data, size);
		else
#endif
			memcpy(bp->b_data, space, (u_int)size);
		space += size;
		if (waitfor == MNT_WAIT)
			error = bwrite(bp);
		else
			bawrite(bp);
	}
	if (!allerror && error)
		allerror = error;
	return (allerror);
}