/*	$NetBSD: ffs_vfsops.c,v 1.49.2.2 1999/12/20 13:16:30 he Exp $	*/

/*
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#if defined(_KERNEL) && !defined(_LKM)
#include "opt_ffs.h"
#include "opt_quota.h"
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <vm/vm.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufs_bswap.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

extern struct lock ufs_hashlock;

int ffs_sbupdate __P((struct ufsmount *, int));

extern struct vnodeopv_desc ffs_vnodeop_opv_desc;
extern struct vnodeopv_desc ffs_specop_opv_desc;
extern struct vnodeopv_desc ffs_fifoop_opv_desc;

struct vnodeopv_desc *ffs_vnodeopv_descs[] = {
	&ffs_vnodeop_opv_desc,
	&ffs_specop_opv_desc,
	&ffs_fifoop_opv_desc,
	NULL,
};

struct vfsops ffs_vfsops = {
	MOUNT_FFS,
	ffs_mount,
	ufs_start,
	ffs_unmount,
	ufs_root,
	ufs_quotactl,
	ffs_statfs,
	ffs_sync,
	ffs_vget,
	ffs_fhtovp,
	ffs_vptofh,
	ffs_init,
	ffs_sysctl,
	ffs_mountroot,
	ufs_check_export,
	ffs_vnodeopv_descs,
};

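/*
 * Pool from which in-core FFS inodes are allocated; set up in ffs_init()
 * and used by ffs_vget().
 */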
struct pool ffs_inode_pool;

/*
 * Called by main() when ffs is going to be mounted as root.
 */

int
ffs_mountroot()
{
	extern struct vnode *rootvp;
	struct fs *fs;
	struct mount *mp;
	struct proc *p = curproc;	/* XXX */
	struct ufsmount *ump;
	int error;

	if (root_device->dv_class != DV_DISK)
		return (ENODEV);

	/*
	 * Get vnodes for rootdev.
	 */
	if (bdevvp(rootdev, &rootvp))
		panic("ffs_mountroot: can't setup bdevvp's");

	if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp)))
		return (error);
	if ((error = ffs_mountfs(rootvp, mp, p)) != 0) {
		mp->mnt_op->vfs_refcount--;
		vfs_unbusy(mp);
		free(mp, M_MOUNT);
		return (error);
	}
	simple_lock(&mountlist_slock);
	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	simple_unlock(&mountlist_slock);
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
	(void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
	(void)ffs_statfs(mp, &mp->mnt_stat, p);
	vfs_unbusy(mp);
	inittodr(fs->fs_time);
	return (0);
}

/*
 * VFS Operations.
 *
 * mount system call
 */
int
ffs_mount(mp, path, data, ndp, p)
	register struct mount *mp;
	const char *path;
	void *data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump = NULL;
	register struct fs *fs;
	size_t size;
	int error, flags;
	mode_t accessmode;

	error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args));
	if (error)
		return (error);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ffs_flushfiles(mp, flags, p);
			if (error == 0 &&
			    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
			    fs->fs_clean & FS_WASCLEAN) {
				fs->fs_clean = FS_ISCLEAN;
				(void) ffs_sbupdate(ump, MNT_WAIT);
			}
			if (error)
				return (error);
			fs->fs_ronly = 1;
		}
		if (mp->mnt_flag & MNT_RELOAD) {
			error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
			if (error)
				return (error);
		}
		if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			if (p->p_ucred->cr_uid != 0) {
				devvp = ump->um_devvp;
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
				error = VOP_ACCESS(devvp, VREAD | VWRITE,
				    p->p_ucred, p);
				VOP_UNLOCK(devvp, 0);
				if (error)
					return (error);
			}
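			/*
			 * Going read/write: shift fs_clean left so that
			 * FS_WASCLEAN later tells whether the file system
			 * was clean at this point, and set fs_fmod to force
			 * a superblock update.
			 */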
			fs->fs_ronly = 0;
			fs->fs_clean <<= 1;
			fs->fs_fmod = 1;
		}
		if (args.fspec == 0) {
			/*
			 * Process export requests.
			 */
			return (vfs_export(mp, &ump->um_export, &args.export));
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	if ((error = namei(ndp)) != 0)
		return (error);
	devvp = ndp->ni_vp;

	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}
	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (p->p_ucred->cr_uid != 0) {
		accessmode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
		VOP_UNLOCK(devvp, 0);
		if (error) {
			vrele(devvp);
			return (error);
		}
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0)
		error = ffs_mountfs(devvp, mp, p);
	else {
		if (devvp != ump->um_devvp)
			error = EINVAL;		/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	memset(fs->fs_fsmnt + size, 0, sizeof(fs->fs_fsmnt) - size);
	memcpy(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	memset(mp->mnt_stat.f_mntfromname + size, 0, MNAMELEN - size);
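	/*
	 * If the superblock was modified above, push it and the cylinder
	 * group summaries to disk: stamp the time if the file system was
	 * clean, otherwise warn that fsck is needed.
	 */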
	if (fs->fs_fmod != 0) {	/* XXX */
		fs->fs_fmod = 0;
		if (fs->fs_clean & FS_WASCLEAN)
			fs->fs_time = time.tv_sec;
		else
			printf("%s: file system not clean (fs_flags=%x); please fsck(8)\n",
			    mp->mnt_stat.f_mntfromname, fs->fs_clean);
		(void) ffs_cgupdate(ump, MNT_WAIT);
	}
	return (0);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 * 1) invalidate all cached meta-data.
 * 2) re-read superblock from disk.
 * 3) re-read summary information from disk.
 * 4) invalidate all inactive vnodes.
 * 5) invalidate all cached file data.
 * 6) re-read inode data for all active vnodes.
 */
int
ffs_reload(mountp, cred, p)
	register struct mount *mountp;
	struct ucred *cred;
	struct proc *p;
{
	register struct vnode *vp, *nvp, *devvp;
	struct inode *ip;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct partinfo dpart;
	int i, blks, size, error;
	int32_t *lp;
	caddr_t cp;

	if ((mountp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mountp)->um_devvp;
	if (vinvalbuf(devvp, 0, cred, p, 0, 0))
		panic("ffs_reload: dirty1");
	/*
	 * Step 2: re-read superblock from disk.
	 */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;
	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	fs = VFSTOUFS(mountp)->um_fs;
	newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
	memcpy(newfs, bp->b_data, fs->fs_sbsize);
#ifdef FFS_EI
	if (VFSTOUFS(mountp)->um_flags & UFS_NEEDSWAP)
		ffs_sb_swap((struct fs*)bp->b_data, newfs, 0);
#endif
	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		free(newfs, M_UFSMNT);
		return (EIO);		/* XXX needs translation */
	}
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	memcpy(&newfs->fs_csp[0], &fs->fs_csp[0], sizeof(fs->fs_csp));
	newfs->fs_maxcluster = fs->fs_maxcluster;
	memcpy(fs, newfs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	free(newfs, M_UFSMNT);
	mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat(fs);
	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error) {
			brelse(bp);
			return (error);
		}
#ifdef FFS_EI
		if (UFS_MPNEEDSWAP(mountp))
			ffs_csum_swap((struct csum*)bp->b_data,
			    (struct csum*)fs->fs_csp[fragstoblks(fs, i)], size);
		else
#endif
			memcpy(fs->fs_csp[fragstoblks(fs, i)], bp->b_data,
			    (size_t)size);
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		lp = fs->fs_maxcluster;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}

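	/*
	 * Steps 4-6 walk the mount's vnode list.  The list can change
	 * whenever the interlock is dropped, so restart from the top
	 * if the current vnode is no longer on this mount.
	 */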
loop:
	simple_lock(&mntvnode_slock);
	for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		if (vp->v_mount != mountp) {
			simple_unlock(&mntvnode_slock);
			goto loop;
		}
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		if (vrecycle(vp, &mntvnode_slock, p))
			goto loop;
		/*
		 * Step 5: invalidate all cached file data.
		 */
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
			goto loop;
		if (vinvalbuf(vp, 0, cred, p, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			vput(vp);
			return (error);
		}
		cp = (caddr_t)bp->b_data +
		    (ino_to_fsbo(fs, ip->i_number) * DINODE_SIZE);
#ifdef FFS_EI
		if (UFS_MPNEEDSWAP(mountp))
			ffs_dinode_swap((struct dinode *)cp,
			    &ip->i_din.ffs_din);
		else
#endif
			memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
		brelse(bp);
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}

/*
 * Common code for mount and mountroot
 */
int
ffs_mountfs(devvp, mp, p)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	dev_t dev;
	struct partinfo dpart;
	caddr_t base, space;
	int blks;
	int error, i, size, ronly, needswap;
	int32_t *lp;
	struct ucred *cred;
	extern struct vnode *rootvp;
	u_int64_t maxfilesize;			/* XXX */
	u_int32_t sbsize;

	dev = devvp->v_rdev;
	cred = p ? p->p_ucred : NOCRED;
	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)) != 0)
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	if ((error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0)) != 0)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
	if (error)
		return (error);
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;

	bp = NULL;
	ump = NULL;
	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, cred, &bp);
	if (error)
		goto out;

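	/*
	 * The on-disk superblock may be in either host or byte-swapped
	 * order (FFS_EI); check the magic number both ways before trusting
	 * any of its size fields.
	 */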
	fs = (struct fs*)bp->b_data;
	if (fs->fs_magic == FS_MAGIC) {
		needswap = 0;
		sbsize = fs->fs_sbsize;
#ifdef FFS_EI
	} else if (fs->fs_magic == bswap32(FS_MAGIC)) {
		needswap = 1;
		sbsize = bswap32(fs->fs_sbsize);
#endif
	} else {
		error = EINVAL;
		goto out;
	}
	if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs)) {
		error = EINVAL;
		goto out;
	}

	fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
	memcpy(fs, bp->b_data, sbsize);
#ifdef FFS_EI
	if (needswap)
		ffs_sb_swap((struct fs*)bp->b_data, fs, 0);
#endif
	ffs_oldfscompat(fs);

	if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < sizeof(struct fs)) {
		error = EINVAL;
		goto out;
	}
	/* make sure cylinder group summary area is a reasonable size. */
	if (fs->fs_cgsize == 0 || fs->fs_cpg == 0 ||
	    fs->fs_ncg > fs->fs_ncyl / fs->fs_cpg + 1 ||
	    fs->fs_cssize >
	    fragroundup(fs, fs->fs_ncg * sizeof(struct csum))) {
		error = EINVAL;		/* XXX needs translation */
		goto out2;
	}
	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
		error = EROFS;		/* XXX what should be returned? */
		goto out2;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	memset((caddr_t)ump, 0, sizeof *ump);
	ump->um_fs = fs;
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;
	fs->fs_ronly = ronly;
	if (ronly == 0) {
		fs->fs_clean <<= 1;
		fs->fs_fmod = 1;
	}
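	/*
	 * Read the cylinder group summary information into one contiguous
	 * allocation; when contiguous-block summaries are in use, also
	 * reserve room at the end for the per-cg cluster counts
	 * (fs_maxcluster).
	 */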
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	base = space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    cred, &bp);
		if (error) {
			free(base, M_UFSMNT);
			goto out2;
		}
#ifdef FFS_EI
		if (needswap)
			ffs_csum_swap((struct csum*)bp->b_data,
			    (struct csum*)space, size);
		else
#endif
			memcpy(space, bp->b_data, (u_int)size);

		fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space;
		space += size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = (int32_t *)space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_FFS);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	mp->mnt_flag |= MNT_LOCAL;
#ifdef FFS_EI
	if (needswap)
		ump->um_flags |= UFS_NEEDSWAP;
#endif
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specflags |= SI_MOUNTEDON;
	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
	maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;	/* XXX */
	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
		fs->fs_maxfilesize = maxfilesize;		/* XXX */
	return (0);
out2:
	free(fs, M_UFSMNT);
out:
	if (bp)
		brelse(bp);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
	VOP_UNLOCK(devvp, 0);
	if (ump) {
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * Sanity checks for old file systems.
 *
 * XXX - goes away some day.
 */
int
ffs_oldfscompat(fs)
	struct fs *fs;
{
	int i;

	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = 8;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		u_int64_t sizepb = fs->fs_bsize;		/* XXX */
								/* XXX */
		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
		for (i = 0; i < NIADDR; i++) {			/* XXX */
			sizepb *= NINDIR(fs);			/* XXX */
			fs->fs_maxfilesize += sizepb;		/* XXX */
		}						/* XXX */
		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
	}							/* XXX */
	return (0);
}

/*
 * unmount system call
 */
int
ffs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;
	int error, flags;

	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = ffs_flushfiles(mp, flags, p)) != 0)
		return (error);
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
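	/*
	 * If the file system was writable and was clean when it went
	 * read/write, mark it clean again and write the superblock and
	 * summary information back before closing the device.
	 */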
	if (fs->fs_ronly == 0 &&
	    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
	    fs->fs_clean & FS_WASCLEAN) {
		fs->fs_clean = FS_ISCLEAN;
		(void) ffs_sbupdate(ump, MNT_WAIT);
	}
	ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
	    NOCRED, p);
	vput(ump->um_devvp);
	free(fs->fs_csp[0], M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, p)
	register struct mount *mp;
	int flags;
	struct proc *p;
{
	extern int doforce;
	register struct ufsmount *ump;
	int error;

	if (!doforce)
		flags &= ~FORCECLOSE;
	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	error = vflush(mp, NULLVP, flags);
	return (error);
}

/*
 * Get file system statistics.
 */
int
ffs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ffs_statfs");
#ifdef COMPAT_09
	sbp->f_type = 1;
#else
	sbp->f_type = 0;
#endif
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree;
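	/*
	 * Blocks available to unprivileged users: the space left after
	 * the minfree reserve, minus what is already allocated.
	 */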
	sbp->f_bavail = (long) (((u_int64_t) fs->fs_dsize * (u_int64_t)
	    (100 - fs->fs_minfree) / (u_int64_t) 100) -
	    (u_int64_t) (fs->fs_dsize - sbp->f_bfree));
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
	if (sbp != &mp->mnt_stat) {
		memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
		memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
	}
	strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
ffs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	struct vnode *vp, *nvp;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, allerror = 0;

	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
		printf("fs = %s\n", fs->fs_fsmnt);
		panic("update: rofs mod");
	}
	/*
	 * Write back each (modified) inode.
	 */
	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		simple_lock(&vp->v_interlock);
		nvp = vp->v_mntvnodes.le_next;
		ip = VTOI(vp);
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    vp->v_dirtyblkhd.lh_first == NULL) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		simple_unlock(&mntvnode_slock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			simple_lock(&mntvnode_slock);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		if ((error = VOP_FSYNC(vp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, p)) != 0)
			allerror = error;
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	/*
	 * Force stale file system control information to be flushed.
	 */
	if ((error = VOP_FSYNC(ump->um_devvp, cred,
	    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, p)) != 0)
		allerror = error;
#ifdef QUOTA
	qsync(mp);
#endif
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0) {
		fs->fs_fmod = 0;
		fs->fs_time = time.tv_sec;
		allerror = ffs_cgupdate(ump, waitfor);
	}
	return (allerror);
}

/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ffs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;
	caddr_t cp;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
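	/*
	 * Check the inode hash first.  ufs_hashlock serializes racing
	 * ffs_vget() calls so only one of them allocates a new vnode and
	 * reads the inode from disk; LK_SLEEPFAIL makes the lookup retry
	 * after sleeping on the lock.
	 */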
	do {
		if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
			return (0);
	} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
		*vpp = NULL;
		lockmgr(&ufs_hashlock, LK_RELEASE, 0);
		return (error);
	}
	/*
	 * XXX MFS ends up here, too, to allocate an inode.  Should we
	 * XXX create another pool for MFS inodes?
	 */
	ip = pool_get(&ffs_inode_pool, PR_WAITOK);
	memset((caddr_t)ip, 0, sizeof(struct inode));
	lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
#ifdef QUOTA
	{
		int i;

		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);
	lockmgr(&ufs_hashlock, LK_RELEASE, 0);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
	cp = (caddr_t)bp->b_data + (ino_to_fsbo(fs, ino) * DINODE_SIZE);
#ifdef FFS_EI
	if (UFS_MPNEEDSWAP(mp))
		ffs_dinode_swap((struct dinode *)cp, &ip->i_din.ffs_din);
	else
#endif
		memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		ip->i_ffs_uid = ip->i_din.ffs_din.di_ouid;	/* XXX */
		ip->i_ffs_gid = ip->i_din.ffs_din.di_ogid;	/* XXX */
	}							/* XXX */

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via. exflagsp and credanonp
 */
int
ffs_fhtovp(mp, fhp, vpp)
	register struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	register struct ufid *ufhp;
	struct fs *fs;

	ufhp = (struct ufid *)fhp;
	fs = VFSTOUFS(mp)->um_fs;
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	return (ufs_fhtovp(mp, ufhp, vpp));
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
int
ffs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip;
	register struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_ffs_gen;
	return (0);
}

void
ffs_init()
{
	ufs_init();

	pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_FFSNODE);
}

int
ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	extern int doclusterread, doclusterwrite, doreallocblks, doasyncfree;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case FFS_CLUSTERREAD:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doclusterread));
	case FFS_CLUSTERWRITE:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doclusterwrite));
	case FFS_REALLOCBLKS:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doreallocblks));
	case FFS_ASYNCFREE:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *fs = mp->um_fs;
	register struct buf *bp;
	int i, error = 0;
	int32_t saved_nrpos = fs->fs_nrpos;
	int64_t saved_qbmask = fs->fs_qbmask;
	int64_t saved_qfmask = fs->fs_qfmask;
	u_int64_t saved_maxfilesize = fs->fs_maxfilesize;

	/* Restore compatibility to old file systems.		   XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = -1;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		int32_t *lp, tmp;				/* XXX */
								/* XXX */
		lp = (int32_t *)&fs->fs_qbmask;	/* XXX nuke qfmask too */
		tmp = lp[4];					/* XXX */
		for (i = 4; i > 0; i--)				/* XXX */
			lp[i] = lp[i-1];			/* XXX */
		lp[0] = tmp;					/* XXX */
	}							/* XXX */
	fs->fs_maxfilesize = mp->um_savedmaxfilesize;		/* XXX */

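	/*
	 * SBOFF is a byte offset; shifting right by (fs_fshift - fs_fsbtodb)
	 * converts it to the device block address of the superblock.
	 */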
	bp = getblk(mp->um_devvp, SBOFF >> (fs->fs_fshift - fs->fs_fsbtodb),
	    (int)fs->fs_sbsize, 0, 0);
	memcpy(bp->b_data, fs, fs->fs_sbsize);
#ifdef FFS_EI
	if (mp->um_flags & UFS_NEEDSWAP)
		ffs_sb_swap(fs, (struct fs*)bp->b_data, 1);
#endif

	fs->fs_nrpos = saved_nrpos;				/* XXX */
	fs->fs_qbmask = saved_qbmask;				/* XXX */
	fs->fs_qfmask = saved_qfmask;				/* XXX */
	fs->fs_maxfilesize = saved_maxfilesize;			/* XXX */

	if (waitfor == MNT_WAIT)
		error = bwrite(bp);
	else
		bawrite(bp);
	return (error);
}

int
ffs_cgupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *fs = mp->um_fs;
	register struct buf *bp;
	int blks;
	caddr_t space;
	int i, size, error = 0, allerror = 0;

	allerror = ffs_sbupdate(mp, waitfor);
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = (caddr_t)fs->fs_csp[0];
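	/*
	 * Write the in-core cylinder group summaries back to disk one
	 * file system block at a time, byte-swapping on the way out if
	 * the file system is opposite-endian (FFS_EI).
	 */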
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
		    size, 0, 0);
#ifdef FFS_EI
		if (mp->um_flags & UFS_NEEDSWAP)
			ffs_csum_swap((struct csum*)space,
			    (struct csum*)bp->b_data, size);
		else
#endif
			memcpy(bp->b_data, space, (u_int)size);
		space += size;
		if (waitfor == MNT_WAIT)
			error = bwrite(bp);
		else
			bawrite(bp);
	}
	if (!allerror && error)
		allerror = error;
	return (allerror);
}