/*	$NetBSD: ffs_vfsops.c,v 1.53 1999/10/16 23:53:29 wrstuden Exp $	*/

/*
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#if defined(_KERNEL) && !defined(_LKM)
#include "opt_ffs.h"
#include "opt_quota.h"
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <vm/vm.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufs_bswap.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

extern struct lock ufs_hashlock;

int ffs_sbupdate __P((struct ufsmount *, int));

extern struct vnodeopv_desc ffs_vnodeop_opv_desc;
extern struct vnodeopv_desc ffs_specop_opv_desc;
extern struct vnodeopv_desc ffs_fifoop_opv_desc;

struct vnodeopv_desc *ffs_vnodeopv_descs[] = {
	&ffs_vnodeop_opv_desc,
	&ffs_specop_opv_desc,
	&ffs_fifoop_opv_desc,
	NULL,
};

struct vfsops ffs_vfsops = {
	MOUNT_FFS,
	ffs_mount,
	ufs_start,
	ffs_unmount,
	ufs_root,
	ufs_quotactl,
	ffs_statfs,
	ffs_sync,
	ffs_vget,
	ffs_fhtovp,
	ffs_vptofh,
	ffs_init,
	ffs_sysctl,
	ffs_mountroot,
	ufs_check_export,
	ffs_vnodeopv_descs,
};

struct pool ffs_inode_pool;

/*
 * Called by main() when ffs is going to be mounted as root.
 */

int
ffs_mountroot()
{
	extern struct vnode *rootvp;
	struct fs *fs;
	struct mount *mp;
	struct proc *p = curproc;	/* XXX */
	struct ufsmount *ump;
	int error;

	if (root_device->dv_class != DV_DISK)
		return (ENODEV);

	/*
	 * Get vnodes for rootdev.
	 */
	if (bdevvp(rootdev, &rootvp))
		panic("ffs_mountroot: can't setup bdevvp's");

	if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
		vrele(rootvp);
		return (error);
	}
	if ((error = ffs_mountfs(rootvp, mp, p)) != 0) {
		mp->mnt_op->vfs_refcount--;
		vfs_unbusy(mp);
		free(mp, M_MOUNT);
		vrele(rootvp);
		return (error);
	}
	simple_lock(&mountlist_slock);
	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	simple_unlock(&mountlist_slock);
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
	(void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
	(void)ffs_statfs(mp, &mp->mnt_stat, p);
	vfs_unbusy(mp);
	inittodr(fs->fs_time);
	return (0);
}

/*
 * VFS Operations.
 *
 * mount system call
 */
int
ffs_mount(mp, path, data, ndp, p)
	register struct mount *mp;
	const char *path;
	void *data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump = NULL;
	register struct fs *fs;
	size_t size;
	int error, flags;
	mode_t accessmode;

	error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args));
	if (error)
		return (error);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ffs_flushfiles(mp, flags, p);
			if (error == 0 &&
			    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
			    fs->fs_clean & FS_WASCLEAN) {
				fs->fs_clean = FS_ISCLEAN;
				(void) ffs_sbupdate(ump, MNT_WAIT);
			}
			if (error)
				return (error);
			fs->fs_ronly = 1;
		}
		if (mp->mnt_flag & MNT_RELOAD) {
			error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
			if (error)
				return (error);
		}
		if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			if (p->p_ucred->cr_uid != 0) {
				devvp = ump->um_devvp;
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
				error = VOP_ACCESS(devvp, VREAD | VWRITE,
				    p->p_ucred, p);
				VOP_UNLOCK(devvp, 0);
				if (error)
					return (error);
			}
			fs->fs_ronly = 0;
			fs->fs_clean <<= 1;
			fs->fs_fmod = 1;
		}
		if (args.fspec == 0) {
			/*
			 * Process export requests.
			 */
			return (vfs_export(mp, &ump->um_export, &args.export));
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	if ((error = namei(ndp)) != 0)
		return (error);
	devvp = ndp->ni_vp;

	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}
	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (p->p_ucred->cr_uid != 0) {
		accessmode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
		VOP_UNLOCK(devvp, 0);
		if (error) {
			vrele(devvp);
			return (error);
		}
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0)
		error = ffs_mountfs(devvp, mp, p);
	else {
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	memset(fs->fs_fsmnt + size, 0, sizeof(fs->fs_fsmnt) - size);
	memcpy(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	memset(mp->mnt_stat.f_mntfromname + size, 0, MNAMELEN - size);
	if (fs->fs_fmod != 0) {	/* XXX */
		fs->fs_fmod = 0;
		if (fs->fs_clean & FS_WASCLEAN)
			fs->fs_time = time.tv_sec;
		else
			printf("%s: file system not clean (fs_flags=%x); please fsck(8)\n",
			    mp->mnt_stat.f_mntfromname, fs->fs_clean);
		(void) ffs_cgupdate(ump, MNT_WAIT);
	}
	return (0);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 * 1) invalidate all cached meta-data.
 * 2) re-read superblock from disk.
 * 3) re-read summary information from disk.
 * 4) invalidate all inactive vnodes.
 * 5) invalidate all cached file data.
 * 6) re-read inode data for all active vnodes.
 */
int
ffs_reload(mountp, cred, p)
	register struct mount *mountp;
	struct ucred *cred;
	struct proc *p;
{
	register struct vnode *vp, *nvp, *devvp;
	struct inode *ip;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct partinfo dpart;
	int i, blks, size, error;
	int32_t *lp;
	caddr_t cp;

	if ((mountp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mountp)->um_devvp;
	if (vinvalbuf(devvp, 0, cred, p, 0, 0))
		panic("ffs_reload: dirty1");
	/*
	 * Step 2: re-read superblock from disk.
	 */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;
	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	fs = VFSTOUFS(mountp)->um_fs;
	newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
	memcpy(newfs, bp->b_data, fs->fs_sbsize);
#ifdef FFS_EI
	if (VFSTOUFS(mountp)->um_flags & UFS_NEEDSWAP)
		ffs_sb_swap((struct fs*)bp->b_data, newfs, 0);
#endif
	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		free(newfs, M_UFSMNT);
		return (EIO);		/* XXX needs translation */
	}
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	memcpy(&newfs->fs_csp[0], &fs->fs_csp[0], sizeof(fs->fs_csp));
	newfs->fs_maxcluster = fs->fs_maxcluster;
	memcpy(fs, newfs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	free(newfs, M_UFSMNT);
	mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat(fs);
	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error) {
			brelse(bp);
			return (error);
		}
#ifdef FFS_EI
		if (UFS_MPNEEDSWAP(mountp))
			ffs_csum_swap((struct csum*)bp->b_data,
			    (struct csum*)fs->fs_csp[fragstoblks(fs, i)], size);
		else
#endif
			memcpy(fs->fs_csp[fragstoblks(fs, i)], bp->b_data,
			    (size_t)size);
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		lp = fs->fs_maxcluster;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}

loop:
	simple_lock(&mntvnode_slock);
	for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		if (vp->v_mount != mountp) {
			simple_unlock(&mntvnode_slock);
			goto loop;
		}
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		if (vrecycle(vp, &mntvnode_slock, p))
			goto loop;
		/*
		 * Step 5: invalidate all cached file data.
		 */
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
			goto loop;
		if (vinvalbuf(vp, 0, cred, p, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			vput(vp);
			return (error);
		}
		cp = (caddr_t)bp->b_data +
		    (ino_to_fsbo(fs, ip->i_number) * DINODE_SIZE);
#ifdef FFS_EI
		if (UFS_MPNEEDSWAP(mountp))
			ffs_dinode_swap((struct dinode *)cp,
			    &ip->i_din.ffs_din);
		else
#endif
			memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
		brelse(bp);
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}

/*
 * Common code for mount and mountroot
 */
int
ffs_mountfs(devvp, mp, p)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	dev_t dev;
	struct partinfo dpart;
	caddr_t base, space;
	int blks;
	int error, i, size, ronly;
#ifdef FFS_EI
	int needswap;
#endif
	int32_t *lp;
	struct ucred *cred;
	extern struct vnode *rootvp;
	u_int64_t maxfilesize;					/* XXX */
	u_int32_t sbsize;

	dev = devvp->v_rdev;
	cred = p ? p->p_ucred : NOCRED;
	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)) != 0)
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	if ((error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0)) != 0)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
	if (error)
		return (error);
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;

	bp = NULL;
	ump = NULL;
	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, cred, &bp);
	if (error)
		goto out;

	fs = (struct fs*)bp->b_data;
	if (fs->fs_magic == FS_MAGIC) {
		sbsize = fs->fs_sbsize;
#ifdef FFS_EI
		needswap = 0;
	} else if (fs->fs_magic == bswap32(FS_MAGIC)) {
		sbsize = bswap32(fs->fs_sbsize);
		needswap = 1;
#endif
	} else {
		error = EINVAL;
		goto out;
	}
	if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs)) {
		error = EINVAL;
		goto out;
	}

	fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
	memcpy(fs, bp->b_data, sbsize);
#ifdef FFS_EI
	if (needswap)
		ffs_sb_swap((struct fs*)bp->b_data, fs, 0);
#endif
	if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < sizeof(struct fs)) {
		error = EINVAL;
		goto out;
	}
	/* make sure cylinder group summary area is a reasonable size. */
	if (fs->fs_cgsize == 0 || fs->fs_cpg == 0 ||
	    fs->fs_ncg > fs->fs_ncyl / fs->fs_cpg + 1 ||
	    fs->fs_cssize >
	    fragroundup(fs, fs->fs_ncg * sizeof(struct csum))) {
		error = EINVAL;		/* XXX needs translation */
		goto out2;
	}
	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
		error = EROFS;		/* XXX what should be returned? */
		goto out2;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	memset((caddr_t)ump, 0, sizeof *ump);
	ump->um_fs = fs;
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;
	fs->fs_ronly = ronly;
	if (ronly == 0) {
		fs->fs_clean <<= 1;
		fs->fs_fmod = 1;
	}
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	base = space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    cred, &bp);
		if (error) {
			free(base, M_UFSMNT);
			goto out2;
		}
#ifdef FFS_EI
		if (needswap)
			ffs_csum_swap((struct csum*)bp->b_data,
			    (struct csum*)space, size);
		else
#endif
			memcpy(space, bp->b_data, (u_int)size);

		fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space;
		space += size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = (int32_t *)space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_FFS);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	mp->mnt_flag |= MNT_LOCAL;
#ifdef FFS_EI
	if (needswap)
		ump->um_flags |= UFS_NEEDSWAP;
#endif
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specflags |= SI_MOUNTEDON;
	ffs_oldfscompat(fs);
	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
	maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;	/* XXX */
	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
		fs->fs_maxfilesize = maxfilesize;		/* XXX */
	return (0);
out2:
	free(fs, M_UFSMNT);
out:
	if (bp)
		brelse(bp);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
	VOP_UNLOCK(devvp, 0);
	if (ump) {
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * Sanity checks for old file systems.
 *
 * XXX - goes away some day.
 */
int
ffs_oldfscompat(fs)
	struct fs *fs;
{
	int i;

	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = 8;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		u_int64_t sizepb = fs->fs_bsize;		/* XXX */
								/* XXX */
		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
		for (i = 0; i < NIADDR; i++) {			/* XXX */
			sizepb *= NINDIR(fs);			/* XXX */
			fs->fs_maxfilesize += sizepb;		/* XXX */
		}						/* XXX */
		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
	}							/* XXX */
	return (0);
}

/*
 * unmount system call
 */
int
ffs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;
	int error, flags;

	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = ffs_flushfiles(mp, flags, p)) != 0)
		return (error);
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_ronly == 0 &&
	    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
	    fs->fs_clean & FS_WASCLEAN) {
		fs->fs_clean = FS_ISCLEAN;
		(void) ffs_sbupdate(ump, MNT_WAIT);
	}
	ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
	    NOCRED, p);
	vput(ump->um_devvp);
	free(fs->fs_csp[0], M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, p)
	register struct mount *mp;
	int flags;
	struct proc *p;
{
	extern int doforce;
	register struct ufsmount *ump;
	int error;

	if (!doforce)
		flags &= ~FORCECLOSE;
	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	error = vflush(mp, NULLVP, flags);
	return (error);
}

/*
 * Get file system statistics.
 */
int
ffs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ffs_statfs");
#ifdef COMPAT_09
	sbp->f_type = 1;
#else
	sbp->f_type = 0;
#endif
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree;
	sbp->f_bavail = (long) (((u_int64_t) fs->fs_dsize * (u_int64_t)
	    (100 - fs->fs_minfree) / (u_int64_t) 100) -
	    (u_int64_t) (fs->fs_dsize - sbp->f_bfree));
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
	if (sbp != &mp->mnt_stat) {
		memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
		memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
	}
	strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
ffs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	struct vnode *vp, *nvp;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, allerror = 0;

	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
		printf("fs = %s\n", fs->fs_fsmnt);
		panic("update: rofs mod");
	}
	/*
	 * Write back each (modified) inode.
	 */
	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		simple_lock(&vp->v_interlock);
		nvp = vp->v_mntvnodes.le_next;
		ip = VTOI(vp);
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    vp->v_dirtyblkhd.lh_first == NULL) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		simple_unlock(&mntvnode_slock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			simple_lock(&mntvnode_slock);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		if ((error = VOP_FSYNC(vp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, p)) != 0)
			allerror = error;
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	/*
	 * Force stale file system control information to be flushed.
	 */
	if ((error = VOP_FSYNC(ump->um_devvp, cred,
	    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, p)) != 0)
		allerror = error;
#ifdef QUOTA
	qsync(mp);
#endif
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0) {
		fs->fs_fmod = 0;
		fs->fs_time = time.tv_sec;
		allerror = ffs_cgupdate(ump, waitfor);
	}
	return (allerror);
}

/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ffs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;
	caddr_t cp;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
	do {
		if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
			return (0);
	} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
		*vpp = NULL;
		lockmgr(&ufs_hashlock, LK_RELEASE, 0);
		return (error);
	}
	/*
	 * XXX MFS ends up here, too, to allocate an inode.  Should we
	 * XXX create another pool for MFS inodes?
	 */
	ip = pool_get(&ffs_inode_pool, PR_WAITOK);
	memset((caddr_t)ip, 0, sizeof(struct inode));
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
#ifdef QUOTA
	{
		int i;

		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);
	lockmgr(&ufs_hashlock, LK_RELEASE, 0);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
	cp = (caddr_t)bp->b_data + (ino_to_fsbo(fs, ino) * DINODE_SIZE);
#ifdef FFS_EI
	if (UFS_MPNEEDSWAP(mp))
		ffs_dinode_swap((struct dinode *)cp, &ip->i_din.ffs_din);
	else
#endif
		memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		ip->i_ffs_uid = ip->i_din.ffs_din.di_ouid;	/* XXX */
		ip->i_ffs_gid = ip->i_din.ffs_din.di_ogid;	/* XXX */
	}							/* XXX */

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via. exflagsp and credanonp
 */
int
ffs_fhtovp(mp, fhp, vpp)
	register struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	register struct ufid *ufhp;
	struct fs *fs;

	ufhp = (struct ufid *)fhp;
	fs = VFSTOUFS(mp)->um_fs;
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	return (ufs_fhtovp(mp, ufhp, vpp));
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
int
ffs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip;
	register struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_ffs_gen;
	return (0);
}

void
ffs_init()
{
	ufs_init();

	pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_FFSNODE);
}

int
ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	extern int doclusterread, doclusterwrite, doreallocblks, doasyncfree;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case FFS_CLUSTERREAD:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doclusterread));
	case FFS_CLUSTERWRITE:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doclusterwrite));
	case FFS_REALLOCBLKS:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doreallocblks));
	case FFS_ASYNCFREE:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *fs = mp->um_fs;
	register struct buf *bp;
	int i, error = 0;
	int32_t saved_nrpos = fs->fs_nrpos;
	int64_t saved_qbmask = fs->fs_qbmask;
	int64_t saved_qfmask = fs->fs_qfmask;
	u_int64_t saved_maxfilesize = fs->fs_maxfilesize;

	/* Restore compatibility to old file systems.		   XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = -1;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		int32_t *lp, tmp;				/* XXX */
								/* XXX */
		lp = (int32_t *)&fs->fs_qbmask;	/* XXX nuke qfmask too */
		tmp = lp[4];					/* XXX */
		for (i = 4; i > 0; i--)				/* XXX */
			lp[i] = lp[i-1];			/* XXX */
		lp[0] = tmp;					/* XXX */
	}							/* XXX */
	fs->fs_maxfilesize = mp->um_savedmaxfilesize;		/* XXX */

	bp = getblk(mp->um_devvp, SBOFF >> (fs->fs_fshift - fs->fs_fsbtodb),
	    (int)fs->fs_sbsize, 0, 0);
	memcpy(bp->b_data, fs, fs->fs_sbsize);
#ifdef FFS_EI
	if (mp->um_flags & UFS_NEEDSWAP)
		ffs_sb_swap(fs, (struct fs*)bp->b_data, 1);
#endif

	fs->fs_nrpos = saved_nrpos;				/* XXX */
	fs->fs_qbmask = saved_qbmask;				/* XXX */
	fs->fs_qfmask = saved_qfmask;				/* XXX */
	fs->fs_maxfilesize = saved_maxfilesize;			/* XXX */

	if (waitfor == MNT_WAIT)
		error = bwrite(bp);
	else
		bawrite(bp);
	return (error);
}

int
ffs_cgupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *fs = mp->um_fs;
	register struct buf *bp;
	int blks;
	caddr_t space;
	int i, size, error = 0, allerror = 0;

	allerror = ffs_sbupdate(mp, waitfor);
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = (caddr_t)fs->fs_csp[0];
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
		    size, 0, 0);
#ifdef FFS_EI
		if (mp->um_flags & UFS_NEEDSWAP)
			ffs_csum_swap((struct csum*)space,
			    (struct csum*)bp->b_data, size);
		else
#endif
			memcpy(bp->b_data, space, (u_int)size);
		space += size;
		if (waitfor == MNT_WAIT)
			error = bwrite(bp);
		else
			bawrite(bp);
	}
	if (!allerror && error)
		allerror = error;
	return (allerror);
}