/*	$NetBSD: ffs_vfsops.c,v 1.48 1999/02/26 23:44:49 wrstuden Exp $	*/

/*
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#if defined(_KERNEL) && !defined(_LKM)
#include "opt_ffs.h"
#include "opt_quota.h"
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <vm/vm.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufs_bswap.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

extern struct lock ufs_hashlock;

int ffs_sbupdate __P((struct ufsmount *, int));

extern struct vnodeopv_desc ffs_vnodeop_opv_desc;
extern struct vnodeopv_desc ffs_specop_opv_desc;
extern struct vnodeopv_desc ffs_fifoop_opv_desc;

struct vnodeopv_desc *ffs_vnodeopv_descs[] = {
	&ffs_vnodeop_opv_desc,
	&ffs_specop_opv_desc,
	&ffs_fifoop_opv_desc,
	NULL,
};

struct vfsops ffs_vfsops = {
	MOUNT_FFS,
	ffs_mount,
	ufs_start,
	ffs_unmount,
	ufs_root,
	ufs_quotactl,
	ffs_statfs,
	ffs_sync,
	ffs_vget,
	ffs_fhtovp,
	ffs_vptofh,
	ffs_init,
	ffs_sysctl,
	ffs_mountroot,
	ufs_check_export,
	ffs_vnodeopv_descs,
};

struct pool ffs_inode_pool;

/*
 * Called by main() when ffs is going to be mounted as root.
 */

int
ffs_mountroot()
{
	extern struct vnode *rootvp;
	struct fs *fs;
	struct mount *mp;
	struct proc *p = curproc;	/* XXX */
	struct ufsmount *ump;
	int error;

	if (root_device->dv_class != DV_DISK)
		return (ENODEV);

	/*
	 * Get vnodes for rootdev.
	 */
	if (bdevvp(rootdev, &rootvp))
		panic("ffs_mountroot: can't setup bdevvp's");

	if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp)))
		return (error);
	if ((error = ffs_mountfs(rootvp, mp, p)) != 0) {
		mp->mnt_op->vfs_refcount--;
		vfs_unbusy(mp);
		free(mp, M_MOUNT);
		return (error);
	}
	simple_lock(&mountlist_slock);
	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	simple_unlock(&mountlist_slock);
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
	(void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
	(void)ffs_statfs(mp, &mp->mnt_stat, p);
	vfs_unbusy(mp);
	inittodr(fs->fs_time);
	return (0);
}

/*
 * VFS Operations.
 *
 * mount system call
 */
int
ffs_mount(mp, path, data, ndp, p)
	register struct mount *mp;
	const char *path;
	void *data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump = NULL;
	register struct fs *fs;
	size_t size;
	int error, flags;
	mode_t accessmode;

	error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args));
	if (error)
		return (error);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ffs_flushfiles(mp, flags, p);
			if (error == 0 &&
			    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
			    fs->fs_clean & FS_WASCLEAN) {
				fs->fs_clean = FS_ISCLEAN;
				(void) ffs_sbupdate(ump, MNT_WAIT);
			}
			if (error)
				return (error);
			fs->fs_ronly = 1;
		}
		if (mp->mnt_flag & MNT_RELOAD) {
			error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
			if (error)
				return (error);
		}
		if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			if (p->p_ucred->cr_uid != 0) {
				devvp = ump->um_devvp;
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
				error = VOP_ACCESS(devvp, VREAD | VWRITE,
				    p->p_ucred, p);
				VOP_UNLOCK(devvp, 0);
				if (error)
					return (error);
			}
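			/*
			 * Going read/write: shifting fs_clean left turns
			 * FS_ISCLEAN into FS_WASCLEAN, recording that the
			 * file system was clean when it went read/write;
			 * unmount restores FS_ISCLEAN (see ffs_unmount()).
			 */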
			fs->fs_ronly = 0;
			fs->fs_clean <<= 1;
			fs->fs_fmod = 1;
		}
		if (args.fspec == 0) {
			/*
			 * Process export requests.
			 */
			return (vfs_export(mp, &ump->um_export, &args.export));
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	if ((error = namei(ndp)) != 0)
		return (error);
	devvp = ndp->ni_vp;

	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}
	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (p->p_ucred->cr_uid != 0) {
		accessmode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
		VOP_UNLOCK(devvp, 0);
		if (error) {
			vrele(devvp);
			return (error);
		}
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0)
		error = ffs_mountfs(devvp, mp, p);
	else {
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	memset(fs->fs_fsmnt + size, 0, sizeof(fs->fs_fsmnt) - size);
	memcpy(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	memset(mp->mnt_stat.f_mntfromname + size, 0, MNAMELEN - size);
	if (fs->fs_fmod != 0) {	/* XXX */
		fs->fs_fmod = 0;
		if (fs->fs_clean & FS_WASCLEAN)
			fs->fs_time = time.tv_sec;
		else
			printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
			    mp->mnt_stat.f_mntfromname, fs->fs_clean);
		(void) ffs_cgupdate(ump, MNT_WAIT);
	}
	return (0);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 * 1) invalidate all cached meta-data.
 * 2) re-read superblock from disk.
 * 3) re-read summary information from disk.
 * 4) invalidate all inactive vnodes.
 * 5) invalidate all cached file data.
 * 6) re-read inode data for all active vnodes.
 */
int
ffs_reload(mountp, cred, p)
	register struct mount *mountp;
	struct ucred *cred;
	struct proc *p;
{
	register struct vnode *vp, *nvp, *devvp;
	struct inode *ip;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct partinfo dpart;
	int i, blks, size, error;
	int32_t *lp;
	caddr_t cp;

	if ((mountp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mountp)->um_devvp;
	if (vinvalbuf(devvp, 0, cred, p, 0, 0))
		panic("ffs_reload: dirty1");
	/*
	 * Step 2: re-read superblock from disk.
	 */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;
	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	fs = VFSTOUFS(mountp)->um_fs;
	newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
	memcpy(newfs, bp->b_data, fs->fs_sbsize);
#ifdef FFS_EI
	if (VFSTOUFS(mountp)->um_flags & UFS_NEEDSWAP)
		ffs_sb_swap((struct fs*)bp->b_data, newfs, 0);
#endif
	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		free(newfs, M_UFSMNT);
		return (EIO);	/* XXX needs translation */
	}
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	memcpy(&newfs->fs_csp[0], &fs->fs_csp[0], sizeof(fs->fs_csp));
	newfs->fs_maxcluster = fs->fs_maxcluster;
	memcpy(fs, newfs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	free(newfs, M_UFSMNT);
	mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat(fs);
	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error) {
			brelse(bp);
			return (error);
		}
#ifdef FFS_EI
		if (UFS_MPNEEDSWAP(mountp))
			ffs_csum_swap((struct csum*)bp->b_data,
			    (struct csum*)fs->fs_csp[fragstoblks(fs, i)], size);
		else
#endif
			memcpy(fs->fs_csp[fragstoblks(fs, i)], bp->b_data,
			    (size_t)size);
		brelse(bp);
	}
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		lp = fs->fs_maxcluster;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}

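	/*
	 * Steps 4-6 walk the list of vnodes attached to this mount;
	 * the scan restarts from the head whenever the list changes
	 * underneath us or a vnode is reclaimed while we sleep.
	 */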
loop:
	simple_lock(&mntvnode_slock);
	for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		if (vp->v_mount != mountp) {
			simple_unlock(&mntvnode_slock);
			goto loop;
		}
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		if (vrecycle(vp, &mntvnode_slock, p))
			goto loop;
		/*
		 * Step 5: invalidate all cached file data.
		 */
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
			goto loop;
		if (vinvalbuf(vp, 0, cred, p, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			vput(vp);
			return (error);
		}
		cp = (caddr_t)bp->b_data +
		    (ino_to_fsbo(fs, ip->i_number) * DINODE_SIZE);
#ifdef FFS_EI
		if (UFS_MPNEEDSWAP(mountp))
			ffs_dinode_swap((struct dinode *)cp,
			    &ip->i_din.ffs_din);
		else
#endif
			memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
		brelse(bp);
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}

/*
 * Common code for mount and mountroot
 */
int
ffs_mountfs(devvp, mp, p)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	dev_t dev;
	struct partinfo dpart;
	caddr_t base, space;
	int blks;
	int error, i, size, ronly, needswap;
	int32_t *lp;
	struct ucred *cred;
	extern struct vnode *rootvp;
	u_int64_t maxfilesize;	/* XXX */
	u_int32_t sbsize;

	dev = devvp->v_rdev;
	cred = p ? p->p_ucred : NOCRED;
	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)) != 0)
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	if ((error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0)) != 0)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
	if (error)
		return (error);
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;

	bp = NULL;
	ump = NULL;
	error = bread(devvp, (ufs_daddr_t)(SBOFF / size), SBSIZE, cred, &bp);
	if (error)
		goto out;

	fs = (struct fs*)bp->b_data;
	if (fs->fs_magic == FS_MAGIC) {
		needswap = 0;
		sbsize = fs->fs_sbsize;
#ifdef FFS_EI
	} else if (fs->fs_magic == bswap32(FS_MAGIC)) {
		needswap = 1;
		sbsize = bswap32(fs->fs_sbsize);
#endif
	} else {
		error = EINVAL;
		goto out;
	}
	if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < sizeof(struct fs)) {
		error = EINVAL;
		goto out;
	}

	fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
	memcpy(fs, bp->b_data, sbsize);
#ifdef FFS_EI
	if (needswap)
		ffs_sb_swap((struct fs*)bp->b_data, fs, 0);
#endif

	/* make sure cylinder group summary area is a reasonable size. */
	if (fs->fs_cgsize == 0 || fs->fs_cpg == 0 ||
	    fs->fs_ncg > fs->fs_ncyl / fs->fs_cpg + 1 ||
	    fs->fs_cssize >
	    fragroundup(fs, fs->fs_ncg * sizeof(struct csum))) {
		error = EINVAL;	/* XXX needs translation */
		goto out2;
	}
	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
		error = EROFS;	/* XXX what should be returned? */
		goto out2;
	}
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	memset((caddr_t)ump, 0, sizeof *ump);
	ump->um_fs = fs;
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;
	fs->fs_ronly = ronly;
	if (ronly == 0) {
		fs->fs_clean <<= 1;
		fs->fs_fmod = 1;
	}
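	/*
	 * Allocate the cylinder group summary area, with room for the
	 * per-cylinder-group cluster summary (fs_maxcluster) appended
	 * when clustering is in use.
	 */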
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	base = space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    cred, &bp);
		if (error) {
			free(base, M_UFSMNT);
			goto out2;
		}
#ifdef FFS_EI
		if (needswap)
			ffs_csum_swap((struct csum*)bp->b_data,
			    (struct csum*)space, size);
		else
#endif
			memcpy(space, bp->b_data, (u_int)size);

		fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space;
		space += size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = (int32_t *)space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_FFS);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	mp->mnt_flag |= MNT_LOCAL;
#ifdef FFS_EI
	if (needswap)
		ump->um_flags |= UFS_NEEDSWAP;
#endif
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specflags |= SI_MOUNTEDON;
	ffs_oldfscompat(fs);
	ump->um_savedmaxfilesize = fs->fs_maxfilesize;	/* XXX */
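	/*
	 * Clamp the in-core fs_maxfilesize to 2^31 file system blocks
	 * (logical block numbers are 32-bit ufs_daddr_t quantities); the
	 * on-disk value saved above is put back by ffs_sbupdate().
	 */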
	maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;	/* XXX */
	if (fs->fs_maxfilesize > maxfilesize)	/* XXX */
		fs->fs_maxfilesize = maxfilesize;	/* XXX */
	return (0);
out2:
	free(fs, M_UFSMNT);
out:
	if (bp)
		brelse(bp);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
	if (ump) {
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * Sanity checks for old file systems.
 *
 * XXX - goes away some day.
 */
int
ffs_oldfscompat(fs)
	struct fs *fs;
{
	int i;

	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
	fs->fs_interleave = max(fs->fs_interleave, 1);	/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)	/* XXX */
		fs->fs_nrpos = 8;	/* XXX */
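	/*
	 * File systems that predate the 4.4 inode format carry no valid
	 * fs_maxfilesize or fs_qbmask/fs_qfmask; derive them here:
	 * NDADDR direct blocks plus the blocks reachable through each
	 * level of indirection, and the masks from bmask/fmask.
	 */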
	if (fs->fs_inodefmt < FS_44INODEFMT) {	/* XXX */
		u_int64_t sizepb = fs->fs_bsize;	/* XXX */
							/* XXX */
		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
		for (i = 0; i < NIADDR; i++) {	/* XXX */
			sizepb *= NINDIR(fs);	/* XXX */
			fs->fs_maxfilesize += sizepb;	/* XXX */
		}	/* XXX */
		fs->fs_qbmask = ~fs->fs_bmask;	/* XXX */
		fs->fs_qfmask = ~fs->fs_fmask;	/* XXX */
	}	/* XXX */
	return (0);
}

/*
 * unmount system call
 */
int
ffs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;
	int error, flags;

	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if ((error = ffs_flushfiles(mp, flags, p)) != 0)
		return (error);
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_ronly == 0 &&
	    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
	    fs->fs_clean & FS_WASCLEAN) {
		fs->fs_clean = FS_ISCLEAN;
		(void) ffs_sbupdate(ump, MNT_WAIT);
	}
	ump->um_devvp->v_specflags &= ~SI_MOUNTEDON;
	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
	    NOCRED, p);
	vrele(ump->um_devvp);
	free(fs->fs_csp[0], M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, p)
	register struct mount *mp;
	int flags;
	struct proc *p;
{
	extern int doforce;
	register struct ufsmount *ump;
	int error;

	if (!doforce)
		flags &= ~FORCECLOSE;
	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	error = vflush(mp, NULLVP, flags);
	return (error);
}

/*
 * Get file system statistics.
 */
int
ffs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ffs_statfs");
#ifdef COMPAT_09
	sbp->f_type = 1;
#else
	sbp->f_type = 0;
#endif
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree;
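	/*
	 * f_bavail is what unprivileged users may still allocate: the
	 * share of the data area outside the minfree reserve, less the
	 * blocks already in use (fs_dsize - f_bfree).
	 */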
	sbp->f_bavail = (long) (((u_int64_t) fs->fs_dsize * (u_int64_t)
	    (100 - fs->fs_minfree) / (u_int64_t) 100) -
	    (u_int64_t) (fs->fs_dsize - sbp->f_bfree));
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
	if (sbp != &mp->mnt_stat) {
		memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
		memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
	}
	strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
ffs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	struct vnode *vp, *nvp;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, allerror = 0;

	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {	/* XXX */
		printf("fs = %s\n", fs->fs_fsmnt);
		panic("update: rofs mod");
	}
	/*
	 * Write back each (modified) inode.
	 */
	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		simple_lock(&vp->v_interlock);
		nvp = vp->v_mntvnodes.le_next;
		ip = VTOI(vp);
		if ((ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0 &&
		    vp->v_dirtyblkhd.lh_first == NULL) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		simple_unlock(&mntvnode_slock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			simple_lock(&mntvnode_slock);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		if ((error = VOP_FSYNC(vp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, p)) != 0)
			allerror = error;
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	/*
	 * Force stale file system control information to be flushed.
	 */
	if ((error = VOP_FSYNC(ump->um_devvp, cred,
	    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, p)) != 0)
		allerror = error;
#ifdef QUOTA
	qsync(mp);
#endif
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0) {
		fs->fs_fmod = 0;
		fs->fs_time = time.tv_sec;
		allerror = ffs_cgupdate(ump, waitfor);
	}
	return (allerror);
}

/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk. If it is in core, wait for the lock bit to clear, then
 * return the inode locked. Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ffs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;
	caddr_t cp;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
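	/*
	 * Check the inode hash; LK_SLEEPFAIL makes lockmgr() fail after
	 * sleeping for ufs_hashlock, so the lookup is retried in case
	 * another process set up this inode while we slept.
	 */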
	do {
		if ((*vpp = ufs_ihashget(dev, ino)) != NULL)
			return (0);
	} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
		*vpp = NULL;
		lockmgr(&ufs_hashlock, LK_RELEASE, 0);
		return (error);
	}
	/*
	 * XXX MFS ends up here, too, to allocate an inode. Should we
	 * XXX create another pool for MFS inodes?
	 */
	ip = pool_get(&ffs_inode_pool, PR_WAITOK);
	memset((caddr_t)ip, 0, sizeof(struct inode));
	lockinit(&ip->i_lock, PINOD, "inode", 0, 0);
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
#ifdef QUOTA
	{
		int i;

		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);
	lockmgr(&ufs_hashlock, LK_RELEASE, 0);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
	cp = (caddr_t)bp->b_data + (ino_to_fsbo(fs, ino) * DINODE_SIZE);
#ifdef FFS_EI
	if (UFS_MPNEEDSWAP(mp))
		ffs_dinode_swap((struct dinode *)cp, &ip->i_din.ffs_din);
	else
#endif
		memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {	/* XXX */
		ip->i_ffs_uid = ip->i_din.ffs_din.di_ouid;	/* XXX */
		ip->i_ffs_gid = ip->i_din.ffs_din.di_ogid;	/* XXX */
	}	/* XXX */

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via. exflagsp and credanonp
 */
int
ffs_fhtovp(mp, fhp, vpp)
	register struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	register struct ufid *ufhp;
	struct fs *fs;

	ufhp = (struct ufid *)fhp;
	fs = VFSTOUFS(mp)->um_fs;
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	return (ufs_fhtovp(mp, ufhp, vpp));
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
int
ffs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip;
	register struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_ffs_gen;
	return (0);
}

void
ffs_init()
{
	ufs_init();

	pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_FFSNODE);
}

int
ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	extern int doclusterread, doclusterwrite, doreallocblks, doasyncfree;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);	/* overloaded */

	switch (name[0]) {
	case FFS_CLUSTERREAD:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doclusterread));
	case FFS_CLUSTERWRITE:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doclusterwrite));
	case FFS_REALLOCBLKS:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doreallocblks));
	case FFS_ASYNCFREE:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *fs = mp->um_fs;
	register struct buf *bp;
	int i, error = 0;
	int32_t saved_nrpos = fs->fs_nrpos;
	int64_t saved_qbmask = fs->fs_qbmask;
	int64_t saved_qfmask = fs->fs_qfmask;
	u_int64_t saved_maxfilesize = fs->fs_maxfilesize;

	/* Restore compatibility to old file systems.	XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)	/* XXX */
		fs->fs_nrpos = -1;	/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {	/* XXX */
		int32_t *lp, tmp;	/* XXX */
					/* XXX */
		lp = (int32_t *)&fs->fs_qbmask;	/* XXX nuke qfmask too */
		tmp = lp[4];	/* XXX */
		for (i = 4; i > 0; i--)	/* XXX */
			lp[i] = lp[i-1];	/* XXX */
		lp[0] = tmp;	/* XXX */
	}	/* XXX */
	fs->fs_maxfilesize = mp->um_savedmaxfilesize;	/* XXX */

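	/*
	 * SBOFF >> (fs_fshift - fs_fsbtodb) is the device block address
	 * of the superblock, i.e. btodb(SBOFF) written in terms of the
	 * file system shift constants.
	 */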
	bp = getblk(mp->um_devvp, SBOFF >> (fs->fs_fshift - fs->fs_fsbtodb),
	    (int)fs->fs_sbsize, 0, 0);
	memcpy(bp->b_data, fs, fs->fs_sbsize);
#ifdef FFS_EI
	if (mp->um_flags & UFS_NEEDSWAP)
		ffs_sb_swap(fs, (struct fs*)bp->b_data, 1);
#endif

	fs->fs_nrpos = saved_nrpos;	/* XXX */
	fs->fs_qbmask = saved_qbmask;	/* XXX */
	fs->fs_qfmask = saved_qfmask;	/* XXX */
	fs->fs_maxfilesize = saved_maxfilesize;	/* XXX */

	if (waitfor == MNT_WAIT)
		error = bwrite(bp);
	else
		bawrite(bp);
	return (error);
}

int
ffs_cgupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *fs = mp->um_fs;
	register struct buf *bp;
	int blks;
	caddr_t space;
	int i, size, error = 0, allerror = 0;

	allerror = ffs_sbupdate(mp, waitfor);
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = (caddr_t)fs->fs_csp[0];
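	/*
	 * Write the cylinder group summary information back one file
	 * system block at a time; the final transfer may be a partial
	 * block of fragments.
	 */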
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
		    size, 0, 0);
#ifdef FFS_EI
		if (mp->um_flags & UFS_NEEDSWAP)
			ffs_csum_swap((struct csum*)space,
			    (struct csum*)bp->b_data, size);
		else
#endif
			memcpy(bp->b_data, space, (u_int)size);
		space += size;
		if (waitfor == MNT_WAIT)
			error = bwrite(bp);
		else
			bawrite(bp);
	}
	if (!allerror && error)
		allerror = error;
	return (allerror);
}