/*	$NetBSD: ffs_vfsops.c,v 1.107 2003/02/17 23:48:14 perseant Exp $	*/

/*
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ffs_vfsops.c,v 1.107 2003/02/17 23:48:14 perseant Exp $");

#if defined(_KERNEL_OPT)
#include "opt_ffs.h"
#include "opt_quota.h"
#include "opt_compat_netbsd.h"
#include "opt_softdep.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <sys/sysctl.h>
#include <sys/conf.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufs_bswap.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>

/* how many times ffs_init() was called */
int ffs_initcount = 0;

extern struct lock ufs_hashlock;

extern const struct vnodeopv_desc ffs_vnodeop_opv_desc;
extern const struct vnodeopv_desc ffs_specop_opv_desc;
extern const struct vnodeopv_desc ffs_fifoop_opv_desc;

const struct vnodeopv_desc * const ffs_vnodeopv_descs[] = {
	&ffs_vnodeop_opv_desc,
	&ffs_specop_opv_desc,
	&ffs_fifoop_opv_desc,
	NULL,
};

struct vfsops ffs_vfsops = {
	MOUNT_FFS,
	ffs_mount,
	ufs_start,
	ffs_unmount,
	ufs_root,
	ufs_quotactl,
	ffs_statfs,
	ffs_sync,
	ffs_vget,
	ffs_fhtovp,
	ffs_vptofh,
	ffs_init,
	ffs_reinit,
	ffs_done,
	ffs_sysctl,
	ffs_mountroot,
	ufs_check_export,
	ffs_vnodeopv_descs,
};

struct genfs_ops ffs_genfsops = {
	ffs_gop_size,
	ufs_gop_alloc,
	genfs_gop_write,
};

struct pool ffs_inode_pool;

/*
 * Called by main() when ffs is going to be mounted as root.
 */

int
ffs_mountroot()
{
	struct fs *fs;
	struct mount *mp;
	struct proc *p = curproc;	/* XXX */
	struct ufsmount *ump;
	int error;

	if (root_device->dv_class != DV_DISK)
		return (ENODEV);

	/*
	 * Get vnodes for rootdev.
	 */
	if (bdevvp(rootdev, &rootvp))
		panic("ffs_mountroot: can't setup bdevvp's");

	if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
		vrele(rootvp);
		return (error);
	}
	if ((error = ffs_mountfs(rootvp, mp, p)) != 0) {
		mp->mnt_op->vfs_refcount--;
		vfs_unbusy(mp);
		free(mp, M_MOUNT);
		vrele(rootvp);
		return (error);
	}
	simple_lock(&mountlist_slock);
	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	simple_unlock(&mountlist_slock);
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
	(void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
	(void)ffs_statfs(mp, &mp->mnt_stat, p);
	vfs_unbusy(mp);
	inittodr(fs->fs_time);
	return (0);
}

/*
 * VFS Operations.
 *
 * mount system call
 */
int
ffs_mount(mp, path, data, ndp, p)
	struct mount *mp;
	const char *path;
	void *data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp = NULL;
	struct ufs_args args;
	struct ufsmount *ump = NULL;
	struct fs *fs;
	size_t size;
	int error, flags, update;
	mode_t accessmode;

	if (mp->mnt_flag & MNT_GETARGS) {
		ump = VFSTOUFS(mp);
		if (ump == NULL)
			return EIO;
		args.fspec = NULL;
		vfs_showexport(mp, &args.export, &ump->um_export);
		return copyout(&args, data, sizeof(args));
	}
	error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args));
	if (error)
		return (error);

#if !defined(SOFTDEP)
	mp->mnt_flag &= ~MNT_SOFTDEP;
#endif

	update = mp->mnt_flag & MNT_UPDATE;

	/* Check arguments */
	if (update) {
		/* Use the extant mount */
		ump = VFSTOUFS(mp);
		devvp = ump->um_devvp;
		if (args.fspec == NULL)
			vref(devvp);
	} else {
		/* New mounts must have a filename for the device */
		if (args.fspec == NULL)
			return (EINVAL);
	}

	if (args.fspec != NULL) {
		/*
		 * Look up the name and verify that it's sane.
		 */
		NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
		if ((error = namei(ndp)) != 0)
			return (error);
		devvp = ndp->ni_vp;

		if (!update) {
			/*
			 * Be sure this is a valid block device
			 */
			if (devvp->v_type != VBLK)
				error = ENOTBLK;
			else if (bdevsw_lookup(devvp->v_rdev) == NULL)
				error = ENXIO;
		} else {
			/*
			 * Be sure we're still naming the same device
			 * used for our initial mount
			 */
			if (devvp != ump->um_devvp)
				error = EINVAL;
		}
	}

	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (error == 0 && p->p_ucred->cr_uid != 0) {
		accessmode = VREAD;
		if (update ?
		    (mp->mnt_flag & MNT_WANTRDWR) != 0 :
		    (mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
		VOP_UNLOCK(devvp, 0);
	}

	if (error) {
		vrele(devvp);
		return (error);
	}

	if (!update) {
		error = ffs_mountfs(devvp, mp, p);
		if (error) {
			vrele(devvp);
			return (error);
		}

		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
		    (MNT_SOFTDEP | MNT_ASYNC)) {
			printf("%s fs uses soft updates, "
			    "ignoring async mode\n",
			    fs->fs_fsmnt);
			mp->mnt_flag &= ~MNT_ASYNC;
		}
	} else {
		/*
		 * Update the mount.
		 */

		/*
		 * The initial mount got a reference on this
		 * device, so drop the one obtained via
		 * namei(), above.
		 */
		vrele(devvp);

		fs = ump->um_fs;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			/*
			 * Changing from r/w to r/o
			 */
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mp->mnt_flag & MNT_SOFTDEP)
				error = softdep_flushfiles(mp, flags, p);
			else
				error = ffs_flushfiles(mp, flags, p);
			if (fs->fs_pendingblocks != 0 ||
			    fs->fs_pendinginodes != 0) {
				printf("%s: update error: blocks %d files %d\n",
				    fs->fs_fsmnt, fs->fs_pendingblocks,
				    fs->fs_pendinginodes);
				fs->fs_pendingblocks = 0;
				fs->fs_pendinginodes = 0;
			}
			if (error == 0 &&
			    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
			    fs->fs_clean & FS_WASCLEAN) {
				if (mp->mnt_flag & MNT_SOFTDEP)
					fs->fs_flags &= ~FS_DOSOFTDEP;
				fs->fs_clean = FS_ISCLEAN;
				(void) ffs_sbupdate(ump, MNT_WAIT);
			}
			if (error)
				return (error);
			fs->fs_ronly = 1;
			fs->fs_fmod = 0;
		}

		/*
		 * Flush soft dependencies if disabling it via an update
		 * mount. This may leave some items to be processed,
		 * so don't do this yet XXX.
		 */
		if ((fs->fs_flags & FS_DOSOFTDEP) &&
		    !(mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
#ifdef notyet
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = softdep_flushfiles(mp, flags, p);
			if (error == 0 && ffs_cgupdate(ump, MNT_WAIT) == 0)
				fs->fs_flags &= ~FS_DOSOFTDEP;
			(void) ffs_sbupdate(ump, MNT_WAIT);
#elif defined(SOFTDEP)
			mp->mnt_flag |= MNT_SOFTDEP;
#endif
		}

		/*
		 * When upgrading to a softdep mount, we must first flush
		 * all vnodes. (not done yet -- see above)
		 */
		if (!(fs->fs_flags & FS_DOSOFTDEP) &&
		    (mp->mnt_flag & MNT_SOFTDEP) && fs->fs_ronly == 0) {
#ifdef notyet
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			error = ffs_flushfiles(mp, flags, p);
#else
			mp->mnt_flag &= ~MNT_SOFTDEP;
#endif
		}

		if (mp->mnt_flag & MNT_RELOAD) {
			error = ffs_reload(mp, p->p_ucred, p);
			if (error)
				return (error);
		}

		if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
			/*
			 * Changing from read-only to read/write
			 */
			fs->fs_ronly = 0;
			fs->fs_clean <<= 1;
			fs->fs_fmod = 1;
			if ((fs->fs_flags & FS_DOSOFTDEP)) {
				error = softdep_mount(devvp, mp, fs,
				    p->p_ucred);
				if (error)
					return (error);
			}
		}
		if (args.fspec == 0) {
			/*
			 * Process export requests.
			 */
			return (vfs_export(mp, &ump->um_export, &args.export));
		}
		if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
		    (MNT_SOFTDEP | MNT_ASYNC)) {
			printf("%s fs uses soft updates, ignoring async mode\n",
			    fs->fs_fsmnt);
			mp->mnt_flag &= ~MNT_ASYNC;
		}
	}

	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	memset(fs->fs_fsmnt + size, 0, sizeof(fs->fs_fsmnt) - size);
	memcpy(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	memset(mp->mnt_stat.f_mntfromname + size, 0, MNAMELEN - size);
	if (mp->mnt_flag & MNT_SOFTDEP)
		fs->fs_flags |= FS_DOSOFTDEP;
	else
		fs->fs_flags &= ~FS_DOSOFTDEP;
	if (fs->fs_fmod != 0) {	/* XXX */
		fs->fs_fmod = 0;
		if (fs->fs_clean & FS_WASCLEAN)
			fs->fs_time = time.tv_sec;
		else {
			printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
			    mp->mnt_stat.f_mntfromname, fs->fs_clean);
			printf("%s: lost blocks %d files %d\n",
			    mp->mnt_stat.f_mntfromname, fs->fs_pendingblocks,
			    fs->fs_pendinginodes);
		}
		(void) ffs_cgupdate(ump, MNT_WAIT);
	}
	return (0);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 * 1) invalidate all cached meta-data.
 * 2) re-read superblock from disk.
 * 3) re-read summary information from disk.
 * 4) invalidate all inactive vnodes.
 * 5) invalidate all cached file data.
 * 6) re-read inode data for all active vnodes.
 */
int
ffs_reload(mountp, cred, p)
	struct mount *mountp;
	struct ucred *cred;
	struct proc *p;
{
	struct vnode *vp, *nvp, *devvp;
	struct inode *ip;
	void *space;
	struct buf *bp;
	struct fs *fs, *newfs;
	struct partinfo dpart;
	int i, blks, size, error;
	int32_t *lp;
	caddr_t cp;

	if ((mountp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mountp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, 0, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0);
	if (error)
		panic("ffs_reload: dirty1");
	/*
	 * Step 2: re-read superblock from disk.
	 */
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;
	error = bread(devvp, (daddr_t)(SBOFF / size), SBSIZE, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	fs = VFSTOUFS(mountp)->um_fs;
	newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
	memcpy(newfs, bp->b_data, fs->fs_sbsize);
#ifdef FFS_EI
	if (VFSTOUFS(mountp)->um_flags & UFS_NEEDSWAP) {
		ffs_sb_swap((struct fs*)bp->b_data, newfs);
		fs->fs_flags |= FS_SWAPPED;
	}
#endif
	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		free(newfs, M_UFSMNT);
		return (EIO);		/* XXX needs translation */
	}
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	newfs->fs_csp = fs->fs_csp;
	newfs->fs_maxcluster = fs->fs_maxcluster;
	newfs->fs_contigdirs = fs->fs_contigdirs;
	newfs->fs_ronly = fs->fs_ronly;
	memcpy(fs, newfs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	free(newfs, M_UFSMNT);

	/* Recheck for apple UFS filesystem */
	VFSTOUFS(mountp)->um_flags &= ~UFS_ISAPPLEUFS;
	/* First check to see if this is tagged as an Apple UFS filesystem
	 * in the disklabel
	 */
	if ((VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) == 0) &&
	    (dpart.part->p_fstype == FS_APPLEUFS)) {
		VFSTOUFS(mountp)->um_flags |= UFS_ISAPPLEUFS;
	}
#ifdef APPLE_UFS
	else {
		/* Manually look for an apple ufs label, and if a valid one
		 * is found, then treat it like an Apple UFS filesystem anyway
		 */
		error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
		    APPLEUFS_LABEL_SIZE, cred, &bp);
		if (error) {
			brelse(bp);
			return (error);
		}
		error = ffs_appleufs_validate(fs->fs_fsmnt,
		    (struct appleufslabel *)bp->b_data,NULL);
		if (error == 0) {
			VFSTOUFS(mountp)->um_flags |= UFS_ISAPPLEUFS;
		}
		brelse(bp);
		bp = NULL;
	}
#else
	if (VFSTOUFS(mountp)->um_flags & UFS_ISAPPLEUFS)
		return (EIO);
#endif

	mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	if (UFS_MPISAPPLEUFS(mountp)) {
		/* see comment about NeXT below */
		mountp->mnt_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
	}
	ffs_oldfscompat(fs);
	/* An old fsck may have zeroed these fields, so recheck them. */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}

	ffs_statfs(mountp, &mountp->mnt_stat, p);
	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = fs->fs_csp;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error) {
			brelse(bp);
			return (error);
		}
#ifdef FFS_EI
		if (UFS_FSNEEDSWAP(fs))
			ffs_csum_swap((struct csum *)bp->b_data,
			    (struct csum *)space, size);
		else
#endif
			memcpy(space, bp->b_data, (size_t)size);
		space = (char *)space + size;
		brelse(bp);
	}
	if ((fs->fs_flags & FS_DOSOFTDEP))
		softdep_mount(devvp, mountp, fs, cred);
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		lp = fs->fs_maxcluster;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}

loop:
	simple_lock(&mntvnode_slock);
	for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		if (vp->v_mount != mountp) {
			simple_unlock(&mntvnode_slock);
			goto loop;
		}
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		if (vrecycle(vp, &mntvnode_slock, p))
			goto loop;
		/*
		 * Step 5: invalidate all cached file data.
		 */
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
			goto loop;
		if (vinvalbuf(vp, 0, cred, p, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			vput(vp);
			return (error);
		}
		cp = (caddr_t)bp->b_data +
		    (ino_to_fsbo(fs, ip->i_number) * DINODE_SIZE);
#ifdef FFS_EI
		if (UFS_FSNEEDSWAP(fs))
			ffs_dinode_swap((struct dinode *)cp,
			    &ip->i_din.ffs_din);
		else
#endif
			memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
		ip->i_ffs_effnlink = ip->i_ffs_nlink;
		brelse(bp);
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}

/*
 * Common code for mount and mountroot
 */
int
ffs_mountfs(devvp, mp, p)
	struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	dev_t dev;
	struct partinfo dpart;
	void *space;
	int blks;
	int error, i, size, ronly;
#ifdef FFS_EI
	int needswap;
#endif
	int32_t *lp;
	struct ucred *cred;
	u_int64_t maxfilesize;		/* XXX */
	u_int32_t sbsize;

	dev = devvp->v_rdev;
	cred = p ? p->p_ucred : NOCRED;
	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)) != 0)
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0);
	if (error)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
	if (error)
		return (error);
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;

	bp = NULL;
	ump = NULL;
	error = bread(devvp, (daddr_t)(SBOFF / size), SBSIZE, cred, &bp);
	if (error)
		goto out;

	fs = (struct fs*)bp->b_data;
	if (fs->fs_magic == FS_MAGIC) {
		sbsize = fs->fs_sbsize;
#ifdef FFS_EI
		needswap = 0;
	} else if (fs->fs_magic == bswap32(FS_MAGIC)) {
		sbsize = bswap32(fs->fs_sbsize);
		needswap = 1;
#endif
	} else {
		error = EINVAL;
		goto out;
	}
	if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs)) {
		error = EINVAL;
		goto out;
	}

	fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
	memcpy(fs, bp->b_data, sbsize);
#ifdef FFS_EI
	if (needswap) {
		ffs_sb_swap((struct fs*)bp->b_data, fs);
		fs->fs_flags |= FS_SWAPPED;
	}
#endif
	ffs_oldfscompat(fs);

	if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < sizeof(struct fs)) {
		error = EINVAL;
		goto out;
	}
	/* make sure cylinder group summary area is a reasonable size. */
	if (fs->fs_cgsize == 0 || fs->fs_cpg == 0 ||
	    fs->fs_ncg > fs->fs_ncyl / fs->fs_cpg + 1 ||
	    fs->fs_cssize >
	    fragroundup(fs, fs->fs_ncg * sizeof(struct csum))) {
		error = EINVAL;		/* XXX needs translation */
		goto out2;
	}
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
	}
	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
		error = EROFS;		/* XXX what should be returned? */
		goto out2;
	}

	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	memset((caddr_t)ump, 0, sizeof *ump);
	ump->um_fs = fs;
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;

	/* First check to see if this is tagged as an Apple UFS filesystem
	 * in the disklabel
	 */
	if ((VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) == 0) &&
	    (dpart.part->p_fstype == FS_APPLEUFS)) {
		ump->um_flags |= UFS_ISAPPLEUFS;
	}
#ifdef APPLE_UFS
	else {
		/* Manually look for an apple ufs label, and if a valid one
		 * is found, then treat it like an Apple UFS filesystem anyway
		 */
		error = bread(devvp, (daddr_t)(APPLEUFS_LABEL_OFFSET / size),
		    APPLEUFS_LABEL_SIZE, cred, &bp);
		if (error)
			goto out;
		error = ffs_appleufs_validate(fs->fs_fsmnt,
		    (struct appleufslabel *)bp->b_data,NULL);
		if (error == 0) {
			ump->um_flags |= UFS_ISAPPLEUFS;
		}
		brelse(bp);
		bp = NULL;
	}
#else
	if (ump->um_flags & UFS_ISAPPLEUFS) {
		error = EINVAL;
		goto out;
	}
#endif

	/*
	 * verify that we can access the last block in the fs
	 * if we're mounting read/write.
	 */

	if (!ronly) {
		error = bread(devvp, fsbtodb(fs, fs->fs_size - 1), fs->fs_fsize,
		    cred, &bp);
		if (bp->b_bcount != fs->fs_fsize)
			error = EINVAL;
		bp->b_flags |= B_INVAL;
		if (error)
			goto out;
		brelse(bp);
		bp = NULL;
	}

	fs->fs_ronly = ronly;
	if (ronly == 0) {
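		/*
		 * Shift FS_ISCLEAN up to FS_WASCLEAN: remember that the
		 * file system was clean at mount time while it is writable.
		 */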
		fs->fs_clean <<= 1;
		fs->fs_fmod = 1;
	}
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	size += fs->fs_ncg * sizeof(*fs->fs_contigdirs);
	space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	fs->fs_csp = space;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    cred, &bp);
		if (error) {
			free(fs->fs_csp, M_UFSMNT);
			goto out2;
		}
#ifdef FFS_EI
		if (needswap)
			ffs_csum_swap((struct csum *)bp->b_data,
			    (struct csum *)space, size);
		else
#endif
			memcpy(space, bp->b_data, (u_int)size);

		space = (char *)space + size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
		space = lp;
	}
	size = fs->fs_ncg * sizeof(*fs->fs_contigdirs);
	fs->fs_contigdirs = space;
	space = (char *)space + size;
	memset(fs->fs_contigdirs, 0, size);
	/* Compatibility for old filesystems - XXX */
	if (fs->fs_avgfilesize <= 0)
		fs->fs_avgfilesize = AVFILESIZ;
	if (fs->fs_avgfpdir <= 0)
		fs->fs_avgfpdir = AFPDIR;
	mp->mnt_data = ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_FFS);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	if (UFS_MPISAPPLEUFS(mp)) {
		/* NeXT used to keep short symlinks in the inode even
		 * when using FS_42INODEFMT. In that case fs->fs_maxsymlinklen
		 * is probably -1, but we still need to be able to identify
		 * short symlinks.
		 */
		mp->mnt_maxsymlinklen = APPLEUFS_MAXSYMLINKLEN;
	}
	mp->mnt_fs_bshift = fs->fs_bshift;
	mp->mnt_dev_bshift = DEV_BSHIFT;	/* XXX */
	mp->mnt_flag |= MNT_LOCAL;
#ifdef FFS_EI
	if (needswap)
		ump->um_flags |= UFS_NEEDSWAP;
#endif
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
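	/* fs_nindir is a power of two, so ffs() returns log2(fs_nindir) + 1. */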
	ump->um_lognindir = ffs(fs->fs_nindir) - 1;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specmountpoint = mp;
	ump->um_savedmaxfilesize = fs->fs_maxfilesize;	/* XXX */
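	/*
	 * XXX clamp fs_maxfilesize to 2^31 blocks of fs_bsize bytes each,
	 * apparently because logical block numbers are kept in signed
	 * 32-bit variables here.
	 */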
	maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;	/* XXX */
	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
		fs->fs_maxfilesize = maxfilesize;		/* XXX */
	if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
		error = softdep_mount(devvp, mp, fs, cred);
		if (error) {
			free(fs->fs_csp, M_UFSMNT);
			goto out;
		}
	}
	return (0);
out2:
	free(fs, M_UFSMNT);
out:
	devvp->v_specmountpoint = NULL;
	if (bp)
		brelse(bp);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
	VOP_UNLOCK(devvp, 0);
	if (ump) {
		free(ump, M_UFSMNT);
		mp->mnt_data = NULL;
	}
	return (error);
}

/*
 * Sanity checks for old file systems.
 *
 * XXX - goes away some day.
 */
int
ffs_oldfscompat(fs)
	struct fs *fs;
{
	int i;

	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = 8;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		u_int64_t sizepb = fs->fs_bsize;		/* XXX */
								/* XXX */
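		/*
		 * NDADDR direct blocks, plus NIADDR levels of indirect
		 * blocks, each level covering NINDIR(fs) times as much.
		 */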
		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
		for (i = 0; i < NIADDR; i++) {			/* XXX */
			sizepb *= NINDIR(fs);			/* XXX */
			fs->fs_maxfilesize += sizepb;		/* XXX */
		}						/* XXX */
		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
	}							/* XXX */
	return (0);
}

/*
 * unmount system call
 */
int
ffs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	struct ufsmount *ump;
	struct fs *fs;
	int error, flags, penderr;

	penderr = 0;
	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if (mp->mnt_flag & MNT_SOFTDEP) {
		if ((error = softdep_flushfiles(mp, flags, p)) != 0)
			return (error);
	} else {
		if ((error = ffs_flushfiles(mp, flags, p)) != 0)
			return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0) {
		printf("%s: unmount pending error: blocks %d files %d\n",
		    fs->fs_fsmnt, fs->fs_pendingblocks, fs->fs_pendinginodes);
		fs->fs_pendingblocks = 0;
		fs->fs_pendinginodes = 0;
		penderr = 1;
	}
	if (fs->fs_ronly == 0 &&
	    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
	    fs->fs_clean & FS_WASCLEAN) {
		/*
		 * XXXX don't mark fs clean in the case of softdep
		 * pending block errors, until they are fixed.
		 */
		if (penderr == 0) {
			if (mp->mnt_flag & MNT_SOFTDEP)
				fs->fs_flags &= ~FS_DOSOFTDEP;
			fs->fs_clean = FS_ISCLEAN;
		}
		(void) ffs_sbupdate(ump, MNT_WAIT);
	}
	if (ump->um_devvp->v_type != VBAD)
		ump->um_devvp->v_specmountpoint = NULL;
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
	    NOCRED, p);
	vput(ump->um_devvp);
	free(fs->fs_csp, M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = NULL;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, p)
	struct mount *mp;
	int flags;
	struct proc *p;
{
	extern int doforce;
	struct ufsmount *ump;
	int error;

	if (!doforce)
		flags &= ~FORCECLOSE;
	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	/*
	 * Flush all the files.
	 */
	error = vflush(mp, NULLVP, flags);
	if (error)
		return (error);
	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, p->p_ucred, FSYNC_WAIT, 0, 0, p);
	VOP_UNLOCK(ump->um_devvp, 0);
	return (error);
}

/*
 * Get file system statistics.
 */
int
ffs_statfs(mp, sbp, p)
	struct mount *mp;
	struct statfs *sbp;
	struct proc *p;
{
	struct ufsmount *ump;
	struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ffs_statfs");
#ifdef COMPAT_09
	sbp->f_type = 1;
#else
	sbp->f_type = 0;
#endif
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = blkstofrags(fs, fs->fs_cstotal.cs_nbfree) +
	    fs->fs_cstotal.cs_nffree + dbtofsb(fs, fs->fs_pendingblocks);
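	/*
	 * f_bavail is the space available to unprivileged users: total
	 * data space less the fs_minfree reserve, less what is in use.
	 */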
	sbp->f_bavail = (long) (((u_int64_t) fs->fs_dsize * (u_int64_t)
	    (100 - fs->fs_minfree) / (u_int64_t) 100) -
	    (u_int64_t) (fs->fs_dsize - sbp->f_bfree));
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree + fs->fs_pendinginodes;
	if (sbp != &mp->mnt_stat) {
		memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
		memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
	}
	strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
ffs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	struct vnode *vp, *nvp;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, allerror = 0;

	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {	/* XXX */
		printf("fs = %s\n", fs->fs_fsmnt);
		panic("update: rofs mod");
	}
	/*
	 * Write back each (modified) inode.
	 */
	simple_lock(&mntvnode_slock);
loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp != NULL; vp = nvp) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		simple_lock(&vp->v_interlock);
		nvp = LIST_NEXT(vp, v_mntvnodes);
		ip = VTOI(vp);
		if (vp->v_type == VNON ||
		    ((ip->i_flag &
		      (IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFIED | IN_ACCESSED)) == 0 &&
		     LIST_EMPTY(&vp->v_dirtyblkhd) &&
		     vp->v_uobj.uo_npages == 0))
		{
			simple_unlock(&vp->v_interlock);
			continue;
		}
		simple_unlock(&mntvnode_slock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			simple_lock(&mntvnode_slock);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		if ((error = VOP_FSYNC(vp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p)) != 0)
			allerror = error;
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	/*
	 * Force stale file system control information to be flushed.
	 */
	if (waitfor != MNT_LAZY) {
		if (ump->um_mountp->mnt_flag & MNT_SOFTDEP)
			waitfor = MNT_NOWAIT;
		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
		if ((error = VOP_FSYNC(ump->um_devvp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0, p)) != 0)
			allerror = error;
		VOP_UNLOCK(ump->um_devvp, 0);
	}
#ifdef QUOTA
	qsync(mp);
#endif
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0) {
		fs->fs_fmod = 0;
		fs->fs_time = time.tv_sec;
		if ((error = ffs_cgupdate(ump, waitfor)))
			allerror = error;
	}
	return (allerror);
}

/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ffs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;
	caddr_t cp;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;

	if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
		return (0);

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
		*vpp = NULL;
		return (error);
	}

	/*
	 * If someone beat us to it while sleeping in getnewvnode(),
	 * push back the freshly allocated vnode we don't need, and return.
	 */

	do {
		if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
			ungetnewvnode(vp);
			return (0);
		}
	} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));

	/*
	 * XXX MFS ends up here, too, to allocate an inode.  Should we
	 * XXX create another pool for MFS inodes?
	 */

	ip = pool_get(&ffs_inode_pool, PR_WAITOK);
	memset(ip, 0, sizeof(struct inode));
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
	LIST_INIT(&ip->i_pcbufhd);
#ifdef QUOTA
	{
		int i;

		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif

	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */

	ufs_ihashins(ip);
	lockmgr(&ufs_hashlock, LK_RELEASE, 0);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {

		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain. With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */

		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
	cp = (caddr_t)bp->b_data + (ino_to_fsbo(fs, ino) * DINODE_SIZE);
#ifdef FFS_EI
	if (UFS_FSNEEDSWAP(fs))
		ffs_dinode_swap((struct dinode *)cp, &ip->i_din.ffs_din);
	else
#endif
		memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_ffs_effnlink = ip->i_ffs_nlink;
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */

	ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);

	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */

	genfs_node_init(vp, &ffs_genfsops);
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);

	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */

	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		ip->i_ffs_uid = ip->i_din.ffs_din.di_ouid;	/* XXX */
		ip->i_ffs_gid = ip->i_din.ffs_din.di_ogid;	/* XXX */
	}							/* XXX */
	uvm_vnp_setsize(vp, ip->i_ffs_size);
	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
 */
int
ffs_fhtovp(mp, fhp, vpp)
	struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	struct ufid *ufhp;
	struct fs *fs;

	ufhp = (struct ufid *)fhp;
	fs = VFSTOUFS(mp)->um_fs;
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	return (ufs_fhtovp(mp, ufhp, vpp));
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
int
ffs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	struct inode *ip;
	struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_ffs_gen;
	return (0);
}

void
ffs_init()
{
	if (ffs_initcount++ > 0)
		return;

	softdep_initialize();
	ufs_init();

	pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
	    &pool_allocator_nointr);
}

void
ffs_reinit()
{
	softdep_reinitialize();
	ufs_reinit();
}

void
ffs_done()
{
	if (--ffs_initcount > 0)
		return;

	/* XXX softdep cleanup ? */
	ufs_done();
	pool_destroy(&ffs_inode_pool);
}

int
ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	extern int doasyncfree;
	extern int ffs_log_changeopt;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case FFS_ASYNCFREE:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
	case FFS_LOG_CHANGEOPT:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &ffs_log_changeopt));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	struct fs *fs = mp->um_fs;
	struct buf *bp;
	int i, error = 0;
	int32_t saved_nrpos = fs->fs_nrpos;
	int64_t saved_qbmask = fs->fs_qbmask;
	int64_t saved_qfmask = fs->fs_qfmask;
	u_int64_t saved_maxfilesize = fs->fs_maxfilesize;
	u_int8_t saveflag;

	/* Restore compatibility to old file systems.	XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = -1;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		int32_t *lp, tmp;				/* XXX */
								/* XXX */
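		/*
		 * Pre-4.4 superblocks apparently store these five words in
		 * a different (rotated) order on disk; rotate them back
		 * before writing the superblock out.
		 */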
		lp = (int32_t *)&fs->fs_qbmask;	/* XXX nuke qfmask too */
		tmp = lp[4];					/* XXX */
		for (i = 4; i > 0; i--)				/* XXX */
			lp[i] = lp[i-1];			/* XXX */
		lp[0] = tmp;					/* XXX */
	}							/* XXX */
	fs->fs_maxfilesize = mp->um_savedmaxfilesize;		/* XXX */

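	/*
	 * SBOFF is in bytes: >> fs_fshift gives frags, << fs_fsbtodb gives
	 * device blocks, hence the combined shift below.
	 */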
	bp = getblk(mp->um_devvp, SBOFF >> (fs->fs_fshift - fs->fs_fsbtodb),
	    (int)fs->fs_sbsize, 0, 0);
	saveflag = fs->fs_flags & FS_INTERNAL;
	fs->fs_flags &= ~FS_INTERNAL;
	memcpy(bp->b_data, fs, fs->fs_sbsize);
#ifdef FFS_EI
	if (mp->um_flags & UFS_NEEDSWAP)
		ffs_sb_swap(fs, (struct fs*)bp->b_data);
#endif

	fs->fs_flags |= saveflag;
	fs->fs_nrpos = saved_nrpos;			/* XXX */
	fs->fs_qbmask = saved_qbmask;			/* XXX */
	fs->fs_qfmask = saved_qfmask;			/* XXX */
	fs->fs_maxfilesize = saved_maxfilesize;		/* XXX */

	if (waitfor == MNT_WAIT)
		error = bwrite(bp);
	else
		bawrite(bp);
	return (error);
}

int
ffs_cgupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	struct fs *fs = mp->um_fs;
	struct buf *bp;
	int blks;
	void *space;
	int i, size, error = 0, allerror = 0;

	allerror = ffs_sbupdate(mp, waitfor);
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = fs->fs_csp;
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
		    size, 0, 0);
#ifdef FFS_EI
		if (mp->um_flags & UFS_NEEDSWAP)
			ffs_csum_swap((struct csum*)space,
			    (struct csum*)bp->b_data, size);
		else
#endif
			memcpy(bp->b_data, space, (u_int)size);
		space = (char *)space + size;
		if (waitfor == MNT_WAIT)
			error = bwrite(bp);
		else
			bawrite(bp);
	}
	if (!allerror && error)
		allerror = error;
	return (allerror);
}