/*	$NetBSD: ffs_vfsops.c,v 1.52.2.2 1999/12/27 18:36:37 wrstuden Exp $	*/

/*
 * Copyright (c) 1989, 1991, 1993, 1994
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ffs_vfsops.c	8.31 (Berkeley) 5/20/95
 */

#if defined(_KERNEL) && !defined(_LKM)
#include "opt_ffs.h"
#include "opt_quota.h"
#include "opt_compat_netbsd.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/proc.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/mount.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/mbuf.h>
#include <sys/file.h>
#include <sys/disklabel.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/lock.h>
#include <vm/vm.h>
#include <sys/sysctl.h>

#include <miscfs/specfs/specdev.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufs_extern.h>
#include <ufs/ufs/ufs_bswap.h>

#include <ufs/ffs/fs.h>
#include <ufs/ffs/ffs_extern.h>
extern struct lock ufs_hashlock;

int ffs_sbupdate __P((struct ufsmount *, int));

extern struct vnodeopv_desc ffs_vnodeop_opv_desc;
extern struct vnodeopv_desc ffs_specop_opv_desc;
extern struct vnodeopv_desc ffs_fifoop_opv_desc;

struct vnodeopv_desc *ffs_vnodeopv_descs[] = {
	&ffs_vnodeop_opv_desc,
	&ffs_specop_opv_desc,
	&ffs_fifoop_opv_desc,
	NULL,
};

struct vfsops ffs_vfsops = {
	MOUNT_FFS,
	ffs_mount,
	ufs_start,
	ffs_unmount,
	ufs_root,
	ufs_quotactl,
	ffs_statfs,
	ffs_sync,
	ffs_vget,
	ffs_fhtovp,
	ffs_vptofh,
	ffs_init,
	ffs_sysctl,
	ffs_mountroot,
	ufs_check_export,
	ffs_vnodeopv_descs,
};

struct pool ffs_inode_pool;

/*
 * Called by main() when ffs is going to be mounted as root.
 */

int
ffs_mountroot()
{
	extern struct vnode *rootvp;
	struct fs *fs;
	struct mount *mp;
	struct proc *p = curproc;	/* XXX */
	struct ufsmount *ump;
	int error;

	if (root_device->dv_class != DV_DISK)
		return (ENODEV);

	/*
	 * Get vnodes for rootdev.
	 */
	if (bdevvp(rootdev, &rootvp))
		panic("ffs_mountroot: can't setup bdevvp's");

	if ((error = vfs_rootmountalloc(MOUNT_FFS, "root_device", &mp))) {
		vrele(rootvp);
		return (error);
	}
	if ((error = ffs_mountfs(rootvp, mp, p)) != 0) {
		mp->mnt_op->vfs_refcount--;
		vfs_unbusy(mp);
		free(mp, M_MOUNT);
		vrele(rootvp);
		return (error);
	}
	simple_lock(&mountlist_slock);
	CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
	simple_unlock(&mountlist_slock);
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	memset(fs->fs_fsmnt, 0, sizeof(fs->fs_fsmnt));
	(void)copystr(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN - 1, 0);
	(void)ffs_statfs(mp, &mp->mnt_stat, p);
	vfs_unbusy(mp);
	inittodr(fs->fs_time);
	return (0);
}

/*
 * VFS Operations.
 *
 * mount system call
 */
int
ffs_mount(mp, path, data, ndp, p)
	register struct mount *mp;
	const char *path;
	void *data;
	struct nameidata *ndp;
	struct proc *p;
{
	struct vnode *devvp;
	struct ufs_args args;
	struct ufsmount *ump = NULL;
	register struct fs *fs;
	size_t size;
	int error, flags;
	mode_t accessmode;

	error = copyin(data, (caddr_t)&args, sizeof (struct ufs_args));
	if (error)
		return (error);
	/*
	 * If updating, check whether changing from read-only to
	 * read/write; if there is no device name, that's all we do.
	 */
	if (mp->mnt_flag & MNT_UPDATE) {
		ump = VFSTOUFS(mp);
		fs = ump->um_fs;
		if (fs->fs_ronly == 0 && (mp->mnt_flag & MNT_RDONLY)) {
			flags = WRITECLOSE;
			if (mp->mnt_flag & MNT_FORCE)
				flags |= FORCECLOSE;
			if (mp->mnt_flag & MNT_SOFTDEP)
				error = softdep_flushfiles(mp, flags, p);
			else
				error = ffs_flushfiles(mp, flags, p);
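			/*
			 * If the flush worked, the summary info made it to
			 * disk, and the file system was clean when it was
			 * mounted, mark it clean again before it goes
			 * read-only.
			 */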
			if (error == 0 &&
			    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
			    fs->fs_clean & FS_WASCLEAN) {
				fs->fs_clean = FS_ISCLEAN;
				(void) ffs_sbupdate(ump, MNT_WAIT);
			}
			if (error)
				return (error);
			fs->fs_ronly = 1;
		}
		if (mp->mnt_flag & MNT_RELOAD) {
			error = ffs_reload(mp, ndp->ni_cnd.cn_cred, p);
			if (error)
				return (error);
		}
		if (fs->fs_ronly && (mp->mnt_flag & MNT_WANTRDWR)) {
			/*
			 * If upgrade to read-write by non-root, then verify
			 * that user has necessary permissions on the device.
			 */
			devvp = ump->um_devvp;
			if (p->p_ucred->cr_uid != 0) {
				vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
				error = VOP_ACCESS(devvp, VREAD | VWRITE,
				    p->p_ucred, p);
				VOP_UNLOCK(devvp, 0);
				if (error)
					return (error);
			}
			fs->fs_ronly = 0;
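			/*
			 * Going read/write: shift FS_ISCLEAN up to
			 * FS_WASCLEAN so we remember that the file system
			 * was clean at mount time, and mark the superblock
			 * modified.
			 */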
			fs->fs_clean <<= 1;
			fs->fs_fmod = 1;
			if ((fs->fs_flags & FS_DOSOFTDEP)) {
				error = softdep_mount(devvp, mp, fs,
				    p->p_ucred);
				if (error)
					return (error);
			} else
				mp->mnt_flag &= ~MNT_SOFTDEP;
		}
		if (args.fspec == 0) {
			/*
			 * Process export requests.
			 */
			return (vfs_export(mp, &ump->um_export, &args.export));
		}
		if ((mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
		    (MNT_SOFTDEP | MNT_ASYNC)) {
			printf("%s fs uses soft updates, ignoring async mode\n",
			    fs->fs_fsmnt);
			mp->mnt_flag &= ~MNT_ASYNC;
		}
	}
	/*
	 * Not an update, or updating the name: look up the name
	 * and verify that it refers to a sensible block device.
	 */
	NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
	if ((error = namei(ndp)) != 0)
		return (error);
	devvp = ndp->ni_vp;

	if (devvp->v_type != VBLK) {
		vrele(devvp);
		return (ENOTBLK);
	}
	if (major(devvp->v_rdev) >= nblkdev) {
		vrele(devvp);
		return (ENXIO);
	}
	/*
	 * If mount by non-root, then verify that user has necessary
	 * permissions on the device.
	 */
	if (p->p_ucred->cr_uid != 0) {
		accessmode = VREAD;
		if ((mp->mnt_flag & MNT_RDONLY) == 0)
			accessmode |= VWRITE;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
		error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
		VOP_UNLOCK(devvp, 0);
		if (error) {
			vrele(devvp);
			return (error);
		}
	}
	if ((mp->mnt_flag & MNT_UPDATE) == 0) {
		error = ffs_mountfs(devvp, mp, p);
		if (!error && (mp->mnt_flag & (MNT_SOFTDEP | MNT_ASYNC)) ==
		    (MNT_SOFTDEP | MNT_ASYNC)) {
			/* Note: "fs" is not set up yet on this path. */
			printf("%s fs uses soft updates, ignoring async mode\n",
			    VFSTOUFS(mp)->um_fs->fs_fsmnt);
			mp->mnt_flag &= ~MNT_ASYNC;
		}
	}
	else {
		if (devvp != ump->um_devvp)
			error = EINVAL;	/* needs translation */
		else
			vrele(devvp);
	}
	if (error) {
		vrele(devvp);
		return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	(void) copyinstr(path, fs->fs_fsmnt, sizeof(fs->fs_fsmnt) - 1, &size);
	memset(fs->fs_fsmnt + size, 0, sizeof(fs->fs_fsmnt) - size);
	memcpy(mp->mnt_stat.f_mntonname, fs->fs_fsmnt, MNAMELEN);
	(void) copyinstr(args.fspec, mp->mnt_stat.f_mntfromname, MNAMELEN - 1,
	    &size);
	memset(mp->mnt_stat.f_mntfromname + size, 0, MNAMELEN - size);
	if (fs->fs_fmod != 0) {	/* XXX */
		fs->fs_fmod = 0;
		if (fs->fs_clean & FS_WASCLEAN)
			fs->fs_time = time.tv_sec;
		else
			printf("%s: file system not clean (fs_clean=%x); please fsck(8)\n",
			    mp->mnt_stat.f_mntfromname, fs->fs_clean);
		(void) ffs_cgupdate(ump, MNT_WAIT);
	}
	return (0);
}

/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 * 1) invalidate all cached meta-data.
 * 2) re-read superblock from disk.
 * 3) re-read summary information from disk.
 * 4) invalidate all inactive vnodes.
 * 5) invalidate all cached file data.
 * 6) re-read inode data for all active vnodes.
 */
int
ffs_reload(mountp, cred, p)
	register struct mount *mountp;
	struct ucred *cred;
	struct proc *p;
{
	register struct vnode *vp, *nvp, *devvp;
	struct inode *ip;
	struct buf *bp;
	struct fs *fs, *newfs;
	int i, blks, size, error;
	int32_t *lp;
	caddr_t cp;

	if ((mountp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = VFSTOUFS(mountp)->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, 0, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0);
	if (error)
		panic("ffs_reload: dirty1");
	/*
	 * Step 2: re-read superblock from disk.
	 */
#if 0
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, NOCRED, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;
#endif
	if ((mountp->mnt_bshift = devvp->v_specbshift) <= 0)
		return (EINVAL);

	error = bread(devvp, (ufs_daddr_t)(SBOFF >> devvp->v_specbshift),
	    SBSIZE, NOCRED, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	fs = VFSTOUFS(mountp)->um_fs;
	newfs = malloc(fs->fs_sbsize, M_UFSMNT, M_WAITOK);
	memcpy(newfs, bp->b_data, fs->fs_sbsize);
#ifdef FFS_EI
	if (VFSTOUFS(mountp)->um_flags & UFS_NEEDSWAP) {
		ffs_sb_swap((struct fs*)bp->b_data, newfs, 0);
		fs->fs_flags |= FS_SWAPPED;
	}
#endif
	if (newfs->fs_magic != FS_MAGIC || newfs->fs_bsize > MAXBSIZE ||
	    newfs->fs_bsize < sizeof(struct fs)) {
		brelse(bp);
		free(newfs, M_UFSMNT);
		return (EIO);		/* XXX needs translation */
	}
	/*
	 * Copy pointer fields back into superblock before copying in	XXX
	 * new superblock. These should really be in the ufsmount.	XXX
	 * Note that important parameters (eg fs_ncg) are unchanged.
	 */
	memcpy(&newfs->fs_csp[0], &fs->fs_csp[0], sizeof(fs->fs_csp));
	newfs->fs_maxcluster = fs->fs_maxcluster;
	newfs->fs_fsbtodb = fs->fs_fsbtodb;
	memcpy(fs, newfs, (u_int)fs->fs_sbsize);
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	free(newfs, M_UFSMNT);
	mountp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	ffs_oldfscompat(fs);
	ffs_statfs(mountp, &mountp->mnt_stat, p);
	/*
	 * Step 3: re-read summary information from disk.
	 */
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    NOCRED, &bp);
		if (error) {
			brelse(bp);
			return (error);
		}
#ifdef FFS_EI
		if (UFS_FSNEEDSWAP(fs))
			ffs_csum_swap((struct csum*)bp->b_data,
			    (struct csum*)fs->fs_csp[fragstoblks(fs, i)], size);
		else
#endif
			memcpy(fs->fs_csp[fragstoblks(fs, i)], bp->b_data,
			    (size_t)size);
		brelse(bp);
	}
	if ((fs->fs_flags & FS_DOSOFTDEP))
		softdep_mount(devvp, mountp, fs, cred);
	else
		mountp->mnt_flag &= ~MNT_SOFTDEP;
	/*
	 * We no longer know anything about clusters per cylinder group.
	 */
	if (fs->fs_contigsumsize > 0) {
		lp = fs->fs_maxcluster;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}

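	/*
	 * Restart the scan from the head of the vnode list whenever it
	 * may have changed while we slept.
	 */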
loop:
	simple_lock(&mntvnode_slock);
	for (vp = mountp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		if (vp->v_mount != mountp) {
			simple_unlock(&mntvnode_slock);
			goto loop;
		}
		nvp = vp->v_mntvnodes.le_next;
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		if (vrecycle(vp, &mntvnode_slock, p))
			goto loop;
		/*
		 * Step 5: invalidate all cached file data.
		 */
		simple_lock(&vp->v_interlock);
		simple_unlock(&mntvnode_slock);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK))
			goto loop;
		if (vinvalbuf(vp, 0, cred, p, 0, 0))
			panic("ffs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error = bread(devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->fs_bsize, NOCRED, &bp);
		if (error) {
			brelse(bp);
			vput(vp);
			return (error);
		}
		cp = (caddr_t)bp->b_data +
		    (ino_to_fsbo(fs, ip->i_number) * DINODE_SIZE);
#ifdef FFS_EI
		if (UFS_FSNEEDSWAP(fs))
			ffs_dinode_swap((struct dinode *)cp,
			    &ip->i_din.ffs_din);
		else
#endif
			memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
		ip->i_ffs_effnlink = ip->i_ffs_nlink;
		brelse(bp);
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	return (0);
}

/*
 * Common code for mount and mountroot
 */
int
ffs_mountfs(devvp, mp, p)
	register struct vnode *devvp;
	struct mount *mp;
	struct proc *p;
{
	struct ufsmount *ump;
	struct buf *bp;
	struct fs *fs;
	dev_t dev;
	caddr_t base, space;
	int blks;
	int error, i, size, ronly;
#ifdef FFS_EI
	int needswap;
#endif
	int32_t *lp;
	struct ucred *cred;
	extern struct vnode *rootvp;
	u_int64_t maxfilesize;					/* XXX */
	u_int32_t sbsize;

	dev = devvp->v_rdev;
	cred = p ? p->p_ucred : NOCRED;
	/*
	 * Disallow multiple mounts of the same device.
	 * Disallow mounting of a device that is currently in use
	 * (except for root, which might share swap device for miniroot).
	 * Flush out any old buffers remaining from a previous use.
	 */
	if ((error = vfs_mountedon(devvp)) != 0)
		return (error);
	if (vcount(devvp) > 1 && devvp != rootvp)
		return (EBUSY);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0);
	VOP_UNLOCK(devvp, 0);
	if (error)
		return (error);

	ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
	error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
	if (error)
		return (error);
#if 0
	if (VOP_IOCTL(devvp, DIOCGPART, (caddr_t)&dpart, FREAD, cred, p) != 0)
		size = DEV_BSIZE;
	else
		size = dpart.disklab->d_secsize;
#endif
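	/*
	 * The device block size, kept as a shift count in v_specbshift,
	 * tells us where the superblock lives in device blocks; without
	 * it we cannot read the file system at all.
	 */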
	bp = NULL;
	ump = NULL;
	if ((mp->mnt_bshift = devvp->v_specbshift) <= 0) {
		error = EINVAL;
		goto out;
	}

	error = bread(devvp, (ufs_daddr_t)(SBOFF >> mp->mnt_bshift), SBSIZE,
	    cred, &bp);
	if (error)
		goto out;

	fs = (struct fs*)bp->b_data;
	if (fs->fs_magic == FS_MAGIC) {
		sbsize = fs->fs_sbsize;
#ifdef FFS_EI
		needswap = 0;
	} else if (fs->fs_magic == bswap32(FS_MAGIC)) {
		sbsize = bswap32(fs->fs_sbsize);
		needswap = 1;
#endif
	} else {
		error = EINVAL;
		goto out;
	}
	if (sbsize > MAXBSIZE || sbsize < sizeof(struct fs)) {
		error = EINVAL;
		goto out;
	}

	fs = malloc((u_long)sbsize, M_UFSMNT, M_WAITOK);
	memcpy(fs, bp->b_data, sbsize);
#ifdef FFS_EI
	if (needswap) {
		ffs_sb_swap((struct fs*)bp->b_data, fs, 0);
		fs->fs_flags |= FS_SWAPPED;
	}
#endif
	ffs_oldfscompat(fs);

	if (fs->fs_bsize > MAXBSIZE || fs->fs_bsize < sizeof(struct fs)) {
		error = EINVAL;
		goto out2;
	}
	/* make sure cylinder group summary area is a reasonable size. */
	if (fs->fs_cgsize == 0 || fs->fs_cpg == 0 ||
	    fs->fs_ncg > fs->fs_ncyl / fs->fs_cpg + 1 ||
	    fs->fs_cssize >
	    fragroundup(fs, fs->fs_ncg * sizeof(struct csum))) {
		error = EINVAL;		/* XXX needs translation */
		goto out2;
	}
	/* XXX updating 4.2 FFS superblocks trashes rotational layout tables */
	if (fs->fs_postblformat == FS_42POSTBLFMT && !ronly) {
		error = EROFS;		/* XXX what should be returned? */
		goto out2;
	}
	/* XXX bread assumes b_blkno in DEV_BSIZE unit. Calculate fsbtosb */
	/* XXX wrs - no, it doesn't. All we need to do is recalculate
	 * fs_fsbtodb based on our current media. */

	/* Make sure at most one fs frag per disk block */
	if (fs->fs_fshift < mp->mnt_bshift) {
		error = EINVAL;		/* XXX needs translation */
		goto out2;
	}
	fs->fs_fsbtodb = fs->fs_fshift - mp->mnt_bshift;
	ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK);
	memset((caddr_t)ump, 0, sizeof *ump);
	ump->um_fs = fs;
	if (fs->fs_sbsize < SBSIZE)
		bp->b_flags |= B_INVAL;
	brelse(bp);
	bp = NULL;
	fs->fs_ronly = ronly;
	if (ronly == 0) {
		fs->fs_clean <<= 1;
		fs->fs_fmod = 1;
	}
	size = fs->fs_cssize;
	blks = howmany(size, fs->fs_fsize);
	if (fs->fs_contigsumsize > 0)
		size += fs->fs_ncg * sizeof(int32_t);
	base = space = malloc((u_long)size, M_UFSMNT, M_WAITOK);
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		error = bread(devvp, fsbtodb(fs, fs->fs_csaddr + i), size,
		    cred, &bp);
		if (error) {
			free(base, M_UFSMNT);
			goto out2;
		}
#ifdef FFS_EI
		if (needswap)
			ffs_csum_swap((struct csum*)bp->b_data,
			    (struct csum*)space, size);
		else
#endif
			memcpy(space, bp->b_data, (u_int)size);

		fs->fs_csp[fragstoblks(fs, i)] = (struct csum *)space;
		space += size;
		brelse(bp);
		bp = NULL;
	}
	if (fs->fs_contigsumsize > 0) {
		fs->fs_maxcluster = lp = (int32_t *)space;
		for (i = 0; i < fs->fs_ncg; i++)
			*lp++ = fs->fs_contigsumsize;
	}
	mp->mnt_data = (qaddr_t)ump;
	mp->mnt_stat.f_fsid.val[0] = (long)dev;
	mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_FFS);
	mp->mnt_maxsymlinklen = fs->fs_maxsymlinklen;
	mp->mnt_flag |= MNT_LOCAL;
#ifdef FFS_EI
	if (needswap)
		ump->um_flags |= UFS_NEEDSWAP;
#endif
	ump->um_mountp = mp;
	ump->um_dev = dev;
	ump->um_devvp = devvp;
	ump->um_nindir = fs->fs_nindir;
	ump->um_bptrtodb = fs->fs_fsbtodb;
	ump->um_seqinc = fs->fs_frag;
	for (i = 0; i < MAXQUOTAS; i++)
		ump->um_quotas[i] = NULLVP;
	devvp->v_specmountpoint = mp;
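	/*
	 * Clamp the in-core fs_maxfilesize; the on-disk value is saved in
	 * um_savedmaxfilesize and put back by ffs_sbupdate() before the
	 * superblock is written out.
	 */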
	ump->um_savedmaxfilesize = fs->fs_maxfilesize;		/* XXX */
	maxfilesize = (u_int64_t)0x80000000 * fs->fs_bsize - 1;	/* XXX */
	if (fs->fs_maxfilesize > maxfilesize)			/* XXX */
		fs->fs_maxfilesize = maxfilesize;		/* XXX */
	if (ronly == 0 && (fs->fs_flags & FS_DOSOFTDEP)) {
		error = softdep_mount(devvp, mp, fs, cred);
		if (error) {
			free(base, M_UFSMNT);
			goto out2;
		}
	}
	return (0);
out2:
	free(fs, M_UFSMNT);
out:
	devvp->v_specmountpoint = NULL;
	if (bp)
		brelse(bp);
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	(void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
	VOP_UNLOCK(devvp, 0);
	if (ump) {
		free(ump, M_UFSMNT);
		mp->mnt_data = (qaddr_t)0;
	}
	return (error);
}

/*
 * Sanity checks for old file systems.
 *
 * XXX - goes away some day.
 */
int
ffs_oldfscompat(fs)
	struct fs *fs;
{
	int i;

	fs->fs_npsect = max(fs->fs_npsect, fs->fs_nsect);	/* XXX */
	fs->fs_interleave = max(fs->fs_interleave, 1);		/* XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = 8;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		u_int64_t sizepb = fs->fs_bsize;		/* XXX */
								/* XXX */
		fs->fs_maxfilesize = fs->fs_bsize * NDADDR - 1;	/* XXX */
		for (i = 0; i < NIADDR; i++) {			/* XXX */
			sizepb *= NINDIR(fs);			/* XXX */
			fs->fs_maxfilesize += sizepb;		/* XXX */
		}						/* XXX */
		fs->fs_qbmask = ~fs->fs_bmask;			/* XXX */
		fs->fs_qfmask = ~fs->fs_fmask;			/* XXX */
	}							/* XXX */
	return (0);
}

/*
 * unmount system call
 */
int
ffs_unmount(mp, mntflags, p)
	struct mount *mp;
	int mntflags;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;
	int error, flags;

	flags = 0;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;
	if (mp->mnt_flag & MNT_SOFTDEP) {
		if ((error = softdep_flushfiles(mp, flags, p)) != 0)
			return (error);
	} else {
		if ((error = ffs_flushfiles(mp, flags, p)) != 0)
			return (error);
	}
	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_ronly == 0 &&
	    ffs_cgupdate(ump, MNT_WAIT) == 0 &&
	    fs->fs_clean & FS_WASCLEAN) {
		fs->fs_clean = FS_ISCLEAN;
		(void) ffs_sbupdate(ump, MNT_WAIT);
	}
	if (ump->um_devvp->v_type != VBAD)
		ump->um_devvp->v_specmountpoint = NULL;
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_CLOSE(ump->um_devvp, fs->fs_ronly ? FREAD : FREAD|FWRITE,
	    NOCRED, p);
	vput(ump->um_devvp);
	free(fs->fs_csp[0], M_UFSMNT);
	free(fs, M_UFSMNT);
	free(ump, M_UFSMNT);
	mp->mnt_data = (qaddr_t)0;
	mp->mnt_flag &= ~MNT_LOCAL;
	return (error);
}

/*
 * Flush out all the files in a filesystem.
 */
int
ffs_flushfiles(mp, flags, p)
	register struct mount *mp;
	int flags;
	struct proc *p;
{
	extern int doforce;
	register struct ufsmount *ump;
	int error;

	if (!doforce)
		flags &= ~FORCECLOSE;
	ump = VFSTOUFS(mp);
#ifdef QUOTA
	if (mp->mnt_flag & MNT_QUOTA) {
		int i;
		if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0)
			return (error);
		for (i = 0; i < MAXQUOTAS; i++) {
			if (ump->um_quotas[i] == NULLVP)
				continue;
			quotaoff(p, mp, i);
		}
		/*
		 * Here we fall through to vflush again to ensure
		 * that we have gotten rid of all the system vnodes.
		 */
	}
#endif
	/*
	 * Flush all the files.
	 */
	error = vflush(mp, NULLVP, flags);
	if (error)
		return (error);
	/*
	 * Flush filesystem metadata.
	 */
	vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_FSYNC(ump->um_devvp, p->p_ucred, FSYNC_WAIT, p);
	VOP_UNLOCK(ump->um_devvp, 0);
	return (error);
}

/*
 * Get file system statistics.
 */
int
ffs_statfs(mp, sbp, p)
	struct mount *mp;
	register struct statfs *sbp;
	struct proc *p;
{
	register struct ufsmount *ump;
	register struct fs *fs;

	ump = VFSTOUFS(mp);
	fs = ump->um_fs;
	if (fs->fs_magic != FS_MAGIC)
		panic("ffs_statfs");
#ifdef COMPAT_09
	sbp->f_type = 1;
#else
	sbp->f_type = 0;
#endif
	sbp->f_bsize = fs->fs_fsize;
	sbp->f_iosize = fs->fs_bsize;
	sbp->f_blocks = fs->fs_dsize;
	sbp->f_bfree = fs->fs_cstotal.cs_nbfree * fs->fs_frag +
	    fs->fs_cstotal.cs_nffree;
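	/*
	 * Space available to non-superuser users: the share of fs_dsize
	 * left after the minfree reserve, minus what is already in use.
	 */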
	sbp->f_bavail = (long) (((u_int64_t) fs->fs_dsize * (u_int64_t)
	    (100 - fs->fs_minfree) / (u_int64_t) 100) -
	    (u_int64_t) (fs->fs_dsize - sbp->f_bfree));
	sbp->f_files = fs->fs_ncg * fs->fs_ipg - ROOTINO;
	sbp->f_ffree = fs->fs_cstotal.cs_nifree;
	if (sbp != &mp->mnt_stat) {
		memcpy(sbp->f_mntonname, mp->mnt_stat.f_mntonname, MNAMELEN);
		memcpy(sbp->f_mntfromname, mp->mnt_stat.f_mntfromname, MNAMELEN);
	}
	strncpy(sbp->f_fstypename, mp->mnt_op->vfs_name, MFSNAMELEN);
	return (0);
}

/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
ffs_sync(mp, waitfor, cred, p)
	struct mount *mp;
	int waitfor;
	struct ucred *cred;
	struct proc *p;
{
	struct vnode *vp, *nvp;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs;
	int error, allerror = 0;

	fs = ump->um_fs;
	if (fs->fs_fmod != 0 && fs->fs_ronly != 0) {		/* XXX */
		printf("fs = %s\n", fs->fs_fsmnt);
		panic("update: rofs mod");
	}
	/*
	 * Write back each (modified) inode.
	 */
	simple_lock(&mntvnode_slock);
loop:
	for (vp = mp->mnt_vnodelist.lh_first; vp != NULL; vp = nvp) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		simple_lock(&vp->v_interlock);
		nvp = vp->v_mntvnodes.le_next;
		ip = VTOI(vp);
		if ((vp->v_type == VNON || (ip->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) == 0) &&
		    (vp->v_dirtyblkhd.lh_first == NULL || waitfor == MNT_LAZY)) {
			simple_unlock(&vp->v_interlock);
			continue;
		}
		simple_unlock(&mntvnode_slock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			simple_lock(&mntvnode_slock);
			if (error == ENOENT)
				goto loop;
			continue;
		}
		if ((error = VOP_FSYNC(vp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, p)) != 0)
			allerror = error;
		vput(vp);
		simple_lock(&mntvnode_slock);
	}
	simple_unlock(&mntvnode_slock);
	/*
	 * Force stale file system control information to be flushed.
	 */
	if (waitfor != MNT_LAZY) {
		if (ump->um_mountp->mnt_flag & MNT_SOFTDEP)
			waitfor = MNT_NOWAIT;
		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
		if ((error = VOP_FSYNC(ump->um_devvp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, p)) != 0)
			allerror = error;
		VOP_UNLOCK(ump->um_devvp, 0);
	}
#ifdef QUOTA
	qsync(mp);
#endif
	/*
	 * Write back modified superblock.
	 */
	if (fs->fs_fmod != 0) {
		fs->fs_fmod = 0;
		fs->fs_time = time.tv_sec;
		allerror = ffs_cgupdate(ump, waitfor);
	}
	return (allerror);
}

/*
 * Look up a FFS dinode number to find its incore vnode, otherwise read it
 * in from disk.  If it is in core, wait for the lock bit to clear, then
 * return the inode locked.  Detection and handling of mount points must be
 * done by the calling routine.
 */
int
ffs_vget(mp, ino, vpp)
	struct mount *mp;
	ino_t ino;
	struct vnode **vpp;
{
	struct fs *fs;
	struct inode *ip;
	struct ufsmount *ump;
	struct buf *bp;
	struct vnode *vp;
	dev_t dev;
	int error;
	caddr_t cp;

	ump = VFSTOUFS(mp);
	dev = ump->um_dev;
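	/*
	 * Look for the inode in the hash table.  If it is not there, grab
	 * ufs_hashlock so that only one thread creates the new vnode; if
	 * the lock is lost while sleeping, another thread may have entered
	 * the inode in the meantime, so look again.
	 */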
	do {
		if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
			return (0);
	} while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));

	/* Allocate a new vnode/inode. */
	if ((error = getnewvnode(VT_UFS, mp, ffs_vnodeop_p, &vp)) != 0) {
		*vpp = NULL;
		lockmgr(&ufs_hashlock, LK_RELEASE, 0);
		return (error);
	}
	/*
	 * XXX MFS ends up here, too, to allocate an inode.  Should we
	 * XXX create another pool for MFS inodes?
	 */
	ip = pool_get(&ffs_inode_pool, PR_WAITOK);
	memset((caddr_t)ip, 0, sizeof(struct inode));
	vp->v_data = ip;
	ip->i_vnode = vp;
	ip->i_fs = fs = ump->um_fs;
	ip->i_dev = dev;
	ip->i_number = ino;
#ifdef QUOTA
	{
		int i;

		for (i = 0; i < MAXQUOTAS; i++)
			ip->i_dquot[i] = NODQUOT;
	}
#endif
	/*
	 * Put it onto its hash chain and lock it so that other requests for
	 * this inode will block if they arrive while we are sleeping waiting
	 * for old data structures to be purged or for the contents of the
	 * disk portion of this inode to be read.
	 */
	ufs_ihashins(ip);
	lockmgr(&ufs_hashlock, LK_RELEASE, 0);

	/* Read in the disk contents for the inode, copy into the inode. */
	error = bread(ump->um_devvp, fsbtodb(fs, ino_to_fsba(fs, ino)),
	    (int)fs->fs_bsize, NOCRED, &bp);
	if (error) {
		/*
		 * The inode does not contain anything useful, so it would
		 * be misleading to leave it on its hash chain.  With mode
		 * still zero, it will be unlinked and returned to the free
		 * list by vput().
		 */
		vput(vp);
		brelse(bp);
		*vpp = NULL;
		return (error);
	}
	cp = (caddr_t)bp->b_data + (ino_to_fsbo(fs, ino) * DINODE_SIZE);
#ifdef FFS_EI
	if (UFS_FSNEEDSWAP(fs))
		ffs_dinode_swap((struct dinode *)cp, &ip->i_din.ffs_din);
	else
#endif
		memcpy(&ip->i_din.ffs_din, cp, DINODE_SIZE);
	if (DOINGSOFTDEP(vp))
		softdep_load_inodeblock(ip);
	else
		ip->i_ffs_effnlink = ip->i_ffs_nlink;
	brelse(bp);

	/*
	 * Initialize the vnode from the inode, check for aliases.
	 * Note that the underlying vnode may have changed.
	 */
	error = ufs_vinit(mp, ffs_specop_p, ffs_fifoop_p, &vp);
	if (error) {
		vput(vp);
		*vpp = NULL;
		return (error);
	}
	/*
	 * Finish inode initialization now that aliasing has been resolved.
	 */
	ip->i_devvp = ump->um_devvp;
	VREF(ip->i_devvp);
	/*
	 * Ensure that uid and gid are correct.  This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		ip->i_ffs_uid = ip->i_din.ffs_din.di_ouid;	/* XXX */
		ip->i_ffs_gid = ip->i_din.ffs_din.di_ogid;	/* XXX */
	}							/* XXX */

	*vpp = vp;
	return (0);
}

/*
 * File handle to vnode
 *
 * Have to be really careful about stale file handles:
 * - check that the inode number is valid
 * - call ffs_vget() to get the locked inode
 * - check for an unallocated inode (i_mode == 0)
 * - check that the given client host has export rights and return
 *   those rights via. exflagsp and credanonp
 */
int
ffs_fhtovp(mp, fhp, vpp)
	register struct mount *mp;
	struct fid *fhp;
	struct vnode **vpp;
{
	register struct ufid *ufhp;
	struct fs *fs;

	ufhp = (struct ufid *)fhp;
	fs = VFSTOUFS(mp)->um_fs;
	if (ufhp->ufid_ino < ROOTINO ||
	    ufhp->ufid_ino >= fs->fs_ncg * fs->fs_ipg)
		return (ESTALE);
	return (ufs_fhtovp(mp, ufhp, vpp));
}

/*
 * Vnode pointer to File handle
 */
/* ARGSUSED */
int
ffs_vptofh(vp, fhp)
	struct vnode *vp;
	struct fid *fhp;
{
	register struct inode *ip;
	register struct ufid *ufhp;

	ip = VTOI(vp);
	ufhp = (struct ufid *)fhp;
	ufhp->ufid_len = sizeof(struct ufid);
	ufhp->ufid_ino = ip->i_number;
	ufhp->ufid_gen = ip->i_ffs_gen;
	return (0);
}

void
ffs_init()
{
	softdep_initialize();
	ufs_init();

	pool_init(&ffs_inode_pool, sizeof(struct inode), 0, 0, 0, "ffsinopl",
	    0, pool_page_alloc_nointr, pool_page_free_nointr, M_FFSNODE);
}

int
ffs_sysctl(name, namelen, oldp, oldlenp, newp, newlen, p)
	int *name;
	u_int namelen;
	void *oldp;
	size_t *oldlenp;
	void *newp;
	size_t newlen;
	struct proc *p;
{
	extern int doclusterread, doclusterwrite, doreallocblks, doasyncfree;

	/* all sysctl names at this level are terminal */
	if (namelen != 1)
		return (ENOTDIR);		/* overloaded */

	switch (name[0]) {
	case FFS_CLUSTERREAD:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doclusterread));
	case FFS_CLUSTERWRITE:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doclusterwrite));
	case FFS_REALLOCBLKS:
		return (sysctl_int(oldp, oldlenp, newp, newlen,
		    &doreallocblks));
	case FFS_ASYNCFREE:
		return (sysctl_int(oldp, oldlenp, newp, newlen, &doasyncfree));
	default:
		return (EOPNOTSUPP);
	}
	/* NOTREACHED */
}

/*
 * Write a superblock and associated information back to disk.
 */
int
ffs_sbupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *fs = mp->um_fs;
	register struct buf *bp;
	int i, error = 0;
	int32_t saved_nrpos = fs->fs_nrpos;
	int64_t saved_qbmask = fs->fs_qbmask;
	int64_t saved_qfmask = fs->fs_qfmask;
	u_int64_t saved_maxfilesize = fs->fs_maxfilesize;
	u_int8_t saveflag;

	/* Restore compatibility to old file systems.		   XXX */
	if (fs->fs_postblformat == FS_42POSTBLFMT)		/* XXX */
		fs->fs_nrpos = -1;				/* XXX */
	if (fs->fs_inodefmt < FS_44INODEFMT) {			/* XXX */
		int32_t *lp, tmp;				/* XXX */
								/* XXX */
		lp = (int32_t *)&fs->fs_qbmask;	/* XXX nuke qfmask too */
		tmp = lp[4];					/* XXX */
		for (i = 4; i > 0; i--)				/* XXX */
			lp[i] = lp[i-1];			/* XXX */
		lp[0] = tmp;					/* XXX */
	}							/* XXX */
	fs->fs_maxfilesize = mp->um_savedmaxfilesize;		/* XXX */

	bp = getblk(mp->um_devvp, SBOFF >> mp->um_mountp->mnt_bshift,
	    (int)fs->fs_sbsize, 0, 0);
	saveflag = fs->fs_flags & FS_INTERNAL;
	fs->fs_flags &= ~FS_INTERNAL;
	memcpy(bp->b_data, fs, fs->fs_sbsize);
#ifdef FFS_EI
	if (mp->um_flags & UFS_NEEDSWAP)
		ffs_sb_swap(fs, (struct fs*)bp->b_data, 1);
#endif

	fs->fs_flags |= saveflag;
	fs->fs_nrpos = saved_nrpos;				/* XXX */
	fs->fs_qbmask = saved_qbmask;				/* XXX */
	fs->fs_qfmask = saved_qfmask;				/* XXX */
	fs->fs_maxfilesize = saved_maxfilesize;			/* XXX */

	if (waitfor == MNT_WAIT)
		error = bwrite(bp);
	else
		bawrite(bp);
	return (error);
}

int
ffs_cgupdate(mp, waitfor)
	struct ufsmount *mp;
	int waitfor;
{
	register struct fs *fs = mp->um_fs;
	register struct buf *bp;
	int blks;
	caddr_t space;
	int i, size, error = 0, allerror = 0;

	allerror = ffs_sbupdate(mp, waitfor);
	blks = howmany(fs->fs_cssize, fs->fs_fsize);
	space = (caddr_t)fs->fs_csp[0];
	for (i = 0; i < blks; i += fs->fs_frag) {
		size = fs->fs_bsize;
		if (i + fs->fs_frag > blks)
			size = (blks - i) * fs->fs_fsize;
		bp = getblk(mp->um_devvp, fsbtodb(fs, fs->fs_csaddr + i),
		    size, 0, 0);
#ifdef FFS_EI
		if (mp->um_flags & UFS_NEEDSWAP)
			ffs_csum_swap((struct csum*)space,
			    (struct csum*)bp->b_data, size);
		else
#endif
			memcpy(bp->b_data, space, (u_int)size);
		space += size;
		if (waitfor == MNT_WAIT)
			error = bwrite(bp);
		else
			bawrite(bp);
	}
	if (!allerror && error)
		allerror = error;
	return (allerror);
}