     1 /*	$NetBSD: lfs_vfsops.c,v 1.144 2004/02/26 22:56:55 oster Exp $	*/
2
3 /*-
4 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Konrad E. Schroder <perseant (at) hhhh.org>.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38 /*-
39 * Copyright (c) 1989, 1991, 1993, 1994
40 * The Regents of the University of California. All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)lfs_vfsops.c 8.20 (Berkeley) 6/10/95
67 */
68
69 #include <sys/cdefs.h>
70 __KERNEL_RCSID(0, "$NetBSD: lfs_vfsops.c,v 1.144 2004/02/26 22:56:55 oster Exp $");
71
72 #if defined(_KERNEL_OPT)
73 #include "opt_quota.h"
74 #endif
75
76 #include <sys/param.h>
77 #include <sys/systm.h>
78 #include <sys/namei.h>
79 #include <sys/proc.h>
80 #include <sys/kernel.h>
81 #include <sys/vnode.h>
82 #include <sys/mount.h>
83 #include <sys/kthread.h>
84 #include <sys/buf.h>
85 #include <sys/device.h>
86 #include <sys/mbuf.h>
87 #include <sys/file.h>
88 #include <sys/disklabel.h>
89 #include <sys/ioctl.h>
90 #include <sys/errno.h>
91 #include <sys/malloc.h>
92 #include <sys/pool.h>
93 #include <sys/socket.h>
94 #include <uvm/uvm_extern.h>
95 #include <sys/sysctl.h>
96 #include <sys/conf.h>
97
98 #include <miscfs/specfs/specdev.h>
99
100 #include <ufs/ufs/quota.h>
101 #include <ufs/ufs/inode.h>
102 #include <ufs/ufs/ufsmount.h>
103 #include <ufs/ufs/ufs_extern.h>
104
105 #include <uvm/uvm.h>
106 #include <uvm/uvm_stat.h>
107 #include <uvm/uvm_pager.h>
108 #include <uvm/uvm_pdaemon.h>
109
110 #include <ufs/lfs/lfs.h>
111 #include <ufs/lfs/lfs_extern.h>
112
113 #include <miscfs/genfs/genfs.h>
114 #include <miscfs/genfs/genfs_node.h>
115
116 static int lfs_gop_write(struct vnode *, struct vm_page **, int, int);
117 static boolean_t lfs_issequential_hole(const struct ufsmount *,
118 daddr_t, daddr_t);
119
120 static int lfs_mountfs(struct vnode *, struct mount *, struct proc *);
121 static daddr_t check_segsum(struct lfs *, daddr_t, u_int64_t,
122 struct ucred *, int, int *, struct proc *);
123
124 extern const struct vnodeopv_desc lfs_vnodeop_opv_desc;
125 extern const struct vnodeopv_desc lfs_specop_opv_desc;
126 extern const struct vnodeopv_desc lfs_fifoop_opv_desc;
127
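/*
 * PID of the lfs_writerd kernel thread, and a flag that other parts of
 * LFS raise to ask that thread for a global flush.
 */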
128 pid_t lfs_writer_daemon = 0;
129 int lfs_do_flush = 0;
130
131 const struct vnodeopv_desc * const lfs_vnodeopv_descs[] = {
132 &lfs_vnodeop_opv_desc,
133 &lfs_specop_opv_desc,
134 &lfs_fifoop_opv_desc,
135 NULL,
136 };
137
138 struct vfsops lfs_vfsops = {
139 MOUNT_LFS,
140 lfs_mount,
141 ufs_start,
142 lfs_unmount,
143 ufs_root,
144 ufs_quotactl,
145 lfs_statfs,
146 lfs_sync,
147 lfs_vget,
148 lfs_fhtovp,
149 lfs_vptofh,
150 lfs_init,
151 lfs_reinit,
152 lfs_done,
153 NULL,
154 lfs_mountroot,
155 ufs_check_export,
156 lfs_vnodeopv_descs,
157 };
158
159 struct genfs_ops lfs_genfsops = {
160 lfs_gop_size,
161 ufs_gop_alloc,
162 lfs_gop_write,
163 };
164
165 struct pool lfs_inode_pool;
166 struct pool lfs_dinode_pool;
167 struct pool lfs_inoext_pool;
168
169 /*
170 * The writer daemon. UVM keeps track of how many dirty pages we are holding
171 * in lfs_subsys_pages; the daemon flushes the filesystem when this value
    172  * crosses the (user-defined) threshold LFS_MAX_PAGES.
173 */
174 static void
175 lfs_writerd(void *arg)
176 {
177 #ifdef LFS_PD
178 struct mount *mp, *nmp;
179 struct lfs *fs;
180 #endif
181
182 lfs_writer_daemon = curproc->p_pid;
183
184 simple_lock(&lfs_subsys_lock);
185 for (;;) {
186 ltsleep(&lfs_writer_daemon, PVM | PNORELOCK, "lfswriter", 0,
187 &lfs_subsys_lock);
188
189 #ifdef LFS_PD
190 /*
191 * Look through the list of LFSs to see if any of them
192 * have requested pageouts.
193 */
194 simple_lock(&mountlist_slock);
195 for (mp = CIRCLEQ_FIRST(&mountlist); mp != (void *)&mountlist;
196 mp = nmp) {
197 if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
198 nmp = CIRCLEQ_NEXT(mp, mnt_list);
199 continue;
200 }
201 if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS,
202 MFSNAMELEN) == 0) {
203 fs = VFSTOUFS(mp)->um_lfs;
204 if (fs->lfs_pdflush ||
205 !TAILQ_EMPTY(&fs->lfs_pchainhd)) {
206 fs->lfs_pdflush = 0;
207 lfs_flush_fs(fs, 0);
208 }
209 }
210
211 simple_lock(&mountlist_slock);
212 nmp = CIRCLEQ_NEXT(mp, mnt_list);
213 vfs_unbusy(mp);
214 }
215 simple_unlock(&mountlist_slock);
216 #endif /* LFS_PD */
217
218 /*
219 * If global state wants a flush, flush everything.
220 */
221 simple_lock(&lfs_subsys_lock);
222 while (lfs_do_flush || locked_queue_count > LFS_MAX_BUFS ||
223 locked_queue_bytes > LFS_MAX_BYTES ||
224 lfs_subsys_pages > LFS_MAX_PAGES) {
225
226 #ifdef DEBUG_LFS_FLUSH
227 if (lfs_do_flush)
228 printf("daemon: lfs_do_flush\n");
229 if (locked_queue_count > LFS_MAX_BUFS)
230 printf("daemon: lqc = %d, max %d\n",
231 locked_queue_count, LFS_MAX_BUFS);
232 if (locked_queue_bytes > LFS_MAX_BYTES)
233 printf("daemon: lqb = %ld, max %ld\n",
234 locked_queue_bytes, LFS_MAX_BYTES);
235 if (lfs_subsys_pages > LFS_MAX_PAGES)
236 printf("daemon: lssp = %d, max %d\n",
237 lfs_subsys_pages, LFS_MAX_PAGES);
238 #endif /* DEBUG_LFS_FLUSH */
239 lfs_flush(NULL, SEGM_WRITERD);
240 lfs_do_flush = 0;
241 }
242 }
243 /* NOTREACHED */
244 }
245
246 /*
247 * Initialize the filesystem, most work done by ufs_init.
248 */
249 void
250 lfs_init()
251 {
252 ufs_init();
253
254 /*
255 * XXX Same structure as FFS inodes? Should we share a common pool?
256 */
257 pool_init(&lfs_inode_pool, sizeof(struct inode), 0, 0, 0,
258 "lfsinopl", &pool_allocator_nointr);
259 pool_init(&lfs_dinode_pool, sizeof(struct ufs1_dinode), 0, 0, 0,
260 "lfsdinopl", &pool_allocator_nointr);
261 pool_init(&lfs_inoext_pool, sizeof(struct lfs_inode_ext), 8, 0, 0,
262 "lfsinoextpl", &pool_allocator_nointr);
263 #ifdef DEBUG
264 memset(lfs_log, 0, sizeof(lfs_log));
265 #endif
266 simple_lock_init(&lfs_subsys_lock);
267 }
268
269 void
270 lfs_reinit()
271 {
272 ufs_reinit();
273 }
274
275 void
276 lfs_done()
277 {
278 ufs_done();
279 pool_destroy(&lfs_inode_pool);
280 pool_destroy(&lfs_dinode_pool);
281 pool_destroy(&lfs_inoext_pool);
282 }
283
284 /*
285 * Called by main() when ufs is going to be mounted as root.
286 */
287 int
288 lfs_mountroot()
289 {
290 extern struct vnode *rootvp;
291 struct mount *mp;
292 struct proc *p = curproc; /* XXX */
293 int error;
294
295 if (root_device->dv_class != DV_DISK)
296 return (ENODEV);
297
298 if (rootdev == NODEV)
299 return (ENODEV);
300 /*
301 * Get vnodes for swapdev and rootdev.
302 */
303 if ((error = bdevvp(rootdev, &rootvp))) {
304 printf("lfs_mountroot: can't setup bdevvp's");
305 return (error);
306 }
307 if ((error = vfs_rootmountalloc(MOUNT_LFS, "root_device", &mp))) {
308 vrele(rootvp);
309 return (error);
310 }
311 if ((error = lfs_mountfs(rootvp, mp, p))) {
312 mp->mnt_op->vfs_refcount--;
313 vfs_unbusy(mp);
314 free(mp, M_MOUNT);
315 vrele(rootvp);
316 return (error);
317 }
318 simple_lock(&mountlist_slock);
319 CIRCLEQ_INSERT_TAIL(&mountlist, mp, mnt_list);
320 simple_unlock(&mountlist_slock);
321 (void)lfs_statfs(mp, &mp->mnt_stat, p);
322 vfs_unbusy(mp);
323 inittodr(VFSTOUFS(mp)->um_lfs->lfs_tstamp);
324 return (0);
325 }
326
327 /*
328 * VFS Operations.
329 *
330 * mount system call
331 */
332 int
333 lfs_mount(struct mount *mp, const char *path, void *data, struct nameidata *ndp, struct proc *p)
334 {
335 struct vnode *devvp;
336 struct ufs_args args;
337 struct ufsmount *ump = NULL;
338 struct lfs *fs = NULL; /* LFS */
339 int error;
340 mode_t accessmode;
341
342 if (mp->mnt_flag & MNT_GETARGS) {
343 ump = VFSTOUFS(mp);
344 if (ump == NULL)
345 return EIO;
346 args.fspec = NULL;
347 vfs_showexport(mp, &args.export, &ump->um_export);
348 return copyout(&args, data, sizeof(args));
349 }
350 error = copyin(data, &args, sizeof (struct ufs_args));
351 if (error)
352 return (error);
353
354 /*
355 * If updating, check whether changing from read-only to
356 * read/write; if there is no device name, that's all we do.
357 */
358 if (mp->mnt_flag & MNT_UPDATE) {
359 ump = VFSTOUFS(mp);
360 fs = ump->um_lfs;
361 if (fs->lfs_ronly && (mp->mnt_iflag & IMNT_WANTRDWR)) {
362 /*
363 * If upgrade to read-write by non-root, then verify
364 * that user has necessary permissions on the device.
365 */
366 if (p->p_ucred->cr_uid != 0) {
367 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
368 error = VOP_ACCESS(ump->um_devvp, VREAD|VWRITE,
369 p->p_ucred, p);
370 VOP_UNLOCK(ump->um_devvp, 0);
371 if (error)
372 return (error);
373 }
374 fs->lfs_ronly = 0;
375 }
376 if (args.fspec == 0) {
377 /*
378 * Process export requests.
379 */
380 return (vfs_export(mp, &ump->um_export, &args.export));
381 }
382 }
383 /*
384 * Not an update, or updating the name: look up the name
385 * and verify that it refers to a sensible block device.
386 */
387 NDINIT(ndp, LOOKUP, FOLLOW, UIO_USERSPACE, args.fspec, p);
388 if ((error = namei(ndp)) != 0)
389 return (error);
390 devvp = ndp->ni_vp;
391 if (devvp->v_type != VBLK) {
392 vrele(devvp);
393 return (ENOTBLK);
394 }
395 if (bdevsw_lookup(devvp->v_rdev) == NULL) {
396 vrele(devvp);
397 return (ENXIO);
398 }
399 /*
400 * If mount by non-root, then verify that user has necessary
401 * permissions on the device.
402 */
403 if (p->p_ucred->cr_uid != 0) {
404 accessmode = VREAD;
405 if ((mp->mnt_flag & MNT_RDONLY) == 0)
406 accessmode |= VWRITE;
407 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
408 error = VOP_ACCESS(devvp, accessmode, p->p_ucred, p);
409 if (error) {
410 vput(devvp);
411 return (error);
412 }
413 VOP_UNLOCK(devvp, 0);
414 }
415 if ((mp->mnt_flag & MNT_UPDATE) == 0)
416 error = lfs_mountfs(devvp, mp, p); /* LFS */
417 else {
418 if (devvp != ump->um_devvp)
419 error = EINVAL; /* needs translation */
420 else
421 vrele(devvp);
422 }
423 if (error) {
424 vrele(devvp);
425 return (error);
426 }
427 ump = VFSTOUFS(mp);
428 fs = ump->um_lfs; /* LFS */
429 return set_statfs_info(path, UIO_USERSPACE, args.fspec,
430 UIO_USERSPACE, mp, p);
431 }
432
433 /*
434 * Roll-forward code.
435 */
436
437 /*
438 * Load the appropriate indirect block, and change the appropriate pointer.
439 * Mark the block dirty. Do segment and avail accounting.
440 */
441 static int
442 update_meta(struct lfs *fs, ino_t ino, int version, daddr_t lbn,
443 daddr_t ndaddr, size_t size, struct proc *p)
444 {
445 int error;
446 struct vnode *vp;
447 struct inode *ip;
448 #ifdef DEBUG_LFS_RFW
449 daddr_t odaddr;
450 struct indir a[NIADDR];
451 int num;
452 int i;
453 #endif /* DEBUG_LFS_RFW */
454 struct buf *bp;
455 SEGUSE *sup;
456
457 KASSERT(lbn >= 0); /* no indirect blocks */
458
459 if ((error = lfs_rf_valloc(fs, ino, version, p, &vp)) != 0) {
460 #ifdef DEBUG_LFS_RFW
461 printf("update_meta: ino %d: lfs_rf_valloc returned %d\n", ino,
462 error);
463 #endif /* DEBUG_LFS_RFW */
464 return error;
465 }
466
467 if ((error = VOP_BALLOC(vp, (lbn << fs->lfs_bshift), size,
468 NOCRED, 0, &bp)) != 0) {
469 vput(vp);
470 return (error);
471 }
472 /* No need to write, the block is already on disk */
473 if (bp->b_flags & B_DELWRI) {
474 LFS_UNLOCK_BUF(bp);
475 fs->lfs_avail += btofsb(fs, bp->b_bcount);
476 }
477 bp->b_flags |= B_INVAL;
478 brelse(bp);
479
480 /*
481 * Extend the file, if it is not large enough already.
482 * XXX this is not exactly right, we don't know how much of the
483 * XXX last block is actually used. We hope that an inode will
484 * XXX appear later to give the correct size.
485 */
486 ip = VTOI(vp);
487 if (ip->i_size <= (lbn << fs->lfs_bshift)) {
488 u_int64_t newsize;
489
490 if (lbn < NDADDR)
491 newsize = ip->i_ffs1_size = (lbn << fs->lfs_bshift) +
492 (size - fs->lfs_fsize) + 1;
493 else
494 newsize = ip->i_ffs1_size = (lbn << fs->lfs_bshift) + 1;
495
496 if (ip->i_size < newsize) {
497 ip->i_size = newsize;
498 /*
499 * tell vm our new size for the case the inode won't
500 * appear later.
501 */
502 uvm_vnp_setsize(vp, newsize);
503 }
504 }
505
506 lfs_update_single(fs, NULL, vp, lbn, ndaddr, size);
507
508 LFS_SEGENTRY(sup, fs, dtosn(fs, ndaddr), bp);
509 sup->su_nbytes += size;
510 LFS_WRITESEGENTRY(sup, fs, dtosn(fs, ndaddr), bp);
511
512 /* differences here should be due to UNWRITTEN indirect blocks. */
513 KASSERT((lblkno(fs, ip->i_size) > NDADDR &&
514 ip->i_lfs_effnblks == ip->i_ffs1_blocks) ||
515 ip->i_lfs_effnblks >= ip->i_ffs1_blocks);
516
517 #ifdef DEBUG_LFS_RFW
518 /* Now look again to make sure it worked */
519 ufs_bmaparray(vp, lbn, &odaddr, &a[0], &num, NULL, NULL);
520 for (i = num; i > 0; i--) {
521 if (!a[i].in_exists)
522 panic("update_meta: absent %d lv indirect block", i);
523 }
524 if (dbtofsb(fs, odaddr) != ndaddr)
525 printf("update_meta: failed setting ino %d lbn %" PRId64
526 " to %" PRId64 "\n", ino, lbn, ndaddr);
527 #endif /* DEBUG_LFS_RFW */
528 vput(vp);
529 return 0;
530 }
531
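/*
 * Replay an inode block encountered during roll forward: for each inode
 * in the block, reallocate its vnode, bring the in-core inode up to date
 * from the on-disk copy (without touching its block pointers), and move
 * its ifile entry and segment usage accounting to the block's new address.
 */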
532 static int
533 update_inoblk(struct lfs *fs, daddr_t offset, struct ucred *cred,
534 struct proc *p)
535 {
536 struct vnode *devvp, *vp;
537 struct inode *ip;
538 struct ufs1_dinode *dip;
539 struct buf *dbp, *ibp;
540 int error;
541 daddr_t daddr;
542 IFILE *ifp;
543 SEGUSE *sup;
544
545 devvp = VTOI(fs->lfs_ivnode)->i_devvp;
546
547 /*
548 * Get the inode, update times and perms.
549 * DO NOT update disk blocks, we do that separately.
550 */
551 error = bread(devvp, fsbtodb(fs, offset), fs->lfs_ibsize, cred, &dbp);
552 if (error) {
553 #ifdef DEBUG_LFS_RFW
554 printf("update_inoblk: bread returned %d\n", error);
555 #endif
556 return error;
557 }
558 dip = ((struct ufs1_dinode *)(dbp->b_data)) + INOPB(fs);
559 while (--dip >= (struct ufs1_dinode *)dbp->b_data) {
560 if (dip->di_inumber > LFS_IFILE_INUM) {
561 /* printf("ino %d version %d\n", dip->di_inumber,
562 dip->di_gen); */
563 error = lfs_rf_valloc(fs, dip->di_inumber, dip->di_gen,
564 p, &vp);
565 if (error) {
566 #ifdef DEBUG_LFS_RFW
567 printf("update_inoblk: lfs_rf_valloc returned %d\n", error);
568 #endif
569 continue;
570 }
571 ip = VTOI(vp);
572 if (dip->di_size != ip->i_size)
573 VOP_TRUNCATE(vp, dip->di_size, 0, NOCRED, p);
574 /* Get mode, link count, size, and times */
575 memcpy(ip->i_din.ffs1_din, dip,
576 offsetof(struct ufs1_dinode, di_db[0]));
577
578 /* Then the rest, except di_blocks */
579 ip->i_flags = ip->i_ffs1_flags = dip->di_flags;
580 ip->i_gen = ip->i_ffs1_gen = dip->di_gen;
581 ip->i_uid = ip->i_ffs1_uid = dip->di_uid;
582 ip->i_gid = ip->i_ffs1_gid = dip->di_gid;
583
584 ip->i_mode = ip->i_ffs1_mode;
585 ip->i_nlink = ip->i_ffs_effnlink = ip->i_ffs1_nlink;
586 ip->i_size = ip->i_ffs1_size;
587
588 LFS_SET_UINO(ip, IN_CHANGE | IN_MODIFIED | IN_UPDATE);
589
590 /* Re-initialize to get type right */
591 ufs_vinit(vp->v_mount, lfs_specop_p, lfs_fifoop_p,
592 &vp);
593 vput(vp);
594
595 /* Record change in location */
596 LFS_IENTRY(ifp, fs, dip->di_inumber, ibp);
597 daddr = ifp->if_daddr;
598 ifp->if_daddr = dbtofsb(fs, dbp->b_blkno);
599 error = LFS_BWRITE_LOG(ibp); /* Ifile */
600 /* And do segment accounting */
601 if (dtosn(fs, daddr) != dtosn(fs, dbtofsb(fs, dbp->b_blkno))) {
602 if (daddr > 0) {
603 LFS_SEGENTRY(sup, fs, dtosn(fs, daddr),
604 ibp);
605 sup->su_nbytes -= sizeof (struct ufs1_dinode);
606 LFS_WRITESEGENTRY(sup, fs,
607 dtosn(fs, daddr),
608 ibp);
609 }
610 LFS_SEGENTRY(sup, fs, dtosn(fs, dbtofsb(fs, dbp->b_blkno)),
611 ibp);
612 sup->su_nbytes += sizeof (struct ufs1_dinode);
613 LFS_WRITESEGENTRY(sup, fs,
614 dtosn(fs, dbtofsb(fs, dbp->b_blkno)),
615 ibp);
616 }
617 }
618 }
619 dbp->b_flags |= B_AGE;
620 brelse(dbp);
621
622 return 0;
623 }
624
625 #define CHECK_CKSUM 0x0001 /* Check the checksum to make sure it's valid */
626 #define CHECK_UPDATE 0x0002 /* Update Ifile for new data blocks / inodes */
627
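/*
 * Examine the partial segment at `offset'.  With CHECK_CKSUM, verify the
 * summary and data checksums (roll-forward phase 1); with CHECK_UPDATE,
 * replay the inode blocks and data blocks it describes (phase 2).
 * Returns the offset of the next partial segment to examine, or -1 if
 * this one is invalid.
 */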
628 static daddr_t
629 check_segsum(struct lfs *fs, daddr_t offset, u_int64_t nextserial,
630 struct ucred *cred, int flags, int *pseg_flags, struct proc *p)
631 {
632 struct vnode *devvp;
633 struct buf *bp, *dbp;
634 int error, nblocks = 0, ninos, i, j; /* XXX: gcc */
635 SEGSUM *ssp;
636 u_long *dp = NULL, *datap = NULL; /* XXX u_int32_t */
637 daddr_t oldoffset;
638 int32_t *iaddr; /* XXX ondisk32 */
639 FINFO *fip;
640 SEGUSE *sup;
641 size_t size;
642
643 devvp = VTOI(fs->lfs_ivnode)->i_devvp;
644 /*
645 * If the segment has a superblock and we're at the top
646 * of the segment, skip the superblock.
647 */
648 if (sntod(fs, dtosn(fs, offset)) == offset) {
649 LFS_SEGENTRY(sup, fs, dtosn(fs, offset), bp);
650 if (sup->su_flags & SEGUSE_SUPERBLOCK)
651 offset += btofsb(fs, LFS_SBPAD);
652 brelse(bp);
653 }
654
655 /* Read in the segment summary */
656 error = bread(devvp, fsbtodb(fs, offset), fs->lfs_sumsize, cred, &bp);
657 if (error)
658 return -1;
659
660 /* Check summary checksum */
661 ssp = (SEGSUM *)bp->b_data;
662 if (flags & CHECK_CKSUM) {
663 if (ssp->ss_sumsum != cksum(&ssp->ss_datasum,
664 fs->lfs_sumsize -
665 sizeof(ssp->ss_sumsum))) {
666 #ifdef DEBUG_LFS_RFW
667 printf("Sumsum error at 0x%" PRIx64 "\n", offset);
668 #endif
669 offset = -1;
670 goto err1;
671 }
672 if (ssp->ss_nfinfo == 0 && ssp->ss_ninos == 0) {
673 #ifdef DEBUG_LFS_RFW
674 printf("Empty pseg at 0x%" PRIx64 "\n", offset);
675 #endif
676 offset = -1;
677 goto err1;
678 }
679 if (ssp->ss_create < fs->lfs_tstamp) {
680 #ifdef DEBUG_LFS_RFW
681 printf("Old data at 0x%" PRIx64 "\n", offset);
682 #endif
683 offset = -1;
684 goto err1;
685 }
686 }
687 if (fs->lfs_version > 1) {
688 if (ssp->ss_serial != nextserial) {
689 #ifdef DEBUG_LFS_RFW
690 printf("Unexpected serial number at 0x%" PRIx64
691 "\n", offset);
692 #endif
693 offset = -1;
694 goto err1;
695 }
696 if (ssp->ss_ident != fs->lfs_ident) {
697 #ifdef DEBUG_LFS_RFW
698 printf("Incorrect fsid (0x%x vs 0x%x) at 0x%"
699 PRIx64 "\n", ssp->ss_ident, fs->lfs_ident, offset);
700 #endif
701 offset = -1;
702 goto err1;
703 }
704 }
705 if (pseg_flags)
706 *pseg_flags = ssp->ss_flags;
707 oldoffset = offset;
708 offset += btofsb(fs, fs->lfs_sumsize);
709
710 ninos = howmany(ssp->ss_ninos, INOPB(fs));
711 /* XXX ondisk32 */
712 iaddr = (int32_t *)(bp->b_data + fs->lfs_sumsize - sizeof(int32_t));
713 if (flags & CHECK_CKSUM) {
714 /* Count blocks */
715 nblocks = 0;
716 fip = (FINFO *)(bp->b_data + SEGSUM_SIZE(fs));
717 for (i = 0; i < ssp->ss_nfinfo; ++i) {
718 nblocks += fip->fi_nblocks;
719 if (fip->fi_nblocks <= 0)
720 break;
721 /* XXX ondisk32 */
722 fip = (FINFO *)(((char *)fip) + FINFOSIZE +
723 (fip->fi_nblocks * sizeof(int32_t)));
724 }
725 nblocks += ninos;
726 /* Create the sum array */
727 datap = dp = (u_long *)malloc(nblocks * sizeof(u_long),
728 M_SEGMENT, M_WAITOK);
729 }
730
731 /* Handle individual blocks */
732 fip = (FINFO *)(bp->b_data + SEGSUM_SIZE(fs));
733 for (i = 0; i < ssp->ss_nfinfo || ninos; ++i) {
734 /* Inode block? */
735 if (ninos && *iaddr == offset) {
736 if (flags & CHECK_CKSUM) {
737 /* Read in the head and add to the buffer */
738 error = bread(devvp, fsbtodb(fs, offset), fs->lfs_bsize,
739 cred, &dbp);
740 if (error) {
741 offset = -1;
742 goto err2;
743 }
744 (*dp++) = ((u_long *)(dbp->b_data))[0];
745 dbp->b_flags |= B_AGE;
746 brelse(dbp);
747 }
748 if (flags & CHECK_UPDATE) {
749 if ((error = update_inoblk(fs, offset, cred, p))
750 != 0) {
751 offset = -1;
752 goto err2;
753 }
754 }
755 offset += btofsb(fs, fs->lfs_ibsize);
756 --iaddr;
757 --ninos;
758 --i; /* compensate */
759 continue;
760 }
761 /* printf("check: blocks from ino %d version %d\n",
762 fip->fi_ino, fip->fi_version); */
763 size = fs->lfs_bsize;
764 for (j = 0; j < fip->fi_nblocks; ++j) {
765 if (j == fip->fi_nblocks - 1)
766 size = fip->fi_lastlength;
767 if (flags & CHECK_CKSUM) {
768 error = bread(devvp, fsbtodb(fs, offset), size, cred, &dbp);
769 if (error) {
770 offset = -1;
771 goto err2;
772 }
773 (*dp++) = ((u_long *)(dbp->b_data))[0];
774 dbp->b_flags |= B_AGE;
775 brelse(dbp);
776 }
777 /* Account for and update any direct blocks */
778 if ((flags & CHECK_UPDATE) &&
779 fip->fi_ino > LFS_IFILE_INUM &&
780 fip->fi_blocks[j] >= 0) {
781 update_meta(fs, fip->fi_ino, fip->fi_version,
782 fip->fi_blocks[j], offset, size, p);
783 }
784 offset += btofsb(fs, size);
785 }
786 /* XXX ondisk32 */
787 fip = (FINFO *)(((char *)fip) + FINFOSIZE
788 + fip->fi_nblocks * sizeof(int32_t));
789 }
790 /* Checksum the array, compare */
791 if ((flags & CHECK_CKSUM) &&
792 ssp->ss_datasum != cksum(datap, nblocks * sizeof(u_long)))
793 {
794 #ifdef DEBUG_LFS_RFW
795 printf("Datasum error at 0x%" PRIx64 " (wanted %x got %x)\n",
796 offset, ssp->ss_datasum, cksum(datap, nblocks *
797 sizeof(u_long)));
798 #endif
799 offset = -1;
800 goto err2;
801 }
802
803 /* If we're at the end of the segment, move to the next */
804 if (dtosn(fs, offset + btofsb(fs, fs->lfs_sumsize + fs->lfs_bsize)) !=
805 dtosn(fs, offset)) {
806 if (dtosn(fs, offset) == dtosn(fs, ssp->ss_next)) {
807 offset = -1;
808 goto err2;
809 }
810 offset = ssp->ss_next;
811 #ifdef DEBUG_LFS_RFW
812 printf("LFS roll forward: moving on to offset 0x%" PRIx64
813 " -> segment %d\n", offset, dtosn(fs,offset));
814 #endif
815 }
816
817 if (flags & CHECK_UPDATE) {
818 fs->lfs_avail -= (offset - oldoffset);
819 /* Don't clog the buffer queue */
820 simple_lock(&lfs_subsys_lock);
821 if (locked_queue_count > LFS_MAX_BUFS ||
822 locked_queue_bytes > LFS_MAX_BYTES) {
823 lfs_flush(fs, SEGM_CKP);
824 }
825 simple_unlock(&lfs_subsys_lock);
826 }
827
828 err2:
829 if (flags & CHECK_CKSUM)
830 free(datap, M_SEGMENT);
831 err1:
832 bp->b_flags |= B_AGE;
833 brelse(bp);
834
835 /* XXX should we update the serial number even for bad psegs? */
836 if ((flags & CHECK_UPDATE) && offset > 0 && fs->lfs_version > 1)
837 fs->lfs_serial = nextserial;
838 return offset;
839 }
840
841 /*
842 * Common code for mount and mountroot
843 * LFS specific
844 */
845 int
846 lfs_mountfs(struct vnode *devvp, struct mount *mp, struct proc *p)
847 {
848 extern struct vnode *rootvp;
849 struct dlfs *tdfs, *dfs, *adfs;
850 struct lfs *fs;
851 struct ufsmount *ump;
852 struct vnode *vp;
853 struct buf *bp, *abp;
854 struct partinfo dpart;
855 dev_t dev;
856 int error, i, ronly, secsize, fsbsize;
857 struct ucred *cred;
858 CLEANERINFO *cip;
859 SEGUSE *sup;
860 int flags, dirty, do_rollforward;
861 daddr_t offset, oldoffset, lastgoodpseg, sb_addr;
862 int sn, curseg;
863
864 cred = p ? p->p_ucred : NOCRED;
865 /*
866 * Disallow multiple mounts of the same device.
867 * Disallow mounting of a device that is currently in use
868 * (except for root, which might share swap device for miniroot).
869 * Flush out any old buffers remaining from a previous use.
870 */
871 if ((error = vfs_mountedon(devvp)) != 0)
872 return (error);
873 if (vcount(devvp) > 1 && devvp != rootvp)
874 return (EBUSY);
875 if ((error = vinvalbuf(devvp, V_SAVE, cred, p, 0, 0)) != 0)
876 return (error);
877
878 ronly = (mp->mnt_flag & MNT_RDONLY) != 0;
879 error = VOP_OPEN(devvp, ronly ? FREAD : FREAD|FWRITE, FSCRED, p);
880 if (error)
881 return (error);
882 if (VOP_IOCTL(devvp, DIOCGPART, &dpart, FREAD, cred, p) != 0)
883 secsize = DEV_BSIZE;
884 else
885 secsize = dpart.disklab->d_secsize;
886
887 /* Don't free random space on error. */
888 bp = NULL;
889 abp = NULL;
890 ump = NULL;
891
892 sb_addr = LFS_LABELPAD / secsize;
893 while (1) {
894 /* Read in the superblock. */
895 error = bread(devvp, sb_addr, LFS_SBPAD, cred, &bp);
896 if (error)
897 goto out;
898 dfs = (struct dlfs *)bp->b_data;
899
900 /* Check the basics. */
    901 		if (dfs->dlfs_magic != LFS_MAGIC || dfs->dlfs_bsize > MAXBSIZE ||
902 dfs->dlfs_version > LFS_VERSION ||
903 dfs->dlfs_bsize < sizeof(struct dlfs)) {
904 #ifdef DEBUG_LFS
905 printf("lfs_mountfs: primary superblock sanity failed\n");
906 #endif
907 error = EINVAL; /* XXX needs translation */
908 goto out;
909 }
910 if (dfs->dlfs_inodefmt > LFS_MAXINODEFMT)
911 printf("lfs_mountfs: warning: unknown inode format %d\n",
912 dfs->dlfs_inodefmt);
913
914 if (dfs->dlfs_version == 1)
915 fsbsize = secsize;
916 else {
917 fsbsize = 1 << (dfs->dlfs_bshift - dfs->dlfs_blktodb +
918 dfs->dlfs_fsbtodb);
919 /*
920 * Could be, if the frag size is large enough, that we
921 * don't have the "real" primary superblock. If that's
922 * the case, get the real one, and try again.
923 */
924 if (sb_addr != dfs->dlfs_sboffs[0] <<
925 dfs->dlfs_fsbtodb) {
926 /* #ifdef DEBUG_LFS */
927 printf("lfs_mountfs: sb daddr 0x%llx is not right, trying 0x%llx\n",
928 (long long)sb_addr, (long long)(dfs->dlfs_sboffs[0] <<
929 dfs->dlfs_fsbtodb));
930 /* #endif */
931 sb_addr = dfs->dlfs_sboffs[0] <<
932 dfs->dlfs_fsbtodb;
933 brelse(bp);
934 continue;
935 }
936 }
937 break;
938 }
939
940 /*
941 * Check the second superblock to see which is newer; then mount
942 * using the older of the two. This is necessary to ensure that
943 * the filesystem is valid if it was not unmounted cleanly.
944 */
945
946 if (dfs->dlfs_sboffs[1] &&
947 dfs->dlfs_sboffs[1] - LFS_LABELPAD / fsbsize > LFS_SBPAD / fsbsize)
948 {
949 error = bread(devvp, dfs->dlfs_sboffs[1] * (fsbsize / secsize),
950 LFS_SBPAD, cred, &abp);
951 if (error)
952 goto out;
953 adfs = (struct dlfs *)abp->b_data;
954
955 if (dfs->dlfs_version == 1) {
956 /* 1s resolution comparison */
957 if (adfs->dlfs_tstamp < dfs->dlfs_tstamp)
958 tdfs = adfs;
959 else
960 tdfs = dfs;
961 } else {
962 /* monotonic infinite-resolution comparison */
963 if (adfs->dlfs_serial < dfs->dlfs_serial)
964 tdfs = adfs;
965 else
966 tdfs = dfs;
967 }
968
969 /* Check the basics. */
970 if (tdfs->dlfs_magic != LFS_MAGIC ||
971 tdfs->dlfs_bsize > MAXBSIZE ||
972 tdfs->dlfs_version > LFS_VERSION ||
973 tdfs->dlfs_bsize < sizeof(struct dlfs)) {
974 #ifdef DEBUG_LFS
975 printf("lfs_mountfs: alt superblock sanity failed\n");
976 #endif
977 error = EINVAL; /* XXX needs translation */
978 goto out;
979 }
980 } else {
981 #ifdef DEBUG_LFS
982 printf("lfs_mountfs: invalid alt superblock daddr=0x%x\n",
983 dfs->dlfs_sboffs[1]);
984 #endif
985 error = EINVAL;
986 goto out;
987 }
988
989 /* Allocate the mount structure, copy the superblock into it. */
990 fs = malloc(sizeof(struct lfs), M_UFSMNT, M_WAITOK | M_ZERO);
991 memcpy(&fs->lfs_dlfs, tdfs, sizeof(struct dlfs));
992
993 /* Compatibility */
994 if (fs->lfs_version < 2) {
995 fs->lfs_sumsize = LFS_V1_SUMMARY_SIZE;
996 fs->lfs_ibsize = fs->lfs_bsize;
997 fs->lfs_start = fs->lfs_sboffs[0];
998 fs->lfs_tstamp = fs->lfs_otstamp;
999 fs->lfs_fsbtodb = 0;
1000 }
1001
1002 /* Before rolling forward, lock so vget will sleep for other procs */
1003 fs->lfs_flags = LFS_NOTYET;
1004 fs->lfs_rfpid = p->p_pid;
1005
1006 ump = malloc(sizeof *ump, M_UFSMNT, M_WAITOK | M_ZERO);
1007 ump->um_lfs = fs;
1008 ump->um_fstype = UFS1;
1009 if (sizeof(struct lfs) < LFS_SBPAD) { /* XXX why? */
1010 bp->b_flags |= B_INVAL;
1011 abp->b_flags |= B_INVAL;
1012 }
1013 brelse(bp);
1014 bp = NULL;
1015 brelse(abp);
1016 abp = NULL;
1017
1018 /* Set up the I/O information */
1019 fs->lfs_devbsize = secsize;
1020 fs->lfs_iocount = 0;
1021 fs->lfs_diropwait = 0;
1022 fs->lfs_activesb = 0;
1023 fs->lfs_uinodes = 0;
1024 fs->lfs_ravail = 0;
1025 fs->lfs_sbactive = 0;
1026
1027 /* Set up the ifile and lock aflags */
1028 fs->lfs_doifile = 0;
1029 fs->lfs_writer = 0;
1030 fs->lfs_dirops = 0;
1031 fs->lfs_nadirop = 0;
1032 fs->lfs_seglock = 0;
1033 fs->lfs_pdflush = 0;
1034 fs->lfs_sleepers = 0;
1035 simple_lock_init(&fs->lfs_interlock);
1036 lockinit(&fs->lfs_fraglock, PINOD, "lfs_fraglock", 0, 0);
1037
1038 /* Set the file system readonly/modify bits. */
1039 fs->lfs_ronly = ronly;
1040 if (ronly == 0)
1041 fs->lfs_fmod = 1;
1042
1043 /* Initialize the mount structure. */
1044 dev = devvp->v_rdev;
1045 mp->mnt_data = ump;
1046 mp->mnt_stat.f_fsid.val[0] = (long)dev;
1047 mp->mnt_stat.f_fsid.val[1] = makefstype(MOUNT_LFS);
1048 mp->mnt_stat.f_iosize = fs->lfs_bsize;
1049 mp->mnt_maxsymlinklen = fs->lfs_maxsymlinklen;
1050 mp->mnt_flag |= MNT_LOCAL;
1051 mp->mnt_fs_bshift = fs->lfs_bshift;
1052 ump->um_flags = 0;
1053 ump->um_mountp = mp;
1054 ump->um_dev = dev;
1055 ump->um_devvp = devvp;
1056 ump->um_bptrtodb = fs->lfs_fsbtodb;
1057 ump->um_seqinc = fragstofsb(fs, fs->lfs_frag);
1058 ump->um_nindir = fs->lfs_nindir;
1059 ump->um_lognindir = ffs(fs->lfs_nindir) - 1;
1060 for (i = 0; i < MAXQUOTAS; i++)
1061 ump->um_quotas[i] = NULLVP;
1062 devvp->v_specmountpoint = mp;
1063
1064 /* Set up reserved memory for pageout */
1065 lfs_setup_resblks(fs);
1066 /* Set up vdirop tailq */
1067 TAILQ_INIT(&fs->lfs_dchainhd);
1068 /* and paging tailq */
1069 TAILQ_INIT(&fs->lfs_pchainhd);
1070
1071 /*
1072 * We use the ifile vnode for almost every operation. Instead of
1073 * retrieving it from the hash table each time we retrieve it here,
1074 * artificially increment the reference count and keep a pointer
1075 * to it in the incore copy of the superblock.
1076 */
1077 if ((error = VFS_VGET(mp, LFS_IFILE_INUM, &vp)) != 0) {
1078 #ifdef DEBUG
1079 printf("lfs_mountfs: ifile vget failed, error=%d\n", error);
1080 #endif
1081 goto out;
1082 }
1083 fs->lfs_ivnode = vp;
1084 VREF(vp);
1085
1086 /* Set up segment usage flags for the autocleaner. */
1087 fs->lfs_nactive = 0;
1088 fs->lfs_suflags = (u_int32_t **)malloc(2 * sizeof(u_int32_t *),
1089 M_SEGMENT, M_WAITOK);
1090 fs->lfs_suflags[0] = (u_int32_t *)malloc(fs->lfs_nseg * sizeof(u_int32_t),
1091 M_SEGMENT, M_WAITOK);
1092 fs->lfs_suflags[1] = (u_int32_t *)malloc(fs->lfs_nseg * sizeof(u_int32_t),
1093 M_SEGMENT, M_WAITOK);
1094 memset(fs->lfs_suflags[1], 0, fs->lfs_nseg * sizeof(u_int32_t));
1095 for (i = 0; i < fs->lfs_nseg; i++) {
1096 int changed;
1097
1098 LFS_SEGENTRY(sup, fs, i, bp);
1099 changed = 0;
1100 if (!ronly) {
1101 if (sup->su_nbytes == 0 &&
1102 !(sup->su_flags & SEGUSE_EMPTY)) {
1103 sup->su_flags |= SEGUSE_EMPTY;
1104 ++changed;
1105 } else if (!(sup->su_nbytes == 0) &&
1106 (sup->su_flags & SEGUSE_EMPTY)) {
1107 sup->su_flags &= ~SEGUSE_EMPTY;
1108 ++changed;
1109 }
1110 if (sup->su_flags & SEGUSE_ACTIVE) {
1111 sup->su_flags &= ~SEGUSE_ACTIVE;
1112 ++changed;
1113 }
1114 }
1115 fs->lfs_suflags[0][i] = sup->su_flags;
1116 if (changed)
1117 LFS_WRITESEGENTRY(sup, fs, i, bp);
1118 else
1119 brelse(bp);
1120 }
1121
1122 /*
1123 * Roll forward.
1124 *
1125 * We don't automatically roll forward for v1 filesystems, because
1126 * of the danger that the clock was turned back between the last
1127 * checkpoint and crash. This would roll forward garbage.
1128 *
1129 * v2 filesystems don't have this problem because they use a
1130 * monotonically increasing serial number instead of a timestamp.
1131 */
1132 #ifdef LFS_DO_ROLLFORWARD
1133 do_rollforward = !fs->lfs_ronly;
1134 #else
1135 do_rollforward = (fs->lfs_version > 1 && !fs->lfs_ronly &&
1136 !(fs->lfs_pflags & LFS_PF_CLEAN));
1137 #endif
1138 if (do_rollforward) {
1139 u_int64_t nextserial;
1140 /*
1141 * Phase I: Find the address of the last good partial
1142 * segment that was written after the checkpoint. Mark
1143 * the segments in question dirty, so they won't be
1144 * reallocated.
1145 */
1146 lastgoodpseg = oldoffset = offset = fs->lfs_offset;
1147 flags = 0x0;
1148 #ifdef DEBUG_LFS_RFW
1149 printf("LFS roll forward phase 1: starting at offset 0x%"
1150 PRIx64 "\n", offset);
1151 #endif
1152 LFS_SEGENTRY(sup, fs, dtosn(fs, offset), bp);
1153 if (!(sup->su_flags & SEGUSE_DIRTY))
1154 --fs->lfs_nclean;
1155 sup->su_flags |= SEGUSE_DIRTY;
1156 LFS_WRITESEGENTRY(sup, fs, dtosn(fs, offset), bp);
1157 nextserial = fs->lfs_serial + 1;
1158 while ((offset = check_segsum(fs, offset, nextserial,
1159 cred, CHECK_CKSUM, &flags, p)) > 0) {
1160 nextserial++;
1161 if (sntod(fs, oldoffset) != sntod(fs, offset)) {
1162 LFS_SEGENTRY(sup, fs, dtosn(fs, oldoffset),
1163 bp);
1164 if (!(sup->su_flags & SEGUSE_DIRTY))
1165 --fs->lfs_nclean;
1166 sup->su_flags |= SEGUSE_DIRTY;
1167 LFS_WRITESEGENTRY(sup, fs, dtosn(fs, oldoffset),
1168 bp);
1169 }
1170
1171 #ifdef DEBUG_LFS_RFW
1172 printf("LFS roll forward phase 1: offset=0x%"
1173 PRIx64 "\n", offset);
1174 if (flags & SS_DIROP) {
1175 printf("lfs_mountfs: dirops at 0x%" PRIx64 "\n",
1176 oldoffset);
1177 if (!(flags & SS_CONT))
1178 printf("lfs_mountfs: dirops end "
1179 "at 0x%" PRIx64 "\n", oldoffset);
1180 }
1181 #endif
1182 if (!(flags & SS_CONT))
1183 lastgoodpseg = offset;
1184 oldoffset = offset;
1185 }
1186 #ifdef DEBUG_LFS_RFW
1187 if (flags & SS_CONT) {
1188 printf("LFS roll forward: warning: incomplete "
1189 "dirops discarded\n");
1190 }
1191 printf("LFS roll forward phase 1: completed: "
1192 "lastgoodpseg=0x%" PRIx64 "\n", lastgoodpseg);
1193 #endif
1194 oldoffset = fs->lfs_offset;
1195 if (fs->lfs_offset != lastgoodpseg) {
1196 /* Don't overwrite what we're trying to preserve */
1197 offset = fs->lfs_offset;
1198 fs->lfs_offset = lastgoodpseg;
1199 fs->lfs_curseg = sntod(fs, dtosn(fs, fs->lfs_offset));
1200 for (sn = curseg = dtosn(fs, fs->lfs_curseg);;) {
1201 sn = (sn + 1) % fs->lfs_nseg;
1202 if (sn == curseg)
1203 panic("lfs_mountfs: no clean segments");
1204 LFS_SEGENTRY(sup, fs, sn, bp);
1205 dirty = (sup->su_flags & SEGUSE_DIRTY);
1206 brelse(bp);
1207 if (!dirty)
1208 break;
1209 }
1210 fs->lfs_nextseg = sntod(fs, sn);
1211
1212 /*
1213 * Phase II: Roll forward from the first superblock.
1214 */
1215 while (offset != lastgoodpseg) {
1216 #ifdef DEBUG_LFS_RFW
1217 printf("LFS roll forward phase 2: 0x%"
1218 PRIx64 "\n", offset);
1219 #endif
1220 offset = check_segsum(fs, offset,
1221 fs->lfs_serial + 1, cred, CHECK_UPDATE,
1222 NULL, p);
1223 }
1224
1225 /*
1226 * Finish: flush our changes to disk.
1227 */
1228 lfs_segwrite(mp, SEGM_CKP | SEGM_SYNC);
1229 printf("lfs_mountfs: roll forward recovered %lld blocks\n",
1230 (long long)(lastgoodpseg - oldoffset));
1231 }
1232 #ifdef DEBUG_LFS_RFW
1233 printf("LFS roll forward complete\n");
1234 #endif
1235 }
1236 /* If writing, sb is not clean; record in case of immediate crash */
1237 if (!fs->lfs_ronly) {
1238 fs->lfs_pflags &= ~LFS_PF_CLEAN;
1239 lfs_writesuper(fs, fs->lfs_sboffs[0]);
1240 lfs_writesuper(fs, fs->lfs_sboffs[1]);
1241 }
1242
1243 /* Allow vget now that roll-forward is complete */
1244 fs->lfs_flags &= ~(LFS_NOTYET);
1245 wakeup(&fs->lfs_flags);
1246
1247 /*
1248 * Initialize the ifile cleaner info with information from
1249 * the superblock.
1250 */
1251 LFS_CLEANERINFO(cip, fs, bp);
1252 cip->clean = fs->lfs_nclean;
1253 cip->dirty = fs->lfs_nseg - fs->lfs_nclean;
1254 cip->avail = fs->lfs_avail;
1255 cip->bfree = fs->lfs_bfree;
1256 (void) LFS_BWRITE_LOG(bp); /* Ifile */
1257
1258 /*
1259 * Mark the current segment as ACTIVE, since we're going to
1260 * be writing to it.
1261 */
1262 LFS_SEGENTRY(sup, fs, dtosn(fs, fs->lfs_offset), bp);
1263 sup->su_flags |= SEGUSE_DIRTY | SEGUSE_ACTIVE;
1264 fs->lfs_nactive++;
1265 LFS_WRITESEGENTRY(sup, fs, dtosn(fs, fs->lfs_offset), bp); /* Ifile */
1266
1267 /* Now that roll-forward is done, unlock the Ifile */
1268 vput(vp);
1269
1270 /* Comment on ifile size if it is too large */
1271 if (fs->lfs_ivnode->v_size / fs->lfs_bsize > LFS_MAX_BUFS) {
1272 fs->lfs_flags |= LFS_WARNED;
1273 printf("lfs_mountfs: please consider increasing NBUF to at least %lld\n",
1274 (long long)(fs->lfs_ivnode->v_size / fs->lfs_bsize) * (nbuf / LFS_MAX_BUFS));
1275 }
1276 if (fs->lfs_ivnode->v_size > LFS_MAX_BYTES) {
1277 fs->lfs_flags |= LFS_WARNED;
1278 printf("lfs_mountfs: please consider increasing BUFPAGES to at least %lld\n",
1279 (long long)(fs->lfs_ivnode->v_size * bufpages / LFS_MAX_BYTES));
1280 }
   1281
	/* Start the pagedaemon-anticipating daemon */
	if (lfs_writer_daemon == 0 &&
	    kthread_create1(lfs_writerd, NULL, NULL, "lfs_writer") != 0)
		panic("fork lfs_writer");

   1282 	return (0);
1283 out:
1284 if (bp)
1285 brelse(bp);
1286 if (abp)
1287 brelse(abp);
1288 vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1289 (void)VOP_CLOSE(devvp, ronly ? FREAD : FREAD|FWRITE, cred, p);
1290 VOP_UNLOCK(devvp, 0);
1291 if (ump) {
1292 free(ump->um_lfs, M_UFSMNT);
1293 free(ump, M_UFSMNT);
1294 mp->mnt_data = NULL;
1295 }
   1296
1302 return (error);
1303 }
1304
1305 /*
1306 * unmount system call
1307 */
1308 int
1309 lfs_unmount(struct mount *mp, int mntflags, struct proc *p)
1310 {
1311 struct ufsmount *ump;
1312 struct lfs *fs;
1313 int error, flags, ronly;
1314 int s;
1315
1316 flags = 0;
1317 if (mntflags & MNT_FORCE)
1318 flags |= FORCECLOSE;
1319
1320 ump = VFSTOUFS(mp);
1321 fs = ump->um_lfs;
1322
1323 /* wake up the cleaner so it can die */
1324 wakeup(&fs->lfs_nextseg);
1325 wakeup(&lfs_allclean_wakeup);
1326 simple_lock(&fs->lfs_interlock);
1327 while (fs->lfs_sleepers)
1328 ltsleep(&fs->lfs_sleepers, PRIBIO + 1, "lfs_sleepers", 0,
1329 &fs->lfs_interlock);
1330 simple_unlock(&fs->lfs_interlock);
1331
1332 #ifdef QUOTA
1333 if (mp->mnt_flag & MNT_QUOTA) {
1334 int i;
1335 error = vflush(mp, fs->lfs_ivnode, SKIPSYSTEM|flags);
1336 if (error)
1337 return (error);
1338 for (i = 0; i < MAXQUOTAS; i++) {
1339 if (ump->um_quotas[i] == NULLVP)
1340 continue;
1341 quotaoff(p, mp, i);
1342 }
1343 /*
1344 * Here we fall through to vflush again to ensure
1345 * that we have gotten rid of all the system vnodes.
1346 */
1347 }
1348 #endif
1349 if ((error = vflush(mp, fs->lfs_ivnode, flags)) != 0)
1350 return (error);
1351 if ((error = VFS_SYNC(mp, 1, p->p_ucred, p)) != 0)
1352 return (error);
1353 s = splbio();
1354 if (LIST_FIRST(&fs->lfs_ivnode->v_dirtyblkhd))
1355 panic("lfs_unmount: still dirty blocks on ifile vnode");
1356 splx(s);
1357
1358 /* Comment on ifile size if it has become too large */
1359 if (!(fs->lfs_flags & LFS_WARNED)) {
1360 if (fs->lfs_ivnode->v_size / fs->lfs_bsize > LFS_MAX_BUFS)
1361 printf("lfs_unmount: please consider increasing"
1362 " NBUF to at least %lld\n",
1363 (long long)(fs->lfs_ivnode->v_size /
1364 fs->lfs_bsize) *
1365 (long long)(nbuf / LFS_MAX_BUFS));
1366 if (fs->lfs_ivnode->v_size > LFS_MAX_BYTES)
1367 printf("lfs_unmount: please consider increasing"
1368 " BUFPAGES to at least %lld\n",
1369 (long long)(fs->lfs_ivnode->v_size *
1370 bufpages / LFS_MAX_BYTES));
1371 }
1372
1373 /* Explicitly write the superblock, to update serial and pflags */
1374 fs->lfs_pflags |= LFS_PF_CLEAN;
1375 lfs_writesuper(fs, fs->lfs_sboffs[0]);
1376 lfs_writesuper(fs, fs->lfs_sboffs[1]);
1377 while (fs->lfs_iocount)
1378 tsleep(&fs->lfs_iocount, PRIBIO + 1, "lfs_umount", 0);
1379
1380 /* Finish with the Ifile, now that we're done with it */
1381 vrele(fs->lfs_ivnode);
1382 vgone(fs->lfs_ivnode);
1383
   1384 	ronly = fs->lfs_ronly;
1385 if (ump->um_devvp->v_type != VBAD)
1386 ump->um_devvp->v_specmountpoint = NULL;
1387 vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
1388 error = VOP_CLOSE(ump->um_devvp,
1389 ronly ? FREAD : FREAD|FWRITE, NOCRED, p);
1390 vput(ump->um_devvp);
1391
1392 /* Free per-mount data structures */
1393 free(fs->lfs_suflags[0], M_SEGMENT);
1394 free(fs->lfs_suflags[1], M_SEGMENT);
1395 free(fs->lfs_suflags, M_SEGMENT);
1396 lfs_free_resblks(fs);
1397 free(fs, M_UFSMNT);
1398 free(ump, M_UFSMNT);
1399
1400 mp->mnt_data = NULL;
1401 mp->mnt_flag &= ~MNT_LOCAL;
1402 return (error);
1403 }
1404
1405 /*
1406 * Get file system statistics.
1407 */
1408 int
1409 lfs_statfs(struct mount *mp, struct statfs *sbp, struct proc *p)
1410 {
1411 struct lfs *fs;
1412 struct ufsmount *ump;
1413
1414 ump = VFSTOUFS(mp);
1415 fs = ump->um_lfs;
1416 if (fs->lfs_magic != LFS_MAGIC)
1417 panic("lfs_statfs: magic");
1418
1419 sbp->f_type = 0;
1420 sbp->f_bsize = fs->lfs_fsize;
1421 sbp->f_iosize = fs->lfs_bsize;
1422 sbp->f_blocks = fsbtofrags(fs, LFS_EST_NONMETA(fs));
1423 sbp->f_bfree = fsbtofrags(fs, LFS_EST_BFREE(fs));
1424 sbp->f_bavail = fsbtofrags(fs, (long)LFS_EST_BFREE(fs) -
1425 (long)LFS_EST_RSVD(fs));
1426
1427 sbp->f_files = fs->lfs_bfree / btofsb(fs, fs->lfs_ibsize) * INOPB(fs);
1428 sbp->f_ffree = sbp->f_files - fs->lfs_nfiles;
1429 copy_statfs_info(sbp, mp);
1430 return (0);
1431 }
1432
1433 /*
1434 * Go through the disk queues to initiate sandbagged IO;
1435 * go through the inodes to write those that have been modified;
1436 * initiate the writing of the super block if it has been modified.
1437 *
1438 * Note: we are always called with the filesystem marked `MPBUSY'.
1439 */
1440 int
1441 lfs_sync(struct mount *mp, int waitfor, struct ucred *cred, struct proc *p)
1442 {
1443 int error;
1444 struct lfs *fs;
1445
1446 fs = VFSTOUFS(mp)->um_lfs;
1447 if (fs->lfs_ronly)
1448 return 0;
1449 lfs_writer_enter(fs, "lfs_dirops");
1450
1451 /* All syncs must be checkpoints until roll-forward is implemented. */
1452 error = lfs_segwrite(mp, SEGM_CKP | (waitfor ? SEGM_SYNC : 0));
1453 lfs_writer_leave(fs);
1454 #ifdef QUOTA
1455 qsync(mp);
1456 #endif
1457 return (error);
1458 }
1459
1460 extern struct lock ufs_hashlock;
1461
1462 /*
1463 * Look up an LFS dinode number to find its incore vnode. If not already
1464 * in core, read it in from the specified device. Return the inode locked.
1465 * Detection and handling of mount points must be done by the calling routine.
1466 */
1467 int
1468 lfs_vget(struct mount *mp, ino_t ino, struct vnode **vpp)
1469 {
1470 struct lfs *fs;
1471 struct ufs1_dinode *dip;
1472 struct inode *ip;
1473 struct buf *bp;
1474 struct ifile *ifp;
1475 struct vnode *vp;
1476 struct ufsmount *ump;
1477 daddr_t daddr;
1478 dev_t dev;
1479 int error, retries;
1480 struct timespec ts;
1481
1482 ump = VFSTOUFS(mp);
1483 dev = ump->um_dev;
1484 fs = ump->um_lfs;
1485
1486 /*
1487 * If the filesystem is not completely mounted yet, suspend
1488 * any access requests (wait for roll-forward to complete).
1489 */
1490 while ((fs->lfs_flags & LFS_NOTYET) && curproc->p_pid != fs->lfs_rfpid)
1491 tsleep(&fs->lfs_flags, PRIBIO+1, "lfs_notyet", 0);
1492
1493 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL)
1494 return (0);
1495
1496 if ((error = getnewvnode(VT_LFS, mp, lfs_vnodeop_p, &vp)) != 0) {
1497 *vpp = NULL;
1498 return (error);
1499 }
1500
1501 do {
1502 if ((*vpp = ufs_ihashget(dev, ino, LK_EXCLUSIVE)) != NULL) {
1503 ungetnewvnode(vp);
1504 return (0);
1505 }
1506 } while (lockmgr(&ufs_hashlock, LK_EXCLUSIVE|LK_SLEEPFAIL, 0));
1507
1508 /* Translate the inode number to a disk address. */
1509 if (ino == LFS_IFILE_INUM)
1510 daddr = fs->lfs_idaddr;
1511 else {
1512 /* XXX bounds-check this too */
1513 LFS_IENTRY(ifp, fs, ino, bp);
1514 daddr = ifp->if_daddr;
1515 if (fs->lfs_version > 1) {
1516 ts.tv_sec = ifp->if_atime_sec;
1517 ts.tv_nsec = ifp->if_atime_nsec;
1518 }
1519
1520 brelse(bp);
1521 if (daddr == LFS_UNUSED_DADDR) {
1522 *vpp = NULLVP;
1523 ungetnewvnode(vp);
1524 lockmgr(&ufs_hashlock, LK_RELEASE, 0);
1525 return (ENOENT);
1526 }
1527 }
1528
1529 /* Allocate/init new vnode/inode. */
1530 lfs_vcreate(mp, ino, vp);
1531
1532 /*
1533 * Put it onto its hash chain and lock it so that other requests for
1534 * this inode will block if they arrive while we are sleeping waiting
1535 * for old data structures to be purged or for the contents of the
1536 * disk portion of this inode to be read.
1537 */
1538 ip = VTOI(vp);
1539 ufs_ihashins(ip);
1540 lockmgr(&ufs_hashlock, LK_RELEASE, 0);
1541
1542 /*
1543 * XXX
1544 * This may not need to be here, logically it should go down with
1545 * the i_devvp initialization.
1546 * Ask Kirk.
1547 */
1548 ip->i_lfs = ump->um_lfs;
1549
1550 /* Read in the disk contents for the inode, copy into the inode. */
1551 retries = 0;
1552 again:
1553 error = bread(ump->um_devvp, fsbtodb(fs, daddr),
1554 (fs->lfs_version == 1 ? fs->lfs_bsize : fs->lfs_ibsize),
1555 NOCRED, &bp);
1556 if (error) {
1557 /*
1558 * The inode does not contain anything useful, so it would
1559 * be misleading to leave it on its hash chain. With mode
1560 * still zero, it will be unlinked and returned to the free
1561 * list by vput().
1562 */
1563 vput(vp);
1564 brelse(bp);
1565 *vpp = NULL;
1566 return (error);
1567 }
1568
1569 dip = lfs_ifind(fs, ino, bp);
1570 if (dip == NULL) {
1571 /* Assume write has not completed yet; try again */
1572 bp->b_flags |= B_INVAL;
1573 brelse(bp);
1574 ++retries;
1575 if (retries > LFS_IFIND_RETRIES) {
1576 #ifdef DEBUG
1577 /* If the seglock is held look at the bpp to see
1578 what is there anyway */
1579 if (fs->lfs_seglock > 0) {
1580 struct buf **bpp;
1581 struct ufs1_dinode *dp;
1582 int i;
1583
1584 for (bpp = fs->lfs_sp->bpp;
1585 bpp != fs->lfs_sp->cbpp; ++bpp) {
1586 if ((*bpp)->b_vp == fs->lfs_ivnode &&
1587 bpp != fs->lfs_sp->bpp) {
1588 /* Inode block */
1589 printf("block 0x%" PRIx64 ": ",
1590 (*bpp)->b_blkno);
1591 dp = (struct ufs1_dinode *)(*bpp)->b_data;
1592 for (i = 0; i < INOPB(fs); i++)
1593 if (dp[i].di_u.inumber)
1594 printf("%d ", dp[i].di_u.inumber);
1595 printf("\n");
1596 }
1597 }
1598 }
1599 #endif
1600 panic("lfs_vget: dinode not found");
1601 }
1602 printf("lfs_vget: dinode %d not found, retrying...\n", ino);
1603 (void)tsleep(&fs->lfs_iocount, PRIBIO + 1, "lfs ifind", 1);
1604 goto again;
1605 }
1606 *ip->i_din.ffs1_din = *dip;
1607 brelse(bp);
1608
1609 if (fs->lfs_version > 1) {
1610 ip->i_ffs1_atime = ts.tv_sec;
1611 ip->i_ffs1_atimensec = ts.tv_nsec;
1612 }
1613
1614 lfs_vinit(mp, &vp);
1615
1616 *vpp = vp;
1617
1618 KASSERT(VOP_ISLOCKED(vp));
1619
1620 return (0);
1621 }
1622
1623 /*
1624 * File handle to vnode
1625 */
1626 int
1627 lfs_fhtovp(struct mount *mp, struct fid *fhp, struct vnode **vpp)
1628 {
1629 struct lfid *lfhp;
1630 struct buf *bp;
1631 IFILE *ifp;
1632 int32_t daddr;
1633 struct lfs *fs;
1634
1635 lfhp = (struct lfid *)fhp;
1636 if (lfhp->lfid_ino < LFS_IFILE_INUM)
1637 return ESTALE;
1638
1639 fs = VFSTOUFS(mp)->um_lfs;
1640 if (lfhp->lfid_ident != fs->lfs_ident)
1641 return ESTALE;
1642
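	/* Reject inode numbers beyond what the current ifile can describe. */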
1643 if (lfhp->lfid_ino >
1644 ((VTOI(fs->lfs_ivnode)->i_ffs1_size >> fs->lfs_bshift) -
1645 fs->lfs_cleansz - fs->lfs_segtabsz) * fs->lfs_ifpb)
1646 return ESTALE;
1647
1648 if (ufs_ihashlookup(VFSTOUFS(mp)->um_dev, lfhp->lfid_ino) == NULLVP) {
1649 LFS_IENTRY(ifp, fs, lfhp->lfid_ino, bp);
1650 daddr = ifp->if_daddr;
1651 brelse(bp);
1652 if (daddr == LFS_UNUSED_DADDR)
1653 return ESTALE;
1654 }
1655
1656 return (ufs_fhtovp(mp, &lfhp->lfid_ufid, vpp));
1657 }
1658
1659 /*
1660 * Vnode pointer to File handle
1661 */
1662 /* ARGSUSED */
1663 int
1664 lfs_vptofh(struct vnode *vp, struct fid *fhp)
1665 {
1666 struct inode *ip;
1667 struct lfid *lfhp;
1668
1669 ip = VTOI(vp);
1670 lfhp = (struct lfid *)fhp;
1671 lfhp->lfid_len = sizeof(struct lfid);
1672 lfhp->lfid_ino = ip->i_number;
1673 lfhp->lfid_gen = ip->i_gen;
1674 lfhp->lfid_ident = ip->i_lfs->lfs_ident;
1675 return (0);
1676 }
1677
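/*
 * sysctl helper for vfs.lfs.dostats: setting it to zero also clears the
 * accumulated statistics.
 */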
1678 static int
1679 sysctl_lfs_dostats(SYSCTLFN_ARGS)
1680 {
1681 extern struct lfs_stats lfs_stats;
1682 extern int lfs_dostats;
1683 int error;
1684
1685 error = sysctl_lookup(SYSCTLFN_CALL(rnode));
1686 if (error || newp == NULL)
1687 return (error);
1688
1689 if (lfs_dostats == 0)
1690 memset(&lfs_stats,0,sizeof(lfs_stats));
1691
1692 return (0);
1693 }
1694
1695 SYSCTL_SETUP(sysctl_vfs_lfs_setup, "sysctl vfs.lfs setup")
1696 {
1697 extern int lfs_writeindir, lfs_dostats, lfs_clean_vnhead;
1698
1699 sysctl_createv(SYSCTL_PERMANENT,
1700 CTLTYPE_NODE, "vfs", NULL,
1701 NULL, 0, NULL, 0,
1702 CTL_VFS, CTL_EOL);
1703 sysctl_createv(SYSCTL_PERMANENT,
1704 CTLTYPE_NODE, "lfs", NULL,
1705 NULL, 0, NULL, 0,
1706 CTL_VFS, 5, CTL_EOL);
1707 /*
1708 * XXX the "5" above could be dynamic, thereby eliminating one
1709 * more instance of the "number to vfs" mapping problem, but
   1710 	 * "5" is the order as taken from sys/mount.h
1711 */
1712
1713 sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
1714 CTLTYPE_INT, "flushindir", NULL,
1715 NULL, 0, &lfs_writeindir, 0,
1716 CTL_VFS, 5, LFS_WRITEINDIR, CTL_EOL);
1717 sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
1718 CTLTYPE_INT, "clean_vnhead", NULL,
1719 NULL, 0, &lfs_clean_vnhead, 0,
1720 CTL_VFS, 5, LFS_CLEAN_VNHEAD, CTL_EOL);
1721 sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
1722 CTLTYPE_INT, "dostats", NULL,
1723 sysctl_lfs_dostats, 0, &lfs_dostats, 0,
1724 CTL_VFS, 5, LFS_DOSTATS, CTL_EOL);
1725 }
1726
1727 /*
1728 * ufs_bmaparray callback function for writing.
1729 *
1730 * Since blocks will be written to the new segment anyway,
1731 * we don't care about current daddr of them.
1732 */
1733 static boolean_t
1734 lfs_issequential_hole(const struct ufsmount *ump,
1735 daddr_t daddr0, daddr_t daddr1)
1736 {
1737
1738 KASSERT(daddr0 == UNWRITTEN ||
1739 (0 <= daddr0 && daddr0 <= LFS_MAX_DADDR));
1740 KASSERT(daddr1 == UNWRITTEN ||
1741 (0 <= daddr1 && daddr1 <= LFS_MAX_DADDR));
1742
1743 /* NOTE: all we want to know here is 'hole or not'. */
1744 /* NOTE: UNASSIGNED is converted to 0 by ufs_bmaparray. */
1745
1746 /*
1747 * treat UNWRITTENs and all resident blocks as 'contiguous'
1748 */
1749 if (daddr0 != 0 && daddr1 != 0)
1750 return TRUE;
1751
1752 /*
1753 * both are in hole?
1754 */
1755 if (daddr0 == 0 && daddr1 == 0)
1756 return TRUE; /* all holes are 'contiguous' for us. */
1757
1758 return FALSE;
1759 }
1760
1761 /*
1762 * lfs_gop_write functions exactly like genfs_gop_write, except that
1763 * (1) it requires the seglock to be held by its caller, and sp->fip
1764 * to be properly initialized (it will return without re-initializing
1765 * sp->fip, and without calling lfs_writeseg).
1766 * (2) it uses the remaining space in the segment, rather than VOP_BMAP,
1767 * to determine how large a block it can write at once (though it does
1768 * still use VOP_BMAP to find holes in the file);
1769 * (3) it calls lfs_gatherblock instead of VOP_STRATEGY on its blocks
1770 * (leaving lfs_writeseg to deal with the cluster blocks, so we might
1771 * now have clusters of clusters, ick.)
1772 */
1773 static int
1774 lfs_gop_write(struct vnode *vp, struct vm_page **pgs, int npages, int flags)
1775 {
1776 int i, s, error, run;
1777 int fs_bshift;
1778 vaddr_t kva;
1779 off_t eof, offset, startoffset;
1780 size_t bytes, iobytes, skipbytes;
1781 daddr_t lbn, blkno;
1782 struct vm_page *pg;
1783 struct buf *mbp, *bp;
1784 struct vnode *devvp = VTOI(vp)->i_devvp;
1785 struct inode *ip = VTOI(vp);
1786 struct lfs *fs = ip->i_lfs;
1787 struct segment *sp = fs->lfs_sp;
1788 UVMHIST_FUNC("lfs_gop_write"); UVMHIST_CALLED(ubchist);
1789
1790 /* The Ifile lives in the buffer cache */
1791 if (vp == fs->lfs_ivnode)
1792 return genfs_compat_gop_write(vp, pgs, npages, flags);
1793
1794 /*
1795 * Sometimes things slip past the filters in lfs_putpages,
1796 * and the pagedaemon tries to write pages---problem is
1797 * that the pagedaemon never acquires the segment lock.
1798 *
1799 * Unbusy and unclean the pages, and put them on the ACTIVE
1800 * queue under the hypothesis that they couldn't have got here
1801 * unless they were modified *quite* recently.
1802 *
1803 * XXXUBC that last statement is an oversimplification of course.
1804 */
1805 if (!(fs->lfs_seglock) || fs->lfs_lockpid != curproc->p_pid) {
1806 simple_lock(&vp->v_interlock);
1807 #ifdef DEBUG
1808 printf("lfs_gop_write: seglock not held\n");
1809 #endif
1810 uvm_lock_pageq();
1811 for (i = 0; i < npages; i++) {
1812 pg = pgs[i];
1813
1814 if (pg->flags & PG_PAGEOUT)
1815 uvmexp.paging--;
1816 if (pg->flags & PG_DELWRI) {
1817 uvm_pageunwire(pg);
1818 }
1819 uvm_pageactivate(pg);
1820 pg->flags &= ~(PG_CLEAN|PG_DELWRI|PG_PAGEOUT|PG_RELEASED);
1821 #ifdef DEBUG_LFS
1822 printf("pg[%d]->flags = %x\n", i, pg->flags);
1823 printf("pg[%d]->pqflags = %x\n", i, pg->pqflags);
1824 printf("pg[%d]->uanon = %p\n", i, pg->uanon);
1825 printf("pg[%d]->uobject = %p\n", i, pg->uobject);
1826 printf("pg[%d]->wire_count = %d\n", i, pg->wire_count);
1827 printf("pg[%d]->loan_count = %d\n", i, pg->loan_count);
1828 #endif
1829 }
1830 /* uvm_pageunbusy takes care of PG_BUSY, PG_WANTED */
1831 uvm_page_unbusy(pgs, npages);
1832 uvm_unlock_pageq();
1833 simple_unlock(&vp->v_interlock);
1834 return EAGAIN;
1835 }
1836
1837 UVMHIST_LOG(ubchist, "vp %p pgs %p npages %d flags 0x%x",
1838 vp, pgs, npages, flags);
1839
1840 GOP_SIZE(vp, vp->v_size, &eof, GOP_SIZE_WRITE);
1841
1842 if (vp->v_type == VREG)
1843 fs_bshift = vp->v_mount->mnt_fs_bshift;
1844 else
1845 fs_bshift = DEV_BSHIFT;
1846 error = 0;
1847 pg = pgs[0];
1848 startoffset = pg->offset;
1849 bytes = MIN(npages << PAGE_SHIFT, eof - startoffset);
1850 skipbytes = 0;
1851
1852 /* KASSERT(bytes != 0); */
1853 if (bytes == 0)
1854 printf("ino %d bytes == 0 offset %" PRId64 "\n",
1855 VTOI(vp)->i_number, pgs[0]->offset);
1856
1857 /* Swap PG_DELWRI for PG_PAGEOUT */
1858 for (i = 0; i < npages; i++)
1859 if (pgs[i]->flags & PG_DELWRI) {
1860 KASSERT(!(pgs[i]->flags & PG_PAGEOUT));
1861 pgs[i]->flags &= ~PG_DELWRI;
1862 pgs[i]->flags |= PG_PAGEOUT;
1863 uvmexp.paging++;
1864 uvm_lock_pageq();
1865 uvm_pageunwire(pgs[i]);
1866 uvm_unlock_pageq();
1867 }
1868
1869 /*
1870 * Check to make sure we're starting on a block boundary.
1871 * We'll check later to make sure we always write entire
1872 * blocks (or fragments).
1873 */
1874 if (startoffset & fs->lfs_bmask)
1875 printf("%" PRId64 " & %" PRId64 " = %" PRId64 "\n",
1876 startoffset, fs->lfs_bmask,
1877 startoffset & fs->lfs_bmask);
1878 KASSERT((startoffset & fs->lfs_bmask) == 0);
1879 if (bytes & fs->lfs_ffmask) {
1880 printf("lfs_gop_write: asked to write %ld bytes\n", (long)bytes);
1881 panic("lfs_gop_write: non-integer blocks");
1882 }
1883
1884 kva = uvm_pagermapin(pgs, npages,
1885 UVMPAGER_MAPIN_WRITE | UVMPAGER_MAPIN_WAITOK);
1886
1887 s = splbio();
1888 simple_lock(&global_v_numoutput_slock);
1889 vp->v_numoutput += 2; /* one for biodone, one for aiodone */
1890 simple_unlock(&global_v_numoutput_slock);
1891 mbp = pool_get(&bufpool, PR_WAITOK);
1892 splx(s);
1893
1894 memset(mbp, 0, sizeof(*bp));
1895 BUF_INIT(mbp);
1896 UVMHIST_LOG(ubchist, "vp %p mbp %p num now %d bytes 0x%x",
1897 vp, mbp, vp->v_numoutput, bytes);
1898 mbp->b_bufsize = npages << PAGE_SHIFT;
1899 mbp->b_data = (void *)kva;
1900 mbp->b_resid = mbp->b_bcount = bytes;
1901 mbp->b_flags = B_BUSY|B_WRITE|B_AGE|B_CALL;
1902 mbp->b_iodone = uvm_aio_biodone;
1903 mbp->b_vp = vp;
1904
1905 bp = NULL;
1906 for (offset = startoffset;
1907 bytes > 0;
1908 offset += iobytes, bytes -= iobytes) {
1909 lbn = offset >> fs_bshift;
1910 error = ufs_bmaparray(vp, lbn, &blkno, NULL, NULL, &run,
1911 lfs_issequential_hole);
1912 if (error) {
1913 UVMHIST_LOG(ubchist, "ufs_bmaparray() -> %d",
1914 error,0,0,0);
1915 skipbytes += bytes;
1916 bytes = 0;
1917 break;
1918 }
1919
1920 iobytes = MIN((((off_t)lbn + 1 + run) << fs_bshift) - offset,
1921 bytes);
1922 if (blkno == (daddr_t)-1) {
1923 skipbytes += iobytes;
1924 continue;
1925 }
1926
1927 /*
1928 * Discover how much we can really pack into this buffer.
1929 */
1930 /* If no room in the current segment, finish it up */
1931 if (sp->sum_bytes_left < sizeof(int32_t) ||
1932 sp->seg_bytes_left < (1 << fs->lfs_bshift)) {
1933 int version;
1934
1935 lfs_updatemeta(sp);
1936
1937 version = sp->fip->fi_version;
1938 (void) lfs_writeseg(fs, sp);
1939
1940 sp->fip->fi_version = version;
1941 sp->fip->fi_ino = ip->i_number;
1942 /* Add the current file to the segment summary. */
1943 ++((SEGSUM *)(sp->segsum))->ss_nfinfo;
1944 sp->sum_bytes_left -= FINFOSIZE;
1945 }
1946 /* Check both for space in segment and space in segsum */
1947 iobytes = MIN(iobytes, (sp->seg_bytes_left >> fs_bshift)
1948 << fs_bshift);
1949 iobytes = MIN(iobytes, (sp->sum_bytes_left / sizeof(int32_t))
1950 << fs_bshift);
1951 KASSERT(iobytes > 0);
1952
1953 /* if it's really one i/o, don't make a second buf */
1954 if (offset == startoffset && iobytes == bytes) {
1955 bp = mbp;
1956 /* printf("bp is mbp\n"); */
1957 /* correct overcount if there is no second buffer */
1958 s = splbio();
1959 simple_lock(&global_v_numoutput_slock);
1960 --vp->v_numoutput;
1961 simple_unlock(&global_v_numoutput_slock);
1962 splx(s);
1963 } else {
1964 /* printf("bp is not mbp\n"); */
1965 s = splbio();
1966 bp = pool_get(&bufpool, PR_WAITOK);
1967 UVMHIST_LOG(ubchist, "vp %p bp %p num now %d",
1968 vp, bp, vp->v_numoutput, 0);
1969 splx(s);
1970 memset(bp, 0, sizeof(*bp));
1971 BUF_INIT(bp);
1972 bp->b_data = (char *)kva +
1973 (vaddr_t)(offset - pg->offset);
1974 bp->b_resid = bp->b_bcount = iobytes;
1975 bp->b_flags = B_BUSY|B_WRITE|B_CALL;
1976 bp->b_iodone = uvm_aio_biodone1;
1977 }
1978
1979 /* XXX This is silly ... is this necessary? */
1980 bp->b_vp = NULL;
1981 s = splbio();
1982 bgetvp(vp, bp);
1983 splx(s);
1984
1985 bp->b_lblkno = lblkno(fs, offset);
1986 bp->b_private = mbp;
1987 if (devvp->v_type == VBLK) {
1988 bp->b_dev = devvp->v_rdev;
1989 }
1990 VOP_BWRITE(bp);
1991 while (lfs_gatherblock(sp, bp, NULL))
1992 continue;
1993 }
1994
1995 if (skipbytes) {
1996 UVMHIST_LOG(ubchist, "skipbytes %d", skipbytes, 0,0,0);
1997 s = splbio();
1998 if (error) {
1999 mbp->b_flags |= B_ERROR;
2000 mbp->b_error = error;
2001 }
2002 mbp->b_resid -= skipbytes;
2003 if (mbp->b_resid == 0) {
2004 biodone(mbp);
2005 }
2006 splx(s);
2007 }
2008 UVMHIST_LOG(ubchist, "returning 0", 0,0,0,0);
2009 return (0);
2010 }
2011
2012 /*
2013 * finish vnode/inode initialization.
2014 * used by lfs_vget and lfs_fastvget.
2015 */
2016 void
2017 lfs_vinit(struct mount *mp, struct vnode **vpp)
2018 {
2019 struct vnode *vp = *vpp;
2020 struct inode *ip = VTOI(vp);
2021 struct ufsmount *ump = VFSTOUFS(mp);
2022 int i;
2023
2024 ip->i_mode = ip->i_ffs1_mode;
2025 ip->i_ffs_effnlink = ip->i_nlink = ip->i_ffs1_nlink;
2026 ip->i_lfs_osize = ip->i_size = ip->i_ffs1_size;
2027 ip->i_flags = ip->i_ffs1_flags;
2028 ip->i_gen = ip->i_ffs1_gen;
2029 ip->i_uid = ip->i_ffs1_uid;
2030 ip->i_gid = ip->i_ffs1_gid;
2031
2032 ip->i_lfs_effnblks = ip->i_ffs1_blocks;
2033
2034 /*
2035 * Initialize the vnode from the inode, check for aliases. In all
2036 * cases re-init ip, the underlying vnode/inode may have changed.
2037 */
2038 ufs_vinit(mp, lfs_specop_p, lfs_fifoop_p, &vp);
2039
2040 memset(ip->i_lfs_fragsize, 0, NDADDR * sizeof(*ip->i_lfs_fragsize));
2041 if (vp->v_type != VLNK ||
2042 VTOI(vp)->i_size >= vp->v_mount->mnt_maxsymlinklen) {
2043 struct lfs *fs = ump->um_lfs;
2044 #ifdef DEBUG
2045 for (i = (ip->i_size + fs->lfs_bsize - 1) >> fs->lfs_bshift;
2046 i < NDADDR; i++) {
2047 if (ip->i_ffs1_db[i] != 0) {
2048 inconsistent:
2049 lfs_dump_dinode(ip->i_din.ffs1_din);
2050 panic("inconsistent inode");
2051 }
2052 }
2053 for ( ; i < NDADDR + NIADDR; i++) {
2054 if (ip->i_ffs1_ib[i - NDADDR] != 0) {
2055 goto inconsistent;
2056 }
2057 }
2058 #endif /* DEBUG */
2059 for (i = 0; i < NDADDR; i++)
2060 if (ip->i_ffs1_db[i] != 0)
2061 ip->i_lfs_fragsize[i] = blksize(fs, ip, i);
2062 }
2063
2064 #ifdef DEBUG
2065 if (vp->v_type == VNON) {
2066 printf("lfs_vinit: ino %d is type VNON! (ifmt=%o)\n",
2067 ip->i_number, (ip->i_mode & IFMT) >> 12);
2068 lfs_dump_dinode(ip->i_din.ffs1_din);
2069 #ifdef DDB
2070 Debugger();
2071 #endif /* DDB */
2072 }
2073 #endif /* DEBUG */
2074
2075 /*
2076 * Finish inode initialization now that aliasing has been resolved.
2077 */
2078
2079 ip->i_devvp = ump->um_devvp;
2080 VREF(ip->i_devvp);
2081 genfs_node_init(vp, &lfs_genfsops);
2082 uvm_vnp_setsize(vp, ip->i_size);
2083
2084 *vpp = vp;
2085 }
2086