/*	$NetBSD: lfs_segment.c,v 1.74 2002/05/14 20:03:54 perseant Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_segment.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_segment.c,v 1.74 2002/05/14 20:03:54 perseant Exp $");

#define ivndebug(vp,str) printf("ino %d: %s\n",VTOI(vp)->i_number,(str))

#if defined(_KERNEL_OPT)
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm_extern.h>

extern int count_lock_queue(void);
extern struct simplelock vnode_free_list_slock;		/* XXX */

static void lfs_cluster_callback(struct buf *);
static struct buf **lookahead_pagemove(struct buf **, int, size_t *);

/*
 * Determine if it's OK to start a partial in this segment, or if we need
 * to go on to a new segment.
 */
#define	LFS_PARTIAL_FITS(fs) \
	((fs)->lfs_fsbpseg - ((fs)->lfs_offset - (fs)->lfs_curseg) > \
	fragstofsb((fs), (fs)->lfs_frag))
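/*
 * XXX annotation (reading of the macro above, not from the original
 * source): lfs_offset and lfs_curseg are both fsb addresses, so the
 * left-hand side is the number of fsb still unwritten in the current
 * segment, while the right-hand side is one full block (lfs_frag
 * fragments) expressed in fsb; a partial segment is started here only
 * if at least one block's worth of space remains.
 */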

void	 lfs_callback(struct buf *);
int	 lfs_gather(struct lfs *, struct segment *,
	     struct vnode *, int (*)(struct lfs *, struct buf *));
int	 lfs_gatherblock(struct segment *, struct buf *, int *);
void	 lfs_iset(struct inode *, ufs_daddr_t, time_t);
int	 lfs_match_fake(struct lfs *, struct buf *);
int	 lfs_match_data(struct lfs *, struct buf *);
int	 lfs_match_dindir(struct lfs *, struct buf *);
int	 lfs_match_indir(struct lfs *, struct buf *);
int	 lfs_match_tindir(struct lfs *, struct buf *);
void	 lfs_newseg(struct lfs *);
void	 lfs_shellsort(struct buf **, ufs_daddr_t *, int);
void	 lfs_supercallback(struct buf *);
void	 lfs_updatemeta(struct segment *);
int	 lfs_vref(struct vnode *);
void	 lfs_vunref(struct vnode *);
void	 lfs_writefile(struct lfs *, struct segment *, struct vnode *);
int	 lfs_writeinode(struct lfs *, struct segment *, struct inode *);
int	 lfs_writeseg(struct lfs *, struct segment *);
void	 lfs_writesuper(struct lfs *, daddr_t);
int	 lfs_writevnodes(struct lfs *fs, struct mount *mp,
	     struct segment *sp, int dirops);

int	lfs_allclean_wakeup;		/* Cleaner wakeup address. */
int	lfs_writeindir = 1;		/* whether to flush indir on non-ckp */
int	lfs_clean_vnhead = 0;		/* Allow freeing to head of vn list */
int	lfs_dirvcount = 0;		/* # active dirops */

/* Statistics Counters */
int lfs_dostats = 1;
struct lfs_stats lfs_stats;

extern int locked_queue_count;
extern long locked_queue_bytes;

/* op values to lfs_writevnodes */
#define	VN_REG		0
#define	VN_DIROP	1
#define	VN_EMPTY	2
#define	VN_CLEAN	3
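/*
 * XXX annotation (from the uses in lfs_writevnodes below): VN_REG
 * writes ordinary vnodes, VN_DIROP only vnodes with VDIROP set,
 * VN_EMPTY only vnodes with no dirty buffers (inode only), and
 * VN_CLEAN only vnodes with cleaning blocks (IN_CLEANING), plus the
 * Ifile and the vnode currently being flushed.
 */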

#define	LFS_MAX_ACTIVE	10
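/*
 * XXX annotation: once more than LFS_MAX_ACTIVE segments have been
 * dirtied since the last checkpoint (fs->lfs_nactive, incremented in
 * lfs_newseg), lfs_vflush and lfs_segwrite below escalate the write to
 * a checkpoint (SEGM_CKP).
 */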

/*
 * XXX KS - Set modification time on the Ifile, so the cleaner can
 * read the fs mod time off of it.  We don't set IN_UPDATE here,
 * since we don't really need this to be flushed to disk (and in any
 * case that wouldn't happen to the Ifile until we checkpoint).
 */
void
lfs_imtime(struct lfs *fs)
{
	struct timespec ts;
	struct inode *ip;

	TIMEVAL_TO_TIMESPEC(&time, &ts);
	ip = VTOI(fs->lfs_ivnode);
	ip->i_ffs_mtime = ts.tv_sec;
	ip->i_ffs_mtimensec = ts.tv_nsec;
}

/*
 * Ifile and meta data blocks are not marked busy, so segment writes MUST be
 * single threaded.  Currently, there are two paths into lfs_segwrite, sync()
 * and getnewbuf().  They both mark the file system busy.  Lfs_vflush()
 * explicitly marks the file system busy.  So lfs_segwrite is safe.  I think.
 */

#define SET_FLUSHING(fs,vp) (fs)->lfs_flushvp = (vp)
#define IS_FLUSHING(fs,vp)  ((fs)->lfs_flushvp == (vp))
#define CLR_FLUSHING(fs,vp) (fs)->lfs_flushvp = NULL
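/*
 * XXX annotation: lfs_flushvp records the vnode currently being
 * flushed, so that lfs_writefile and lfs_writevnodes can tell (via
 * IS_FLUSHING) that *all* of its blocks, meta-data included, must be
 * written out now.
 */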

int
lfs_vflush(struct vnode *vp)
{
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	struct buf *bp, *nbp, *tbp, *tnbp;
	int error, s;

	ip = VTOI(vp);
	fs = VFSTOUFS(vp->v_mount)->um_lfs;

	if (ip->i_flag & IN_CLEANING) {
#ifdef DEBUG_LFS
		ivndebug(vp,"vflush/in_cleaning");
#endif
		LFS_CLR_UINO(ip, IN_CLEANING);
		LFS_SET_UINO(ip, IN_MODIFIED);

		/*
		 * Toss any cleaning buffers that have real counterparts
		 * to avoid losing new data
		 */
		s = splbio();
		for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if (bp->b_flags & B_CALL) {
				for (tbp = vp->v_dirtyblkhd.lh_first; tbp;
				     tbp = tnbp)
				{
					tnbp = tbp->b_vnbufs.le_next;
					if (tbp->b_vp == bp->b_vp
					    && tbp->b_lblkno == bp->b_lblkno
					    && tbp != bp)
					{
						fs->lfs_avail += btofsb(fs, bp->b_bcount);
						wakeup(&fs->lfs_avail);
						lfs_freebuf(bp);
						bp = NULL;
						break;
					}
				}
			}
		}
		splx(s);
	}

	/* If the node is being written, wait until that is done */
	s = splbio();
	if (WRITEINPROG(vp)) {
#ifdef DEBUG_LFS
		ivndebug(vp,"vflush/writeinprog");
#endif
		tsleep(vp, PRIBIO+1, "lfs_vw", 0);
	}
	splx(s);

	/* Protect against VXLOCK deadlock in vinvalbuf() */
	lfs_seglock(fs, SEGM_SYNC);

	/* If we're supposed to flush a freed inode, just toss it */
	/* XXX - seglock, so these buffers can't be gathered, right? */
	if (ip->i_ffs_mode == 0) {
		printf("lfs_vflush: ino %d is freed, not flushing\n",
			ip->i_number);
		s = splbio();
		for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
			nbp = bp->b_vnbufs.le_next;
			if (bp->b_flags & B_DELWRI) { /* XXX always true? */
				fs->lfs_avail += btofsb(fs, bp->b_bcount);
				wakeup(&fs->lfs_avail);
			}
			/* Copied from lfs_writeseg */
			if (bp->b_flags & B_CALL) {
				/* if B_CALL, it was created with newbuf */
				lfs_freebuf(bp);
				bp = NULL;
			} else {
				bremfree(bp);
				LFS_UNLOCK_BUF(bp);
				bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
						 B_GATHERED);
				bp->b_flags |= B_DONE;
				reassignbuf(bp, vp);
				brelse(bp);
			}
		}
		splx(s);
		LFS_CLR_UINO(ip, IN_CLEANING);
		LFS_CLR_UINO(ip, IN_MODIFIED | IN_ACCESSED);
		ip->i_flag &= ~IN_ALLMOD;
		printf("lfs_vflush: done not flushing ino %d\n",
			ip->i_number);
		lfs_segunlock(fs);
		return 0;
	}

	SET_FLUSHING(fs,vp);
	if (fs->lfs_nactive > LFS_MAX_ACTIVE) {
		error = lfs_segwrite(vp->v_mount, SEGM_SYNC|SEGM_CKP);
		CLR_FLUSHING(fs,vp);
		lfs_segunlock(fs);
		return error;
	}
	sp = fs->lfs_sp;

	if (vp->v_dirtyblkhd.lh_first == NULL) {
		lfs_writevnodes(fs, vp->v_mount, sp, VN_EMPTY);
	} else if ((ip->i_flag & IN_CLEANING) &&
		   (fs->lfs_sp->seg_flags & SEGM_CLEAN)) {
#ifdef DEBUG_LFS
		ivndebug(vp,"vflush/clean");
#endif
		lfs_writevnodes(fs, vp->v_mount, sp, VN_CLEAN);
	} else if (lfs_dostats) {
		if (vp->v_dirtyblkhd.lh_first || (VTOI(vp)->i_flag & IN_ALLMOD))
			++lfs_stats.vflush_invoked;
#ifdef DEBUG_LFS
		ivndebug(vp,"vflush");
#endif
	}

#ifdef DIAGNOSTIC
	/* XXX KS This actually can happen right now, though it shouldn't(?) */
	if (vp->v_flag & VDIROP) {
		printf("lfs_vflush: flushing VDIROP, this shouldn\'t be\n");
		/* panic("VDIROP being flushed...this can\'t happen"); */
	}
	if (vp->v_usecount < 0) {
		printf("usecount=%ld\n", (long)vp->v_usecount);
		panic("lfs_vflush: usecount<0");
	}
#endif

	do {
		do {
			if (vp->v_dirtyblkhd.lh_first != NULL)
				lfs_writefile(fs, sp, vp);
		} while (lfs_writeinode(fs, sp, ip));
	} while (lfs_writeseg(fs, sp) && ip->i_number == LFS_IFILE_INUM);

	if (lfs_dostats) {
		++lfs_stats.nwrites;
		if (sp->seg_flags & SEGM_SYNC)
			++lfs_stats.nsync_writes;
		if (sp->seg_flags & SEGM_CKP)
			++lfs_stats.ncheckpoints;
	}
	/*
	 * If we were called from somewhere that has already held the seglock
	 * (e.g., lfs_markv()), the lfs_segunlock will not wait for
	 * the write to complete because we are still locked.
	 * Since lfs_vflush() must return the vnode with no dirty buffers,
	 * we must explicitly wait, if that is the case.
	 *
	 * We compare the iocount against 1, not 0, because it is
	 * artificially incremented by lfs_seglock().
	 */
	if (fs->lfs_seglock > 1) {
		s = splbio();
		while (fs->lfs_iocount > 1)
			(void)tsleep(&fs->lfs_iocount, PRIBIO + 1,
				     "lfs_vflush", 0);
		splx(s);
	}
	lfs_segunlock(fs);

	CLR_FLUSHING(fs,vp);
	return (0);
}

#ifdef DEBUG_LFS_VERBOSE
# define vndebug(vp,str) if (VTOI(vp)->i_flag & IN_CLEANING) printf("not writing ino %d because %s (op %d)\n",VTOI(vp)->i_number,(str),op)
#else
# define vndebug(vp,str)
#endif

int
lfs_writevnodes(struct lfs *fs, struct mount *mp, struct segment *sp, int op)
{
	struct inode *ip;
	struct vnode *vp;
	int inodes_written = 0, only_cleaning;
	int needs_unlock;

#ifndef LFS_NO_BACKVP_HACK
	/* BEGIN HACK */
#define	VN_OFFSET	(((caddr_t)&vp->v_mntvnodes.le_next) - (caddr_t)vp)
#define	BACK_VP(VP)	((struct vnode *)(((caddr_t)VP->v_mntvnodes.le_prev) - VN_OFFSET))
#define	BEG_OF_VLIST	((struct vnode *)(((caddr_t)&mp->mnt_vnodelist.lh_first) - VN_OFFSET))
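/*
 * XXX annotation: le_prev points at the previous element's le_next
 * field (or at the list head's lh_first), so backing it up by
 * VN_OFFSET recovers the address of the previous vnode itself.  The
 * BEG_OF_VLIST sentinel is what BACK_VP yields when le_prev points at
 * the list head, which terminates the backwards walk below.
 */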

	/* Find last vnode. */
 loop:	for (vp = mp->mnt_vnodelist.lh_first;
	     vp && vp->v_mntvnodes.le_next != NULL;
	     vp = vp->v_mntvnodes.le_next);
	for (; vp && vp != BEG_OF_VLIST; vp = BACK_VP(vp)) {
#else
 loop:
	for (vp = mp->mnt_vnodelist.lh_first;
	     vp != NULL;
	     vp = vp->v_mntvnodes.le_next) {
#endif
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp) {
			printf("lfs_writevnodes: starting over\n");
			goto loop;
		}

		ip = VTOI(vp);
		if ((op == VN_DIROP && !(vp->v_flag & VDIROP)) ||
		    (op != VN_DIROP && op != VN_CLEAN && (vp->v_flag & VDIROP))) {
			vndebug(vp,"dirop");
			continue;
		}

		if (op == VN_EMPTY && vp->v_dirtyblkhd.lh_first) {
			vndebug(vp,"empty");
			continue;
		}

		if (vp->v_type == VNON) {
			continue;
		}

		if (op == VN_CLEAN && ip->i_number != LFS_IFILE_INUM
		    && vp != fs->lfs_flushvp
		    && !(ip->i_flag & IN_CLEANING)) {
			vndebug(vp,"cleaning");
			continue;
		}

		if (lfs_vref(vp)) {
			vndebug(vp,"vref");
			continue;
		}

		needs_unlock = 0;
		if (VOP_ISLOCKED(vp)) {
			if (vp != fs->lfs_ivnode &&
			    vp->v_lock.lk_lockholder != curproc->p_pid) {
#ifdef DEBUG_LFS
				printf("lfs_writevnodes: not writing ino %d,"
				       " locked by pid %d\n",
				       VTOI(vp)->i_number,
				       vp->v_lock.lk_lockholder);
#endif
				lfs_vunref(vp);
				continue;
			}
		} else if (vp != fs->lfs_ivnode) {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
			needs_unlock = 1;
		}

		only_cleaning = 0;
		/*
		 * Write the inode/file if dirty and it's not the IFILE.
		 */
		if ((ip->i_flag & IN_ALLMOD) ||
		    (vp->v_dirtyblkhd.lh_first != NULL))
		{
			only_cleaning = ((ip->i_flag & IN_ALLMOD) == IN_CLEANING);

			if (ip->i_number != LFS_IFILE_INUM
			    && vp->v_dirtyblkhd.lh_first != NULL)
			{
				lfs_writefile(fs, sp, vp);
			}
			if (vp->v_dirtyblkhd.lh_first != NULL) {
				if (WRITEINPROG(vp)) {
#ifdef DEBUG_LFS
					ivndebug(vp,"writevnodes/write2");
#endif
				} else if (!(ip->i_flag & IN_ALLMOD)) {
#ifdef DEBUG_LFS
					printf("<%d>",ip->i_number);
#endif
					LFS_SET_UINO(ip, IN_MODIFIED);
				}
			}
			(void) lfs_writeinode(fs, sp, ip);
			inodes_written++;
		}

		if (needs_unlock)
			VOP_UNLOCK(vp, 0);

		if (lfs_clean_vnhead && only_cleaning)
			lfs_vunref_head(vp);
		else
			lfs_vunref(vp);
	}
	return inodes_written;
}

/*
 * Do a checkpoint.
 */
int
lfs_segwrite(struct mount *mp, int flags)
{
	struct buf *bp;
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	struct vnode *vp;
	SEGUSE *segusep;
	ufs_daddr_t ibno;
	int do_ckp, did_ckp, error, i;
	int writer_set = 0;
	int dirty;
	int redo;

	fs = VFSTOUFS(mp)->um_lfs;

	if (fs->lfs_ronly)
		return EROFS;

	lfs_imtime(fs);

	/* printf("lfs_segwrite: ifile flags are 0x%lx\n",
	       (long)(VTOI(fs->lfs_ivnode)->i_flag)); */

#if 0
	/*
	 * If we are not the cleaner, and there is no space available,
	 * wait until cleaner writes.
	 */
	if (!(flags & SEGM_CLEAN) && !(fs->lfs_seglock && fs->lfs_sp &&
				       (fs->lfs_sp->seg_flags & SEGM_CLEAN)))
	{
		while (fs->lfs_avail <= 0) {
			LFS_CLEANERINFO(cip, fs, bp);
			LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);

			wakeup(&lfs_allclean_wakeup);
			wakeup(&fs->lfs_nextseg);
			error = tsleep(&fs->lfs_avail, PRIBIO + 1, "lfs_av2",
				       0);
			if (error) {
				return (error);
			}
		}
	}
#endif
	/*
	 * Allocate a segment structure and enough space to hold pointers to
	 * the maximum possible number of buffers which can be described in a
	 * single summary block.
	 */
	do_ckp = (flags & SEGM_CKP) || fs->lfs_nactive > LFS_MAX_ACTIVE;
	lfs_seglock(fs, flags | (do_ckp ? SEGM_CKP : 0));
	sp = fs->lfs_sp;

	/*
	 * If lfs_flushvp is non-NULL, we are called from lfs_vflush,
	 * in which case we have to flush *all* buffers off of this vnode.
	 * We don't care about other nodes, but write any non-dirop nodes
	 * anyway in anticipation of another getnewvnode().
	 *
	 * If we're cleaning we only write cleaning and ifile blocks, and
	 * no dirops, since otherwise we'd risk corruption in a crash.
	 */
	if (sp->seg_flags & SEGM_CLEAN)
		lfs_writevnodes(fs, mp, sp, VN_CLEAN);
	else {
		lfs_writevnodes(fs, mp, sp, VN_REG);
		if (!fs->lfs_dirops || !fs->lfs_flushvp) {
			while (fs->lfs_dirops)
				if ((error = tsleep(&fs->lfs_writer, PRIBIO + 1,
						    "lfs writer", 0)))
				{
					/* XXX why not segunlock? */
					free(sp->bpp, M_SEGMENT);
					sp->bpp = NULL;
					free(sp, M_SEGMENT);
					fs->lfs_sp = NULL;
					return (error);
				}
			fs->lfs_writer++;
			writer_set = 1;
			lfs_writevnodes(fs, mp, sp, VN_DIROP);
			((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
		}
	}

	/*
	 * If we are doing a checkpoint, mark everything since the
	 * last checkpoint as no longer ACTIVE.
	 */
	if (do_ckp) {
		for (ibno = fs->lfs_cleansz + fs->lfs_segtabsz;
		     --ibno >= fs->lfs_cleansz; ) {
			dirty = 0;
			if (bread(fs->lfs_ivnode, ibno, fs->lfs_bsize, NOCRED, &bp))
				panic("lfs_segwrite: ifile read");
			segusep = (SEGUSE *)bp->b_data;
			for (i = fs->lfs_sepb; i--;) {
				if (segusep->su_flags & SEGUSE_ACTIVE) {
					segusep->su_flags &= ~SEGUSE_ACTIVE;
					++dirty;
				}
				if (fs->lfs_version > 1)
					++segusep;
				else
					segusep = (SEGUSE *)
						((SEGUSE_V1 *)segusep + 1);
			}

			/* But the current segment is still ACTIVE */
			segusep = (SEGUSE *)bp->b_data;
			if (dtosn(fs, fs->lfs_curseg) / fs->lfs_sepb ==
			    (ibno-fs->lfs_cleansz)) {
				if (fs->lfs_version > 1)
					segusep[dtosn(fs, fs->lfs_curseg) %
						fs->lfs_sepb].su_flags |=
							SEGUSE_ACTIVE;
				else
					((SEGUSE *)
					 ((SEGUSE_V1 *)(bp->b_data) +
					  (dtosn(fs, fs->lfs_curseg) %
					   fs->lfs_sepb)))->su_flags
						|= SEGUSE_ACTIVE;
				--dirty;
			}
			if (dirty)
				error = LFS_BWRITE_LOG(bp); /* Ifile */
			else
				brelse(bp);
		}
	}

	did_ckp = 0;
	if (do_ckp || fs->lfs_doifile) {
		do {
			vp = fs->lfs_ivnode;

			vget(vp, LK_EXCLUSIVE | LK_CANRECURSE | LK_RETRY);
#ifdef DEBUG
			LFS_ENTER_LOG("pretend", __FILE__, __LINE__, 0, 0);
#endif
			fs->lfs_flags &= ~LFS_IFDIRTY;

			ip = VTOI(vp);
			/* if (vp->v_dirtyblkhd.lh_first != NULL) */
				lfs_writefile(fs, sp, vp);
			if (ip->i_flag & IN_ALLMOD)
				++did_ckp;
			redo = lfs_writeinode(fs, sp, ip);

			vput(vp);
			redo += lfs_writeseg(fs, sp);
			redo += (fs->lfs_flags & LFS_IFDIRTY);
		} while (redo && do_ckp);

		/* The ifile should now be all clear */
		if (do_ckp && vp->v_dirtyblkhd.lh_first) {
			struct buf *bp;
			int s, warned = 0, dopanic = 0;
			s = splbio();
			for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = bp->b_vnbufs.le_next) {
				if (!(bp->b_flags & B_GATHERED)) {
					if (!warned)
						printf("lfs_segwrite: ifile still has dirty blocks?!\n");
					++dopanic;
					++warned;
					printf("bp=%p, lbn %d, flags 0x%lx\n",
					       bp, bp->b_lblkno, bp->b_flags);
				}
			}
			if (dopanic)
				panic("dirty blocks");
			splx(s);
		}
		LFS_CLR_UINO(ip, IN_ALLMOD);
	} else {
		(void) lfs_writeseg(fs, sp);
	}

	/*
	 * If the I/O count is non-zero, sleep until it reaches zero.
	 * At the moment, the user's process hangs around so we can
	 * sleep.
	 */
	fs->lfs_doifile = 0;
	if (writer_set && --fs->lfs_writer == 0)
		wakeup(&fs->lfs_dirops);

	/*
	 * If we didn't write the Ifile, we didn't really do anything.
	 * That means that (1) there is a checkpoint on disk and (2)
	 * nothing has changed since it was written.
	 *
	 * Take the flags off of the segment so that lfs_segunlock
	 * doesn't have to write the superblock either.
	 */
	if (did_ckp == 0) {
		sp->seg_flags &= ~(SEGM_SYNC|SEGM_CKP);
		/* if (do_ckp) printf("lfs_segwrite: no checkpoint\n"); */
	}

	if (lfs_dostats) {
		++lfs_stats.nwrites;
		if (sp->seg_flags & SEGM_SYNC)
			++lfs_stats.nsync_writes;
		if (sp->seg_flags & SEGM_CKP)
			++lfs_stats.ncheckpoints;
	}
	lfs_segunlock(fs);
	return (0);
}

/*
 * Write the dirty blocks associated with a vnode.
 */
void
lfs_writefile(struct lfs *fs, struct segment *sp, struct vnode *vp)
{
	struct buf *bp;
	struct finfo *fip;
	IFILE *ifp;

	if (sp->seg_bytes_left < fs->lfs_bsize ||
	    sp->sum_bytes_left < sizeof(struct finfo))
		(void) lfs_writeseg(fs, sp);

	sp->sum_bytes_left -= sizeof(struct finfo) - sizeof(ufs_daddr_t);
	++((SEGSUM *)(sp->segsum))->ss_nfinfo;

	if (vp->v_flag & VDIROP)
		((SEGSUM *)(sp->segsum))->ss_flags |= (SS_DIROP|SS_CONT);

	fip = sp->fip;
	fip->fi_nblocks = 0;
	fip->fi_ino = VTOI(vp)->i_number;
	LFS_IENTRY(ifp, fs, fip->fi_ino, bp);
	fip->fi_version = ifp->if_version;
	brelse(bp);

	if (sp->seg_flags & SEGM_CLEAN) {
		lfs_gather(fs, sp, vp, lfs_match_fake);
		/*
		 * For a file being flushed, we need to write *all* blocks.
		 * This means writing the cleaning blocks first, and then
		 * immediately following with any non-cleaning blocks.
		 * The same is true of the Ifile since checkpoints assume
		 * that all valid Ifile blocks are written.
		 */
		if (IS_FLUSHING(fs,vp) || VTOI(vp)->i_number == LFS_IFILE_INUM)
			lfs_gather(fs, sp, vp, lfs_match_data);
	} else
		lfs_gather(fs, sp, vp, lfs_match_data);

	/*
	 * It may not be necessary to write the meta-data blocks at this point,
	 * as the roll-forward recovery code should be able to reconstruct the
	 * list.
	 *
	 * We have to write them anyway, though, under two conditions: (1) the
	 * vnode is being flushed (for reuse by vinvalbuf); or (2) we are
	 * checkpointing.
	 */
	if (lfs_writeindir
	    || IS_FLUSHING(fs,vp)
	    || (sp->seg_flags & SEGM_CKP))
	{
		lfs_gather(fs, sp, vp, lfs_match_indir);
		lfs_gather(fs, sp, vp, lfs_match_dindir);
		lfs_gather(fs, sp, vp, lfs_match_tindir);
	}
	fip = sp->fip;
	if (fip->fi_nblocks != 0) {
		sp->fip = (FINFO*)((caddr_t)fip + sizeof(struct finfo) +
				   sizeof(ufs_daddr_t) * (fip->fi_nblocks-1));
		sp->start_lbp = &sp->fip->fi_blocks[0];
	} else {
		sp->sum_bytes_left += sizeof(FINFO) - sizeof(ufs_daddr_t);
		--((SEGSUM *)(sp->segsum))->ss_nfinfo;
	}
}

int
lfs_writeinode(struct lfs *fs, struct segment *sp, struct inode *ip)
{
	struct buf *bp, *ibp;
	struct dinode *cdp;
	IFILE *ifp;
	SEGUSE *sup;
	ufs_daddr_t daddr;
	daddr_t *daddrp;
	ino_t ino;
	int error, i, ndx, fsb = 0;
	int redo_ifile = 0;
	struct timespec ts;
	int gotblk = 0;

	if (!(ip->i_flag & IN_ALLMOD))
		return (0);

	/* Allocate a new inode block if necessary. */
	if ((ip->i_number != LFS_IFILE_INUM || sp->idp == NULL) && sp->ibp == NULL) {
		/* Allocate a new segment if necessary. */
		if (sp->seg_bytes_left < fs->lfs_ibsize ||
		    sp->sum_bytes_left < sizeof(ufs_daddr_t))
			(void) lfs_writeseg(fs, sp);

		/* Get next inode block. */
		daddr = fs->lfs_offset;
		fs->lfs_offset += btofsb(fs, fs->lfs_ibsize);
		sp->ibp = *sp->cbpp++ =
			getblk(VTOI(fs->lfs_ivnode)->i_devvp, fsbtodb(fs, daddr),
			       fs->lfs_ibsize, 0, 0);
		gotblk++;

		/* Zero out inode numbers */
		for (i = 0; i < INOPB(fs); ++i)
			((struct dinode *)sp->ibp->b_data)[i].di_inumber = 0;

		++sp->start_bpp;
		fs->lfs_avail -= btofsb(fs, fs->lfs_ibsize);
		/* Set remaining space counters. */
		sp->seg_bytes_left -= fs->lfs_ibsize;
		sp->sum_bytes_left -= sizeof(ufs_daddr_t);
		ndx = fs->lfs_sumsize / sizeof(ufs_daddr_t) -
			sp->ninodes / INOPB(fs) - 1;
		((ufs_daddr_t *)(sp->segsum))[ndx] = daddr;
	}

	/* Update the inode times and copy the inode onto the inode page. */
	TIMEVAL_TO_TIMESPEC(&time, &ts);
	/* XXX kludge --- don't redirty the ifile just to put times on it */
	if (ip->i_number != LFS_IFILE_INUM)
		LFS_ITIMES(ip, &ts, &ts, &ts);

	/*
	 * If this is the Ifile, and we've already written the Ifile in this
	 * partial segment, just overwrite it (it's not on disk yet) and
	 * continue.
	 *
	 * XXX we know that the bp that we get the second time around has
	 * already been gathered.
	 */
	if (ip->i_number == LFS_IFILE_INUM && sp->idp) {
		*(sp->idp) = ip->i_din.ffs_din;
		return 0;
	}

	bp = sp->ibp;
	cdp = ((struct dinode *)bp->b_data) + (sp->ninodes % INOPB(fs));
	*cdp = ip->i_din.ffs_din;
#ifdef LFS_IFILE_FRAG_ADDRESSING
	if (fs->lfs_version > 1)
		fsb = (sp->ninodes % INOPB(fs)) / INOPF(fs);
#endif

	/*
	 * If we are cleaning, ensure that we don't write UNWRITTEN disk
	 * addresses to disk.
	 */
	if (ip->i_lfs_effnblks != ip->i_ffs_blocks) {
#ifdef DEBUG_LFS
		printf("lfs_writeinode: cleansing ino %d (%d != %d)\n",
		       ip->i_number, ip->i_lfs_effnblks, ip->i_ffs_blocks);
#endif
		for (daddrp = cdp->di_db; daddrp < cdp->di_ib + NIADDR;
		     daddrp++) {
			if (*daddrp == UNWRITTEN) {
#ifdef DEBUG_LFS
				printf("lfs_writeinode: wiping UNWRITTEN\n");
#endif
				*daddrp = 0;
			}
		}
	}

	if (ip->i_flag & IN_CLEANING)
		LFS_CLR_UINO(ip, IN_CLEANING);
	else {
		/* XXX IN_ALLMOD */
		LFS_CLR_UINO(ip, IN_ACCESSED | IN_ACCESS | IN_CHANGE |
			     IN_UPDATE);
		if (ip->i_lfs_effnblks == ip->i_ffs_blocks)
			LFS_CLR_UINO(ip, IN_MODIFIED);
#ifdef DEBUG_LFS
		else
			printf("lfs_writeinode: ino %d: real blks=%d, "
			       "eff=%d\n", ip->i_number, ip->i_ffs_blocks,
			       ip->i_lfs_effnblks);
#endif
	}

	if (ip->i_number == LFS_IFILE_INUM) /* We know sp->idp == NULL */
		sp->idp = ((struct dinode *)bp->b_data) +
			(sp->ninodes % INOPB(fs));
	if (gotblk) {
		LFS_LOCK_BUF(bp);
		brelse(bp);
	}

	/* Increment inode count in segment summary block. */
	++((SEGSUM *)(sp->segsum))->ss_ninos;

	/* If this page is full, set flag to allocate a new page. */
	if (++sp->ninodes % INOPB(fs) == 0)
		sp->ibp = NULL;

	/*
	 * If updating the ifile, update the super-block.  Update the disk
	 * address and access times for this inode in the ifile.
	 */
	ino = ip->i_number;
	if (ino == LFS_IFILE_INUM) {
		daddr = fs->lfs_idaddr;
		fs->lfs_idaddr = dbtofsb(fs, bp->b_blkno);
	} else {
		LFS_IENTRY(ifp, fs, ino, ibp);
		daddr = ifp->if_daddr;
		ifp->if_daddr = dbtofsb(fs, bp->b_blkno) + fsb;
#ifdef LFS_DEBUG_NEXTFREE
		if (ino > 3 && ifp->if_nextfree) {
			vprint("lfs_writeinode",ITOV(ip));
			printf("lfs_writeinode: updating free ino %d\n",
			       ip->i_number);
		}
#endif
		error = LFS_BWRITE_LOG(ibp); /* Ifile */
	}

	/*
	 * Account the inode: it no longer belongs to its former segment,
	 * though it will not belong to the new segment until that segment
	 * is actually written.
	 */
#ifdef DEBUG
	/*
	 * The inode's last address should not be in the current partial
	 * segment, except under exceptional circumstances (lfs_writevnodes
	 * had to start over, and in the meantime more blocks were written
	 * to a vnode).  Although the previous inode won't be accounted in
	 * su_nbytes until lfs_writeseg, this shouldn't be a problem as we
	 * have more data blocks in the current partial segment.
	 */
	if (daddr >= fs->lfs_lastpseg && daddr <= dbtofsb(fs, bp->b_blkno))
		printf("lfs_writeinode: last inode addr in current pseg "
		       "(ino %d daddr 0x%x)\n", ino, daddr);
#endif
	if (daddr != LFS_UNUSED_DADDR) {
		LFS_SEGENTRY(sup, fs, dtosn(fs, daddr), bp);
#ifdef DIAGNOSTIC
		if (sup->su_nbytes < DINODE_SIZE) {
			printf("lfs_writeinode: negative bytes "
			       "(segment %d short by %d)\n",
			       dtosn(fs, daddr),
			       (int)DINODE_SIZE - sup->su_nbytes);
			panic("lfs_writeinode: negative bytes");
			sup->su_nbytes = DINODE_SIZE;
		}
#endif
#ifdef DEBUG_SU_NBYTES
		printf("seg %d -= %d for ino %d inode\n",
		       dtosn(fs, daddr), DINODE_SIZE, ino);
#endif
		sup->su_nbytes -= DINODE_SIZE;
		redo_ifile =
			(ino == LFS_IFILE_INUM && !(bp->b_flags & B_GATHERED));
		if (redo_ifile)
			fs->lfs_flags |= LFS_IFDIRTY;
		error = LFS_BWRITE_LOG(bp); /* Ifile */
	}
	return (redo_ifile);
}

int
lfs_gatherblock(struct segment *sp, struct buf *bp, int *sptr)
{
	struct lfs *fs;
	int version;

	/*
	 * If full, finish this segment.  We may be doing I/O, so
	 * release and reacquire the splbio().
	 */
#ifdef DIAGNOSTIC
	if (sp->vp == NULL)
		panic ("lfs_gatherblock: Null vp in segment");
#endif
	fs = sp->fs;
	if (sp->sum_bytes_left < sizeof(ufs_daddr_t) ||
	    sp->seg_bytes_left < bp->b_bcount) {
		if (sptr)
			splx(*sptr);
		lfs_updatemeta(sp);

		version = sp->fip->fi_version;
		(void) lfs_writeseg(fs, sp);

		sp->fip->fi_version = version;
		sp->fip->fi_ino = VTOI(sp->vp)->i_number;
		/* Add the current file to the segment summary. */
		++((SEGSUM *)(sp->segsum))->ss_nfinfo;
		sp->sum_bytes_left -=
			sizeof(struct finfo) - sizeof(ufs_daddr_t);

		if (sptr)
			*sptr = splbio();
		return (1);
	}

#ifdef DEBUG
	if (bp->b_flags & B_GATHERED) {
		printf("lfs_gatherblock: already gathered! Ino %d, lbn %d\n",
		       sp->fip->fi_ino, bp->b_lblkno);
		return (0);
	}
#endif
	/* Insert into the buffer list, update the FINFO block. */
	bp->b_flags |= B_GATHERED;
	bp->b_flags &= ~B_DONE;

	*sp->cbpp++ = bp;
	sp->fip->fi_blocks[sp->fip->fi_nblocks++] = bp->b_lblkno;

	sp->sum_bytes_left -= sizeof(ufs_daddr_t);
	sp->seg_bytes_left -= bp->b_bcount;
	return (0);
}

int
lfs_gather(struct lfs *fs, struct segment *sp, struct vnode *vp, int (*match)(struct lfs *, struct buf *))
{
	struct buf *bp;
	int s, count = 0;

	sp->vp = vp;
	s = splbio();

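	/*
	 * XXX annotation: BACK_BUF below uses the same le_prev arithmetic
	 * as the BACK_VP hack in lfs_writevnodes above, applied to the
	 * per-vnode dirty buffer list so that it can be walked tail-first.
	 */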
#ifndef LFS_NO_BACKBUF_HACK
/* This is a hack to see if ordering the blocks in LFS makes a difference. */
# define	BUF_OFFSET	(((void *)&bp->b_vnbufs.le_next) - (void *)bp)
# define	BACK_BUF(BP)	((struct buf *)(((void *)BP->b_vnbufs.le_prev) - BUF_OFFSET))
# define	BEG_OF_LIST	((struct buf *)(((void *)&vp->v_dirtyblkhd.lh_first) - BUF_OFFSET))
	/* Find last buffer. */
 loop:	for (bp = vp->v_dirtyblkhd.lh_first; bp && bp->b_vnbufs.le_next != NULL;
	     bp = bp->b_vnbufs.le_next);
	for (; bp && bp != BEG_OF_LIST; bp = BACK_BUF(bp)) {
#else /* LFS_NO_BACKBUF_HACK */
 loop:	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = bp->b_vnbufs.le_next) {
#endif /* LFS_NO_BACKBUF_HACK */
		if ((bp->b_flags & (B_BUSY|B_GATHERED)) || !match(fs, bp)) {
#ifdef DEBUG_LFS
			if (vp == fs->lfs_ivnode && (bp->b_flags & (B_BUSY|B_GATHERED)) == B_BUSY)
				printf("(%d:%lx)", bp->b_lblkno, bp->b_flags);
#endif
			continue;
		}
		if (vp->v_type == VBLK) {
			/* For block devices, just write the blocks. */
			/* XXX Do we really need to even do this? */
#ifdef DEBUG_LFS
			if (count == 0)
				printf("BLK(");
			printf(".");
#endif
			/* Get the block before bwrite, so we don't corrupt the free list */
			bp->b_flags |= B_BUSY;
			bremfree(bp);
			bwrite(bp);
		} else {
#ifdef DIAGNOSTIC
			if ((bp->b_flags & (B_CALL|B_INVAL)) == B_INVAL) {
				printf("lfs_gather: lbn %d is B_INVAL\n",
				       bp->b_lblkno);
				VOP_PRINT(bp->b_vp);
			}
			if (!(bp->b_flags & B_DELWRI))
				panic("lfs_gather: bp not B_DELWRI");
			if (!(bp->b_flags & B_LOCKED)) {
				printf("lfs_gather: lbn %d blk %d"
				       " not B_LOCKED\n", bp->b_lblkno,
				       dbtofsb(fs, bp->b_blkno));
				VOP_PRINT(bp->b_vp);
				panic("lfs_gather: bp not B_LOCKED");
			}
#endif
			if (lfs_gatherblock(sp, bp, &s)) {
				goto loop;
			}
		}
		count++;
	}
	splx(s);
#ifdef DEBUG_LFS
	if (vp->v_type == VBLK && count)
		printf(")\n");
#endif
	lfs_updatemeta(sp);
	sp->vp = NULL;
	return count;
}

/*
 * Update the metadata that points to the blocks listed in the FINFO
 * array.
 */
void
lfs_updatemeta(struct segment *sp)
{
	SEGUSE *sup;
	struct buf *bp;
	struct lfs *fs;
	struct vnode *vp;
	struct indir a[NIADDR + 2], *ap;
	struct inode *ip;
	ufs_daddr_t daddr, lbn, off;
	daddr_t ooff;
	int error, i, nblocks, num;
	int bb;

	vp = sp->vp;
	nblocks = &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp;
	if (nblocks < 0)
		panic("This is a bad thing\n");
	if (vp == NULL || nblocks == 0)
		return;

	/* Sort the blocks. */
	/*
	 * XXX KS - We have to sort even if the blocks come from the
	 * cleaner, because there might be other pending blocks on the
	 * same inode...and if we don't sort, and there are fragments
	 * present, blocks may be written in the wrong place.
	 */
	/* if (!(sp->seg_flags & SEGM_CLEAN)) */
	lfs_shellsort(sp->start_bpp, sp->start_lbp, nblocks);

	/*
	 * Record the length of the last block in case it's a fragment.
	 * If there are indirect blocks present, they sort last.  An
	 * indirect block will be lfs_bsize and its presence indicates
	 * that you cannot have fragments.
	 */
	sp->fip->fi_lastlength = sp->start_bpp[nblocks - 1]->b_bcount;

	/*
	 * Assign disk addresses, and update references to the logical
	 * block and the segment usage information.
	 */
	fs = sp->fs;
	for (i = nblocks; i--; ++sp->start_bpp) {
		lbn = *sp->start_lbp++;

		(*sp->start_bpp)->b_blkno = fsbtodb(fs, fs->lfs_offset);
		off = fs->lfs_offset;
		if ((*sp->start_bpp)->b_blkno == (*sp->start_bpp)->b_lblkno) {
			printf("lfs_updatemeta: ino %d blk %d"
			       " has same lbn and daddr\n",
			       VTOI(vp)->i_number, off);
		}
#ifdef DIAGNOSTIC
		if ((*sp->start_bpp)->b_bcount < fs->lfs_bsize && i != 0)
			panic("lfs_updatemeta: fragment is not last block\n");
#endif
		bb = fragstofsb(fs, numfrags(fs, (*sp->start_bpp)->b_bcount));
		fs->lfs_offset += bb;
		error = ufs_bmaparray(vp, lbn, &daddr, a, &num, NULL);
		if (daddr > 0)
			daddr = dbtofsb(fs, daddr);
		if (error)
			panic("lfs_updatemeta: ufs_bmaparray %d", error);
		ip = VTOI(vp);
		switch (num) {
		case 0:
			ooff = ip->i_ffs_db[lbn];
#ifdef DEBUG
			if (ooff == 0) {
				printf("lfs_updatemeta[1]: warning: writing "
				       "ino %d lbn %d at 0x%x, was 0x0\n",
				       ip->i_number, lbn, off);
			}
#endif
			if (ooff == UNWRITTEN)
				ip->i_ffs_blocks += bb;
			ip->i_ffs_db[lbn] = off;
			break;
		case 1:
			ooff = ip->i_ffs_ib[a[0].in_off];
#ifdef DEBUG
			if (ooff == 0) {
				printf("lfs_updatemeta[2]: warning: writing "
				       "ino %d lbn %d at 0x%x, was 0x0\n",
				       ip->i_number, lbn, off);
			}
#endif
			if (ooff == UNWRITTEN)
				ip->i_ffs_blocks += bb;
			ip->i_ffs_ib[a[0].in_off] = off;
			break;
		default:
			ap = &a[num - 1];
			if (bread(vp, ap->in_lbn, fs->lfs_bsize, NOCRED, &bp))
				panic("lfs_updatemeta: bread bno %d",
				      ap->in_lbn);

			ooff = ((ufs_daddr_t *)bp->b_data)[ap->in_off];
#ifdef DEBUG
			if (ooff == 0) {
				printf("lfs_updatemeta[3]: warning: writing "
				       "ino %d lbn %d at 0x%x, was 0x0\n",
				       ip->i_number, lbn, off);
			}
#endif
			if (ooff == UNWRITTEN)
				ip->i_ffs_blocks += bb;
			((ufs_daddr_t *)bp->b_data)[ap->in_off] = off;
			(void) VOP_BWRITE(bp);
		}
#ifdef DEBUG
		if (daddr >= fs->lfs_lastpseg && daddr <= off) {
			printf("lfs_updatemeta: ino %d, lbn %d, addr = %x "
			       "in same pseg\n", VTOI(sp->vp)->i_number,
			       (*sp->start_bpp)->b_lblkno, daddr);
		}
#endif
		/* Update segment usage information. */
		if (daddr > 0) {
			LFS_SEGENTRY(sup, fs, dtosn(fs, daddr), bp);
#ifdef DIAGNOSTIC
			if (sup->su_nbytes < (*sp->start_bpp)->b_bcount) {
				/* XXX -- Change to a panic. */
				printf("lfs_updatemeta: negative bytes "
				       "(segment %d short by %ld)\n",
				       dtosn(fs, daddr),
				       (*sp->start_bpp)->b_bcount -
				       sup->su_nbytes);
				printf("lfs_updatemeta: ino %d, lbn %d, "
				       "addr = 0x%x\n", VTOI(sp->vp)->i_number,
				       (*sp->start_bpp)->b_lblkno, daddr);
				panic("lfs_updatemeta: negative bytes");
				sup->su_nbytes = (*sp->start_bpp)->b_bcount;
			}
#endif
#ifdef DEBUG_SU_NBYTES
			printf("seg %d -= %ld for ino %d lbn %d db 0x%x\n",
			       dtosn(fs, daddr), (*sp->start_bpp)->b_bcount,
			       VTOI(sp->vp)->i_number,
			       (*sp->start_bpp)->b_lblkno, daddr);
#endif
			sup->su_nbytes -= (*sp->start_bpp)->b_bcount;
			if (!(bp->b_flags & B_GATHERED))
				fs->lfs_flags |= LFS_IFDIRTY;
			error = LFS_BWRITE_LOG(bp); /* Ifile */
		}
	}
}

/*
 * Start a new segment.
 */
int
lfs_initseg(struct lfs *fs)
{
	struct segment *sp;
	SEGUSE *sup;
	SEGSUM *ssp;
	struct buf *bp, *sbp;
	int repeat;

	sp = fs->lfs_sp;

	repeat = 0;
	/* Advance to the next segment. */
	if (!LFS_PARTIAL_FITS(fs)) {
		/* lfs_avail eats the remaining space */
		fs->lfs_avail -= fs->lfs_fsbpseg - (fs->lfs_offset -
						    fs->lfs_curseg);
		/* Wake up any cleaning procs waiting on this file system. */
		wakeup(&lfs_allclean_wakeup);
		wakeup(&fs->lfs_nextseg);
		lfs_newseg(fs);
		repeat = 1;
		fs->lfs_offset = fs->lfs_curseg;
		sp->seg_number = dtosn(fs, fs->lfs_curseg);
		sp->seg_bytes_left = fsbtob(fs, fs->lfs_fsbpseg);
		/*
		 * If the segment contains a superblock, update the offset
		 * and summary address to skip over it.
		 */
		LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
		if (sup->su_flags & SEGUSE_SUPERBLOCK) {
			fs->lfs_offset += btofsb(fs, LFS_SBPAD);
			sp->seg_bytes_left -= LFS_SBPAD;
		}
		brelse(bp);
		/* Segment zero could also contain the labelpad */
		if (fs->lfs_version > 1 && sp->seg_number == 0 &&
		    fs->lfs_start < btofsb(fs, LFS_LABELPAD)) {
			fs->lfs_offset += btofsb(fs, LFS_LABELPAD) - fs->lfs_start;
			sp->seg_bytes_left -= LFS_LABELPAD - fsbtob(fs, fs->lfs_start);
		}
	} else {
		sp->seg_number = dtosn(fs, fs->lfs_curseg);
		sp->seg_bytes_left = fsbtob(fs, fs->lfs_fsbpseg -
					    (fs->lfs_offset - fs->lfs_curseg));
	}
	fs->lfs_lastpseg = fs->lfs_offset;

	sp->fs = fs;
	sp->ibp = NULL;
	sp->idp = NULL;
	sp->ninodes = 0;

	/* Get a new buffer for SEGSUM and enter it into the buffer list. */
	sp->cbpp = sp->bpp;
#ifdef LFS_MALLOC_SUMMARY
	sbp = *sp->cbpp = lfs_newbuf(fs, VTOI(fs->lfs_ivnode)->i_devvp,
				     fsbtodb(fs, fs->lfs_offset), fs->lfs_sumsize);
	sp->segsum = (*sp->cbpp)->b_data;
#else
	sbp = *sp->cbpp = getblk(VTOI(fs->lfs_ivnode)->i_devvp,
				 fsbtodb(fs, fs->lfs_offset), NBPG, 0, 0);
	memset(sbp->b_data, 0x5a, NBPG);
	sp->segsum = (*sp->cbpp)->b_data + NBPG - fs->lfs_sumsize;
#endif
	bzero(sp->segsum, fs->lfs_sumsize);
	sp->start_bpp = ++sp->cbpp;
	fs->lfs_offset += btofsb(fs, fs->lfs_sumsize);

	/* Set point to SEGSUM, initialize it. */
	ssp = sp->segsum;
	ssp->ss_next = fs->lfs_nextseg;
	ssp->ss_nfinfo = ssp->ss_ninos = 0;
	ssp->ss_magic = SS_MAGIC;

	/* Set pointer to first FINFO, initialize it. */
	sp->fip = (struct finfo *)((caddr_t)sp->segsum + SEGSUM_SIZE(fs));
	sp->fip->fi_nblocks = 0;
	sp->start_lbp = &sp->fip->fi_blocks[0];
	sp->fip->fi_lastlength = 0;

	sp->seg_bytes_left -= fs->lfs_sumsize;
	sp->sum_bytes_left = fs->lfs_sumsize - SEGSUM_SIZE(fs);

#ifndef LFS_MALLOC_SUMMARY
	LFS_LOCK_BUF(sbp);
	brelse(sbp);
#endif
	return (repeat);
}

/*
 * Return the next segment to write.
 */
void
lfs_newseg(struct lfs *fs)
{
	CLEANERINFO *cip;
	SEGUSE *sup;
	struct buf *bp;
	int curseg, isdirty, sn;

	LFS_SEGENTRY(sup, fs, dtosn(fs, fs->lfs_nextseg), bp);
#ifdef DEBUG_SU_NBYTES
	printf("lfs_newseg: seg %d := 0 in newseg\n",	/* XXXDEBUG */
	       dtosn(fs, fs->lfs_nextseg));		/* XXXDEBUG */
#endif
	sup->su_flags |= SEGUSE_DIRTY | SEGUSE_ACTIVE;
	sup->su_nbytes = 0;
	sup->su_nsums = 0;
	sup->su_ninos = 0;
	(void) LFS_BWRITE_LOG(bp); /* Ifile */

	LFS_CLEANERINFO(cip, fs, bp);
	--cip->clean;
	++cip->dirty;
	fs->lfs_nclean = cip->clean;
	LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);

	fs->lfs_lastseg = fs->lfs_curseg;
	fs->lfs_curseg = fs->lfs_nextseg;
	for (sn = curseg = dtosn(fs, fs->lfs_curseg) + fs->lfs_interleave;;) {
		sn = (sn + 1) % fs->lfs_nseg;
		if (sn == curseg)
			panic("lfs_nextseg: no clean segments");
		LFS_SEGENTRY(sup, fs, sn, bp);
		isdirty = sup->su_flags & SEGUSE_DIRTY;
		brelse(bp);
		if (!isdirty)
			break;
	}

	++fs->lfs_nactive;
	fs->lfs_nextseg = sntod(fs, sn);
	if (lfs_dostats) {
		++lfs_stats.segsused;
	}
}

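/*
 * XXX annotation: when pagemove is in use, scan forward over up to
 * nblocks buffers, accumulating their sizes in *size until maxsize is
 * reached.  A buffer that cannot be pagemoved (a cleaner B_CALL buffer,
 * or one that is not a whole number of pages) stops the scan and a
 * pointer to it is returned; NULL means the entire run examined can be
 * clustered with pagemove.
 */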
static struct buf **
lookahead_pagemove(struct buf **bpp, int nblocks, size_t *size)
{
	size_t maxsize;
#ifndef LFS_NO_PAGEMOVE
	struct buf *bp;
#endif

	maxsize = *size;
	*size = 0;
#ifdef LFS_NO_PAGEMOVE
	return bpp;
#else
	while((bp = *bpp) != NULL && *size < maxsize && nblocks--) {
		if(bp->b_flags & B_CALL)
			return bpp;
		if(bp->b_bcount % NBPG)
			return bpp;
		*size += bp->b_bcount;
		++bpp;
	}
	return NULL;
#endif
}

#define BQUEUES 4	/* XXX */
#define BQ_EMPTY 3	/* XXX */
extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];

#define BUFHASH(dvp, lbn) \
	(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
extern LIST_HEAD(bufhashhdr, buf) invalhash;
/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

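/*
 * XXX annotation: allocate a cluster descriptor and a buffer header
 * that will carry up to n gathered buffers to disk in a single
 * strategy call; the descriptor is smuggled through b_saveaddr and
 * unpacked again by lfs_cluster_callback after the write completes.
 */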
static struct buf *
lfs_newclusterbuf(struct lfs *fs, struct vnode *vp, daddr_t addr, int n)
{
	struct lfs_cluster *cl;
	struct buf **bpp, *bp;
	int s;

	cl = (struct lfs_cluster *)malloc(sizeof(*cl), M_SEGMENT, M_WAITOK);
	bpp = (struct buf **)malloc(n*sizeof(*bpp), M_SEGMENT, M_WAITOK);
	memset(cl,0,sizeof(*cl));
	cl->fs = fs;
	cl->bpp = bpp;
	cl->bufcount = 0;
	cl->bufsize = 0;

	/* Get an empty buffer header, or maybe one with something on it */
	s = splbio();
	if((bp = bufqueues[BQ_EMPTY].tqh_first) != NULL) {
		bremfree(bp);
		/* clear out various other fields */
		bp->b_flags = B_BUSY;
		bp->b_dev = NODEV;
		bp->b_blkno = bp->b_lblkno = 0;
		bp->b_error = 0;
		bp->b_resid = 0;
		bp->b_bcount = 0;

		/* nuke any credentials we were holding */
		/* XXXXXX */

		bremhash(bp);

		/* disassociate us from our vnode, if we had one... */
		if (bp->b_vp)
			brelvp(bp);
	}
	splx(s);
	while (!bp)
		bp = getnewbuf(0, 0);
	s = splbio();
	bgetvp(vp, bp);
	binshash(bp,&invalhash);
	splx(s);
	bp->b_bcount = 0;
	bp->b_blkno = bp->b_lblkno = addr;

	bp->b_flags |= B_CALL;
	bp->b_iodone = lfs_cluster_callback;
	cl->saveaddr = bp->b_saveaddr; /* XXX is this ever used? */
	bp->b_saveaddr = (caddr_t)cl;

	return bp;
}

int
lfs_writeseg(struct lfs *fs, struct segment *sp)
{
	struct buf **bpp, *bp, *cbp, *newbp, **pmlastbpp;
	SEGUSE *sup;
	SEGSUM *ssp;
	dev_t i_dev;
	char *datap, *dp;
	int do_again, i, nblocks, s;
	size_t el_size;
	struct lfs_cluster *cl;
	int (*strategy)(void *);
	struct vop_strategy_args vop_strategy_a;
	u_short ninos;
	struct vnode *devvp;
	char *p;
	struct vnode *vp;
	struct inode *ip;
	size_t pmsize;
	int use_pagemove;
	daddr_t pseg_daddr;
	daddr_t *daddrp;
	int changed;
#if defined(DEBUG) && defined(LFS_PROPELLER)
	static int propeller;
	char propstring[4] = "-\\|/";

	printf("%c\b",propstring[propeller++]);
	if (propeller == 4)
		propeller = 0;
#endif
	pseg_daddr = (*(sp->bpp))->b_blkno;

	/*
	 * If there are no buffers other than the segment summary to write
	 * and it is not a checkpoint, don't do anything.  On a checkpoint,
	 * even if there aren't any buffers, you need to write the superblock.
	 */
	if ((nblocks = sp->cbpp - sp->bpp) == 1)
		return (0);

	i_dev = VTOI(fs->lfs_ivnode)->i_dev;
	devvp = VTOI(fs->lfs_ivnode)->i_devvp;

	/* Update the segment usage information. */
	LFS_SEGENTRY(sup, fs, sp->seg_number, bp);

	/* Loop through all blocks, except the segment summary. */
	for (bpp = sp->bpp; ++bpp < sp->cbpp; ) {
		if ((*bpp)->b_vp != devvp) {
			sup->su_nbytes += (*bpp)->b_bcount;
#ifdef DEBUG_SU_NBYTES
			printf("seg %d += %ld for ino %d lbn %d db 0x%x\n",
			       sp->seg_number, (*bpp)->b_bcount,
			       VTOI((*bpp)->b_vp)->i_number,
			       (*bpp)->b_lblkno, (*bpp)->b_blkno);
#endif
		}
	}

	ssp = (SEGSUM *)sp->segsum;

	ninos = (ssp->ss_ninos + INOPB(fs) - 1) / INOPB(fs);
#ifdef DEBUG_SU_NBYTES
	printf("seg %d += %d for %d inodes\n",	/* XXXDEBUG */
	       sp->seg_number, ssp->ss_ninos * DINODE_SIZE,
	       ssp->ss_ninos);
#endif
	sup->su_nbytes += ssp->ss_ninos * DINODE_SIZE;
	/* sup->su_nbytes += fs->lfs_sumsize; */
	if (fs->lfs_version == 1)
		sup->su_olastmod = time.tv_sec;
	else
		sup->su_lastmod = time.tv_sec;
	sup->su_ninos += ninos;
	++sup->su_nsums;
	fs->lfs_dmeta += (btofsb(fs, fs->lfs_sumsize) + btofsb(fs, ninos *
							       fs->lfs_ibsize));
	fs->lfs_avail -= btofsb(fs, fs->lfs_sumsize);

	do_again = !(bp->b_flags & B_GATHERED);
	(void)LFS_BWRITE_LOG(bp); /* Ifile */
	/*
	 * Mark blocks B_BUSY, to prevent them from being changed between
	 * the checksum computation and the actual write.
	 *
	 * If we are cleaning, check indirect blocks for UNWRITTEN, and if
	 * there are any, replace them with copies that have UNASSIGNED
	 * instead.
	 */
	for (bpp = sp->bpp, i = nblocks - 1; i--;) {
		++bpp;
		if ((*bpp)->b_flags & B_CALL)
			continue;
		bp = *bpp;
	again:
		s = splbio();
		if (bp->b_flags & B_BUSY) {
#ifdef DEBUG
			printf("lfs_writeseg: avoiding potential data "
			       "summary corruption for ino %d, lbn %d\n",
			       VTOI(bp->b_vp)->i_number, bp->b_lblkno);
#endif
			bp->b_flags |= B_WANTED;
			tsleep(bp, (PRIBIO + 1), "lfs_writeseg", 0);
			splx(s);
			goto again;
		}
		bp->b_flags |= B_BUSY;
		splx(s);
		/* Check and replace indirect block UNWRITTEN bogosity */
		if (bp->b_lblkno < 0 && bp->b_vp != devvp && bp->b_vp &&
		    VTOI(bp->b_vp)->i_ffs_blocks !=
		    VTOI(bp->b_vp)->i_lfs_effnblks) {
#ifdef DEBUG_LFS
			printf("lfs_writeseg: cleansing ino %d (%d != %d)\n",
			       VTOI(bp->b_vp)->i_number,
			       VTOI(bp->b_vp)->i_lfs_effnblks,
			       VTOI(bp->b_vp)->i_ffs_blocks);
#endif
			/* Make a copy we'll make changes to */
			newbp = lfs_newbuf(fs, bp->b_vp, bp->b_lblkno,
					   bp->b_bcount);
			newbp->b_blkno = bp->b_blkno;
			memcpy(newbp->b_data, bp->b_data,
			       newbp->b_bcount);
			*bpp = newbp;

			changed = 0;
			for (daddrp = (daddr_t *)(newbp->b_data);
			     daddrp < (daddr_t *)(newbp->b_data +
						  newbp->b_bcount); daddrp++) {
				if (*daddrp == UNWRITTEN) {
					++changed;
#ifdef DEBUG_LFS
					printf("lfs_writeseg: replacing UNWRITTEN\n");
#endif
					*daddrp = 0;
				}
			}
			/*
			 * Get rid of the old buffer.  Don't mark it clean,
			 * though, if it still has dirty data on it.
			 */
			if (changed) {
				bp->b_flags &= ~(B_ERROR | B_GATHERED);
				if (bp->b_flags & B_CALL) {
					lfs_freebuf(bp);
					bp = NULL;
				} else {
					/* Still on free list, leave it there */
					s = splbio();
					bp->b_flags &= ~B_BUSY;
					if (bp->b_flags & B_WANTED)
						wakeup(bp);
					splx(s);
					/*
					 * We have to re-decrement lfs_avail
					 * since this block is going to come
					 * back around to us in the next
					 * segment.
					 */
					fs->lfs_avail -= btofsb(fs, bp->b_bcount);
				}
			} else {
				bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
						 B_GATHERED);
				if (bp->b_flags & B_CALL) {
					lfs_freebuf(bp);
					bp = NULL;
				} else {
					bremfree(bp);
					bp->b_flags |= B_DONE;
					reassignbuf(bp, bp->b_vp);
					LFS_UNLOCK_BUF(bp);
					brelse(bp);
				}
			}

		}
	}
	/*
	 * Compute checksum across data and then across summary; the first
	 * block (the summary block) is skipped.  Set the create time here
	 * so that it's guaranteed to be later than the inode mod times.
	 *
	 * XXX
	 * Fix this to do it inline, instead of malloc/copy.
	 */
	if (fs->lfs_version == 1)
		el_size = sizeof(u_long);
	else
		el_size = sizeof(u_int32_t);
	datap = dp = malloc(nblocks * el_size, M_SEGMENT, M_WAITOK);
	for (bpp = sp->bpp, i = nblocks - 1; i--;) {
		if (((*++bpp)->b_flags & (B_CALL|B_INVAL)) == (B_CALL|B_INVAL)) {
			if (copyin((*bpp)->b_saveaddr, dp, el_size))
				panic("lfs_writeseg: copyin failed [1]: "
				      "ino %d blk %d",
				      VTOI((*bpp)->b_vp)->i_number,
				      (*bpp)->b_lblkno);
		} else
			memcpy(dp, (*bpp)->b_data, el_size);
		dp += el_size;
	}
	if (fs->lfs_version == 1)
		ssp->ss_ocreate = time.tv_sec;
	else {
		ssp->ss_create = time.tv_sec;
		ssp->ss_serial = ++fs->lfs_serial;
		ssp->ss_ident = fs->lfs_ident;
	}
#ifndef LFS_MALLOC_SUMMARY
	/* Set the summary block busy too */
	(*(sp->bpp))->b_flags |= B_BUSY;
#endif
	ssp->ss_datasum = cksum(datap, (nblocks - 1) * el_size);
	ssp->ss_sumsum =
		cksum(&ssp->ss_datasum, fs->lfs_sumsize - sizeof(ssp->ss_sumsum));
	free(datap, M_SEGMENT);
	datap = dp = NULL;
#ifdef DIAGNOSTIC
	if (fs->lfs_bfree < btofsb(fs, ninos * fs->lfs_ibsize) + btofsb(fs, fs->lfs_sumsize))
		panic("lfs_writeseg: No diskspace for summary");
#endif
	fs->lfs_bfree -= (btofsb(fs, ninos * fs->lfs_ibsize) +
			  btofsb(fs, fs->lfs_sumsize));

	strategy = devvp->v_op[VOFFSET(vop_strategy)];

	/*
	 * When we simply write the blocks we lose a rotation for every block
	 * written.  To avoid this problem, we use pagemove to cluster
	 * the buffers into a chunk and write the chunk.  CHUNKSIZE is the
	 * largest size I/O devices can handle.
	 *
	 * XXX - right now MAXPHYS is only 64k; could it be larger?
	 */

#define CHUNKSIZE MAXPHYS
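/*
 * XXX annotation: each cluster buffer from lfs_newclusterbuf is filled
 * up to CHUNKSIZE bytes below, either by pagemove()ing the component
 * buffers' pages into it or, failing that, by malloc and copy
 * (LFS_CL_MALLOC); the cluster is taken apart again in
 * lfs_cluster_callback once the write completes.
 */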
1729
1730 if (devvp == NULL)
1731 panic("devvp is NULL");
1732 for (bpp = sp->bpp, i = nblocks; i;) {
1733 cbp = lfs_newclusterbuf(fs, devvp, (*bpp)->b_blkno, i);
1734 cl = (struct lfs_cluster *)cbp->b_saveaddr;
1735
1736 cbp->b_dev = i_dev;
1737 cbp->b_flags |= B_ASYNC | B_BUSY;
1738 cbp->b_bcount = 0;
1739
1740 /*
1741 * Find out if we can use pagemove to build the cluster,
1742 * or if we are stuck using malloc/copy. If this is the
1743 * first cluster, set the shift flag (see below).
1744 */
1745 pmsize = CHUNKSIZE;
1746 use_pagemove = 0;
1747 if(bpp == sp->bpp) {
1748 /* Summary blocks have to get special treatment */
1749 pmlastbpp = lookahead_pagemove(bpp + 1, i - 1, &pmsize);
1750 if(pmsize >= CHUNKSIZE - fs->lfs_sumsize ||
1751 pmlastbpp == NULL) {
1752 use_pagemove = 1;
1753 cl->flags |= LFS_CL_SHIFT;
1754 } else {
1755 /*
1756 * If we're not using pagemove, we have
1757 * to copy the summary down to the bottom
1758 * end of the block.
1759 */
1760 #ifndef LFS_MALLOC_SUMMARY
1761 memcpy((*bpp)->b_data, (*bpp)->b_data +
1762 NBPG - fs->lfs_sumsize,
1763 fs->lfs_sumsize);
1764 #endif /* LFS_MALLOC_SUMMARY */
1765 }
1766 } else {
1767 pmlastbpp = lookahead_pagemove(bpp, i, &pmsize);
1768 if(pmsize >= CHUNKSIZE || pmlastbpp == NULL) {
1769 use_pagemove = 1;
1770 }
1771 }
1772 if(use_pagemove == 0) {
1773 cl->flags |= LFS_CL_MALLOC;
1774 cl->olddata = cbp->b_data;
1775 cbp->b_data = malloc(CHUNKSIZE, M_SEGMENT, M_WAITOK);
1776 }
1777 #if defined(DEBUG) && defined(DIAGNOSTIC)
1778 if(dtosn(fs, dbtofsb(fs, (*bpp)->b_blkno + btodb((*bpp)->b_bcount - 1))) !=
1779 dtosn(fs, dbtofsb(fs, cbp->b_blkno))) {
1780 printf("block at %x (%d), cbp at %x (%d)\n",
1781 (*bpp)->b_blkno, dtosn(fs, dbtofsb(fs, (*bpp)->b_blkno)),
1782 cbp->b_blkno, dtosn(fs, dbtofsb(fs, cbp->b_blkno)));
1783 panic("lfs_writeseg: Segment overwrite");
1784 }
1785 #endif
1786
1787 /*
1788 * Construct the cluster.
1789 */
1790 s = splbio();
1791 while (fs->lfs_iocount >= LFS_THROTTLE) {
1792 #ifdef DEBUG_LFS
1793 printf("[%d]", fs->lfs_iocount);
1794 #endif
1795 tsleep(&fs->lfs_iocount, PRIBIO+1, "lfs_throttle", 0);
1796 }
1797 ++fs->lfs_iocount;
1798
1799 for (p = cbp->b_data; i && cbp->b_bcount < CHUNKSIZE; i--) {
1800 bp = *bpp;
1801
1802 if (bp->b_bcount > (CHUNKSIZE - cbp->b_bcount))
1803 break;
1804
1805 /*
1806 * Fake buffers from the cleaner are marked as B_INVAL.
1807 * We need to copy the data from user space rather than
1808 * from the buffer indicated.
1809 * XXX == what do I do on an error?
1810 */
1811 if ((bp->b_flags & (B_CALL|B_INVAL)) == (B_CALL|B_INVAL)) {
1812 if (copyin(bp->b_saveaddr, p, bp->b_bcount))
1813 panic("lfs_writeseg: copyin failed [2]");
1814 } else if (use_pagemove) {
1815 pagemove(bp->b_data, p, bp->b_bcount);
1816 cbp->b_bufsize += bp->b_bcount;
1817 bp->b_bufsize -= bp->b_bcount;
1818 } else {
1819 bcopy(bp->b_data, p, bp->b_bcount);
1820 /* printf("copy in %p\n", bp->b_data); */
1821 }
1822
1823 /*
1824 * XXX If we are *not* shifting, the summary
1825 * block is only fs->lfs_sumsize. Otherwise,
1826 * it is NBPG but shifted.
1827 */
1828 if(bpp == sp->bpp && !(cl->flags & LFS_CL_SHIFT)) {
1829 p += fs->lfs_sumsize;
1830 cbp->b_bcount += fs->lfs_sumsize;
1831 cl->bufsize += fs->lfs_sumsize;
1832 } else {
1833 p += bp->b_bcount;
1834 cbp->b_bcount += bp->b_bcount;
1835 cl->bufsize += bp->b_bcount;
1836 }
1837 bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI | B_DONE);
1838 cl->bpp[cl->bufcount++] = bp;
1839 vp = bp->b_vp;
1840 ++vp->v_numoutput;
1841
1842 /*
1843 * Although it cannot be freed for reuse before the
1844 * cluster is written to disk, this buffer does not
1845 * need to be held busy. Therefore we unbusy it,
1846 * while leaving it on the locked list. It will
1847 * be freed or requeued by the callback depending
1848 * on whether it has had B_DELWRI set again in the
1849 * meantime.
1850 *
1851 * If we are using pagemove, we have to hold the block
1852 * busy to prevent its contents from changing before
1853 * it hits the disk, and invalidating the checksum.
1854 */
			bp->b_flags &= ~(B_DELWRI | B_READ | B_ERROR);
#ifdef LFS_MNOBUSY
			if (cl->flags & LFS_CL_MALLOC) {
				if (!(bp->b_flags & B_CALL))
					brelse(bp);	/* Still B_LOCKED */
			}
#endif
			bpp++;

			/*
			 * If this is the last block for this vnode, but
			 * there are other blocks on its dirty list, set
			 * IN_MODIFIED or IN_CLEANING depending on what
			 * sort of block it is.  Only do this for our mount
			 * point, not for, e.g., inode blocks that are
			 * attached to the devvp.
			 * XXX KS - Shouldn't we set *both* if both types
			 * of blocks are present (traverse the dirty list?)
			 */
			if ((i == 1 ||
			     (i > 1 && vp && *bpp && (*bpp)->b_vp != vp)) &&
			    (bp = vp->v_dirtyblkhd.lh_first) != NULL &&
			    vp->v_mount == fs->lfs_ivnode->v_mount) {
				ip = VTOI(vp);
#ifdef DEBUG_LFS
				printf("lfs_writeseg: marking ino %d\n",
				       ip->i_number);
#endif
				if (bp->b_flags & B_CALL)
					LFS_SET_UINO(ip, IN_CLEANING);
				else
					LFS_SET_UINO(ip, IN_MODIFIED);
			}
			wakeup(vp);
		}
		++cbp->b_vp->v_numoutput;
		splx(s);
		/*
		 * In order to include the summary in a clustered block,
		 * it may be necessary to shift the block forward (since
		 * summary blocks are in general smaller than can be
		 * addressed by pagemove()).  After the write, the block
		 * will be corrected before disassembly.
		 */
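		/*
		 * Illustrative (again assuming NBPG 4096, lfs_sumsize
		 * 512): b_data advances 3584 bytes, so the write begins
		 * at the summary in the last 512 bytes of the first page.
		 */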
		if (cl->flags & LFS_CL_SHIFT) {
			cbp->b_data += (NBPG - fs->lfs_sumsize);
			cbp->b_bcount -= (NBPG - fs->lfs_sumsize);
		}
		vop_strategy_a.a_desc = VDESC(vop_strategy);
		vop_strategy_a.a_bp = cbp;
		(strategy)(&vop_strategy_a);
	}

	if (lfs_dostats) {
		++lfs_stats.psegwrites;
		lfs_stats.blocktot += nblocks - 1;
		if (fs->lfs_sp->seg_flags & SEGM_SYNC)
			++lfs_stats.psyncwrites;
		if (fs->lfs_sp->seg_flags & SEGM_CLEAN) {
			++lfs_stats.pcleanwrites;
			lfs_stats.cleanblocks += nblocks - 1;
		}
	}
	return (lfs_initseg(fs) || do_again);
}

void
lfs_writesuper(struct lfs *fs, daddr_t daddr)
{
	struct buf *bp;
	dev_t i_dev;
	int (*strategy)(void *);
	int s;
	struct vop_strategy_args vop_strategy_a;

	/*
	 * If one superblock write were allowed to start while another
	 * was still in progress, a crash could leave us without a
	 * complete checkpoint.  So, block here if a superblock write
	 * is in progress.
	 */
	s = splbio();
	while (fs->lfs_sbactive) {
		tsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs sb", 0);
	}
	fs->lfs_sbactive = daddr;
	splx(s);
	i_dev = VTOI(fs->lfs_ivnode)->i_dev;
	strategy = VTOI(fs->lfs_ivnode)->i_devvp->v_op[VOFFSET(vop_strategy)];

	/* Set timestamp of this version of the superblock */
	if (fs->lfs_version == 1)
		fs->lfs_otstamp = time.tv_sec;
	fs->lfs_tstamp = time.tv_sec;

	/* Checksum the superblock and copy it into a buffer. */
	fs->lfs_cksum = lfs_sb_cksum(&(fs->lfs_dlfs));
	bp = lfs_newbuf(fs, VTOI(fs->lfs_ivnode)->i_devvp,
			fsbtodb(fs, daddr), LFS_SBPAD);
	*(struct dlfs *)bp->b_data = fs->lfs_dlfs;

	bp->b_dev = i_dev;
	bp->b_flags |= B_BUSY | B_CALL | B_ASYNC;
	bp->b_flags &= ~(B_DONE | B_ERROR | B_READ | B_DELWRI);
	bp->b_iodone = lfs_supercallback;
	/* XXX KS - same nasty hack as above */
	bp->b_saveaddr = (caddr_t)fs;

	vop_strategy_a.a_desc = VDESC(vop_strategy);
	vop_strategy_a.a_bp = bp;
	s = splbio();
	++bp->b_vp->v_numoutput;
	++fs->lfs_iocount;
	splx(s);
	(strategy)(&vop_strategy_a);
}

/*
 * Logical block number match routines used when traversing the dirty block
 * chain.
 */
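/*
 * Indirect blocks carry negative logical block numbers.  Under the
 * numbering scheme the routines below assume, (-lbn - NDADDR) modulo
 * NINDIR(fs) is 0 for a single indirect block, 1 for a double indirect
 * and 2 for a triple indirect.  For example, with NDADDR 12, an lbn of
 * -12 satisfies lfs_match_indir.
 */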
int
lfs_match_fake(struct lfs *fs, struct buf *bp)
{
	return (bp->b_flags & B_CALL);
}

int
lfs_match_data(struct lfs *fs, struct buf *bp)
{
	return (bp->b_lblkno >= 0);
}

int
lfs_match_indir(struct lfs *fs, struct buf *bp)
{
	int lbn;

	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 0);
}

int
lfs_match_dindir(struct lfs *fs, struct buf *bp)
{
	int lbn;

	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 1);
}

int
lfs_match_tindir(struct lfs *fs, struct buf *bp)
{
	int lbn;

	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 2);
}

/*
 * XXX - The only buffers that are going to hit these functions are the
 * segment write blocks, or the segment summaries, or the superblocks.
 *
 * All of the above are created by lfs_newbuf, and so do not need to be
 * released via brelse.
 */
void
lfs_callback(struct buf *bp)
{
	/* struct lfs *fs; */
	/* fs = (struct lfs *)bp->b_saveaddr; */
	lfs_freebuf(bp);
}

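/*
 * A superblock write has completed: clear lfs_sbactive, wake any writer
 * sleeping in lfs_writesuper, and retire this I/O against the throttle.
 */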
void
lfs_supercallback(struct buf *bp)
{
	struct lfs *fs;

	fs = (struct lfs *)bp->b_saveaddr;
	fs->lfs_sbactive = 0;
	wakeup(&fs->lfs_sbactive);
	if (--fs->lfs_iocount < LFS_THROTTLE)
		wakeup(&fs->lfs_iocount);
	lfs_freebuf(bp);
}

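/*
 * Undo the cluster assembly done in lfs_writeseg: shift a shifted
 * summary back into place, return pagemoved pages (or free the malloc'd
 * chunk), and release or requeue each component buffer.
 */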
static void
lfs_cluster_callback(struct buf *bp)
{
	struct lfs_cluster *cl;
	struct lfs *fs;
	struct buf *tbp;
	struct vnode *vp;
	int error = 0;
	char *cp;
	extern int locked_queue_count;
	extern long locked_queue_bytes;

	if (bp->b_flags & B_ERROR)
		error = bp->b_error;

	cl = (struct lfs_cluster *)bp->b_saveaddr;
	fs = cl->fs;
	bp->b_saveaddr = cl->saveaddr;

	/* If shifted, shift back now */
	if (cl->flags & LFS_CL_SHIFT) {
		bp->b_data -= (NBPG - fs->lfs_sumsize);
		bp->b_bcount += (NBPG - fs->lfs_sumsize);
	}

	cp = (char *)bp->b_data + cl->bufsize;
	/* Put the pages back, and release the buffer */
	while (cl->bufcount--) {
		tbp = cl->bpp[cl->bufcount];
		if (!(cl->flags & LFS_CL_MALLOC)) {
			cp -= tbp->b_bcount;
2073 printf("pm(%p,%p,%lx)",cp,tbp->b_data,tbp->b_bcount);
			pagemove(cp, tbp->b_data, tbp->b_bcount);
			bp->b_bufsize -= tbp->b_bcount;
			tbp->b_bufsize += tbp->b_bcount;
		}
		if (error) {
			tbp->b_flags |= B_ERROR;
			tbp->b_error = error;
		}

		/*
		 * We're done with tbp.  If it has not been re-dirtied since
		 * the cluster was written, free it.  Otherwise, keep it on
		 * the locked list to be written again.
		 */
		if ((tbp->b_flags & (B_LOCKED | B_DELWRI)) == B_LOCKED)
			LFS_UNLOCK_BUF(tbp);
		tbp->b_flags &= ~B_GATHERED;

		LFS_BCLEAN_LOG(fs, tbp);

		vp = tbp->b_vp;
		/* Segment summary for a shifted cluster */
		if (!cl->bufcount && (cl->flags & LFS_CL_SHIFT))
			tbp->b_flags |= B_INVAL;
		if (!(tbp->b_flags & B_CALL)) {
			bremfree(tbp);
			if (vp)
				reassignbuf(tbp, vp);
			tbp->b_flags |= B_ASYNC;	/* for biodone */
		}
#ifdef DIAGNOSTIC
		if (tbp->b_flags & B_DONE) {
			printf("blk %d biodone already (flags %lx)\n",
			       cl->bufcount, (long)tbp->b_flags);
		}
#endif
		if (tbp->b_flags & (B_BUSY | B_CALL)) {
			biodone(tbp);
		}
	}

	/* Fix up the cluster buffer, and release it */
	if (!(cl->flags & LFS_CL_MALLOC) && bp->b_bufsize) {
2117 printf("PM(%p,%p,%lx)", (char *)bp->b_data + bp->b_bcount,
2118 (char *)bp->b_data, bp->b_bufsize);
		pagemove((char *)bp->b_data + bp->b_bcount,
			 (char *)bp->b_data, bp->b_bufsize);
	}
	if (cl->flags & LFS_CL_MALLOC) {
		free(bp->b_data, M_SEGMENT);
		bp->b_data = cl->olddata;
	}
	bp->b_bcount = 0;
	bp->b_iodone = NULL;
	bp->b_flags &= ~B_DELWRI;
	bp->b_flags |= B_DONE;
	reassignbuf(bp, bp->b_vp);
	brelse(bp);

	free(cl->bpp, M_SEGMENT);
	free(cl, M_SEGMENT);

#ifdef DIAGNOSTIC
	if (fs->lfs_iocount == 0)
		panic("lfs_cluster_callback: zero iocount");
#endif
	if (--fs->lfs_iocount < LFS_THROTTLE)
		wakeup(&fs->lfs_iocount);
#if 0
	if (fs->lfs_iocount == 0) {
		/*
		 * XXX - do we really want to do this in a callback?
		 *
		 * Vinvalbuf can move locked buffers off the locked queue
		 * and we have no way of knowing about this.  So, after
		 * doing a big write, we recalculate how many buffers are
		 * really still left on the locked queue.
		 */
		lfs_countlocked(&locked_queue_count, &locked_queue_bytes,
				"lfs_cluster_callback");
		wakeup(&locked_queue_count);
	}
#endif
}

/*
 * Shellsort (diminishing increment sort) from Data Structures and
 * Algorithms, Aho, Hopcroft and Ullman, 1983 Edition, page 290;
 * see also Knuth Vol. 3, page 84.  The increments are selected from
 * formula (8), page 95.  Roughly O(N^3/2).
 */
/*
 * This is our own private copy of shellsort because we want to sort
 * two parallel arrays (the array of buffer pointers and the array of
 * logical block numbers) simultaneously.  Note that we cast the array
 * of logical block numbers to an unsigned in this routine so that the
 * negative block numbers (meta data blocks) sort AFTER the data blocks.
 */
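
/*
 * The short increment list { 4, 1 } presumably reflects the small
 * arrays this routine typically sees.  As an illustration of the
 * unsigned comparison below, the lbns { 2, -13, 0 } sort to
 * { 0, 2, -13 }, since (u_long)-13 compares larger than any
 * non-negative (data) block number.
 */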
void
lfs_shellsort(struct buf **bp_array, ufs_daddr_t *lb_array, int nmemb)
{
	static int __rsshell_increments[] = { 4, 1, 0 };
	int incr, *incrp, t1, t2;
	struct buf *bp_temp;
	u_long lb_temp;

	for (incrp = __rsshell_increments; (incr = *incrp++) != 0;)
		for (t1 = incr; t1 < nmemb; ++t1)
			for (t2 = t1 - incr; t2 >= 0;)
				/* Unsigned, so metadata sorts after data */
				if ((u_long)lb_array[t2] >
				    (u_long)lb_array[t2 + incr]) {
					lb_temp = lb_array[t2];
					lb_array[t2] = lb_array[t2 + incr];
					lb_array[t2 + incr] = lb_temp;
					bp_temp = bp_array[t2];
					bp_array[t2] = bp_array[t2 + incr];
					bp_array[t2 + incr] = bp_temp;
					t2 -= incr;
				} else
					break;
}

/*
 * Check VXLOCK.  Return 1 if the vnode is locked.  Otherwise, vget it.
 */
int
lfs_vref(struct vnode *vp)
{
	/*
	 * If we return 1 here during a flush, we risk vinvalbuf() not
	 * being able to flush all of the pages from this vnode, which
	 * will cause it to panic.  So, return 0 if a flush is in progress.
	 */
	if (vp->v_flag & VXLOCK) {
		if (IS_FLUSHING(VTOI(vp)->i_lfs, vp)) {
			return 0;
		}
		return (1);
	}
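	/* With no LK_* flags, vget just takes a reference. */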
	return (vget(vp, 0));
}

/*
 * This is vrele except that we do not want to VOP_INACTIVE this vnode.  We
 * inline vrele here to avoid the vn_lock and VOP_INACTIVE call at the end.
 */
void
lfs_vunref(struct vnode *vp)
{
	/*
	 * Analogous to lfs_vref: if the node is flushing, fake it.
	 */
	if ((vp->v_flag & VXLOCK) && IS_FLUSHING(VTOI(vp)->i_lfs, vp)) {
		return;
	}

	simple_lock(&vp->v_interlock);
#ifdef DIAGNOSTIC
	if (vp->v_usecount <= 0) {
		printf("lfs_vunref: inum is %d\n", VTOI(vp)->i_number);
		printf("lfs_vunref: flags are 0x%lx\n", (u_long)vp->v_flag);
		printf("lfs_vunref: usecount = %ld\n", (long)vp->v_usecount);
		panic("lfs_vunref: v_usecount<0");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}
	/*
	 * Insert at tail of LRU list.
	 */
	simple_lock(&vnode_free_list_slock);
	if (vp->v_holdcnt > 0)
		TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
	else
		TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	simple_unlock(&vp->v_interlock);
}

/*
 * We use this when we have vnodes that were loaded in solely for cleaning.
 * There is no reason to believe that these vnodes will be referenced again
 * soon, since the cleaning process is unrelated to normal filesystem
 * activity.  Putting cleaned vnodes at the tail of the list has the effect
 * of flushing the vnode LRU.  So, put vnodes that were loaded only for
 * cleaning at the head of the list, instead.
 */
void
lfs_vunref_head(struct vnode *vp)
{
	simple_lock(&vp->v_interlock);
#ifdef DIAGNOSTIC
	if (vp->v_usecount <= 0) {
		panic("lfs_vunref_head: v_usecount <= 0");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}
	/*
	 * Insert at head of LRU list.
	 */
	simple_lock(&vnode_free_list_slock);
	TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	simple_unlock(&vp->v_interlock);
}