1 /* $NetBSD: lfs_segment.c,v 1.142 2003/10/25 18:26:46 christos Exp $ */
2
3 /*-
4 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Konrad E. Schroder <perseant (at) hhhh.org>.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38 /*
39 * Copyright (c) 1991, 1993
40 * The Regents of the University of California. All rights reserved.
41 *
42 * Redistribution and use in source and binary forms, with or without
43 * modification, are permitted provided that the following conditions
44 * are met:
45 * 1. Redistributions of source code must retain the above copyright
46 * notice, this list of conditions and the following disclaimer.
47 * 2. Redistributions in binary form must reproduce the above copyright
48 * notice, this list of conditions and the following disclaimer in the
49 * documentation and/or other materials provided with the distribution.
50 * 3. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
53 *
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
65 *
66 * @(#)lfs_segment.c 8.10 (Berkeley) 6/10/95
67 */
68
69 #include <sys/cdefs.h>
70 __KERNEL_RCSID(0, "$NetBSD: lfs_segment.c,v 1.142 2003/10/25 18:26:46 christos Exp $");
71
72 #define ivndebug(vp,str) printf("ino %d: %s\n",VTOI(vp)->i_number,(str))
73
74 #if defined(_KERNEL_OPT)
75 #include "opt_ddb.h"
76 #endif
77
78 #include <sys/param.h>
79 #include <sys/systm.h>
80 #include <sys/namei.h>
81 #include <sys/kernel.h>
82 #include <sys/resourcevar.h>
83 #include <sys/file.h>
84 #include <sys/stat.h>
85 #include <sys/buf.h>
86 #include <sys/proc.h>
87 #include <sys/vnode.h>
88 #include <sys/mount.h>
89
90 #include <miscfs/specfs/specdev.h>
91 #include <miscfs/fifofs/fifo.h>
92
93 #include <ufs/ufs/inode.h>
94 #include <ufs/ufs/dir.h>
95 #include <ufs/ufs/ufsmount.h>
96 #include <ufs/ufs/ufs_extern.h>
97
98 #include <ufs/lfs/lfs.h>
99 #include <ufs/lfs/lfs_extern.h>
100
101 #include <uvm/uvm.h>
102 #include <uvm/uvm_extern.h>
103
104 MALLOC_DEFINE(M_SEGMENT, "LFS segment", "Segment for LFS");
105
106 extern int count_lock_queue(void);
107 extern struct simplelock vnode_free_list_slock; /* XXX */
108 extern struct simplelock bqueue_slock; /* XXX */
109
110 static void lfs_generic_callback(struct buf *, void (*)(struct buf *));
111 static void lfs_super_aiodone(struct buf *);
112 static void lfs_cluster_aiodone(struct buf *);
113 static void lfs_cluster_callback(struct buf *);
114
115 /*
116 * Determine whether it's OK to start a new partial segment in the current
117 * segment, or if we need to go on to a new segment.
118 */
119 #define LFS_PARTIAL_FITS(fs) \
120 ((fs)->lfs_fsbpseg - ((fs)->lfs_offset - (fs)->lfs_curseg) > \
121 fragstofsb((fs), (fs)->lfs_frag))
122
123 int lfs_match_fake(struct lfs *, struct buf *);
124 void lfs_newseg(struct lfs *);
125 /* XXX ondisk32 */
126 void lfs_shellsort(struct buf **, int32_t *, int, int);
127 void lfs_supercallback(struct buf *);
128 void lfs_updatemeta(struct segment *);
129 void lfs_writesuper(struct lfs *, daddr_t);
130 int lfs_writevnodes(struct lfs *fs, struct mount *mp,
131 struct segment *sp, int dirops);
132
133 int lfs_allclean_wakeup; /* Cleaner wakeup address. */
134 int lfs_writeindir = 1; /* whether to flush indir on non-ckp */
135 int lfs_clean_vnhead = 0; /* Allow freeing to head of vn list */
136 int lfs_dirvcount = 0; /* # active dirops */
137
138 /* Statistics Counters */
139 int lfs_dostats = 1;
140 struct lfs_stats lfs_stats;
141
142 /* op values to lfs_writevnodes */
143 #define VN_REG 0
144 #define VN_DIROP 1
145 #define VN_EMPTY 2
146 #define VN_CLEAN 3
147
148 /*
149 * XXX KS - Set modification time on the Ifile, so the cleaner can
150 * read the fs mod time off of it. We don't set IN_UPDATE here,
151 * since we don't really need this to be flushed to disk (and in any
152 * case that wouldn't happen to the Ifile until we checkpoint).
153 */
154 void
155 lfs_imtime(struct lfs *fs)
156 {
157 struct timespec ts;
158 struct inode *ip;
159
160 TIMEVAL_TO_TIMESPEC(&time, &ts);
161 ip = VTOI(fs->lfs_ivnode);
162 ip->i_ffs1_mtime = ts.tv_sec;
163 ip->i_ffs1_mtimensec = ts.tv_nsec;
164 }
165
166 /*
167 * Ifile and meta data blocks are not marked busy, so segment writes MUST be
168 * single threaded. Currently, there are two paths into lfs_segwrite, sync()
169 * and getnewbuf(). They both mark the file system busy. Lfs_vflush()
170 * explicitly marks the file system busy. So lfs_segwrite is safe. I think.
171 */
172
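/*
 * Track the vnode currently being flushed by lfs_vflush(), so that the
 * rest of the segment-writing code can tell (via IS_FLUSHING) that it is
 * working on behalf of a flush and must write all of that vnode's blocks.
 */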
173 #define SET_FLUSHING(fs,vp) (fs)->lfs_flushvp = (vp)
174 #define IS_FLUSHING(fs,vp) ((fs)->lfs_flushvp == (vp))
175 #define CLR_FLUSHING(fs,vp) (fs)->lfs_flushvp = NULL
176
177 int
178 lfs_vflush(struct vnode *vp)
179 {
180 struct inode *ip;
181 struct lfs *fs;
182 struct segment *sp;
183 struct buf *bp, *nbp, *tbp, *tnbp;
184 int error, s;
185 int flushed;
186 #if 0
187 int redo;
188 #endif
189
190 ip = VTOI(vp);
191 fs = VFSTOUFS(vp->v_mount)->um_lfs;
192
193 if (ip->i_flag & IN_CLEANING) {
194 #ifdef DEBUG_LFS
195 ivndebug(vp,"vflush/in_cleaning");
196 #endif
197 LFS_CLR_UINO(ip, IN_CLEANING);
198 LFS_SET_UINO(ip, IN_MODIFIED);
199
200 /*
201 * Toss any cleaning buffers that have real counterparts
202 * to avoid losing new data.
203 */
204 s = splbio();
205 for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
206 nbp = LIST_NEXT(bp, b_vnbufs);
207 if (!LFS_IS_MALLOC_BUF(bp))
208 continue;
209 /*
210 * Look for pages matching the range covered
211 * by cleaning blocks. It's okay if more dirty
212 * pages appear, so long as none disappear out
213 * from under us.
214 */
215 if (bp->b_lblkno > 0 && vp->v_type == VREG &&
216 vp != fs->lfs_ivnode) {
217 struct vm_page *pg;
218 voff_t off;
219
220 simple_lock(&vp->v_interlock);
221 for (off = lblktosize(fs, bp->b_lblkno);
222 off < lblktosize(fs, bp->b_lblkno + 1);
223 off += PAGE_SIZE) {
224 pg = uvm_pagelookup(&vp->v_uobj, off);
225 if (pg == NULL)
226 continue;
227 if ((pg->flags & PG_CLEAN) == 0 ||
228 pmap_is_modified(pg)) {
229 simple_unlock(&vp->v_interlock); /* drop the interlock before tossing bp */
230 fs->lfs_avail += btofsb(fs, bp->b_bcount);
231 wakeup(&fs->lfs_avail);
232 lfs_freebuf(fs, bp);
233 bp = NULL;
234 goto nextbp;
235 }
236 }
237 simple_unlock(&vp->v_interlock);
238 }
239 for (tbp = LIST_FIRST(&vp->v_dirtyblkhd); tbp;
240 tbp = tnbp)
241 {
242 tnbp = LIST_NEXT(tbp, b_vnbufs);
243 if (tbp->b_vp == bp->b_vp
244 && tbp->b_lblkno == bp->b_lblkno
245 && tbp != bp)
246 {
247 fs->lfs_avail += btofsb(fs,
248 bp->b_bcount);
249 wakeup(&fs->lfs_avail);
250 lfs_freebuf(fs, bp);
251 bp = NULL;
252 break;
253 }
254 }
255 nextbp:
256 ;
257 }
258 splx(s);
259 }
260
261 /* If the node is being written, wait until that is done */
262 s = splbio();
263 if (WRITEINPROG(vp)) {
264 #ifdef DEBUG_LFS
265 ivndebug(vp,"vflush/writeinprog");
266 #endif
267 tsleep(vp, PRIBIO+1, "lfs_vw", 0);
268 }
269 splx(s);
270
271 /* Protect against VXLOCK deadlock in vinvalbuf() */
272 lfs_seglock(fs, SEGM_SYNC);
273
274 /* If we're supposed to flush a freed inode, just toss it */
275 /* XXX - seglock, so these buffers can't be gathered, right? */
276 if (ip->i_mode == 0) {
277 printf("lfs_vflush: ino %d is freed, not flushing\n",
278 ip->i_number);
279 s = splbio();
280 for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
281 nbp = LIST_NEXT(bp, b_vnbufs);
282 if (bp->b_flags & B_DELWRI) { /* XXX always true? */
283 fs->lfs_avail += btofsb(fs, bp->b_bcount);
284 wakeup(&fs->lfs_avail);
285 }
286 /* Copied from lfs_writeseg */
287 if (bp->b_flags & B_CALL) {
288 biodone(bp);
289 } else {
290 bremfree(bp);
291 LFS_UNLOCK_BUF(bp);
292 bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
293 B_GATHERED);
294 bp->b_flags |= B_DONE;
295 reassignbuf(bp, vp);
296 brelse(bp);
297 }
298 }
299 splx(s);
300 LFS_CLR_UINO(ip, IN_CLEANING);
301 LFS_CLR_UINO(ip, IN_MODIFIED | IN_ACCESSED);
302 ip->i_flag &= ~IN_ALLMOD;
303 printf("lfs_vflush: done not flushing ino %d\n",
304 ip->i_number);
305 lfs_segunlock(fs);
306 return 0;
307 }
308
309 SET_FLUSHING(fs,vp);
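/*
 * If too many segments are already active, or a checkpoint is in
 * progress, write a full checkpoint rather than flushing just this
 * vnode.
 */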
310 if (fs->lfs_nactive > LFS_MAX_ACTIVE ||
311 (fs->lfs_sp->seg_flags & SEGM_CKP)) {
312 error = lfs_segwrite(vp->v_mount, SEGM_CKP | SEGM_SYNC);
313 CLR_FLUSHING(fs,vp);
314 lfs_segunlock(fs);
315 return error;
316 }
317 sp = fs->lfs_sp;
318
319 flushed = 0;
320 if (VPISEMPTY(vp)) {
321 lfs_writevnodes(fs, vp->v_mount, sp, VN_EMPTY);
322 ++flushed;
323 } else if ((ip->i_flag & IN_CLEANING) &&
324 (fs->lfs_sp->seg_flags & SEGM_CLEAN)) {
325 #ifdef DEBUG_LFS
326 ivndebug(vp,"vflush/clean");
327 #endif
328 lfs_writevnodes(fs, vp->v_mount, sp, VN_CLEAN);
329 ++flushed;
330 } else if (lfs_dostats) {
331 if (!VPISEMPTY(vp) || (VTOI(vp)->i_flag & IN_ALLMOD))
332 ++lfs_stats.vflush_invoked;
333 #ifdef DEBUG_LFS
334 ivndebug(vp,"vflush");
335 #endif
336 }
337
338 #ifdef DIAGNOSTIC
339 /* XXX KS This actually can happen right now, though it shouldn't(?) */
340 if (vp->v_flag & VDIROP) {
341 printf("lfs_vflush: flushing VDIROP, this shouldn\'t be\n");
342 /* panic("VDIROP being flushed...this can\'t happen"); */
343 }
344 if (vp->v_usecount < 0) {
345 printf("usecount=%ld\n", (long)vp->v_usecount);
346 panic("lfs_vflush: usecount<0");
347 }
348 #endif
349
350 #if 1
351 do {
352 do {
353 if (LIST_FIRST(&vp->v_dirtyblkhd) != NULL)
354 lfs_writefile(fs, sp, vp);
355 } while (lfs_writeinode(fs, sp, ip));
356 } while (lfs_writeseg(fs, sp) && ip->i_number == LFS_IFILE_INUM);
357 #else
358 if (flushed && vp != fs->lfs_ivnode)
359 lfs_writeseg(fs, sp);
360 else do {
361 fs->lfs_flags &= ~LFS_IFDIRTY;
362 lfs_writefile(fs, sp, vp);
363 redo = lfs_writeinode(fs, sp, ip);
364 redo += lfs_writeseg(fs, sp);
365 redo += (fs->lfs_flags & LFS_IFDIRTY);
366 } while (redo && vp == fs->lfs_ivnode);
367 #endif
368 if (lfs_dostats) {
369 ++lfs_stats.nwrites;
370 if (sp->seg_flags & SEGM_SYNC)
371 ++lfs_stats.nsync_writes;
372 if (sp->seg_flags & SEGM_CKP)
373 ++lfs_stats.ncheckpoints;
374 }
375 /*
376 * If we were called from somewhere that has already held the seglock
377 * (e.g., lfs_markv()), the lfs_segunlock will not wait for
378 * the write to complete because we are still locked.
379 * Since lfs_vflush() must return the vnode with no dirty buffers,
380 * we must explicitly wait, if that is the case.
381 *
382 * We compare the iocount against 1, not 0, because it is
383 * artificially incremented by lfs_seglock().
384 */
385 simple_lock(&fs->lfs_interlock);
386 if (fs->lfs_seglock > 1) {
387 simple_unlock(&fs->lfs_interlock);
388 while (fs->lfs_iocount > 1)
389 (void)tsleep(&fs->lfs_iocount, PRIBIO + 1,
390 "lfs_vflush", 0);
391 } else
392 simple_unlock(&fs->lfs_interlock);
393
394 lfs_segunlock(fs);
395
396 /* Wait for these buffers to be recovered by aiodoned */
397 s = splbio();
398 simple_lock(&global_v_numoutput_slock);
399 while (vp->v_numoutput > 0) {
400 ltsleep(&vp->v_numoutput, PRIBIO + 1, "lfs_vf2", 0,
401 &global_v_numoutput_slock);
402 }
403 simple_unlock(&global_v_numoutput_slock);
404 splx(s);
405
406 CLR_FLUSHING(fs,vp);
407 return (0);
408 }
409
410 #ifdef DEBUG_LFS_VERBOSE
411 # define vndebug(vp,str) if (VTOI(vp)->i_flag & IN_CLEANING) printf("not writing ino %d because %s (op %d)\n",VTOI(vp)->i_number,(str),op)
412 #else
413 # define vndebug(vp,str)
414 #endif
415
416 int
417 lfs_writevnodes(struct lfs *fs, struct mount *mp, struct segment *sp, int op)
418 {
419 struct inode *ip;
420 struct vnode *vp, *nvp;
421 int inodes_written = 0, only_cleaning;
422
423 #ifndef LFS_NO_BACKVP_HACK
424 /* BEGIN HACK */
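/*
 * These macros let the loop below walk the mount's vnode list from
 * tail to head: BACK_VP() recovers the previous vnode from the
 * LIST(3) le_prev pointer, and BEG_OF_VLIST marks where to stop.
 */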
425 #define VN_OFFSET \
426 (((caddr_t)&LIST_NEXT(vp, v_mntvnodes)) - (caddr_t)vp)
427 #define BACK_VP(VP) \
428 ((struct vnode *)(((caddr_t)(VP)->v_mntvnodes.le_prev) - VN_OFFSET))
429 #define BEG_OF_VLIST \
430 ((struct vnode *)(((caddr_t)&LIST_FIRST(&mp->mnt_vnodelist)) \
431 - VN_OFFSET))
432
433 /* Find last vnode. */
434 loop: for (vp = LIST_FIRST(&mp->mnt_vnodelist);
435 vp && LIST_NEXT(vp, v_mntvnodes) != NULL;
436 vp = LIST_NEXT(vp, v_mntvnodes));
437 for (; vp && vp != BEG_OF_VLIST; vp = nvp) {
438 nvp = BACK_VP(vp);
439 #else
440 loop:
441 for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
442 nvp = LIST_NEXT(vp, v_mntvnodes);
443 #endif
444 /*
445 * If the vnode that we are about to sync is no longer
446 * associated with this mount point, start over.
447 */
448 if (vp->v_mount != mp) {
449 printf("lfs_writevnodes: starting over\n");
450 /*
451 * After this, pages might be busy
452 * due to our own previous putpages.
453 * Start actual segment write here to avoid deadlock.
454 */
455 (void)lfs_writeseg(fs, sp);
456 goto loop;
457 }
458
459 if (vp->v_type == VNON) {
460 continue;
461 }
462
463 ip = VTOI(vp);
464 if ((op == VN_DIROP && !(vp->v_flag & VDIROP)) ||
465 (op != VN_DIROP && op != VN_CLEAN &&
466 (vp->v_flag & VDIROP))) {
467 vndebug(vp,"dirop");
468 continue;
469 }
470
471 if (op == VN_EMPTY && !VPISEMPTY(vp)) {
472 vndebug(vp,"empty");
473 continue;
474 }
475
476 if (op == VN_CLEAN && ip->i_number != LFS_IFILE_INUM
477 && vp != fs->lfs_flushvp
478 && !(ip->i_flag & IN_CLEANING)) {
479 vndebug(vp,"cleaning");
480 continue;
481 }
482
483 if (lfs_vref(vp)) {
484 vndebug(vp,"vref");
485 continue;
486 }
487
488 only_cleaning = 0;
489 /*
490 * Write the inode/file if dirty and it's not the IFILE.
491 */
492 if ((ip->i_flag & IN_ALLMOD) || !VPISEMPTY(vp)) {
493 only_cleaning =
494 ((ip->i_flag & IN_ALLMOD) == IN_CLEANING);
495
496 if (ip->i_number != LFS_IFILE_INUM)
497 lfs_writefile(fs, sp, vp);
498 if (!VPISEMPTY(vp)) {
499 if (WRITEINPROG(vp)) {
500 #ifdef DEBUG_LFS
501 ivndebug(vp,"writevnodes/write2");
502 #endif
503 } else if (!(ip->i_flag & IN_ALLMOD)) {
504 #ifdef DEBUG_LFS
505 printf("<%d>",ip->i_number);
506 #endif
507 LFS_SET_UINO(ip, IN_MODIFIED);
508 }
509 }
510 (void) lfs_writeinode(fs, sp, ip);
511 inodes_written++;
512 }
513
514 if (lfs_clean_vnhead && only_cleaning)
515 lfs_vunref_head(vp);
516 else
517 lfs_vunref(vp);
518 }
519 return inodes_written;
520 }
521
522 /*
523 * Do a checkpoint.
524 */
525 int
526 lfs_segwrite(struct mount *mp, int flags)
527 {
528 struct buf *bp;
529 struct inode *ip;
530 struct lfs *fs;
531 struct segment *sp;
532 struct vnode *vp;
533 SEGUSE *segusep;
534 int do_ckp, did_ckp, error, s;
535 unsigned n, segleft, maxseg, sn, i, curseg;
536 int writer_set = 0;
537 int dirty;
538 int redo;
539
540 fs = VFSTOUFS(mp)->um_lfs;
541
542 if (fs->lfs_ronly)
543 return EROFS;
544
545 lfs_imtime(fs);
546
547 /*
548 * Allocate a segment structure and enough space to hold pointers to
549 * the maximum possible number of buffers which can be described in a
550 * single summary block.
551 */
552 do_ckp = (flags & SEGM_CKP) || fs->lfs_nactive > LFS_MAX_ACTIVE;
553 lfs_seglock(fs, flags | (do_ckp ? SEGM_CKP : 0));
554 sp = fs->lfs_sp;
555
556 /*
557 * If lfs_flushvp is non-NULL, we are called from lfs_vflush,
558 * in which case we have to flush *all* buffers off of this vnode.
559 * We don't care about other nodes, but write any non-dirop nodes
560 * anyway in anticipation of another getnewvnode().
561 *
562 * If we're cleaning we only write cleaning and ifile blocks, and
563 * no dirops, since otherwise we'd risk corruption in a crash.
564 */
565 if (sp->seg_flags & SEGM_CLEAN)
566 lfs_writevnodes(fs, mp, sp, VN_CLEAN);
567 else if (!(sp->seg_flags & SEGM_FORCE_CKP)) {
568 lfs_writevnodes(fs, mp, sp, VN_REG);
569 if (!fs->lfs_dirops || !fs->lfs_flushvp) {
570 error = lfs_writer_enter(fs, "lfs writer");
571 if (error) {
572 printf("segwrite mysterious error\n");
573 /* XXX why not segunlock? */
574 pool_put(&fs->lfs_bpppool, sp->bpp);
575 sp->bpp = NULL;
576 pool_put(&fs->lfs_segpool, sp);
577 sp = fs->lfs_sp = NULL;
578 return (error);
579 }
580 writer_set = 1;
581 lfs_writevnodes(fs, mp, sp, VN_DIROP);
582 ((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
583 }
584 }
585
586 /*
587 * If we are doing a checkpoint, mark everything since the
588 * last checkpoint as no longer ACTIVE.
589 */
590 if (do_ckp) {
591 segleft = fs->lfs_nseg;
592 curseg = 0;
593 for (n = 0; n < fs->lfs_segtabsz; n++) {
594 dirty = 0;
595 if (bread(fs->lfs_ivnode,
596 fs->lfs_cleansz + n, fs->lfs_bsize, NOCRED, &bp))
597 panic("lfs_segwrite: ifile read");
598 segusep = (SEGUSE *)bp->b_data;
599 maxseg = min(segleft, fs->lfs_sepb);
600 for (i = 0; i < maxseg; i++) {
601 sn = curseg + i;
602 if (sn != dtosn(fs, fs->lfs_curseg) &&
603 segusep->su_flags & SEGUSE_ACTIVE) {
604 segusep->su_flags &= ~SEGUSE_ACTIVE;
605 --fs->lfs_nactive;
606 ++dirty;
607 }
608 fs->lfs_suflags[fs->lfs_activesb][sn] =
609 segusep->su_flags;
610 if (fs->lfs_version > 1)
611 ++segusep;
612 else
613 segusep = (SEGUSE *)
614 ((SEGUSE_V1 *)segusep + 1);
615 }
616
617 if (dirty)
618 error = LFS_BWRITE_LOG(bp); /* Ifile */
619 else
620 brelse(bp);
621 segleft -= fs->lfs_sepb;
622 curseg += fs->lfs_sepb;
623 }
624 }
625
626 did_ckp = 0;
627 if (do_ckp || fs->lfs_doifile) {
628 do {
629 vp = fs->lfs_ivnode;
630
631 #ifdef DEBUG
632 LFS_ENTER_LOG("pretend", __FILE__, __LINE__, 0, 0);
633 #endif
634 fs->lfs_flags &= ~LFS_IFDIRTY;
635
636 ip = VTOI(vp);
637
638 if (LIST_FIRST(&vp->v_dirtyblkhd) != NULL)
639 lfs_writefile(fs, sp, vp);
640
641 if (ip->i_flag & IN_ALLMOD)
642 ++did_ckp;
643 redo = lfs_writeinode(fs, sp, ip);
644 redo += lfs_writeseg(fs, sp);
645 redo += (fs->lfs_flags & LFS_IFDIRTY);
646 } while (redo && do_ckp);
647
648 /*
649 * Unless we are unmounting, the Ifile may continue to have
650 * dirty blocks even after a checkpoint, due to changes to
651 * inodes' atime. If we're checkpointing, it's "impossible"
652 * for other parts of the Ifile to be dirty after the loop
653 * above, since we hold the segment lock.
654 */
655 s = splbio();
656 if (LIST_EMPTY(&vp->v_dirtyblkhd)) {
657 LFS_CLR_UINO(ip, IN_ALLMOD);
658 }
659 #ifdef DIAGNOSTIC
660 else if (do_ckp) {
661 LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
662 if (bp->b_lblkno < fs->lfs_cleansz +
663 fs->lfs_segtabsz &&
664 !(bp->b_flags & B_GATHERED)) {
665 panic("dirty blocks");
666 }
667 }
668 }
669 #endif
670 splx(s);
671 } else {
672 (void) lfs_writeseg(fs, sp);
673 }
674
675 /* Note Ifile no longer needs to be written */
676 fs->lfs_doifile = 0;
677 if (writer_set)
678 lfs_writer_leave(fs);
679
680 /*
681 * If we didn't write the Ifile, we didn't really do anything.
682 * That means that (1) there is a checkpoint on disk and (2)
683 * nothing has changed since it was written.
684 *
685 * Take the flags off of the segment so that lfs_segunlock
686 * doesn't have to write the superblock either.
687 */
688 if (do_ckp && !did_ckp) {
689 sp->seg_flags &= ~SEGM_CKP;
690 }
691
692 if (lfs_dostats) {
693 ++lfs_stats.nwrites;
694 if (sp->seg_flags & SEGM_SYNC)
695 ++lfs_stats.nsync_writes;
696 if (sp->seg_flags & SEGM_CKP)
697 ++lfs_stats.ncheckpoints;
698 }
699 lfs_segunlock(fs);
700 return (0);
701 }
702
703 /*
704 * Write the dirty blocks associated with a vnode.
705 */
706 void
707 lfs_writefile(struct lfs *fs, struct segment *sp, struct vnode *vp)
708 {
709 struct buf *bp;
710 struct finfo *fip;
711 struct inode *ip;
712 IFILE *ifp;
713 int i, frag;
714
715 ip = VTOI(vp);
716
717 if (sp->seg_bytes_left < fs->lfs_bsize ||
718 sp->sum_bytes_left < sizeof(struct finfo))
719 (void) lfs_writeseg(fs, sp);
720
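/* Reserve room in the segment summary for this file's FINFO header. */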
721 sp->sum_bytes_left -= FINFOSIZE;
722 ++((SEGSUM *)(sp->segsum))->ss_nfinfo;
723
724 if (vp->v_flag & VDIROP)
725 ((SEGSUM *)(sp->segsum))->ss_flags |= (SS_DIROP|SS_CONT);
726
727 fip = sp->fip;
728 fip->fi_nblocks = 0;
729 fip->fi_ino = ip->i_number;
730 LFS_IENTRY(ifp, fs, fip->fi_ino, bp);
731 fip->fi_version = ifp->if_version;
732 brelse(bp);
733
734 if (sp->seg_flags & SEGM_CLEAN) {
735 lfs_gather(fs, sp, vp, lfs_match_fake);
736 /*
737 * For a file being flushed, we need to write *all* blocks.
738 * This means writing the cleaning blocks first, and then
739 * immediately following with any non-cleaning blocks.
740 * The same is true of the Ifile since checkpoints assume
741 * that all valid Ifile blocks are written.
742 */
743 if (IS_FLUSHING(fs,vp) || vp == fs->lfs_ivnode) {
744 lfs_gather(fs, sp, vp, lfs_match_data);
745 /*
746 * Don't call VOP_PUTPAGES: if we're flushing,
747 * we've already done it, and the Ifile doesn't
748 * use the page cache.
749 */
750 }
751 } else {
752 lfs_gather(fs, sp, vp, lfs_match_data);
753 /*
754 * If we're flushing, we've already called VOP_PUTPAGES
755 * so don't do it again. Otherwise, we want to write
756 * everything we've got.
757 */
758 if (!IS_FLUSHING(fs, vp)) {
759 simple_lock(&vp->v_interlock);
760 VOP_PUTPAGES(vp, 0, 0,
761 PGO_CLEANIT | PGO_ALLPAGES | PGO_LOCKED);
762 }
763 }
764
765 /*
766 * It may not be necessary to write the meta-data blocks at this point,
767 * as the roll-forward recovery code should be able to reconstruct the
768 * list.
769 *
770 * We have to write them anyway, though, under two conditions: (1) the
771 * vnode is being flushed (for reuse by vinvalbuf); or (2) we are
772 * checkpointing.
773 *
774 * BUT if we are cleaning, we might have indirect blocks that refer to
775 * new blocks not being written yet, in addition to fragments being
776 * moved out of a cleaned segment. If that is the case, don't
777 * write the indirect blocks, or the finfo will have a small block
778 * in the middle of it!
779 * XXX in this case isn't the inode size wrong too?
780 */
781 frag = 0;
782 if (sp->seg_flags & SEGM_CLEAN) {
783 for (i = 0; i < NDADDR; i++)
784 if (ip->i_lfs_fragsize[i] > 0 &&
785 ip->i_lfs_fragsize[i] < fs->lfs_bsize)
786 ++frag;
787 }
788 #ifdef DIAGNOSTIC
789 if (frag > 1)
790 panic("lfs_writefile: more than one fragment!");
791 #endif
792 if (IS_FLUSHING(fs, vp) ||
793 (frag == 0 && (lfs_writeindir || (sp->seg_flags & SEGM_CKP)))) {
794 lfs_gather(fs, sp, vp, lfs_match_indir);
795 lfs_gather(fs, sp, vp, lfs_match_dindir);
796 lfs_gather(fs, sp, vp, lfs_match_tindir);
797 }
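/*
 * If we gathered any blocks for this file, advance sp->fip past the
 * FINFO we just filled in; otherwise give back the summary space and
 * FINFO count reserved above.
 */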
798 fip = sp->fip;
799 if (fip->fi_nblocks != 0) {
800 sp->fip = (FINFO*)((caddr_t)fip + FINFOSIZE +
801 sizeof(int32_t) * (fip->fi_nblocks));
802 sp->start_lbp = &sp->fip->fi_blocks[0];
803 } else {
804 sp->sum_bytes_left += FINFOSIZE;
805 --((SEGSUM *)(sp->segsum))->ss_nfinfo;
806 }
807 }
808
809 int
810 lfs_writeinode(struct lfs *fs, struct segment *sp, struct inode *ip)
811 {
812 struct buf *bp, *ibp;
813 struct ufs1_dinode *cdp;
814 IFILE *ifp;
815 SEGUSE *sup;
816 daddr_t daddr;
817 int32_t *daddrp; /* XXX ondisk32 */
818 ino_t ino;
819 int error, i, ndx, fsb = 0;
820 int redo_ifile = 0;
821 struct timespec ts;
822 int gotblk = 0;
823
824 if (!(ip->i_flag & IN_ALLMOD))
825 return (0);
826
827 /* Allocate a new inode block if necessary. */
828 if ((ip->i_number != LFS_IFILE_INUM || sp->idp == NULL) &&
829 sp->ibp == NULL) {
830 /* Allocate a new segment if necessary. */
831 if (sp->seg_bytes_left < fs->lfs_ibsize ||
832 sp->sum_bytes_left < sizeof(int32_t))
833 (void) lfs_writeseg(fs, sp);
834
835 /* Get next inode block. */
836 daddr = fs->lfs_offset;
837 fs->lfs_offset += btofsb(fs, fs->lfs_ibsize);
838 sp->ibp = *sp->cbpp++ =
839 getblk(VTOI(fs->lfs_ivnode)->i_devvp,
840 fsbtodb(fs, daddr), fs->lfs_ibsize, 0, 0);
841 gotblk++;
842
843 /* Zero out inode numbers */
844 for (i = 0; i < INOPB(fs); ++i)
845 ((struct ufs1_dinode *)sp->ibp->b_data)[i].di_inumber =
846 0;
847
848 ++sp->start_bpp;
849 fs->lfs_avail -= btofsb(fs, fs->lfs_ibsize);
850 /* Set remaining space counters. */
851 sp->seg_bytes_left -= fs->lfs_ibsize;
852 sp->sum_bytes_left -= sizeof(int32_t);
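/*
 * Record the new inode block's address in the segment summary.
 * Inode block addresses are stored at the tail of the summary
 * block, filling backwards toward the FINFO entries.
 */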
853 ndx = fs->lfs_sumsize / sizeof(int32_t) -
854 sp->ninodes / INOPB(fs) - 1;
855 ((int32_t *)(sp->segsum))[ndx] = daddr;
856 }
857
858 /* Update the inode times and copy the inode onto the inode page. */
859 TIMEVAL_TO_TIMESPEC(&time, &ts);
860 /* XXX kludge --- don't redirty the ifile just to put times on it */
861 if (ip->i_number != LFS_IFILE_INUM)
862 LFS_ITIMES(ip, &ts, &ts, &ts);
863
864 /*
865 * If this is the Ifile, and we've already written the Ifile in this
866 * partial segment, just overwrite it (it's not on disk yet) and
867 * continue.
868 *
869 * XXX we know that the bp that we get the second time around has
870 * already been gathered.
871 */
872 if (ip->i_number == LFS_IFILE_INUM && sp->idp) {
873 *(sp->idp) = *ip->i_din.ffs1_din;
874 ip->i_lfs_osize = ip->i_size;
875 return 0;
876 }
877
878 bp = sp->ibp;
879 cdp = ((struct ufs1_dinode *)bp->b_data) + (sp->ninodes % INOPB(fs));
880 *cdp = *ip->i_din.ffs1_din;
881 #ifdef LFS_IFILE_FRAG_ADDRESSING
882 if (fs->lfs_version > 1)
883 fsb = (sp->ninodes % INOPB(fs)) / INOPF(fs);
884 #endif
885
886 /*
887 * If we are cleaning, ensure that we don't write UNWRITTEN disk
888 * addresses to disk; possibly revert the inode size.
889 * XXX By not writing these blocks, we are making the lfs_avail
890 * XXX count on disk wrong by the same amount. We should be
891 * XXX able to "borrow" from lfs_avail and return it after the
892 * XXX Ifile is written. See also in lfs_writeseg.
893 */
894 if (ip->i_lfs_effnblks != ip->i_ffs1_blocks) {
895 cdp->di_size = ip->i_lfs_osize;
896 #ifdef DEBUG_LFS
897 printf("lfs_writeinode: cleansing ino %d (%d != %d)\n",
898 ip->i_number, ip->i_lfs_effnblks, ip->i_ffs1_blocks);
899 #endif
900 for (daddrp = cdp->di_db; daddrp < cdp->di_ib + NIADDR;
901 daddrp++) {
902 if (*daddrp == UNWRITTEN) {
903 #ifdef DEBUG_LFS
904 printf("lfs_writeinode: wiping UNWRITTEN\n");
905 #endif
906 *daddrp = 0;
907 }
908 }
909 } else {
910 /* If all blocks are going to disk, update the "size on disk" */
911 ip->i_lfs_osize = ip->i_size;
912 }
913
914 if (ip->i_flag & IN_CLEANING)
915 LFS_CLR_UINO(ip, IN_CLEANING);
916 else {
917 /* XXX IN_ALLMOD */
918 LFS_CLR_UINO(ip, IN_ACCESSED | IN_ACCESS | IN_CHANGE |
919 IN_UPDATE);
920 if (ip->i_lfs_effnblks == ip->i_ffs1_blocks)
921 LFS_CLR_UINO(ip, IN_MODIFIED);
922 #ifdef DEBUG_LFS
923 else
924 printf("lfs_writeinode: ino %d: real blks=%d, "
925 "eff=%d\n", ip->i_number, ip->i_ffs1_blocks,
926 ip->i_lfs_effnblks);
927 #endif
928 }
929
930 if (ip->i_number == LFS_IFILE_INUM) /* We know sp->idp == NULL */
931 sp->idp = ((struct ufs1_dinode *)bp->b_data) +
932 (sp->ninodes % INOPB(fs));
933 if (gotblk) {
934 LFS_LOCK_BUF(bp);
935 brelse(bp);
936 }
937
938 /* Increment inode count in segment summary block. */
939 ++((SEGSUM *)(sp->segsum))->ss_ninos;
940
941 /* If this page is full, set flag to allocate a new page. */
942 if (++sp->ninodes % INOPB(fs) == 0)
943 sp->ibp = NULL;
944
945 /*
946 * If updating the ifile, update the super-block. Update the disk
947 * address and access times for this inode in the ifile.
948 */
949 ino = ip->i_number;
950 if (ino == LFS_IFILE_INUM) {
951 daddr = fs->lfs_idaddr;
952 fs->lfs_idaddr = dbtofsb(fs, bp->b_blkno);
953 } else {
954 LFS_IENTRY(ifp, fs, ino, ibp);
955 daddr = ifp->if_daddr;
956 ifp->if_daddr = dbtofsb(fs, bp->b_blkno) + fsb;
957 #ifdef LFS_DEBUG_NEXTFREE
958 if (ino > 3 && ifp->if_nextfree) {
959 vprint("lfs_writeinode",ITOV(ip));
960 printf("lfs_writeinode: updating free ino %d\n",
961 ip->i_number);
962 }
963 #endif
964 error = LFS_BWRITE_LOG(ibp); /* Ifile */
965 }
966
967 /*
968 * The inode's last address should not be in the current partial
969 * segment, except under exceptional circumstances (lfs_writevnodes
970 * had to start over, and in the meantime more blocks were written
971 * to a vnode). Both inodes will be accounted to this segment
972 * in lfs_writeseg so we need to subtract the earlier version
973 * here anyway. The segment count can temporarily dip below
974 * zero here; keep track of how many duplicates we have in
975 * "dupino" so we don't panic below.
976 */
977 if (daddr >= fs->lfs_lastpseg && daddr <= dbtofsb(fs, bp->b_blkno)) {
978 ++sp->ndupino;
979 printf("lfs_writeinode: last inode addr in current pseg "
980 "(ino %d daddr 0x%llx) ndupino=%d\n", ino,
981 (long long)daddr, sp->ndupino);
982 }
983 /*
984 * Account the inode: it no longer belongs to its former segment,
985 * though it will not belong to the new segment until that segment
986 * is actually written.
987 */
988 if (daddr != LFS_UNUSED_DADDR) {
989 u_int32_t oldsn = dtosn(fs, daddr);
990 #ifdef DIAGNOSTIC
991 int ndupino = (sp->seg_number == oldsn) ? sp->ndupino : 0;
992 #endif
993 LFS_SEGENTRY(sup, fs, oldsn, bp);
994 #ifdef DIAGNOSTIC
995 if (sup->su_nbytes +
996 sizeof (struct ufs1_dinode) * ndupino
997 < sizeof (struct ufs1_dinode)) {
998 printf("lfs_writeinode: negative bytes "
999 "(segment %" PRIu32 " short by %d, "
1000 "oldsn=%" PRIu32 ", cursn=%" PRIu32
1001 ", daddr=%" PRId64 ", su_nbytes=%u, "
1002 "ndupino=%d)\n",
1003 dtosn(fs, daddr),
1004 (int)sizeof (struct ufs1_dinode) *
1005 (1 - sp->ndupino) - sup->su_nbytes,
1006 oldsn, sp->seg_number, daddr,
1007 (unsigned int)sup->su_nbytes,
1008 sp->ndupino);
1009 panic("lfs_writeinode: negative bytes");
1010 sup->su_nbytes = sizeof (struct ufs1_dinode);
1011 }
1012 #endif
1013 #ifdef DEBUG_SU_NBYTES
1014 printf("seg %d -= %d for ino %d inode\n",
1015 dtosn(fs, daddr), sizeof (struct ufs1_dinode), ino);
1016 #endif
1017 sup->su_nbytes -= sizeof (struct ufs1_dinode);
1018 redo_ifile =
1019 (ino == LFS_IFILE_INUM && !(bp->b_flags & B_GATHERED));
1020 if (redo_ifile)
1021 fs->lfs_flags |= LFS_IFDIRTY;
1022 LFS_WRITESEGENTRY(sup, fs, oldsn, bp); /* Ifile */
1023 }
1024 return (redo_ifile);
1025 }
1026
1027 int
1028 lfs_gatherblock(struct segment *sp, struct buf *bp, int *sptr)
1029 {
1030 struct lfs *fs;
1031 int version;
1032 int j, blksinblk;
1033
1034 /*
1035 * If full, finish this segment. We may be doing I/O, so
1036 * release and reacquire the splbio().
1037 */
1038 #ifdef DIAGNOSTIC
1039 if (sp->vp == NULL)
1040 panic ("lfs_gatherblock: Null vp in segment");
1041 #endif
1042 fs = sp->fs;
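/*
 * A buffer may span several logical blocks (e.g. a cluster from
 * lfs_gop_write); each one needs its own block-address slot in the
 * FINFO and the segment summary.
 */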
1043 blksinblk = howmany(bp->b_bcount, fs->lfs_bsize);
1044 if (sp->sum_bytes_left < sizeof(int32_t) * blksinblk ||
1045 sp->seg_bytes_left < bp->b_bcount) {
1046 if (sptr)
1047 splx(*sptr);
1048 lfs_updatemeta(sp);
1049
1050 version = sp->fip->fi_version;
1051 (void) lfs_writeseg(fs, sp);
1052
1053 sp->fip->fi_version = version;
1054 sp->fip->fi_ino = VTOI(sp->vp)->i_number;
1055 /* Add the current file to the segment summary. */
1056 ++((SEGSUM *)(sp->segsum))->ss_nfinfo;
1057 sp->sum_bytes_left -= FINFOSIZE;
1058
1059 if (sptr)
1060 *sptr = splbio();
1061 return (1);
1062 }
1063
1064 #ifdef DEBUG
1065 if (bp->b_flags & B_GATHERED) {
1066 printf("lfs_gatherblock: already gathered! Ino %d,"
1067 " lbn %" PRId64 "\n",
1068 sp->fip->fi_ino, bp->b_lblkno);
1069 return (0);
1070 }
1071 #endif
1072 /* Insert into the buffer list, update the FINFO block. */
1073 bp->b_flags |= B_GATHERED;
1074
1075 *sp->cbpp++ = bp;
1076 for (j = 0; j < blksinblk; j++)
1077 sp->fip->fi_blocks[sp->fip->fi_nblocks++] = bp->b_lblkno + j;
1078
1079 sp->sum_bytes_left -= sizeof(int32_t) * blksinblk;
1080 sp->seg_bytes_left -= bp->b_bcount;
1081 return (0);
1082 }
1083
1084 int
1085 lfs_gather(struct lfs *fs, struct segment *sp, struct vnode *vp,
1086 int (*match)(struct lfs *, struct buf *))
1087 {
1088 struct buf *bp, *nbp;
1089 int s, count = 0;
1090
1091 KASSERT(sp->vp == NULL);
1092 sp->vp = vp;
1093 s = splbio();
1094
1095 #ifndef LFS_NO_BACKBUF_HACK
1096 /* This is a hack to see if ordering the blocks in LFS makes a difference. */
1097 # define BUF_OFFSET \
1098 (((caddr_t)&LIST_NEXT(bp, b_vnbufs)) - (caddr_t)bp)
1099 # define BACK_BUF(BP) \
1100 ((struct buf *)(((caddr_t)(BP)->b_vnbufs.le_prev) - BUF_OFFSET))
1101 # define BEG_OF_LIST \
1102 ((struct buf *)(((caddr_t)&LIST_FIRST(&vp->v_dirtyblkhd)) - BUF_OFFSET))
1103
1104 loop:
1105 /* Find last buffer. */
1106 for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
1107 bp && LIST_NEXT(bp, b_vnbufs) != NULL;
1108 bp = LIST_NEXT(bp, b_vnbufs))
1109 /* nothing */;
1110 for (; bp && bp != BEG_OF_LIST; bp = nbp) {
1111 nbp = BACK_BUF(bp);
1112 #else /* LFS_NO_BACKBUF_HACK */
1113 loop:
1114 for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
1115 nbp = LIST_NEXT(bp, b_vnbufs);
1116 #endif /* LFS_NO_BACKBUF_HACK */
1117 if ((bp->b_flags & (B_BUSY|B_GATHERED)) || !match(fs, bp)) {
1118 #ifdef DEBUG_LFS
1119 if (vp == fs->lfs_ivnode &&
1120 (bp->b_flags & (B_BUSY|B_GATHERED)) == B_BUSY)
1121 printf("(%" PRId64 ":%lx)",
1122 bp->b_lblkno, bp->b_flags);
1123 #endif
1124 continue;
1125 }
1126 if (vp->v_type == VBLK) {
1127 /* For block devices, just write the blocks. */
1128 /* XXX Do we really need to even do this? */
1129 #ifdef DEBUG_LFS
1130 if (count == 0)
1131 printf("BLK(");
1132 printf(".");
1133 #endif
1134 /*
1135 * Get the block before bwrite,
1136 * so we don't corrupt the free list
1137 */
1138 bp->b_flags |= B_BUSY;
1139 bremfree(bp);
1140 bwrite(bp);
1141 } else {
1142 #ifdef DIAGNOSTIC
1143 # ifdef LFS_USE_B_INVAL
1144 if ((bp->b_flags & (B_CALL|B_INVAL)) == B_INVAL) {
1145 printf("lfs_gather: lbn %" PRId64 " is "
1146 "B_INVAL\n", bp->b_lblkno);
1147 VOP_PRINT(bp->b_vp);
1148 }
1149 # endif /* LFS_USE_B_INVAL */
1150 if (!(bp->b_flags & B_DELWRI))
1151 panic("lfs_gather: bp not B_DELWRI");
1152 if (!(bp->b_flags & B_LOCKED)) {
1153 printf("lfs_gather: lbn %" PRId64 " blk "
1154 "%" PRId64 " not B_LOCKED\n",
1155 bp->b_lblkno,
1156 dbtofsb(fs, bp->b_blkno));
1157 VOP_PRINT(bp->b_vp);
1158 panic("lfs_gather: bp not B_LOCKED");
1159 }
1160 #endif
1161 if (lfs_gatherblock(sp, bp, &s)) {
1162 goto loop;
1163 }
1164 }
1165 count++;
1166 }
1167 splx(s);
1168 #ifdef DEBUG_LFS
1169 if (vp->v_type == VBLK && count)
1170 printf(")\n");
1171 #endif
1172 lfs_updatemeta(sp);
1173 KASSERT(sp->vp == vp);
1174 sp->vp = NULL;
1175 return count;
1176 }
1177
1178 #ifdef DEBUG
1179 # define DEBUG_OOFF(n) do { \
1180 if (ooff == 0) { \
1181 printf("lfs_updatemeta[%d]: warning: writing " \
1182 "ino %d lbn %" PRId64 " at 0x%" PRIx32 \
1183 ", was 0x0 (or %" PRId64 ")\n", \
1184 (n), ip->i_number, lbn, ndaddr, daddr); \
1185 } \
1186 } while (0)
1187 #else
1188 # define DEBUG_OOFF(n)
1189 #endif
1190
1191 /*
1192 * Change the given block's address to ndaddr, finding its previous
1193 * location using ufs_bmaparray().
1194 *
1195 * Account for this change in the segment table.
1196 */
1197 void
1198 lfs_update_single(struct lfs *fs, struct segment *sp, daddr_t lbn,
1199 int32_t ndaddr, int size)
1200 {
1201 SEGUSE *sup;
1202 struct buf *bp;
1203 struct indir a[NIADDR + 2], *ap;
1204 struct inode *ip;
1205 struct vnode *vp;
1206 daddr_t daddr, ooff;
1207 int num, error;
1208 int bb, osize, obb;
1209
1210 KASSERT(sp->vp != NULL);
1211 vp = sp->vp;
1212 ip = VTOI(vp);
1213
1214 error = ufs_bmaparray(vp, lbn, &daddr, a, &num, NULL, NULL);
1215 if (error)
1216 panic("lfs_updatemeta: ufs_bmaparray returned %d", error);
1217
1218 KASSERT(daddr <= LFS_MAX_DADDR);
1219 if (daddr > 0)
1220 daddr = dbtofsb(fs, daddr);
1221
1222 bb = fragstofsb(fs, numfrags(fs, size));
1223 switch (num) {
1224 case 0:
1225 ooff = ip->i_ffs1_db[lbn];
1226 DEBUG_OOFF(0);
1227 if (ooff == UNWRITTEN)
1228 ip->i_ffs1_blocks += bb;
1229 else {
1230 /* possible fragment truncation or extension */
1231 obb = btofsb(fs, ip->i_lfs_fragsize[lbn]);
1232 ip->i_ffs1_blocks += (bb - obb);
1233 }
1234 ip->i_ffs1_db[lbn] = ndaddr;
1235 break;
1236 case 1:
1237 ooff = ip->i_ffs1_ib[a[0].in_off];
1238 DEBUG_OOFF(1);
1239 if (ooff == UNWRITTEN)
1240 ip->i_ffs1_blocks += bb;
1241 ip->i_ffs1_ib[a[0].in_off] = ndaddr;
1242 break;
1243 default:
1244 ap = &a[num - 1];
1245 if (bread(vp, ap->in_lbn, fs->lfs_bsize, NOCRED, &bp))
1246 panic("lfs_updatemeta: bread bno %" PRId64,
1247 ap->in_lbn);
1248
1249 /* XXX ondisk32 */
1250 ooff = ((int32_t *)bp->b_data)[ap->in_off];
1251 DEBUG_OOFF(num);
1252 if (ooff == UNWRITTEN)
1253 ip->i_ffs1_blocks += bb;
1254 /* XXX ondisk32 */
1255 ((int32_t *)bp->b_data)[ap->in_off] = ndaddr;
1256 (void) VOP_BWRITE(bp);
1257 }
1258
1259 /*
1260 * Though we'd rather it couldn't, this *can* happen right now
1261 * if cleaning blocks and regular blocks coexist.
1262 */
1263 /* KASSERT(daddr < fs->lfs_lastpseg || daddr > ndaddr); */
1264
1265 /*
1266 * Update segment usage information, based on old size
1267 * and location.
1268 */
1269 if (daddr > 0) {
1270 u_int32_t oldsn = dtosn(fs, daddr);
1271 #ifdef DIAGNOSTIC
1272 int ndupino = (sp->seg_number == oldsn) ?
1273 sp->ndupino : 0;
1274 #endif
1275 KASSERT(oldsn >= 0 && oldsn < fs->lfs_nseg);
1276 if (lbn >= 0 && lbn < NDADDR)
1277 osize = ip->i_lfs_fragsize[lbn];
1278 else
1279 osize = fs->lfs_bsize;
1280 LFS_SEGENTRY(sup, fs, oldsn, bp);
1281 #ifdef DIAGNOSTIC
1282 if (sup->su_nbytes + sizeof (struct ufs1_dinode) * ndupino
1283 < osize) {
1284 printf("lfs_updatemeta: negative bytes "
1285 "(segment %" PRIu32 " short by %" PRId64
1286 ")\n", dtosn(fs, daddr),
1287 (int64_t)osize -
1288 (sizeof (struct ufs1_dinode) * sp->ndupino +
1289 sup->su_nbytes));
1290 printf("lfs_updatemeta: ino %d, lbn %" PRId64
1291 ", addr = 0x%" PRIx64 "\n",
1292 VTOI(sp->vp)->i_number, lbn, daddr);
1293 printf("lfs_updatemeta: ndupino=%d\n", ndupino);
1294 panic("lfs_updatemeta: negative bytes");
1295 sup->su_nbytes = osize -
1296 sizeof (struct ufs1_dinode) * sp->ndupino;
1297 }
1298 #endif
1299 #ifdef DEBUG_SU_NBYTES
1300 printf("seg %" PRIu32 " -= %d for ino %d lbn %" PRId64
1301 " db 0x%" PRIx64 "\n",
1302 dtosn(fs, daddr), osize,
1303 VTOI(sp->vp)->i_number, lbn, daddr);
1304 #endif
1305 sup->su_nbytes -= osize;
1306 if (!(bp->b_flags & B_GATHERED))
1307 fs->lfs_flags |= LFS_IFDIRTY;
1308 LFS_WRITESEGENTRY(sup, fs, oldsn, bp);
1309 }
1310 /*
1311 * Now that this block has a new address, and its old
1312 * segment no longer owns it, we can forget about its
1313 * old size.
1314 */
1315 if (lbn >= 0 && lbn < NDADDR)
1316 ip->i_lfs_fragsize[lbn] = size;
1317 }
1318
1319 /*
1320 * Update the metadata that points to the blocks listed in the FINFO
1321 * array.
1322 */
1323 void
1324 lfs_updatemeta(struct segment *sp)
1325 {
1326 struct buf *sbp;
1327 struct lfs *fs;
1328 struct vnode *vp;
1329 daddr_t lbn;
1330 int i, nblocks, num;
1331 int bb;
1332 int bytesleft, size;
1333
1334 vp = sp->vp;
1335 nblocks = &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp;
1336 KASSERT(nblocks >= 0);
1337 KASSERT(vp != NULL);
1338 if (nblocks == 0)
1339 return;
1340
1341 /*
1342 * This count may be high due to oversize blocks from lfs_gop_write.
1343 * Correct for this. (XXX we should be able to keep track of these.)
1344 */
1345 fs = sp->fs;
1346 for (i = 0; i < nblocks; i++) {
1347 if (sp->start_bpp[i] == NULL) {
1348 printf("nblocks = %d, not %d\n", i, nblocks);
1349 nblocks = i;
1350 break;
1351 }
1352 num = howmany(sp->start_bpp[i]->b_bcount, fs->lfs_bsize);
1353 KASSERT(sp->start_bpp[i]->b_lblkno >= 0 || num == 1);
1354 nblocks -= num - 1;
1355 }
1356
1357 KASSERT(vp->v_type == VREG ||
1358 nblocks == &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp);
1359 KASSERT(nblocks == sp->cbpp - sp->start_bpp);
1360
1361 /*
1362 * Sort the blocks.
1363 *
1364 * We have to sort even if the blocks come from the
1365 * cleaner, because there might be other pending blocks on the
1366 * same inode...and if we don't sort, and there are fragments
1367 * present, blocks may be written in the wrong place.
1368 */
1369 lfs_shellsort(sp->start_bpp, sp->start_lbp, nblocks, fs->lfs_bsize);
1370
1371 /*
1372 * Record the length of the last block in case it's a fragment.
1373 * If there are indirect blocks present, they sort last. An
1374 * indirect block will be lfs_bsize and its presence indicates
1375 * that you cannot have fragments.
1376 *
1377 * XXX This last is a lie. A cleaned fragment can coexist with
1378 * XXX a later indirect block. This will continue to be
1379 * XXX true until lfs_markv is fixed to do everything with
1380 * XXX fake blocks (including fake inodes and fake indirect blocks).
1381 */
1382 sp->fip->fi_lastlength = ((sp->start_bpp[nblocks - 1]->b_bcount - 1) &
1383 fs->lfs_bmask) + 1;
1384
1385 /*
1386 * Assign disk addresses, and update references to the logical
1387 * block and the segment usage information.
1388 */
1389 for (i = nblocks; i--; ++sp->start_bpp) {
1390 sbp = *sp->start_bpp;
1391 lbn = *sp->start_lbp;
1392 KASSERT(sbp->b_lblkno == lbn);
1393
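/* The block goes at the current write pointer for this partial segment. */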
1394 sbp->b_blkno = fsbtodb(fs, fs->lfs_offset);
1395
1396 /*
1397 * If we write a frag in the wrong place, the cleaner won't
1398 * be able to correctly identify its size later, and the
1399 * segment will be uncleanable. (Even worse, it will assume
1400 * that the indirect block that actually ends the list
1401 * is of a smaller size!)
1402 */
1403 if ((sbp->b_bcount & fs->lfs_bmask) && i != 0)
1404 panic("lfs_updatemeta: fragment is not last block");
1405
1406 /*
1407 * For each subblock in this possibly oversized block,
1408 * update its address on disk.
1409 */
1410 KASSERT(lbn >= 0 || sbp->b_bcount == fs->lfs_bsize);
1411 KASSERT(vp == sbp->b_vp);
1412 for (bytesleft = sbp->b_bcount; bytesleft > 0;
1413 bytesleft -= fs->lfs_bsize) {
1414 size = MIN(bytesleft, fs->lfs_bsize);
1415 bb = fragstofsb(fs, numfrags(fs, size));
1416 lbn = *sp->start_lbp++;
1417 lfs_update_single(fs, sp, lbn, fs->lfs_offset, size);
1418 fs->lfs_offset += bb;
1419 }
1420
1421 }
1422 }
1423
1424 /*
1425 * Start a new partial segment.
1426 *
1427 * Return 1 if we advanced to a new segment;
1428 * otherwise, return 0.
1429 */
1430 int
1431 lfs_initseg(struct lfs *fs)
1432 {
1433 struct segment *sp = fs->lfs_sp;
1434 SEGSUM *ssp;
1435 struct buf *sbp; /* buffer for SEGSUM */
1436 int repeat = 0; /* return value */
1437
1438 /* Advance to the next segment. */
1439 if (!LFS_PARTIAL_FITS(fs)) {
1440 SEGUSE *sup;
1441 struct buf *bp;
1442
1443 /* lfs_avail eats the remaining space */
1444 fs->lfs_avail -= fs->lfs_fsbpseg - (fs->lfs_offset -
1445 fs->lfs_curseg);
1446 /* Wake up any cleaning procs waiting on this file system. */
1447 wakeup(&lfs_allclean_wakeup);
1448 wakeup(&fs->lfs_nextseg);
1449 lfs_newseg(fs);
1450 repeat = 1;
1451 fs->lfs_offset = fs->lfs_curseg;
1452
1453 sp->seg_number = dtosn(fs, fs->lfs_curseg);
1454 sp->seg_bytes_left = fsbtob(fs, fs->lfs_fsbpseg);
1455
1456 /*
1457 * If the segment contains a superblock, update the offset
1458 * and summary address to skip over it.
1459 */
1460 LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
1461 if (sup->su_flags & SEGUSE_SUPERBLOCK) {
1462 fs->lfs_offset += btofsb(fs, LFS_SBPAD);
1463 sp->seg_bytes_left -= LFS_SBPAD;
1464 }
1465 brelse(bp);
1466 /* Segment zero could also contain the labelpad */
1467 if (fs->lfs_version > 1 && sp->seg_number == 0 &&
1468 fs->lfs_start < btofsb(fs, LFS_LABELPAD)) {
1469 fs->lfs_offset +=
1470 btofsb(fs, LFS_LABELPAD) - fs->lfs_start;
1471 sp->seg_bytes_left -=
1472 LFS_LABELPAD - fsbtob(fs, fs->lfs_start);
1473 }
1474 } else {
1475 sp->seg_number = dtosn(fs, fs->lfs_curseg);
1476 sp->seg_bytes_left = fsbtob(fs, fs->lfs_fsbpseg -
1477 (fs->lfs_offset - fs->lfs_curseg));
1478 }
1479 fs->lfs_lastpseg = fs->lfs_offset;
1480
1481 /* Record first address of this partial segment */
1482 if (sp->seg_flags & SEGM_CLEAN) {
1483 fs->lfs_cleanint[fs->lfs_cleanind] = fs->lfs_offset;
1484 if (++fs->lfs_cleanind >= LFS_MAX_CLEANIND) {
1485 /* "1" is the artificial inc in lfs_seglock */
1486 while (fs->lfs_iocount > 1) {
1487 tsleep(&fs->lfs_iocount, PRIBIO + 1,
1488 "lfs_initseg", 0);
1489 }
1490 fs->lfs_cleanind = 0;
1491 }
1492 }
1493
1494 sp->fs = fs;
1495 sp->ibp = NULL;
1496 sp->idp = NULL;
1497 sp->ninodes = 0;
1498 sp->ndupino = 0;
1499
1500 sp->cbpp = sp->bpp;
1501
1502 /* Get a new buffer for SEGSUM */
1503 sbp = *sp->cbpp = lfs_newbuf(fs, VTOI(fs->lfs_ivnode)->i_devvp,
1504 fsbtodb(fs, fs->lfs_offset), fs->lfs_sumsize, LFS_NB_SUMMARY);
1505
1506 /* ... and enter it into the buffer list. */
1507 *sp->cbpp = sbp;
1508 sp->cbpp++;
1509 fs->lfs_offset += btofsb(fs, fs->lfs_sumsize);
1510
1511 sp->start_bpp = sp->cbpp;
1512
1513 /* Set point to SEGSUM, initialize it. */
1514 ssp = sp->segsum = sbp->b_data;
1515 memset(ssp, 0, fs->lfs_sumsize);
1516 ssp->ss_next = fs->lfs_nextseg;
1517 ssp->ss_nfinfo = ssp->ss_ninos = 0;
1518 ssp->ss_magic = SS_MAGIC;
1519
1520 /* Set pointer to first FINFO, initialize it. */
1521 sp->fip = (struct finfo *)((caddr_t)sp->segsum + SEGSUM_SIZE(fs));
1522 sp->fip->fi_nblocks = 0;
1523 sp->start_lbp = &sp->fip->fi_blocks[0];
1524 sp->fip->fi_lastlength = 0;
1525
1526 sp->seg_bytes_left -= fs->lfs_sumsize;
1527 sp->sum_bytes_left = fs->lfs_sumsize - SEGSUM_SIZE(fs);
1528
1529 return (repeat);
1530 }
1531
1532 /*
1533 * Advance to the next segment to write, and pick a new clean lfs_nextseg.
1534 */
1535 void
1536 lfs_newseg(struct lfs *fs)
1537 {
1538 CLEANERINFO *cip;
1539 SEGUSE *sup;
1540 struct buf *bp;
1541 int curseg, isdirty, sn;
1542
1543 LFS_SEGENTRY(sup, fs, dtosn(fs, fs->lfs_nextseg), bp);
1544 #ifdef DEBUG_SU_NBYTES
1545 printf("lfs_newseg: seg %d := 0 in newseg\n", /* XXXDEBUG */
1546 dtosn(fs, fs->lfs_nextseg)); /* XXXDEBUG */
1547 #endif
1548 sup->su_flags |= SEGUSE_DIRTY | SEGUSE_ACTIVE;
1549 sup->su_nbytes = 0;
1550 sup->su_nsums = 0;
1551 sup->su_ninos = 0;
1552 LFS_WRITESEGENTRY(sup, fs, dtosn(fs, fs->lfs_nextseg), bp);
1553
1554 LFS_CLEANERINFO(cip, fs, bp);
1555 --cip->clean;
1556 ++cip->dirty;
1557 fs->lfs_nclean = cip->clean;
1558 LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);
1559
1560 fs->lfs_lastseg = fs->lfs_curseg;
1561 fs->lfs_curseg = fs->lfs_nextseg;
1562 for (sn = curseg = dtosn(fs, fs->lfs_curseg) + fs->lfs_interleave;;) {
1563 sn = (sn + 1) % fs->lfs_nseg;
1564 if (sn == curseg)
1565 panic("lfs_nextseg: no clean segments");
1566 LFS_SEGENTRY(sup, fs, sn, bp);
1567 isdirty = sup->su_flags & SEGUSE_DIRTY;
1568 /* Check SEGUSE_EMPTY as we go along */
1569 if (isdirty && sup->su_nbytes == 0 &&
1570 !(sup->su_flags & SEGUSE_EMPTY))
1571 LFS_WRITESEGENTRY(sup, fs, sn, bp);
1572 else
1573 brelse(bp);
1574
1575 if (!isdirty)
1576 break;
1577 }
1578
1579 ++fs->lfs_nactive;
1580 fs->lfs_nextseg = sntod(fs, sn);
1581 if (lfs_dostats) {
1582 ++lfs_stats.segsused;
1583 }
1584 }
1585
1586 static struct buf *
1587 lfs_newclusterbuf(struct lfs *fs, struct vnode *vp, daddr_t addr, int n)
1588 {
1589 struct lfs_cluster *cl;
1590 struct buf **bpp, *bp;
1591 int s;
1592
1593 cl = (struct lfs_cluster *)pool_get(&fs->lfs_clpool, PR_WAITOK);
1594 bpp = (struct buf **)pool_get(&fs->lfs_bpppool, PR_WAITOK);
1595 memset(cl, 0, sizeof(*cl));
1596 cl->fs = fs;
1597 cl->bpp = bpp;
1598 cl->bufcount = 0;
1599 cl->bufsize = 0;
1600
1601 /* If this segment is being written synchronously, note that */
1602 if (fs->lfs_sp->seg_flags & SEGM_SYNC) {
1603 cl->flags |= LFS_CL_SYNC;
1604 cl->seg = fs->lfs_sp;
1605 ++cl->seg->seg_iocount;
1606 /* printf("+ %x => %d\n", cl->seg, cl->seg->seg_iocount); */
1607 }
1608
1609 /* Get an empty buffer header, or maybe one with something on it */
1610 s = splbio();
1611 bp = pool_get(&bufpool, PR_WAITOK); /* XXX should use lfs_malloc? */
1612 splx(s);
1613 memset(bp, 0, sizeof(*bp));
1614 BUF_INIT(bp);
1615
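/*
 * Set up the cluster header: a call-back buffer aimed at the device,
 * with the lfs_cluster bookkeeping hung off b_saveaddr.
 */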
1616 bp->b_flags = B_BUSY | B_CALL;
1617 bp->b_dev = NODEV;
1618 bp->b_blkno = bp->b_lblkno = addr;
1619 bp->b_iodone = lfs_cluster_callback;
1620 bp->b_saveaddr = (caddr_t)cl;
1621 bp->b_vp = vp;
1622
1623 return bp;
1624 }
1625
1626 int
1627 lfs_writeseg(struct lfs *fs, struct segment *sp)
1628 {
1629 struct buf **bpp, *bp, *cbp, *newbp;
1630 SEGUSE *sup;
1631 SEGSUM *ssp;
1632 dev_t i_dev;
1633 char *datap, *dp;
1634 int i, s;
1635 int do_again, nblocks, byteoffset;
1636 size_t el_size;
1637 struct lfs_cluster *cl;
1638 int (*strategy)(void *);
1639 struct vop_strategy_args vop_strategy_a;
1640 u_short ninos;
1641 struct vnode *devvp;
1642 char *p = NULL;
1643 struct vnode *vp;
1644 int32_t *daddrp; /* XXX ondisk32 */
1645 int changed;
1646 #if defined(DEBUG) && defined(LFS_PROPELLER)
1647 static int propeller;
1648 char propstring[4] = "-\\|/";
1649
1650 printf("%c\b",propstring[propeller++]);
1651 if (propeller == 4)
1652 propeller = 0;
1653 #endif
1654
1655 /*
1656 * If there are no buffers other than the segment summary to write
1657 * and it is not a checkpoint, don't do anything. On a checkpoint,
1658 * even if there aren't any buffers, you need to write the superblock.
1659 */
1660 if ((nblocks = sp->cbpp - sp->bpp) == 1)
1661 return (0);
1662
1663 i_dev = VTOI(fs->lfs_ivnode)->i_dev;
1664 devvp = VTOI(fs->lfs_ivnode)->i_devvp;
1665
1666 /* Update the segment usage information. */
1667 LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
1668
1669 /* Loop through all blocks, except the segment summary. */
1670 for (bpp = sp->bpp; ++bpp < sp->cbpp; ) {
1671 if ((*bpp)->b_vp != devvp) {
1672 sup->su_nbytes += (*bpp)->b_bcount;
1673 #ifdef DEBUG_SU_NBYTES
1674 printf("seg %" PRIu32 " += %ld for ino %d lbn %" PRId64
1675 " db 0x%" PRIx64 "\n", sp->seg_number, (*bpp)->b_bcount,
1676 VTOI((*bpp)->b_vp)->i_number, (*bpp)->b_lblkno,
1677 (*bpp)->b_blkno);
1678 #endif
1679 }
1680 }
1681
1682 ssp = (SEGSUM *)sp->segsum;
1683
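/* Round the inode count up to whole inode blocks. */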
1684 ninos = (ssp->ss_ninos + INOPB(fs) - 1) / INOPB(fs);
1685 #ifdef DEBUG_SU_NBYTES
1686 printf("seg %d += %d for %d inodes\n", /* XXXDEBUG */
1687 sp->seg_number, ssp->ss_ninos * sizeof (struct ufs1_dinode),
1688 ssp->ss_ninos);
1689 #endif
1690 sup->su_nbytes += ssp->ss_ninos * sizeof (struct ufs1_dinode);
1691 /* sup->su_nbytes += fs->lfs_sumsize; */
1692 if (fs->lfs_version == 1)
1693 sup->su_olastmod = time.tv_sec;
1694 else
1695 sup->su_lastmod = time.tv_sec;
1696 sup->su_ninos += ninos;
1697 ++sup->su_nsums;
1698 fs->lfs_dmeta += (btofsb(fs, fs->lfs_sumsize) + btofsb(fs, ninos *
1699 fs->lfs_ibsize));
1700 fs->lfs_avail -= btofsb(fs, fs->lfs_sumsize);
1701
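/*
 * Note whether the Ifile block holding this segment's SEGUSE entry
 * was gathered into this partial segment; if it was not, it will
 * still need to be written.
 */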
1702 do_again = !(bp->b_flags & B_GATHERED);
1703 LFS_WRITESEGENTRY(sup, fs, sp->seg_number, bp); /* Ifile */
1704
1705 /*
1706 * Mark blocks B_BUSY, to prevent them from being changed between
1707 * the checksum computation and the actual write.
1708 *
1709 * If we are cleaning, check indirect blocks for UNWRITTEN, and if
1710 * there are any, replace them with copies that have UNASSIGNED
1711 * instead.
1712 */
1713 for (bpp = sp->bpp, i = nblocks - 1; i--;) {
1714 ++bpp;
1715 bp = *bpp;
1716 if (bp->b_flags & B_CALL) { /* UBC or malloced buffer */
1717 bp->b_flags |= B_BUSY;
1718 continue;
1719 }
1720 again:
1721 s = splbio();
1722 if (bp->b_flags & B_BUSY) {
1723 #ifdef DEBUG
1724 printf("lfs_writeseg: avoiding potential data summary "
1725 "corruption for ino %d, lbn %" PRId64 "\n",
1726 VTOI(bp->b_vp)->i_number, bp->b_lblkno);
1727 #endif
1728 bp->b_flags |= B_WANTED;
1729 tsleep(bp, (PRIBIO + 1), "lfs_writeseg", 0);
1730 splx(s);
1731 goto again;
1732 }
1733 bp->b_flags |= B_BUSY;
1734 splx(s);
1735 /*
1736 * Check and replace indirect block UNWRITTEN bogosity.
1737 * XXX See comment in lfs_writefile.
1738 */
1739 if (bp->b_lblkno < 0 && bp->b_vp != devvp && bp->b_vp &&
1740 VTOI(bp->b_vp)->i_ffs1_blocks !=
1741 VTOI(bp->b_vp)->i_lfs_effnblks) {
1742 #ifdef DEBUG_LFS
1743 printf("lfs_writeseg: cleansing ino %d (%d != %d)\n",
1744 VTOI(bp->b_vp)->i_number,
1745 VTOI(bp->b_vp)->i_lfs_effnblks,
1746 VTOI(bp->b_vp)->i_ffs1_blocks);
1747 #endif
1748 /* Make a copy we'll make changes to */
1749 newbp = lfs_newbuf(fs, bp->b_vp, bp->b_lblkno,
1750 bp->b_bcount, LFS_NB_IBLOCK);
1751 newbp->b_blkno = bp->b_blkno;
1752 memcpy(newbp->b_data, bp->b_data,
1753 newbp->b_bcount);
1754
1755 changed = 0;
1756 /* XXX ondisk32 */
1757 for (daddrp = (int32_t *)(newbp->b_data);
1758 daddrp < (int32_t *)(newbp->b_data +
1759 newbp->b_bcount); daddrp++) {
1760 if (*daddrp == UNWRITTEN) {
1761 #ifdef DEBUG_LFS
1762 off_t doff;
1763 int32_t ioff;
1764
1765 ioff =
1766 daddrp - (int32_t *)(newbp->b_data);
1767 doff =
1768 (-bp->b_lblkno + ioff) * fs->lfs_bsize;
1769 printf("ino %d lbn %" PRId64
1770 " entry %d off %" PRIx64 "\n",
1771 VTOI(bp->b_vp)->i_number,
1772 bp->b_lblkno, ioff, doff);
1773 if (bp->b_vp->v_type == VREG) {
1774 /*
1775 * What is up with this page?
1776 */
1777 struct vm_page *pg;
1778 for (; doff / fs->lfs_bsize == (-bp->b_lblkno + ioff);
1779 doff += PAGE_SIZE) {
1780 pg = uvm_pagelookup(&bp->b_vp->v_uobj, doff);
1781 if (pg == NULL)
1782 printf(" page at %" PRIx64 " is NULL\n", doff);
1783 else
1784 printf(" page at %" PRIx64
1785 " flags 0x%x pqflags 0x%x\n",
1786 doff, pg->flags, pg->pqflags);
1787 }
1788 }
1789 #endif /* DEBUG_LFS */
1790 ++changed;
1791 *daddrp = 0;
1792 }
1793 }
1794 /*
1795 * Get rid of the old buffer. Don't mark it clean,
1796 * though, if it still has dirty data on it.
1797 */
1798 if (changed) {
1799 #ifdef DEBUG_LFS
1800 printf("lfs_writeseg: replacing UNWRITTEN(%d):"
1801 " bp = %p newbp = %p\n", changed, bp,
1802 newbp);
1803 #endif
1804 *bpp = newbp;
1805 bp->b_flags &= ~(B_ERROR | B_GATHERED);
1806 if (bp->b_flags & B_CALL) {
1807 printf("lfs_writeseg: "
1808 "indir bp should not be B_CALL\n");
1809 s = splbio();
1810 biodone(bp);
1811 splx(s);
1812 bp = NULL;
1813 } else {
1814 /* Still on free list, leave it there */
1815 s = splbio();
1816 bp->b_flags &= ~B_BUSY;
1817 if (bp->b_flags & B_WANTED)
1818 wakeup(bp);
1819 splx(s);
1820 /*
1821 * We have to re-decrement lfs_avail
1822 * since this block is going to come
1823 * back around to us in the next
1824 * segment.
1825 */
1826 fs->lfs_avail -=
1827 btofsb(fs, bp->b_bcount);
1828 }
1829 } else {
1830 lfs_freebuf(fs, newbp);
1831 }
1832 }
1833 }
1834 /*
1835 * Compute checksum across data and then across summary; the first
1836 * block (the summary block) is skipped. Set the create time here
1837 * so that it's guaranteed to be later than the inode mod times.
1838 *
1839 * XXX
1840 * Fix this to do it inline, instead of malloc/copy.
1841 */
1842 datap = dp = pool_get(&fs->lfs_bpppool, PR_WAITOK);
1843 if (fs->lfs_version == 1)
1844 el_size = sizeof(u_long);
1845 else
1846 el_size = sizeof(u_int32_t);
1847 for (bpp = sp->bpp, i = nblocks - 1; i--; ) {
1848 ++bpp;
1849 /* Loop through gop_write cluster blocks */
1850 for (byteoffset = 0; byteoffset < (*bpp)->b_bcount;
1851 byteoffset += fs->lfs_bsize) {
1852 #ifdef LFS_USE_B_INVAL
1853 if (((*bpp)->b_flags & (B_CALL | B_INVAL)) ==
1854 (B_CALL | B_INVAL)) {
1855 if (copyin((caddr_t)(*bpp)->b_saveaddr +
1856 byteoffset, dp, el_size)) {
1857 panic("lfs_writeseg: copyin failed [1]:"
1858 " ino %d blk %" PRId64,
1859 VTOI((*bpp)->b_vp)->i_number,
1860 (*bpp)->b_lblkno);
1861 }
1862 } else
1863 #endif /* LFS_USE_B_INVAL */
1864 {
1865 memcpy(dp, (*bpp)->b_data + byteoffset,
1866 el_size);
1867 }
1868 dp += el_size;
1869 }
1870 }
1871 if (fs->lfs_version == 1)
1872 ssp->ss_ocreate = time.tv_sec;
1873 else {
1874 ssp->ss_create = time.tv_sec;
1875 ssp->ss_serial = ++fs->lfs_serial;
1876 ssp->ss_ident = fs->lfs_ident;
1877 }
1878 ssp->ss_datasum = cksum(datap, dp - datap);
1879 ssp->ss_sumsum =
1880 cksum(&ssp->ss_datasum, fs->lfs_sumsize - sizeof(ssp->ss_sumsum));
1881 pool_put(&fs->lfs_bpppool, datap);
1882 datap = dp = NULL;
1883 #ifdef DIAGNOSTIC
1884 if (fs->lfs_bfree <
1885 btofsb(fs, ninos * fs->lfs_ibsize) + btofsb(fs, fs->lfs_sumsize))
1886 panic("lfs_writeseg: No diskspace for summary");
1887 #endif
1888 fs->lfs_bfree -= (btofsb(fs, ninos * fs->lfs_ibsize) +
1889 btofsb(fs, fs->lfs_sumsize));
1890
1891 strategy = devvp->v_op[VOFFSET(vop_strategy)];
1892
1893 /*
1894 * When we simply write the blocks we lose a rotation for every block
1895 * written. To avoid this problem, we cluster the buffers into a
1896 	 * chunk and write the chunk.  MAXPHYS is the largest size an I/O
1897 	 * device can handle, so we use that for the size of the chunks.
1898 	 *
1899 	 * Blocks that already form clusters (from GOP_WRITE), however, are
1900 	 * not copied into other clusters.
1901 */
1902
1903 #define CHUNKSIZE MAXPHYS
1904
1905 if (devvp == NULL)
1906 panic("devvp is NULL");
1907 for (bpp = sp->bpp, i = nblocks; i;) {
1908 cbp = lfs_newclusterbuf(fs, devvp, (*bpp)->b_blkno, i);
1909 cl = (struct lfs_cluster *)cbp->b_saveaddr;
1910
1911 cbp->b_dev = i_dev;
1912 cbp->b_flags |= B_ASYNC | B_BUSY;
1913 cbp->b_bcount = 0;
1914
1915 #if defined(DEBUG) && defined(DIAGNOSTIC)
1916 if (bpp - sp->bpp > (fs->lfs_sumsize - SEGSUM_SIZE(fs))
1917 / sizeof(int32_t)) {
1918 panic("lfs_writeseg: real bpp overwrite");
1919 }
1920 if (bpp - sp->bpp > fs->lfs_ssize / fs->lfs_fsize) {
1921 panic("lfs_writeseg: theoretical bpp overwrite");
1922 }
1923 #endif
1924
1925 /*
1926 * Construct the cluster.
1927 */
1928 ++fs->lfs_iocount;
1929 while (i && cbp->b_bcount < CHUNKSIZE) {
1930 bp = *bpp;
1931
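			/*
			 * Stop filling this cluster if the next buffer would
			 * overflow CHUNKSIZE, or if the cluster already wraps
			 * a GOP_WRITE buffer directly (no malloc'ed chunk to
			 * copy into).
			 */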
1932 if (bp->b_bcount > (CHUNKSIZE - cbp->b_bcount))
1933 break;
1934 if (cbp->b_bcount > 0 && !(cl->flags & LFS_CL_MALLOC))
1935 break;
1936
1937 /* Clusters from GOP_WRITE are expedited */
1938 if (bp->b_bcount > fs->lfs_bsize) {
1939 if (cbp->b_bcount > 0)
1940 					/* Put it in its own buffer */
1941 break;
1942 else {
1943 cbp->b_data = bp->b_data;
1944 }
1945 } else if (cbp->b_bcount == 0) {
1946 p = cbp->b_data = lfs_malloc(fs, CHUNKSIZE,
1947 LFS_NB_CLUSTER);
1948 cl->flags |= LFS_CL_MALLOC;
1949 }
1950 #ifdef DIAGNOSTIC
1951 if (dtosn(fs, dbtofsb(fs, bp->b_blkno +
1952 btodb(bp->b_bcount - 1))) !=
1953 sp->seg_number) {
1954 printf("blk size %ld daddr %" PRIx64
1955 " not in seg %d\n",
1956 bp->b_bcount, bp->b_blkno,
1957 sp->seg_number);
1958 panic("segment overwrite");
1959 }
1960 #endif
1961
1962 #ifdef LFS_USE_B_INVAL
1963 /*
1964 * Fake buffers from the cleaner are marked as B_INVAL.
1965 * We need to copy the data from user space rather than
1966 * from the buffer indicated.
1967 			 * XXX what do I do on an error?
1968 */
1969 if ((bp->b_flags & (B_CALL|B_INVAL)) ==
1970 (B_CALL|B_INVAL)) {
1971 if (copyin(bp->b_saveaddr, p, bp->b_bcount))
1972 panic("lfs_writeseg: "
1973 "copyin failed [2]");
1974 } else
1975 #endif /* LFS_USE_B_INVAL */
1976 if (cl->flags & LFS_CL_MALLOC) {
1977 /* copy data into our cluster. */
1978 memcpy(p, bp->b_data, bp->b_bcount);
1979 p += bp->b_bcount;
1980 }
1981
1982 cbp->b_bcount += bp->b_bcount;
1983 cl->bufsize += bp->b_bcount;
1984
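			/*
			 * Mark the buffer as being written: clear the stale
			 * state bits, record it in the cluster, requeue it on
			 * the vnode's buffer lists, and note another output
			 * in progress on the vnode.
			 */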
1985 bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI | B_DONE);
1986 cl->bpp[cl->bufcount++] = bp;
1987 vp = bp->b_vp;
1988 s = splbio();
1989 reassignbuf(bp, vp);
1990 V_INCR_NUMOUTPUT(vp);
1991 splx(s);
1992
1993 bpp++;
1994 i--;
1995 }
1996 s = splbio();
1997 V_INCR_NUMOUTPUT(devvp);
1998 splx(s);
1999 vop_strategy_a.a_desc = VDESC(vop_strategy);
2000 vop_strategy_a.a_bp = cbp;
2001 (strategy)(&vop_strategy_a);
2002 curproc->p_stats->p_ru.ru_oublock++;
2003 }
2004
2005 if (lfs_dostats) {
2006 ++lfs_stats.psegwrites;
2007 lfs_stats.blocktot += nblocks - 1;
2008 if (fs->lfs_sp->seg_flags & SEGM_SYNC)
2009 ++lfs_stats.psyncwrites;
2010 if (fs->lfs_sp->seg_flags & SEGM_CLEAN) {
2011 ++lfs_stats.pcleanwrites;
2012 lfs_stats.cleanblocks += nblocks - 1;
2013 }
2014 }
2015 return (lfs_initseg(fs) || do_again);
2016 }
2017
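/*
 * Write a copy of the superblock to the given disk address, asynchronously.
 * Only one superblock write may be in progress at a time; lfs_sbactive is
 * set here and cleared by lfs_super_aiodone when the write completes.
 */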
2018 void
2019 lfs_writesuper(struct lfs *fs, daddr_t daddr)
2020 {
2021 struct buf *bp;
2022 dev_t i_dev;
2023 int (*strategy)(void *);
2024 int s;
2025 struct vop_strategy_args vop_strategy_a;
2026
2027 /*
2028 	 * If one superblock write were allowed to start while another is
2029 	 * still in progress, we would risk not having a complete checkpoint
2030 	 * if we crash.  So, block here if a superblock write is in progress.
2031 */
2032 s = splbio();
2033 while (fs->lfs_sbactive) {
2034 tsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs sb", 0);
2035 }
2036 fs->lfs_sbactive = daddr;
2037 splx(s);
2038 i_dev = VTOI(fs->lfs_ivnode)->i_dev;
2039 strategy = VTOI(fs->lfs_ivnode)->i_devvp->v_op[VOFFSET(vop_strategy)];
2040
2041 /* Set timestamp of this version of the superblock */
2042 if (fs->lfs_version == 1)
2043 fs->lfs_otstamp = time.tv_sec;
2044 fs->lfs_tstamp = time.tv_sec;
2045
2046 /* Checksum the superblock and copy it into a buffer. */
2047 fs->lfs_cksum = lfs_sb_cksum(&(fs->lfs_dlfs));
2048 bp = lfs_newbuf(fs, VTOI(fs->lfs_ivnode)->i_devvp,
2049 fsbtodb(fs, daddr), LFS_SBPAD, LFS_NB_SBLOCK);
2050 memset(bp->b_data + sizeof(struct dlfs), 0,
2051 LFS_SBPAD - sizeof(struct dlfs));
2052 *(struct dlfs *)bp->b_data = fs->lfs_dlfs;
2053
2054 bp->b_dev = i_dev;
2055 bp->b_flags |= B_BUSY | B_CALL | B_ASYNC;
2056 bp->b_flags &= ~(B_DONE | B_ERROR | B_READ | B_DELWRI);
2057 bp->b_iodone = lfs_supercallback;
2058 /* XXX KS - same nasty hack as above */
2059 bp->b_saveaddr = (caddr_t)fs;
2060
2061 vop_strategy_a.a_desc = VDESC(vop_strategy);
2062 vop_strategy_a.a_bp = bp;
2063 curproc->p_stats->p_ru.ru_oublock++;
2064 s = splbio();
2065 V_INCR_NUMOUTPUT(bp->b_vp);
2066 splx(s);
2067 ++fs->lfs_iocount;
2068 (strategy)(&vop_strategy_a);
2069 }
2070
2071 /*
2072 * Logical block number match routines used when traversing the dirty block
2073 * chain.
2074 */
2075 int
2076 lfs_match_fake(struct lfs *fs, struct buf *bp)
2077 {
2078
2079 return LFS_IS_MALLOC_BUF(bp);
2080 }
2081
2082 #if 0
2083 int
2084 lfs_match_real(struct lfs *fs, struct buf *bp)
2085 {
2086
2087 return (lfs_match_data(fs, bp) && !lfs_match_fake(fs, bp));
2088 }
2089 #endif
2090
2091 int
2092 lfs_match_data(struct lfs *fs, struct buf *bp)
2093 {
2094
2095 return (bp->b_lblkno >= 0);
2096 }
2097
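/*
 * Data blocks have non-negative logical block numbers, while indirect
 * blocks are assigned negative ones; the routines below use
 * (-lbn - NDADDR) % NINDIR(fs) to distinguish single, double and triple
 * indirect blocks (0, 1 and 2 respectively).
 */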
2098 int
2099 lfs_match_indir(struct lfs *fs, struct buf *bp)
2100 {
2101 daddr_t lbn;
2102
2103 lbn = bp->b_lblkno;
2104 return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 0);
2105 }
2106
2107 int
2108 lfs_match_dindir(struct lfs *fs, struct buf *bp)
2109 {
2110 daddr_t lbn;
2111
2112 lbn = bp->b_lblkno;
2113 return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 1);
2114 }
2115
2116 int
2117 lfs_match_tindir(struct lfs *fs, struct buf *bp)
2118 {
2119 daddr_t lbn;
2120
2121 lbn = bp->b_lblkno;
2122 return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 2);
2123 }
2124
2125 /*
2126 * XXX - The only buffers that are going to hit these functions are the
2127 * segment write blocks, or the segment summaries, or the superblocks.
2128 *
2129 * All of the above are created by lfs_newbuf, and so do not need to be
2130 * released via brelse.
2131 */
2132 void
2133 lfs_callback(struct buf *bp)
2134 {
2135 struct lfs *fs;
2136
2137 fs = (struct lfs *)bp->b_saveaddr;
2138 lfs_freebuf(fs, bp);
2139 }
2140
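/*
 * Process-context completion for a superblock write: clear lfs_sbactive,
 * wake anyone waiting on it or on lfs_iocount, and free the buffer.
 */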
2141 static void
2142 lfs_super_aiodone(struct buf *bp)
2143 {
2144 struct lfs *fs;
2145
2146 fs = (struct lfs *)bp->b_saveaddr;
2147 fs->lfs_sbactive = 0;
2148 wakeup(&fs->lfs_sbactive);
2149 if (--fs->lfs_iocount <= 1)
2150 wakeup(&fs->lfs_iocount);
2151 lfs_freebuf(fs, bp);
2152 }
2153
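/*
 * Process-context completion for a cluster write: propagate any I/O error
 * to the component buffers, hand each buffer back (marking the inode
 * IN_MODIFIED or IN_CLEANING if it still has dirty blocks), then free the
 * cluster buffer and its bookkeeping structures.
 */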
2154 static void
2155 lfs_cluster_aiodone(struct buf *bp)
2156 {
2157 struct lfs_cluster *cl;
2158 struct lfs *fs;
2159 struct buf *tbp, *fbp;
2160 struct vnode *vp, *devvp;
2161 struct inode *ip;
2162 int s, error=0;
2163
2164 if (bp->b_flags & B_ERROR)
2165 error = bp->b_error;
2166
2167 cl = (struct lfs_cluster *)bp->b_saveaddr;
2168 fs = cl->fs;
2169 devvp = VTOI(fs->lfs_ivnode)->i_devvp;
2170
2171 /* Put the pages back, and release the buffer */
2172 while (cl->bufcount--) {
2173 tbp = cl->bpp[cl->bufcount];
2174 if (error) {
2175 tbp->b_flags |= B_ERROR;
2176 tbp->b_error = error;
2177 }
2178
2179 /*
2180 * We're done with tbp. If it has not been re-dirtied since
2181 * the cluster was written, free it. Otherwise, keep it on
2182 * the locked list to be written again.
2183 */
2184 vp = tbp->b_vp;
2185
2186 tbp->b_flags &= ~B_GATHERED;
2187
2188 LFS_BCLEAN_LOG(fs, tbp);
2189
2190 if (!(tbp->b_flags & B_CALL)) {
2191 KASSERT(tbp->b_flags & B_LOCKED);
2192 s = splbio();
2193 simple_lock(&bqueue_slock);
2194 bremfree(tbp);
2195 simple_unlock(&bqueue_slock);
2196 if (vp)
2197 reassignbuf(tbp, vp);
2198 splx(s);
2199 tbp->b_flags |= B_ASYNC; /* for biodone */
2200 }
2201
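		/*
		 * If the buffer was not re-dirtied while the write was in
		 * flight, drop our hold on it now.
		 */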
2202 if ((tbp->b_flags & (B_LOCKED | B_DELWRI)) == B_LOCKED)
2203 LFS_UNLOCK_BUF(tbp);
2204
2205 #ifdef DIAGNOSTIC
2206 if (tbp->b_flags & B_DONE) {
2207 printf("blk %d biodone already (flags %lx)\n",
2208 cl->bufcount, (long)tbp->b_flags);
2209 }
2210 #endif
2211 if (tbp->b_flags & (B_BUSY | B_CALL)) {
2212 if ((tbp->b_flags & B_CALL) &&
2213 !LFS_IS_MALLOC_BUF(tbp)) {
2214 /* printf("flags 0x%lx\n", tbp->b_flags); */
2215 /*
2216 * A buffer from the page daemon.
2217 * We use the same iodone as it does,
2218 * so we must manually disassociate its
2219 * buffers from the vp.
2220 */
2221 if (tbp->b_vp) {
2222 /* This is just silly */
2223 s = splbio();
2224 brelvp(tbp);
2225 tbp->b_vp = vp;
2226 splx(s);
2227 }
2228 /* Put it back the way it was */
2229 tbp->b_flags |= B_ASYNC;
2230 /* Master buffers have B_AGE */
2231 if (tbp->b_private == tbp)
2232 tbp->b_flags |= B_AGE;
2233 }
2234 s = splbio();
2235 biodone(tbp);
2236
2237 /*
2238 * If this is the last block for this vnode, but
2239 * there are other blocks on its dirty list,
2240 * set IN_MODIFIED/IN_CLEANING depending on what
2241 * sort of block. Only do this for our mount point,
2242 * not for, e.g., inode blocks that are attached to
2243 * the devvp.
2244 * XXX KS - Shouldn't we set *both* if both types
2245 * of blocks are present (traverse the dirty list?)
2246 */
2247 simple_lock(&global_v_numoutput_slock);
2248 if (vp != devvp && vp->v_numoutput == 0 &&
2249 (fbp = LIST_FIRST(&vp->v_dirtyblkhd)) != NULL) {
2250 ip = VTOI(vp);
2251 #ifdef DEBUG_LFS
2252 printf("lfs_cluster_aiodone: marking ino %d\n",
2253 ip->i_number);
2254 #endif
2255 if (LFS_IS_MALLOC_BUF(fbp))
2256 LFS_SET_UINO(ip, IN_CLEANING);
2257 else
2258 LFS_SET_UINO(ip, IN_MODIFIED);
2259 }
2260 simple_unlock(&global_v_numoutput_slock);
2261 splx(s);
2262 wakeup(vp);
2263 }
2264 }
2265
2266 /* Fix up the cluster buffer, and release it */
2267 if (cl->flags & LFS_CL_MALLOC)
2268 lfs_free(fs, bp->b_data, LFS_NB_CLUSTER);
2269 s = splbio();
2270 pool_put(&bufpool, bp); /* XXX should use lfs_free? */
2271 splx(s);
2272
2273 /* Note i/o done */
2274 if (cl->flags & LFS_CL_SYNC) {
2275 if (--cl->seg->seg_iocount == 0)
2276 wakeup(&cl->seg->seg_iocount);
2277 /* printf("- %x => %d\n", cl->seg, cl->seg->seg_iocount); */
2278 }
2279 #ifdef DIAGNOSTIC
2280 if (fs->lfs_iocount == 0)
2281 panic("lfs_cluster_aiodone: zero iocount");
2282 #endif
2283 if (--fs->lfs_iocount <= 1)
2284 wakeup(&fs->lfs_iocount);
2285
2286 pool_put(&fs->lfs_bpppool, cl->bpp);
2287 cl->bpp = NULL;
2288 pool_put(&fs->lfs_clpool, cl);
2289 }
2290
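/*
 * Called via biodone() at interrupt time; queue the buffer for the aiodone
 * daemon so that the real completion routine runs in process context.
 */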
2291 static void
2292 lfs_generic_callback(struct buf *bp, void (*aiodone)(struct buf *))
2293 {
2294 /* reset b_iodone for when this is a single-buf i/o. */
2295 bp->b_iodone = aiodone;
2296
2297 simple_lock(&uvm.aiodoned_lock); /* locks uvm.aio_done */
2298 TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
2299 wakeup(&uvm.aiodoned);
2300 simple_unlock(&uvm.aiodoned_lock);
2301 }
2302
2303 static void
2304 lfs_cluster_callback(struct buf *bp)
2305 {
2306
2307 lfs_generic_callback(bp, lfs_cluster_aiodone);
2308 }
2309
2310 void
2311 lfs_supercallback(struct buf *bp)
2312 {
2313
2314 lfs_generic_callback(bp, lfs_super_aiodone);
2315 }
2316
2317 /*
2318 * Shellsort (diminishing increment sort) from Data Structures and
2319  * Algorithms, Aho, Hopcroft and Ullman, 1983 Edition, page 290;
2320 * see also Knuth Vol. 3, page 84. The increments are selected from
2321 * formula (8), page 95. Roughly O(N^3/2).
2322 */
2323 /*
2324 * This is our own private copy of shellsort because we want to sort
2325 * two parallel arrays (the array of buffer pointers and the array of
2326  * logical block numbers) simultaneously.  Note that we cast the
2327  * logical block numbers to unsigned in this routine so that the
2328  * negative block numbers (metadata blocks) sort AFTER the data blocks.
2329 */
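/*
 * For example, any negative lbn maps to a value >= 0x80000000 when cast to
 * u_int32_t, so metadata blocks always compare greater than (and therefore
 * sort after) data blocks, whose lbns are non-negative.
 */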
2330
2331 void
2332 lfs_shellsort(struct buf **bp_array, int32_t *lb_array, int nmemb, int size)
2333 {
2334 static int __rsshell_increments[] = { 4, 1, 0 };
2335 int incr, *incrp, t1, t2;
2336 struct buf *bp_temp;
2337
2338 #ifdef DEBUG
2339 incr = 0;
2340 for (t1 = 0; t1 < nmemb; t1++) {
2341 for (t2 = 0; t2 * size < bp_array[t1]->b_bcount; t2++) {
2342 if (lb_array[incr++] != bp_array[t1]->b_lblkno + t2) {
2343 /* dump before panic */
2344 printf("lfs_shellsort: nmemb=%d, size=%d\n",
2345 nmemb, size);
2346 incr = 0;
2347 for (t1 = 0; t1 < nmemb; t1++) {
2348 const struct buf *bp = bp_array[t1];
2349
2350 					printf("bp[%d]: lbn=%" PRIu64 ", size=%"
2351 					    PRIu64 "\n", t1,
2352 					    (uint64_t)bp->b_lblkno,
2353 					    (uint64_t)bp->b_bcount);
2354 printf("lbns:");
2355 for (t2 = 0; t2 * size < bp->b_bcount;
2356 t2++) {
2357 printf(" %" PRId32,
2358 lb_array[incr++]);
2359 }
2360 printf("\n");
2361 }
2362 panic("lfs_shellsort: inconsistent input");
2363 }
2364 }
2365 }
2366 #endif
2367
2368 for (incrp = __rsshell_increments; (incr = *incrp++) != 0;)
2369 for (t1 = incr; t1 < nmemb; ++t1)
2370 for (t2 = t1 - incr; t2 >= 0;)
2371 if ((u_int32_t)bp_array[t2]->b_lblkno >
2372 (u_int32_t)bp_array[t2 + incr]->b_lblkno) {
2373 bp_temp = bp_array[t2];
2374 bp_array[t2] = bp_array[t2 + incr];
2375 bp_array[t2 + incr] = bp_temp;
2376 t2 -= incr;
2377 } else
2378 break;
2379
2380 /* Reform the list of logical blocks */
2381 incr = 0;
2382 for (t1 = 0; t1 < nmemb; t1++) {
2383 for (t2 = 0; t2 * size < bp_array[t1]->b_bcount; t2++) {
2384 lb_array[incr++] = bp_array[t1]->b_lblkno + t2;
2385 }
2386 }
2387 }
2388
2389 /*
2390 * Check VXLOCK. Return 1 if the vnode is locked. Otherwise, vget it.
2391 */
2392 int
2393 lfs_vref(struct vnode *vp)
2394 {
2395 /*
2396 * If we return 1 here during a flush, we risk vinvalbuf() not
2397 * being able to flush all of the pages from this vnode, which
2398 * will cause it to panic. So, return 0 if a flush is in progress.
2399 */
2400 if (vp->v_flag & VXLOCK) {
2401 if (IS_FLUSHING(VTOI(vp)->i_lfs,vp)) {
2402 return 0;
2403 }
2404 return (1);
2405 }
2406 return (vget(vp, 0));
2407 }
2408
2409 /*
2410 * This is vrele except that we do not want to VOP_INACTIVE this vnode. We
2411 * inline vrele here to avoid the vn_lock and VOP_INACTIVE call at the end.
2412 */
2413 void
2414 lfs_vunref(struct vnode *vp)
2415 {
2416 /*
2417 * Analogous to lfs_vref, if the node is flushing, fake it.
2418 */
2419 if ((vp->v_flag & VXLOCK) && IS_FLUSHING(VTOI(vp)->i_lfs,vp)) {
2420 return;
2421 }
2422
2423 simple_lock(&vp->v_interlock);
2424 #ifdef DIAGNOSTIC
2425 if (vp->v_usecount <= 0) {
2426 printf("lfs_vunref: inum is %d\n", VTOI(vp)->i_number);
2427 printf("lfs_vunref: flags are 0x%lx\n", (u_long)vp->v_flag);
2428 printf("lfs_vunref: usecount = %ld\n", (long)vp->v_usecount);
2429 		panic("lfs_vunref: v_usecount <= 0");
2430 }
2431 #endif
2432 vp->v_usecount--;
2433 if (vp->v_usecount > 0) {
2434 simple_unlock(&vp->v_interlock);
2435 return;
2436 }
2437 /*
2438 * insert at tail of LRU list
2439 */
2440 simple_lock(&vnode_free_list_slock);
2441 if (vp->v_holdcnt > 0)
2442 TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
2443 else
2444 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
2445 simple_unlock(&vnode_free_list_slock);
2446 simple_unlock(&vp->v_interlock);
2447 }
2448
2449 /*
2450 * We use this when we have vnodes that were loaded in solely for cleaning.
2451 * There is no reason to believe that these vnodes will be referenced again
2452 * soon, since the cleaning process is unrelated to normal filesystem
2453 * activity. Putting cleaned vnodes at the tail of the list has the effect
2454 * of flushing the vnode LRU. So, put vnodes that were loaded only for
2455 * cleaning at the head of the list, instead.
2456 */
2457 void
2458 lfs_vunref_head(struct vnode *vp)
2459 {
2460
2461 simple_lock(&vp->v_interlock);
2462 #ifdef DIAGNOSTIC
2463 if (vp->v_usecount == 0) {
2464 		panic("lfs_vunref_head: v_usecount == 0");
2465 }
2466 #endif
2467 vp->v_usecount--;
2468 if (vp->v_usecount > 0) {
2469 simple_unlock(&vp->v_interlock);
2470 return;
2471 }
2472 /*
2473 * insert at head of LRU list
2474 */
2475 simple_lock(&vnode_free_list_slock);
2476 if (vp->v_holdcnt > 0)
2477 TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
2478 else
2479 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2480 simple_unlock(&vnode_free_list_slock);
2481 simple_unlock(&vp->v_interlock);
2482 }
2483
2484