/*	$NetBSD: lfs_segment.c,v 1.151 2004/03/09 06:43:18 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000, 2001, 2002, 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_segment.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_segment.c,v 1.151 2004/03/09 06:43:18 yamt Exp $");

#define ivndebug(vp,str) printf("ino %d: %s\n",VTOI(vp)->i_number,(str))

#if defined(_KERNEL_OPT)
#include "opt_ddb.h"
#endif

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

#include <uvm/uvm.h>
#include <uvm/uvm_extern.h>

MALLOC_DEFINE(M_SEGMENT, "LFS segment", "Segment for LFS");

extern int count_lock_queue(void);
extern struct simplelock vnode_free_list_slock;		/* XXX */
extern struct simplelock bqueue_slock;			/* XXX */

static void lfs_generic_callback(struct buf *, void (*)(struct buf *));
static void lfs_super_aiodone(struct buf *);
static void lfs_cluster_aiodone(struct buf *);
static void lfs_cluster_callback(struct buf *);

/*
 * Determine if it's OK to start a partial in this segment, or if we need
 * to go on to a new segment.
 */
#define	LFS_PARTIAL_FITS(fs) \
	((fs)->lfs_fsbpseg - ((fs)->lfs_offset - (fs)->lfs_curseg) > \
	fragstofsb((fs), (fs)->lfs_frag))
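/*
 * (Reading of the above: the fsb still unused in the current segment must
 * exceed one full block's worth of frags for another partial to fit.)
 */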

int	 lfs_match_fake(struct lfs *, struct buf *);
void	 lfs_newseg(struct lfs *);
/* XXX ondisk32 */
void	 lfs_shellsort(struct buf **, int32_t *, int, int);
void	 lfs_supercallback(struct buf *);
void	 lfs_updatemeta(struct segment *);
void	 lfs_writesuper(struct lfs *, daddr_t);
int	 lfs_writevnodes(struct lfs *fs, struct mount *mp,
	    struct segment *sp, int dirops);

int	lfs_allclean_wakeup;		/* Cleaner wakeup address. */
int	lfs_writeindir = 1;		/* whether to flush indir on non-ckp */
int	lfs_clean_vnhead = 0;		/* Allow freeing to head of vn list */
int	lfs_dirvcount = 0;		/* # active dirops */

/* Statistics Counters */
int lfs_dostats = 1;
struct lfs_stats lfs_stats;

/* op values to lfs_writevnodes */
#define	VN_REG		0
#define	VN_DIROP	1
#define	VN_EMPTY	2
#define	VN_CLEAN	3
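/*
 * (As lfs_writevnodes below interprets these: VN_REG writes ordinary
 * vnodes and skips dirops; VN_DIROP takes only vnodes marked VDIROP;
 * VN_EMPTY takes only vnodes with no dirty buffers, so that their
 * inodes get written; VN_CLEAN takes vnodes marked IN_CLEANING.)
 */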

/*
 * XXX KS - Set modification time on the Ifile, so the cleaner can
 * read the fs mod time off of it.  We don't set IN_UPDATE here,
 * since we don't really need this to be flushed to disk (and in any
 * case that wouldn't happen to the Ifile until we checkpoint).
 */
void
lfs_imtime(struct lfs *fs)
{
	struct timespec ts;
	struct inode *ip;

	TIMEVAL_TO_TIMESPEC(&time, &ts);
	ip = VTOI(fs->lfs_ivnode);
	ip->i_ffs1_mtime = ts.tv_sec;
	ip->i_ffs1_mtimensec = ts.tv_nsec;
}

/*
 * Ifile and meta data blocks are not marked busy, so segment writes MUST be
 * single threaded.  Currently, there are two paths into lfs_segwrite, sync()
 * and getnewbuf().  They both mark the file system busy.  Lfs_vflush()
 * explicitly marks the file system busy.  So lfs_segwrite is safe.  I think.
 */

#define	SET_FLUSHING(fs,vp)	(fs)->lfs_flushvp = (vp)
#define	IS_FLUSHING(fs,vp)	((fs)->lfs_flushvp == (vp))
#define	CLR_FLUSHING(fs,vp)	(fs)->lfs_flushvp = NULL
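/*
 * (lfs_flushvp records the vnode currently being flushed; IS_FLUSHING
 * lets other paths, e.g. lfs_writefile, avoid repeating work such as
 * VOP_PUTPAGES for that vnode.)
 */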

int
lfs_vflush(struct vnode *vp)
{
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	struct buf *bp, *nbp, *tbp, *tnbp;
	int error, s;
	int flushed;
#if 0
	int redo;
#endif

	ip = VTOI(vp);
	fs = VFSTOUFS(vp->v_mount)->um_lfs;

	if (ip->i_flag & IN_CLEANING) {
#ifdef DEBUG_LFS
		ivndebug(vp,"vflush/in_cleaning");
#endif
		LFS_CLR_UINO(ip, IN_CLEANING);
		LFS_SET_UINO(ip, IN_MODIFIED);

		/*
		 * Toss any cleaning buffers that have real counterparts
		 * to avoid losing new data.
		 */
		s = splbio();
		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (!LFS_IS_MALLOC_BUF(bp))
				continue;
			/*
			 * Look for pages matching the range covered
			 * by cleaning blocks.  It's okay if more dirty
			 * pages appear, so long as none disappear out
			 * from under us.
			 */
			if (bp->b_lblkno > 0 && vp->v_type == VREG &&
			    vp != fs->lfs_ivnode) {
				struct vm_page *pg;
				voff_t off;

				simple_lock(&vp->v_interlock);
				for (off = lblktosize(fs, bp->b_lblkno);
				     off < lblktosize(fs, bp->b_lblkno + 1);
				     off += PAGE_SIZE) {
					pg = uvm_pagelookup(&vp->v_uobj, off);
					if (pg == NULL)
						continue;
					if ((pg->flags & PG_CLEAN) == 0 ||
					    pmap_is_modified(pg)) {
						fs->lfs_avail += btofsb(fs,
							bp->b_bcount);
						wakeup(&fs->lfs_avail);
						lfs_freebuf(fs, bp);
						bp = NULL;
						goto nextbp;
					}
				}
				simple_unlock(&vp->v_interlock);
			}
			for (tbp = LIST_FIRST(&vp->v_dirtyblkhd); tbp;
			    tbp = tnbp)
			{
				tnbp = LIST_NEXT(tbp, b_vnbufs);
				if (tbp->b_vp == bp->b_vp
				    && tbp->b_lblkno == bp->b_lblkno
				    && tbp != bp)
				{
					fs->lfs_avail += btofsb(fs,
						bp->b_bcount);
					wakeup(&fs->lfs_avail);
					lfs_freebuf(fs, bp);
					bp = NULL;
					break;
				}
			}
		    nextbp:
			;
		}
		splx(s);
	}

	/* If the node is being written, wait until that is done */
	s = splbio();
	if (WRITEINPROG(vp)) {
#ifdef DEBUG_LFS
		ivndebug(vp,"vflush/writeinprog");
#endif
		tsleep(vp, PRIBIO+1, "lfs_vw", 0);
	}
	splx(s);

	/* Protect against VXLOCK deadlock in vinvalbuf() */
	lfs_seglock(fs, SEGM_SYNC);

	/* If we're supposed to flush a freed inode, just toss it */
	/* XXX - seglock, so these buffers can't be gathered, right? */
	if (ip->i_mode == 0) {
		printf("lfs_vflush: ino %d is freed, not flushing\n",
			ip->i_number);
		s = splbio();
		for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
			nbp = LIST_NEXT(bp, b_vnbufs);
			if (bp->b_flags & B_DELWRI) { /* XXX always true? */
				fs->lfs_avail += btofsb(fs, bp->b_bcount);
				wakeup(&fs->lfs_avail);
			}
			/* Copied from lfs_writeseg */
			if (bp->b_flags & B_CALL) {
				biodone(bp);
			} else {
				bremfree(bp);
				LFS_UNLOCK_BUF(bp);
				bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
						 B_GATHERED);
				bp->b_flags |= B_DONE;
				reassignbuf(bp, vp);
				brelse(bp);
			}
		}
		splx(s);
		LFS_CLR_UINO(ip, IN_CLEANING);
		LFS_CLR_UINO(ip, IN_MODIFIED | IN_ACCESSED);
		ip->i_flag &= ~IN_ALLMOD;
		printf("lfs_vflush: done not flushing ino %d\n",
			ip->i_number);
		lfs_segunlock(fs);
		return 0;
	}

	SET_FLUSHING(fs,vp);
	if (fs->lfs_nactive > LFS_MAX_ACTIVE ||
	    (fs->lfs_sp->seg_flags & SEGM_CKP)) {
		error = lfs_segwrite(vp->v_mount, SEGM_CKP | SEGM_SYNC);
		CLR_FLUSHING(fs,vp);
		lfs_segunlock(fs);
		return error;
	}
	sp = fs->lfs_sp;

	flushed = 0;
	if (VPISEMPTY(vp)) {
		lfs_writevnodes(fs, vp->v_mount, sp, VN_EMPTY);
		++flushed;
	} else if ((ip->i_flag & IN_CLEANING) &&
		  (fs->lfs_sp->seg_flags & SEGM_CLEAN)) {
#ifdef DEBUG_LFS
		ivndebug(vp,"vflush/clean");
#endif
		lfs_writevnodes(fs, vp->v_mount, sp, VN_CLEAN);
		++flushed;
	} else if (lfs_dostats) {
		if (!VPISEMPTY(vp) || (VTOI(vp)->i_flag & IN_ALLMOD))
			++lfs_stats.vflush_invoked;
#ifdef DEBUG_LFS
		ivndebug(vp,"vflush");
#endif
	}

#ifdef DIAGNOSTIC
	/* XXX KS This actually can happen right now, though it shouldn't(?) */
	if (vp->v_flag & VDIROP) {
		printf("lfs_vflush: flushing VDIROP, this shouldn't be\n");
		/* panic("VDIROP being flushed...this can't happen"); */
	}
	if (vp->v_usecount < 0) {
		printf("usecount=%ld\n", (long)vp->v_usecount);
		panic("lfs_vflush: usecount<0");
	}
#endif

#if 1
	do {
		do {
			if (LIST_FIRST(&vp->v_dirtyblkhd) != NULL)
				lfs_writefile(fs, sp, vp);
		} while (lfs_writeinode(fs, sp, ip));
	} while (lfs_writeseg(fs, sp) && ip->i_number == LFS_IFILE_INUM);
#else
	if (flushed && vp != fs->lfs_ivnode)
		lfs_writeseg(fs, sp);
	else do {
		fs->lfs_flags &= ~LFS_IFDIRTY;
		lfs_writefile(fs, sp, vp);
		redo = lfs_writeinode(fs, sp, ip);
		redo += lfs_writeseg(fs, sp);
		redo += (fs->lfs_flags & LFS_IFDIRTY);
	} while (redo && vp == fs->lfs_ivnode);
#endif
	if (lfs_dostats) {
		++lfs_stats.nwrites;
		if (sp->seg_flags & SEGM_SYNC)
			++lfs_stats.nsync_writes;
		if (sp->seg_flags & SEGM_CKP)
			++lfs_stats.ncheckpoints;
	}
	/*
	 * If we were called from somewhere that has already held the seglock
	 * (e.g., lfs_markv()), the lfs_segunlock will not wait for
	 * the write to complete because we are still locked.
	 * Since lfs_vflush() must return the vnode with no dirty buffers,
	 * we must explicitly wait, if that is the case.
	 *
	 * We compare the iocount against 1, not 0, because it is
	 * artificially incremented by lfs_seglock().
	 */
	simple_lock(&fs->lfs_interlock);
	if (fs->lfs_seglock > 1) {
		simple_unlock(&fs->lfs_interlock);
		while (fs->lfs_iocount > 1)
			(void)tsleep(&fs->lfs_iocount, PRIBIO + 1,
				     "lfs_vflush", 0);
	} else
		simple_unlock(&fs->lfs_interlock);

	lfs_segunlock(fs);

	/* Wait for these buffers to be recovered by aiodoned */
	s = splbio();
	simple_lock(&global_v_numoutput_slock);
	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;
		ltsleep(&vp->v_numoutput, PRIBIO + 1, "lfs_vf2", 0,
			&global_v_numoutput_slock);
	}
	simple_unlock(&global_v_numoutput_slock);
	splx(s);

	CLR_FLUSHING(fs,vp);
	return (0);
}

#ifdef DEBUG_LFS_VERBOSE
# define vndebug(vp,str) if (VTOI(vp)->i_flag & IN_CLEANING) printf("not writing ino %d because %s (op %d)\n",VTOI(vp)->i_number,(str),op)
#else
# define vndebug(vp,str)
#endif

int
lfs_writevnodes(struct lfs *fs, struct mount *mp, struct segment *sp, int op)
{
	struct inode *ip;
	struct vnode *vp, *nvp;
	int inodes_written = 0, only_cleaning;

#ifndef LFS_NO_BACKVP_HACK
	/* BEGIN HACK */
#define	VN_OFFSET \
	(((caddr_t)&LIST_NEXT(vp, v_mntvnodes)) - (caddr_t)vp)
#define	BACK_VP(VP) \
	((struct vnode *)(((caddr_t)(VP)->v_mntvnodes.le_prev) - VN_OFFSET))
#define	BEG_OF_VLIST \
	((struct vnode *)(((caddr_t)&LIST_FIRST(&mp->mnt_vnodelist)) \
	- VN_OFFSET))
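/*
 * (The hack above walks the mount's vnode list tail-to-head: VN_OFFSET
 * is the byte offset of the v_mntvnodes linkage within a vnode, BACK_VP
 * recovers the previous element from le_prev by subtracting it, and
 * BEG_OF_VLIST is the pseudo-vnode corresponding to the list head,
 * used as the loop terminator.)
 */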

	/* Find last vnode. */
 loop:	for (vp = LIST_FIRST(&mp->mnt_vnodelist);
	     vp && LIST_NEXT(vp, v_mntvnodes) != NULL;
	     vp = LIST_NEXT(vp, v_mntvnodes));
	for (; vp && vp != BEG_OF_VLIST; vp = nvp) {
		nvp = BACK_VP(vp);
#else
 loop:
	for (vp = LIST_FIRST(&mp->mnt_vnodelist); vp; vp = nvp) {
		nvp = LIST_NEXT(vp, v_mntvnodes);
#endif
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp) {
			printf("lfs_writevnodes: starting over\n");
			/*
			 * After this, pages might be busy
			 * due to our own previous putpages.
			 * Start actual segment write here to avoid deadlock.
			 */
			(void)lfs_writeseg(fs, sp);
			goto loop;
		}

		if (vp->v_type == VNON) {
			continue;
		}

		ip = VTOI(vp);
		if ((op == VN_DIROP && !(vp->v_flag & VDIROP)) ||
		    (op != VN_DIROP && op != VN_CLEAN &&
		    (vp->v_flag & VDIROP))) {
			vndebug(vp,"dirop");
			continue;
		}

		if (op == VN_EMPTY && !VPISEMPTY(vp)) {
			vndebug(vp,"empty");
			continue;
		}

		if (op == VN_CLEAN && ip->i_number != LFS_IFILE_INUM
		    && vp != fs->lfs_flushvp
		    && !(ip->i_flag & IN_CLEANING)) {
			vndebug(vp,"cleaning");
			continue;
		}

		if (lfs_vref(vp)) {
			vndebug(vp,"vref");
			continue;
		}

		only_cleaning = 0;
		/*
		 * Write the inode/file if dirty and it's not the IFILE.
		 */
		if ((ip->i_flag & IN_ALLMOD) || !VPISEMPTY(vp)) {
			only_cleaning =
			    ((ip->i_flag & IN_ALLMOD) == IN_CLEANING);

			if (ip->i_number != LFS_IFILE_INUM)
				lfs_writefile(fs, sp, vp);
			if (!VPISEMPTY(vp)) {
				if (WRITEINPROG(vp)) {
#ifdef DEBUG_LFS
					ivndebug(vp,"writevnodes/write2");
#endif
				} else if (!(ip->i_flag & IN_ALLMOD)) {
#ifdef DEBUG_LFS
					printf("<%d>",ip->i_number);
#endif
					LFS_SET_UINO(ip, IN_MODIFIED);
				}
			}
			(void) lfs_writeinode(fs, sp, ip);
			inodes_written++;
		}

		if (lfs_clean_vnhead && only_cleaning)
			lfs_vunref_head(vp);
		else
			lfs_vunref(vp);
	}
	return inodes_written;
}

/*
 * Do a checkpoint.
 */
int
lfs_segwrite(struct mount *mp, int flags)
{
	struct buf *bp;
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	struct vnode *vp;
	SEGUSE *segusep;
	int do_ckp, did_ckp, error, s;
	unsigned n, segleft, maxseg, sn, i, curseg;
	int writer_set = 0;
	int dirty;
	int redo;

	fs = VFSTOUFS(mp)->um_lfs;

	if (fs->lfs_ronly)
		return EROFS;

	lfs_imtime(fs);

	/*
	 * Allocate a segment structure and enough space to hold pointers to
	 * the maximum possible number of buffers which can be described in a
	 * single summary block.
	 */
	do_ckp = (flags & SEGM_CKP) || fs->lfs_nactive > LFS_MAX_ACTIVE;
	lfs_seglock(fs, flags | (do_ckp ? SEGM_CKP : 0));
	sp = fs->lfs_sp;

	/*
	 * If lfs_flushvp is non-NULL, we are called from lfs_vflush,
	 * in which case we have to flush *all* buffers off of this vnode.
	 * We don't care about other nodes, but write any non-dirop nodes
	 * anyway in anticipation of another getnewvnode().
	 *
	 * If we're cleaning we only write cleaning and ifile blocks, and
	 * no dirops, since otherwise we'd risk corruption in a crash.
	 */
	if (sp->seg_flags & SEGM_CLEAN)
		lfs_writevnodes(fs, mp, sp, VN_CLEAN);
	else if (!(sp->seg_flags & SEGM_FORCE_CKP)) {
		lfs_writevnodes(fs, mp, sp, VN_REG);
		if (!fs->lfs_dirops || !fs->lfs_flushvp) {
			error = lfs_writer_enter(fs, "lfs writer");
			if (error) {
				printf("segwrite mysterious error\n");
				/* XXX why not segunlock? */
				pool_put(&fs->lfs_bpppool, sp->bpp);
				sp->bpp = NULL;
				pool_put(&fs->lfs_segpool, sp);
				sp = fs->lfs_sp = NULL;
				return (error);
			}
			writer_set = 1;
			lfs_writevnodes(fs, mp, sp, VN_DIROP);
			((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
		}
	}

	/*
	 * If we are doing a checkpoint, mark everything since the
	 * last checkpoint as no longer ACTIVE.
	 */
	if (do_ckp) {
		segleft = fs->lfs_nseg;
		curseg = 0;
		for (n = 0; n < fs->lfs_segtabsz; n++) {
			dirty = 0;
			if (bread(fs->lfs_ivnode,
			    fs->lfs_cleansz + n, fs->lfs_bsize, NOCRED, &bp))
				panic("lfs_segwrite: ifile read");
			segusep = (SEGUSE *)bp->b_data;
			maxseg = min(segleft, fs->lfs_sepb);
			for (i = 0; i < maxseg; i++) {
				sn = curseg + i;
				if (sn != dtosn(fs, fs->lfs_curseg) &&
				    segusep->su_flags & SEGUSE_ACTIVE) {
					segusep->su_flags &= ~SEGUSE_ACTIVE;
					--fs->lfs_nactive;
					++dirty;
				}
				fs->lfs_suflags[fs->lfs_activesb][sn] =
					segusep->su_flags;
				if (fs->lfs_version > 1)
					++segusep;
				else
					segusep = (SEGUSE *)
						((SEGUSE_V1 *)segusep + 1);
			}

			if (dirty)
				error = LFS_BWRITE_LOG(bp); /* Ifile */
			else
				brelse(bp);
			segleft -= fs->lfs_sepb;
			curseg += fs->lfs_sepb;
		}
	}

	did_ckp = 0;
	if (do_ckp || fs->lfs_doifile) {
		do {
			vp = fs->lfs_ivnode;

#ifdef DEBUG
			LFS_ENTER_LOG("pretend", __FILE__, __LINE__, 0, 0);
#endif
			fs->lfs_flags &= ~LFS_IFDIRTY;

			ip = VTOI(vp);

			if (LIST_FIRST(&vp->v_dirtyblkhd) != NULL)
				lfs_writefile(fs, sp, vp);

			if (ip->i_flag & IN_ALLMOD)
				++did_ckp;
			redo = lfs_writeinode(fs, sp, ip);
			redo += lfs_writeseg(fs, sp);
			redo += (fs->lfs_flags & LFS_IFDIRTY);
		} while (redo && do_ckp);

		/*
		 * Unless we are unmounting, the Ifile may continue to have
		 * dirty blocks even after a checkpoint, due to changes to
		 * inodes' atime.  If we're checkpointing, it's "impossible"
		 * for other parts of the Ifile to be dirty after the loop
		 * above, since we hold the segment lock.
		 */
		s = splbio();
		if (LIST_EMPTY(&vp->v_dirtyblkhd)) {
			LFS_CLR_UINO(ip, IN_ALLMOD);
		}
#ifdef DIAGNOSTIC
		else if (do_ckp) {
			LIST_FOREACH(bp, &vp->v_dirtyblkhd, b_vnbufs) {
				if (bp->b_lblkno < fs->lfs_cleansz +
				    fs->lfs_segtabsz &&
				    !(bp->b_flags & B_GATHERED)) {
					panic("dirty blocks");
				}
			}
		}
#endif
		splx(s);
	} else {
		(void) lfs_writeseg(fs, sp);
	}

	/* Note Ifile no longer needs to be written */
	fs->lfs_doifile = 0;
	if (writer_set)
		lfs_writer_leave(fs);

	/*
	 * If we didn't write the Ifile, we didn't really do anything.
	 * That means that (1) there is a checkpoint on disk and (2)
	 * nothing has changed since it was written.
	 *
	 * Take the flags off of the segment so that lfs_segunlock
	 * doesn't have to write the superblock either.
	 */
	if (do_ckp && !did_ckp) {
		sp->seg_flags &= ~SEGM_CKP;
	}

	if (lfs_dostats) {
		++lfs_stats.nwrites;
		if (sp->seg_flags & SEGM_SYNC)
			++lfs_stats.nsync_writes;
		if (sp->seg_flags & SEGM_CKP)
			++lfs_stats.ncheckpoints;
	}
	lfs_segunlock(fs);
	return (0);
}

/*
 * Write the dirty blocks associated with a vnode.
 */
void
lfs_writefile(struct lfs *fs, struct segment *sp, struct vnode *vp)
{
	struct buf *bp;
	struct finfo *fip;
	struct inode *ip;
	IFILE *ifp;
	int i, frag;

	ip = VTOI(vp);

	if (sp->seg_bytes_left < fs->lfs_bsize ||
	    sp->sum_bytes_left < sizeof(struct finfo))
		(void) lfs_writeseg(fs, sp);

	sp->sum_bytes_left -= FINFOSIZE;
	++((SEGSUM *)(sp->segsum))->ss_nfinfo;

	if (vp->v_flag & VDIROP)
		((SEGSUM *)(sp->segsum))->ss_flags |= (SS_DIROP|SS_CONT);

	fip = sp->fip;
	fip->fi_nblocks = 0;
	fip->fi_ino = ip->i_number;
	LFS_IENTRY(ifp, fs, fip->fi_ino, bp);
	fip->fi_version = ifp->if_version;
	brelse(bp);

	if (sp->seg_flags & SEGM_CLEAN) {
		lfs_gather(fs, sp, vp, lfs_match_fake);
		/*
		 * For a file being flushed, we need to write *all* blocks.
		 * This means writing the cleaning blocks first, and then
		 * immediately following with any non-cleaning blocks.
		 * The same is true of the Ifile since checkpoints assume
		 * that all valid Ifile blocks are written.
		 */
		if (IS_FLUSHING(fs,vp) || vp == fs->lfs_ivnode) {
			lfs_gather(fs, sp, vp, lfs_match_data);
			/*
			 * Don't call VOP_PUTPAGES: if we're flushing,
			 * we've already done it, and the Ifile doesn't
			 * use the page cache.
			 */
		}
	} else {
		lfs_gather(fs, sp, vp, lfs_match_data);
		/*
		 * If we're flushing, we've already called VOP_PUTPAGES
		 * so don't do it again.  Otherwise, we want to write
		 * everything we've got.
		 */
		if (!IS_FLUSHING(fs, vp)) {
			simple_lock(&vp->v_interlock);
			VOP_PUTPAGES(vp, 0, 0,
				     PGO_CLEANIT | PGO_ALLPAGES | PGO_LOCKED);
		}
	}

	/*
	 * It may not be necessary to write the meta-data blocks at this point,
	 * as the roll-forward recovery code should be able to reconstruct the
	 * list.
	 *
	 * We have to write them anyway, though, under two conditions: (1) the
	 * vnode is being flushed (for reuse by vinvalbuf); or (2) we are
	 * checkpointing.
	 *
	 * BUT if we are cleaning, we might have indirect blocks that refer to
	 * new blocks not being written yet, in addition to fragments being
	 * moved out of a cleaned segment.  If that is the case, don't
	 * write the indirect blocks, or the finfo will have a small block
	 * in the middle of it!
	 * XXX in this case isn't the inode size wrong too?
	 */
	frag = 0;
	if (sp->seg_flags & SEGM_CLEAN) {
		for (i = 0; i < NDADDR; i++)
			if (ip->i_lfs_fragsize[i] > 0 &&
			    ip->i_lfs_fragsize[i] < fs->lfs_bsize)
				++frag;
	}
#ifdef DIAGNOSTIC
	if (frag > 1)
		panic("lfs_writefile: more than one fragment!");
#endif
	if (IS_FLUSHING(fs, vp) ||
	    (frag == 0 && (lfs_writeindir || (sp->seg_flags & SEGM_CKP)))) {
		lfs_gather(fs, sp, vp, lfs_match_indir);
		lfs_gather(fs, sp, vp, lfs_match_dindir);
		lfs_gather(fs, sp, vp, lfs_match_tindir);
	}
	fip = sp->fip;
	if (fip->fi_nblocks != 0) {
		sp->fip = (FINFO*)((caddr_t)fip + FINFOSIZE +
				   sizeof(int32_t) * (fip->fi_nblocks));
		sp->start_lbp = &sp->fip->fi_blocks[0];
	} else {
		sp->sum_bytes_left += FINFOSIZE;
		--((SEGSUM *)(sp->segsum))->ss_nfinfo;
	}
}

int
lfs_writeinode(struct lfs *fs, struct segment *sp, struct inode *ip)
{
	struct buf *bp, *ibp;
	struct ufs1_dinode *cdp;
	IFILE *ifp;
	SEGUSE *sup;
	daddr_t daddr;
	int32_t *daddrp;	/* XXX ondisk32 */
	ino_t ino;
	int error, i, ndx, fsb = 0;
	int redo_ifile = 0;
	struct timespec ts;
	int gotblk = 0;

	if (!(ip->i_flag & IN_ALLMOD))
		return (0);

	/* Allocate a new inode block if necessary. */
	if ((ip->i_number != LFS_IFILE_INUM || sp->idp == NULL) &&
	    sp->ibp == NULL) {
		/* Allocate a new segment if necessary. */
		if (sp->seg_bytes_left < fs->lfs_ibsize ||
		    sp->sum_bytes_left < sizeof(int32_t))
			(void) lfs_writeseg(fs, sp);

		/* Get next inode block. */
		daddr = fs->lfs_offset;
		fs->lfs_offset += btofsb(fs, fs->lfs_ibsize);
		sp->ibp = *sp->cbpp++ =
			getblk(VTOI(fs->lfs_ivnode)->i_devvp,
			       fsbtodb(fs, daddr), fs->lfs_ibsize, 0, 0);
		gotblk++;

		/* Zero out inode numbers */
		for (i = 0; i < INOPB(fs); ++i)
			((struct ufs1_dinode *)sp->ibp->b_data)[i].di_inumber =
				0;

		++sp->start_bpp;
		fs->lfs_avail -= btofsb(fs, fs->lfs_ibsize);
		/* Set remaining space counters. */
		sp->seg_bytes_left -= fs->lfs_ibsize;
		sp->sum_bytes_left -= sizeof(int32_t);
		ndx = fs->lfs_sumsize / sizeof(int32_t) -
			sp->ninodes / INOPB(fs) - 1;
		((int32_t *)(sp->segsum))[ndx] = daddr;
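		/*
		 * (Inode-block disk addresses are kept in an int32_t
		 * array growing backward from the end of the segment
		 * summary; ndx above selects the slot for this, the
		 * (ninodes / INOPB)'th, inode block.)
		 */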
	}

	/* Update the inode times and copy the inode onto the inode page. */
	TIMEVAL_TO_TIMESPEC(&time, &ts);
	/* XXX kludge --- don't redirty the ifile just to put times on it */
	if (ip->i_number != LFS_IFILE_INUM)
		LFS_ITIMES(ip, &ts, &ts, &ts);

	/*
	 * If this is the Ifile, and we've already written the Ifile in this
	 * partial segment, just overwrite it (it's not on disk yet) and
	 * continue.
	 *
	 * XXX we know that the bp that we get the second time around has
	 * already been gathered.
	 */
	if (ip->i_number == LFS_IFILE_INUM && sp->idp) {
		*(sp->idp) = *ip->i_din.ffs1_din;
		ip->i_lfs_osize = ip->i_size;
		return 0;
	}

	bp = sp->ibp;
	cdp = ((struct ufs1_dinode *)bp->b_data) + (sp->ninodes % INOPB(fs));
	*cdp = *ip->i_din.ffs1_din;
#ifdef LFS_IFILE_FRAG_ADDRESSING
	if (fs->lfs_version > 1)
		fsb = (sp->ninodes % INOPB(fs)) / INOPF(fs);
#endif

	/*
	 * If we are cleaning, ensure that we don't write UNWRITTEN disk
	 * addresses to disk; possibly revert the inode size.
	 * XXX By not writing these blocks, we are making the lfs_avail
	 * XXX count on disk wrong by the same amount.  We should be
	 * XXX able to "borrow" from lfs_avail and return it after the
	 * XXX Ifile is written.  See also in lfs_writeseg.
	 */
	if (ip->i_lfs_effnblks != ip->i_ffs1_blocks) {
		cdp->di_size = ip->i_lfs_osize;
#ifdef DEBUG_LFS
		printf("lfs_writeinode: cleansing ino %d (%d != %d)\n",
		       ip->i_number, ip->i_lfs_effnblks, ip->i_ffs1_blocks);
#endif
		for (daddrp = cdp->di_db; daddrp < cdp->di_ib + NIADDR;
		     daddrp++) {
			if (*daddrp == UNWRITTEN) {
#ifdef DEBUG_LFS
				printf("lfs_writeinode: wiping UNWRITTEN\n");
#endif
				*daddrp = 0;
			}
		}
	} else {
		/* If all blocks are going to disk, update the "size on disk" */
		ip->i_lfs_osize = ip->i_size;
	}

	if (ip->i_flag & IN_CLEANING)
		LFS_CLR_UINO(ip, IN_CLEANING);
	else {
		/* XXX IN_ALLMOD */
		LFS_CLR_UINO(ip, IN_ACCESSED | IN_ACCESS | IN_CHANGE |
			     IN_UPDATE);
		if (ip->i_lfs_effnblks == ip->i_ffs1_blocks)
			LFS_CLR_UINO(ip, IN_MODIFIED);
#ifdef DEBUG_LFS
		else
			printf("lfs_writeinode: ino %d: real blks=%d, "
			       "eff=%d\n", ip->i_number, ip->i_ffs1_blocks,
			       ip->i_lfs_effnblks);
#endif
	}

	if (ip->i_number == LFS_IFILE_INUM) /* We know sp->idp == NULL */
		sp->idp = ((struct ufs1_dinode *)bp->b_data) +
			(sp->ninodes % INOPB(fs));
	if (gotblk) {
		LFS_LOCK_BUF(bp);
		brelse(bp);
	}

	/* Increment inode count in segment summary block. */
	++((SEGSUM *)(sp->segsum))->ss_ninos;

	/* If this page is full, set flag to allocate a new page. */
	if (++sp->ninodes % INOPB(fs) == 0)
		sp->ibp = NULL;

	/*
	 * If updating the ifile, update the super-block.  Update the disk
	 * address and access times for this inode in the ifile.
	 */
	ino = ip->i_number;
	if (ino == LFS_IFILE_INUM) {
		daddr = fs->lfs_idaddr;
		fs->lfs_idaddr = dbtofsb(fs, bp->b_blkno);
	} else {
		LFS_IENTRY(ifp, fs, ino, ibp);
		daddr = ifp->if_daddr;
		ifp->if_daddr = dbtofsb(fs, bp->b_blkno) + fsb;
#ifdef LFS_DEBUG_NEXTFREE
		if (ino > 3 && ifp->if_nextfree) {
			vprint("lfs_writeinode",ITOV(ip));
			printf("lfs_writeinode: updating free ino %d\n",
			       ip->i_number);
		}
#endif
		error = LFS_BWRITE_LOG(ibp); /* Ifile */
	}

	/*
	 * The inode's last address should not be in the current partial
	 * segment, except under exceptional circumstances (lfs_writevnodes
	 * had to start over, and in the meantime more blocks were written
	 * to a vnode).  Both inodes will be accounted to this segment
	 * in lfs_writeseg so we need to subtract the earlier version
	 * here anyway.  The segment count can temporarily dip below
	 * zero here; keep track of how many duplicates we have in
	 * "dupino" so we don't panic below.
	 */
	if (daddr >= fs->lfs_lastpseg && daddr <= dbtofsb(fs, bp->b_blkno)) {
		++sp->ndupino;
		printf("lfs_writeinode: last inode addr in current pseg "
		       "(ino %d daddr 0x%llx) ndupino=%d\n", ino,
		       (long long)daddr, sp->ndupino);
	}
	/*
	 * Account the inode: it no longer belongs to its former segment,
	 * though it will not belong to the new segment until that segment
	 * is actually written.
	 */
	if (daddr != LFS_UNUSED_DADDR) {
		u_int32_t oldsn = dtosn(fs, daddr);
#ifdef DIAGNOSTIC
		int ndupino = (sp->seg_number == oldsn) ? sp->ndupino : 0;
#endif
		LFS_SEGENTRY(sup, fs, oldsn, bp);
#ifdef DIAGNOSTIC
		if (sup->su_nbytes +
		    sizeof (struct ufs1_dinode) * ndupino
		      < sizeof (struct ufs1_dinode)) {
			printf("lfs_writeinode: negative bytes "
			       "(segment %" PRIu32 " short by %d, "
			       "oldsn=%" PRIu32 ", cursn=%" PRIu32
			       ", daddr=%" PRId64 ", su_nbytes=%u, "
			       "ndupino=%d)\n",
			       dtosn(fs, daddr),
			       (int)sizeof (struct ufs1_dinode) *
				     (1 - sp->ndupino) - sup->su_nbytes,
			       oldsn, sp->seg_number, daddr,
			       (unsigned int)sup->su_nbytes,
			       sp->ndupino);
			panic("lfs_writeinode: negative bytes");
			sup->su_nbytes = sizeof (struct ufs1_dinode);
		}
#endif
#ifdef DEBUG_SU_NBYTES
		printf("seg %d -= %d for ino %d inode\n",
		       dtosn(fs, daddr), (int)sizeof (struct ufs1_dinode),
		       ino);
#endif
		sup->su_nbytes -= sizeof (struct ufs1_dinode);
		redo_ifile =
			(ino == LFS_IFILE_INUM && !(bp->b_flags & B_GATHERED));
		if (redo_ifile)
			fs->lfs_flags |= LFS_IFDIRTY;
		LFS_WRITESEGENTRY(sup, fs, oldsn, bp); /* Ifile */
	}
	return (redo_ifile);
}

int
lfs_gatherblock(struct segment *sp, struct buf *bp, int *sptr)
{
	struct lfs *fs;
	int version;
	int j, blksinblk;

	/*
	 * If full, finish this segment.  We may be doing I/O, so
	 * release and reacquire the splbio().
	 */
#ifdef DIAGNOSTIC
	if (sp->vp == NULL)
		panic ("lfs_gatherblock: Null vp in segment");
#endif
	fs = sp->fs;
	blksinblk = howmany(bp->b_bcount, fs->lfs_bsize);
	if (sp->sum_bytes_left < sizeof(int32_t) * blksinblk ||
	    sp->seg_bytes_left < bp->b_bcount) {
		if (sptr)
			splx(*sptr);
		lfs_updatemeta(sp);

		version = sp->fip->fi_version;
		(void) lfs_writeseg(fs, sp);

		sp->fip->fi_version = version;
		sp->fip->fi_ino = VTOI(sp->vp)->i_number;
		/* Add the current file to the segment summary. */
		++((SEGSUM *)(sp->segsum))->ss_nfinfo;
		sp->sum_bytes_left -= FINFOSIZE;

		if (sptr)
			*sptr = splbio();
		return (1);
	}

#ifdef DEBUG
	if (bp->b_flags & B_GATHERED) {
		printf("lfs_gatherblock: already gathered! Ino %d,"
		       " lbn %" PRId64 "\n",
		       sp->fip->fi_ino, bp->b_lblkno);
		return (0);
	}
#endif
	/* Insert into the buffer list, update the FINFO block. */
	bp->b_flags |= B_GATHERED;

	*sp->cbpp++ = bp;
	for (j = 0; j < blksinblk; j++)
		sp->fip->fi_blocks[sp->fip->fi_nblocks++] = bp->b_lblkno + j;

	sp->sum_bytes_left -= sizeof(int32_t) * blksinblk;
	sp->seg_bytes_left -= bp->b_bcount;
	return (0);
}

int
lfs_gather(struct lfs *fs, struct segment *sp, struct vnode *vp,
    int (*match)(struct lfs *, struct buf *))
{
	struct buf *bp, *nbp;
	int s, count = 0;

	KASSERT(sp->vp == NULL);
	sp->vp = vp;
	s = splbio();

#ifndef LFS_NO_BACKBUF_HACK
/* This is a hack to see if ordering the blocks in LFS makes a difference. */
# define	BUF_OFFSET \
	(((caddr_t)&LIST_NEXT(bp, b_vnbufs)) - (caddr_t)bp)
# define	BACK_BUF(BP) \
	((struct buf *)(((caddr_t)(BP)->b_vnbufs.le_prev) - BUF_OFFSET))
# define	BEG_OF_LIST \
	((struct buf *)(((caddr_t)&LIST_FIRST(&vp->v_dirtyblkhd)) - BUF_OFFSET))

loop:
	/* Find last buffer. */
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd);
	     bp && LIST_NEXT(bp, b_vnbufs) != NULL;
	     bp = LIST_NEXT(bp, b_vnbufs))
		/* nothing */;
	for (; bp && bp != BEG_OF_LIST; bp = nbp) {
		nbp = BACK_BUF(bp);
#else /* LFS_NO_BACKBUF_HACK */
loop:
	for (bp = LIST_FIRST(&vp->v_dirtyblkhd); bp; bp = nbp) {
		nbp = LIST_NEXT(bp, b_vnbufs);
#endif /* LFS_NO_BACKBUF_HACK */
		if ((bp->b_flags & (B_BUSY|B_GATHERED)) || !match(fs, bp)) {
#ifdef DEBUG_LFS
			if (vp == fs->lfs_ivnode &&
			    (bp->b_flags & (B_BUSY|B_GATHERED)) == B_BUSY)
				printf("(%" PRId64 ":%lx)",
				       bp->b_lblkno, bp->b_flags);
#endif
			continue;
		}
		if (vp->v_type == VBLK) {
			/* For block devices, just write the blocks. */
			/* XXX Do we really need to even do this? */
#ifdef DEBUG_LFS
			if (count == 0)
				printf("BLK(");
			printf(".");
#endif
			/*
			 * Get the block before bwrite,
			 * so we don't corrupt the free list
			 */
			bp->b_flags |= B_BUSY;
			bremfree(bp);
			bwrite(bp);
		} else {
#ifdef DIAGNOSTIC
# ifdef LFS_USE_B_INVAL
			if ((bp->b_flags & (B_CALL|B_INVAL)) == B_INVAL) {
				printf("lfs_gather: lbn %" PRId64 " is "
				       "B_INVAL\n", bp->b_lblkno);
				VOP_PRINT(bp->b_vp);
			}
# endif /* LFS_USE_B_INVAL */
			if (!(bp->b_flags & B_DELWRI))
				panic("lfs_gather: bp not B_DELWRI");
			if (!(bp->b_flags & B_LOCKED)) {
				printf("lfs_gather: lbn %" PRId64 " blk "
				       "%" PRId64 " not B_LOCKED\n",
				       bp->b_lblkno,
				       dbtofsb(fs, bp->b_blkno));
				VOP_PRINT(bp->b_vp);
				panic("lfs_gather: bp not B_LOCKED");
			}
#endif
			if (lfs_gatherblock(sp, bp, &s)) {
				goto loop;
			}
		}
		count++;
	}
	splx(s);
#ifdef DEBUG_LFS
	if (vp->v_type == VBLK && count)
		printf(")\n");
#endif
	lfs_updatemeta(sp);
	KASSERT(sp->vp == vp);
	sp->vp = NULL;
	return count;
}

#if DEBUG
# define DEBUG_OOFF(n) do {						\
	if (ooff == 0) {						\
		printf("lfs_updatemeta[%d]: warning: writing "		\
		       "ino %d lbn %" PRId64 " at 0x%" PRIx32		\
		       ", was 0x0 (or %" PRId64 ")\n",			\
		       (n), ip->i_number, lbn, ndaddr, daddr);		\
	}								\
} while (0)
#else
# define DEBUG_OOFF(n)
#endif

/*
 * Change the given block's address to ndaddr, finding its previous
 * location using ufs_bmaparray().
 *
 * Account for this change in the segment table.
 *
 * called with sp == NULL by roll-forwarding code.
 */
void
lfs_update_single(struct lfs *fs, struct segment *sp, struct vnode *vp,
    daddr_t lbn, int32_t ndaddr, int size)
{
	SEGUSE *sup;
	struct buf *bp;
	struct indir a[NIADDR + 2], *ap;
	struct inode *ip;
	daddr_t daddr, ooff;
	int num, error;
	int bb, osize, obb;

	KASSERT(sp == NULL || sp->vp == vp);
	ip = VTOI(vp);

	error = ufs_bmaparray(vp, lbn, &daddr, a, &num, NULL, NULL);
	if (error)
		panic("lfs_updatemeta: ufs_bmaparray returned %d", error);

	KASSERT(daddr <= LFS_MAX_DADDR);
	if (daddr > 0)
		daddr = dbtofsb(fs, daddr);

	bb = fragstofsb(fs, numfrags(fs, size));
	switch (num) {
	    case 0:
		ooff = ip->i_ffs1_db[lbn];
		DEBUG_OOFF(0);
		if (ooff == UNWRITTEN)
			ip->i_ffs1_blocks += bb;
		else {
			/* possible fragment truncation or extension */
			obb = btofsb(fs, ip->i_lfs_fragsize[lbn]);
			ip->i_ffs1_blocks += (bb - obb);
		}
		ip->i_ffs1_db[lbn] = ndaddr;
		break;
	    case 1:
		ooff = ip->i_ffs1_ib[a[0].in_off];
		DEBUG_OOFF(1);
		if (ooff == UNWRITTEN)
			ip->i_ffs1_blocks += bb;
		ip->i_ffs1_ib[a[0].in_off] = ndaddr;
		break;
	    default:
		ap = &a[num - 1];
		if (bread(vp, ap->in_lbn, fs->lfs_bsize, NOCRED, &bp))
			panic("lfs_updatemeta: bread bno %" PRId64,
			      ap->in_lbn);

		/* XXX ondisk32 */
		ooff = ((int32_t *)bp->b_data)[ap->in_off];
		DEBUG_OOFF(num);
		if (ooff == UNWRITTEN)
			ip->i_ffs1_blocks += bb;
		/* XXX ondisk32 */
		((int32_t *)bp->b_data)[ap->in_off] = ndaddr;
		(void) VOP_BWRITE(bp);
	}

	KASSERT(ooff == 0 || ooff == UNWRITTEN || ooff == daddr);

	/*
	 * Though we'd rather it couldn't, this *can* happen right now
	 * if cleaning blocks and regular blocks coexist.
	 */
	/* KASSERT(daddr < fs->lfs_lastpseg || daddr > ndaddr); */

	/*
	 * Update segment usage information, based on old size
	 * and location.
	 */
	if (daddr > 0) {
		u_int32_t oldsn = dtosn(fs, daddr);
#ifdef DIAGNOSTIC
		int ndupino;

		if (sp && sp->seg_number == oldsn) {
			ndupino = sp->ndupino;
		} else {
			ndupino = 0;
		}
#endif
		KASSERT(oldsn >= 0 && oldsn < fs->lfs_nseg);
		if (lbn >= 0 && lbn < NDADDR)
			osize = ip->i_lfs_fragsize[lbn];
		else
			osize = fs->lfs_bsize;
		LFS_SEGENTRY(sup, fs, oldsn, bp);
#ifdef DIAGNOSTIC
		if (sup->su_nbytes + sizeof (struct ufs1_dinode) * ndupino
		    < osize) {
			printf("lfs_updatemeta: negative bytes "
			       "(segment %" PRIu32 " short by %" PRId64
			       ")\n", dtosn(fs, daddr),
			       (int64_t)osize -
			       (sizeof (struct ufs1_dinode) * ndupino +
				sup->su_nbytes));
			printf("lfs_updatemeta: ino %d, lbn %" PRId64
			       ", addr = 0x%" PRIx64 "\n",
			       ip->i_number, lbn, daddr);
			printf("lfs_updatemeta: ndupino=%d\n", ndupino);
			panic("lfs_updatemeta: negative bytes");
			sup->su_nbytes = osize -
				sizeof (struct ufs1_dinode) * ndupino;
		}
#endif
#ifdef DEBUG_SU_NBYTES
		printf("seg %" PRIu32 " -= %d for ino %d lbn %" PRId64
		       " db 0x%" PRIx64 "\n",
		       dtosn(fs, daddr), osize,
		       ip->i_number, lbn, daddr);
#endif
		sup->su_nbytes -= osize;
		if (!(bp->b_flags & B_GATHERED))
			fs->lfs_flags |= LFS_IFDIRTY;
		LFS_WRITESEGENTRY(sup, fs, oldsn, bp);
	}
	/*
	 * Now that this block has a new address, and its old
	 * segment no longer owns it, we can forget about its
	 * old size.
	 */
	if (lbn >= 0 && lbn < NDADDR)
		ip->i_lfs_fragsize[lbn] = size;
}

/*
 * Update the metadata that points to the blocks listed in the FINFO
 * array.
 */
void
lfs_updatemeta(struct segment *sp)
{
	struct buf *sbp;
	struct lfs *fs;
	struct vnode *vp;
	daddr_t lbn;
	int i, nblocks, num;
	int bb;
	int bytesleft, size;

	vp = sp->vp;
	nblocks = &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp;
	KASSERT(nblocks >= 0);
	KASSERT(vp != NULL);
	if (nblocks == 0)
		return;

	/*
	 * This count may be high due to oversize blocks from lfs_gop_write.
	 * Correct for this. (XXX we should be able to keep track of these.)
	 */
	fs = sp->fs;
	for (i = 0; i < nblocks; i++) {
		if (sp->start_bpp[i] == NULL) {
			printf("nblocks = %d, not %d\n", i, nblocks);
			nblocks = i;
			break;
		}
		num = howmany(sp->start_bpp[i]->b_bcount, fs->lfs_bsize);
		KASSERT(sp->start_bpp[i]->b_lblkno >= 0 || num == 1);
		nblocks -= num - 1;
	}

	KASSERT(vp->v_type == VREG ||
	    nblocks == &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp);
	KASSERT(nblocks == sp->cbpp - sp->start_bpp);

	/*
	 * Sort the blocks.
	 *
	 * We have to sort even if the blocks come from the
	 * cleaner, because there might be other pending blocks on the
	 * same inode...and if we don't sort, and there are fragments
	 * present, blocks may be written in the wrong place.
	 */
	lfs_shellsort(sp->start_bpp, sp->start_lbp, nblocks, fs->lfs_bsize);

	/*
	 * Record the length of the last block in case it's a fragment.
	 * If there are indirect blocks present, they sort last.  An
	 * indirect block will be lfs_bsize and its presence indicates
	 * that you cannot have fragments.
	 *
	 * XXX This last is a lie.  A cleaned fragment can coexist with
	 * XXX a later indirect block.  This will continue to be
	 * XXX true until lfs_markv is fixed to do everything with
	 * XXX fake blocks (including fake inodes and fake indirect blocks).
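	 *
	 * (Arithmetic note: lfs_bmask is the block-offset mask, so the
	 * expression below is b_bcount reduced modulo lfs_bsize, with an
	 * exact multiple mapping to lfs_bsize itself; i.e., the true
	 * length of the final fragment.)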
	 */
	sp->fip->fi_lastlength = ((sp->start_bpp[nblocks - 1]->b_bcount - 1) &
		fs->lfs_bmask) + 1;

	/*
	 * Assign disk addresses, and update references to the logical
	 * block and the segment usage information.
	 */
	for (i = nblocks; i--; ++sp->start_bpp) {
		sbp = *sp->start_bpp;
		lbn = *sp->start_lbp;
		KASSERT(sbp->b_lblkno == lbn);

		sbp->b_blkno = fsbtodb(fs, fs->lfs_offset);

		/*
		 * If we write a frag in the wrong place, the cleaner won't
		 * be able to correctly identify its size later, and the
		 * segment will be uncleanable.  (Even worse, it will assume
		 * that the indirect block that actually ends the list
		 * is of a smaller size!)
		 */
		if ((sbp->b_bcount & fs->lfs_bmask) && i != 0)
			panic("lfs_updatemeta: fragment is not last block");

		/*
		 * For each subblock in this possibly oversized block,
		 * update its address on disk.
		 */
		KASSERT(lbn >= 0 || sbp->b_bcount == fs->lfs_bsize);
		KASSERT(vp == sbp->b_vp);
		for (bytesleft = sbp->b_bcount; bytesleft > 0;
		     bytesleft -= fs->lfs_bsize) {
			size = MIN(bytesleft, fs->lfs_bsize);
			bb = fragstofsb(fs, numfrags(fs, size));
			lbn = *sp->start_lbp++;
			lfs_update_single(fs, sp, sp->vp, lbn, fs->lfs_offset,
			    size);
			fs->lfs_offset += bb;
		}

	}
}

/*
 * Start a new partial segment.
 *
 * Return 1 when we entered to a new segment.
 * Otherwise, return 0.
 */
int
lfs_initseg(struct lfs *fs)
{
	struct segment *sp = fs->lfs_sp;
	SEGSUM *ssp;
	struct buf *sbp;	/* buffer for SEGSUM */
	int repeat = 0;		/* return value */

	/* Advance to the next segment. */
	if (!LFS_PARTIAL_FITS(fs)) {
		SEGUSE *sup;
		struct buf *bp;

		/* lfs_avail eats the remaining space */
		fs->lfs_avail -= fs->lfs_fsbpseg - (fs->lfs_offset -
						    fs->lfs_curseg);
		/* Wake up any cleaning procs waiting on this file system. */
		wakeup(&lfs_allclean_wakeup);
		wakeup(&fs->lfs_nextseg);
		lfs_newseg(fs);
		repeat = 1;
		fs->lfs_offset = fs->lfs_curseg;

		sp->seg_number = dtosn(fs, fs->lfs_curseg);
		sp->seg_bytes_left = fsbtob(fs, fs->lfs_fsbpseg);

		/*
		 * If the segment contains a superblock, update the offset
		 * and summary address to skip over it.
		 */
		LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
		if (sup->su_flags & SEGUSE_SUPERBLOCK) {
			fs->lfs_offset += btofsb(fs, LFS_SBPAD);
			sp->seg_bytes_left -= LFS_SBPAD;
		}
		brelse(bp);
		/* Segment zero could also contain the labelpad */
		if (fs->lfs_version > 1 && sp->seg_number == 0 &&
		    fs->lfs_start < btofsb(fs, LFS_LABELPAD)) {
			fs->lfs_offset +=
				btofsb(fs, LFS_LABELPAD) - fs->lfs_start;
			sp->seg_bytes_left -=
				LFS_LABELPAD - fsbtob(fs, fs->lfs_start);
		}
	} else {
		sp->seg_number = dtosn(fs, fs->lfs_curseg);
		sp->seg_bytes_left = fsbtob(fs, fs->lfs_fsbpseg -
				     (fs->lfs_offset - fs->lfs_curseg));
	}
	fs->lfs_lastpseg = fs->lfs_offset;

	/* Record first address of this partial segment */
	if (sp->seg_flags & SEGM_CLEAN) {
		fs->lfs_cleanint[fs->lfs_cleanind] = fs->lfs_offset;
		if (++fs->lfs_cleanind >= LFS_MAX_CLEANIND) {
			/* "1" is the artificial inc in lfs_seglock */
			while (fs->lfs_iocount > 1) {
				tsleep(&fs->lfs_iocount, PRIBIO + 1,
				       "lfs_initseg", 0);
			}
			fs->lfs_cleanind = 0;
		}
	}

	sp->fs = fs;
	sp->ibp = NULL;
	sp->idp = NULL;
	sp->ninodes = 0;
	sp->ndupino = 0;

	sp->cbpp = sp->bpp;

	/* Get a new buffer for SEGSUM */
	sbp = lfs_newbuf(fs, VTOI(fs->lfs_ivnode)->i_devvp,
	    fsbtodb(fs, fs->lfs_offset), fs->lfs_sumsize, LFS_NB_SUMMARY);

	/* ... and enter it into the buffer list. */
	*sp->cbpp = sbp;
	sp->cbpp++;
	fs->lfs_offset += btofsb(fs, fs->lfs_sumsize);

	sp->start_bpp = sp->cbpp;

	/* Set point to SEGSUM, initialize it. */
	ssp = sp->segsum = sbp->b_data;
	memset(ssp, 0, fs->lfs_sumsize);
	ssp->ss_next = fs->lfs_nextseg;
	ssp->ss_nfinfo = ssp->ss_ninos = 0;
	ssp->ss_magic = SS_MAGIC;

	/* Set pointer to first FINFO, initialize it. */
	sp->fip = (struct finfo *)((caddr_t)sp->segsum + SEGSUM_SIZE(fs));
	sp->fip->fi_nblocks = 0;
	sp->start_lbp = &sp->fip->fi_blocks[0];
	sp->fip->fi_lastlength = 0;

	sp->seg_bytes_left -= fs->lfs_sumsize;
	sp->sum_bytes_left = fs->lfs_sumsize - SEGSUM_SIZE(fs);

	return (repeat);
}

/*
 * Return the next segment to write.
 */
void
lfs_newseg(struct lfs *fs)
{
	CLEANERINFO *cip;
	SEGUSE *sup;
	struct buf *bp;
	int curseg, isdirty, sn;

	LFS_SEGENTRY(sup, fs, dtosn(fs, fs->lfs_nextseg), bp);
#ifdef DEBUG_SU_NBYTES
	printf("lfs_newseg: seg %d := 0 in newseg\n",	/* XXXDEBUG */
	       dtosn(fs, fs->lfs_nextseg));		/* XXXDEBUG */
#endif
	sup->su_flags |= SEGUSE_DIRTY | SEGUSE_ACTIVE;
	sup->su_nbytes = 0;
	sup->su_nsums = 0;
	sup->su_ninos = 0;
	LFS_WRITESEGENTRY(sup, fs, dtosn(fs, fs->lfs_nextseg), bp);

	LFS_CLEANERINFO(cip, fs, bp);
	--cip->clean;
	++cip->dirty;
	fs->lfs_nclean = cip->clean;
	LFS_SYNC_CLEANERINFO(cip, fs, bp, 1);

	fs->lfs_lastseg = fs->lfs_curseg;
	fs->lfs_curseg = fs->lfs_nextseg;
	for (sn = curseg = dtosn(fs, fs->lfs_curseg) + fs->lfs_interleave;;) {
		sn = (sn + 1) % fs->lfs_nseg;
		if (sn == curseg)
			panic("lfs_nextseg: no clean segments");
		LFS_SEGENTRY(sup, fs, sn, bp);
		isdirty = sup->su_flags & SEGUSE_DIRTY;
		/* Check SEGUSE_EMPTY as we go along */
		if (isdirty && sup->su_nbytes == 0 &&
		    !(sup->su_flags & SEGUSE_EMPTY))
			LFS_WRITESEGENTRY(sup, fs, sn, bp);
		else
			brelse(bp);

		if (!isdirty)
			break;
	}

	++fs->lfs_nactive;
	fs->lfs_nextseg = sntod(fs, sn);
	if (lfs_dostats) {
		++lfs_stats.segsused;
	}
}

static struct buf *
lfs_newclusterbuf(struct lfs *fs, struct vnode *vp, daddr_t addr, int n)
{
	struct lfs_cluster *cl;
	struct buf **bpp, *bp;
	int s;

	cl = (struct lfs_cluster *)pool_get(&fs->lfs_clpool, PR_WAITOK);
	bpp = (struct buf **)pool_get(&fs->lfs_bpppool, PR_WAITOK);
	memset(cl, 0, sizeof(*cl));
	cl->fs = fs;
	cl->bpp = bpp;
	cl->bufcount = 0;
	cl->bufsize = 0;

	/* If this segment is being written synchronously, note that */
	if (fs->lfs_sp->seg_flags & SEGM_SYNC) {
		cl->flags |= LFS_CL_SYNC;
		cl->seg = fs->lfs_sp;
		++cl->seg->seg_iocount;
		/* printf("+ %x => %d\n", cl->seg, cl->seg->seg_iocount); */
	}

	/* Get an empty buffer header, or maybe one with something on it */
	s = splbio();
	bp = pool_get(&bufpool, PR_WAITOK);	/* XXX should use lfs_malloc? */
	splx(s);
	memset(bp, 0, sizeof(*bp));
	BUF_INIT(bp);

	bp->b_flags = B_BUSY | B_CALL;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = addr;
	bp->b_iodone = lfs_cluster_callback;
	bp->b_private = cl;
	bp->b_vp = vp;

	return bp;
}

int
lfs_writeseg(struct lfs *fs, struct segment *sp)
{
	struct buf **bpp, *bp, *cbp, *newbp;
	SEGUSE *sup;
	SEGSUM *ssp;
	char *datap, *dp;
	int i, s;
	int do_again, nblocks, byteoffset;
	size_t el_size;
	struct lfs_cluster *cl;
	u_short ninos;
	struct vnode *devvp;
	char *p = NULL;
	struct vnode *vp;
	int32_t *daddrp;	/* XXX ondisk32 */
	int changed;
#if defined(DEBUG) && defined(LFS_PROPELLER)
	static int propeller;
	char propstring[4] = "-\\|/";

	printf("%c\b",propstring[propeller++]);
	if (propeller == 4)
		propeller = 0;
#endif

	/*
	 * If there are no buffers other than the segment summary to write
	 * and it is not a checkpoint, don't do anything.  On a checkpoint,
	 * even if there aren't any buffers, you need to write the superblock.
	 */
	if ((nblocks = sp->cbpp - sp->bpp) == 1)
		return (0);

	devvp = VTOI(fs->lfs_ivnode)->i_devvp;

	/* Update the segment usage information. */
	LFS_SEGENTRY(sup, fs, sp->seg_number, bp);

	/* Loop through all blocks, except the segment summary. */
	for (bpp = sp->bpp; ++bpp < sp->cbpp; ) {
		if ((*bpp)->b_vp != devvp) {
			sup->su_nbytes += (*bpp)->b_bcount;
#ifdef DEBUG_SU_NBYTES
			printf("seg %" PRIu32 " += %ld for ino %d lbn %" PRId64
			       " db 0x%" PRIx64 "\n", sp->seg_number,
			       (*bpp)->b_bcount,
			       VTOI((*bpp)->b_vp)->i_number, (*bpp)->b_lblkno,
			       (*bpp)->b_blkno);
#endif
		}
	}

	ssp = (SEGSUM *)sp->segsum;

	ninos = (ssp->ss_ninos + INOPB(fs) - 1) / INOPB(fs);
#ifdef DEBUG_SU_NBYTES
	printf("seg %d += %d for %d inodes\n",	/* XXXDEBUG */
	       sp->seg_number,
	       (int)(ssp->ss_ninos * sizeof (struct ufs1_dinode)),
	       ssp->ss_ninos);
#endif
	sup->su_nbytes += ssp->ss_ninos * sizeof (struct ufs1_dinode);
	/* sup->su_nbytes += fs->lfs_sumsize; */
	if (fs->lfs_version == 1)
		sup->su_olastmod = time.tv_sec;
	else
		sup->su_lastmod = time.tv_sec;
	sup->su_ninos += ninos;
	++sup->su_nsums;
	fs->lfs_dmeta += (btofsb(fs, fs->lfs_sumsize) + btofsb(fs, ninos *
							fs->lfs_ibsize));
	fs->lfs_avail -= btofsb(fs, fs->lfs_sumsize);

	do_again = !(bp->b_flags & B_GATHERED);
	LFS_WRITESEGENTRY(sup, fs, sp->seg_number, bp); /* Ifile */

	/*
	 * Mark blocks B_BUSY, to prevent them from being changed between
	 * the checksum computation and the actual write.
	 *
	 * If we are cleaning, check indirect blocks for UNWRITTEN, and if
	 * there are any, replace them with copies that have UNASSIGNED
	 * instead.
	 */
	for (bpp = sp->bpp, i = nblocks - 1; i--;) {
		++bpp;
		bp = *bpp;
		if (bp->b_flags & B_CALL) { /* UBC or malloced buffer */
			bp->b_flags |= B_BUSY;
			continue;
		}
	again:
		s = splbio();
		if (bp->b_flags & B_BUSY) {
#ifdef DEBUG
			printf("lfs_writeseg: avoiding potential data summary "
			       "corruption for ino %d, lbn %" PRId64 "\n",
			       VTOI(bp->b_vp)->i_number, bp->b_lblkno);
#endif
			bp->b_flags |= B_WANTED;
			tsleep(bp, (PRIBIO + 1), "lfs_writeseg", 0);
			splx(s);
			goto again;
		}
		bp->b_flags |= B_BUSY;
		splx(s);
		/*
		 * Check and replace indirect block UNWRITTEN bogosity.
		 * XXX See comment in lfs_writefile.
		 */
		if (bp->b_lblkno < 0 && bp->b_vp != devvp && bp->b_vp &&
		    VTOI(bp->b_vp)->i_ffs1_blocks !=
		    VTOI(bp->b_vp)->i_lfs_effnblks) {
#ifdef DEBUG_LFS
			printf("lfs_writeseg: cleansing ino %d (%d != %d)\n",
			       VTOI(bp->b_vp)->i_number,
			       VTOI(bp->b_vp)->i_lfs_effnblks,
			       VTOI(bp->b_vp)->i_ffs1_blocks);
#endif
			/* Make a copy we'll make changes to */
			newbp = lfs_newbuf(fs, bp->b_vp, bp->b_lblkno,
					   bp->b_bcount, LFS_NB_IBLOCK);
			newbp->b_blkno = bp->b_blkno;
			memcpy(newbp->b_data, bp->b_data,
			       newbp->b_bcount);

			changed = 0;
			/* XXX ondisk32 */
			for (daddrp = (int32_t *)(newbp->b_data);
			     daddrp < (int32_t *)(newbp->b_data +
						  newbp->b_bcount); daddrp++) {
				if (*daddrp == UNWRITTEN) {
#ifdef DEBUG_LFS
					off_t doff;
					int32_t ioff;

					ioff =
					    daddrp - (int32_t *)(newbp->b_data);
					doff =
					    (-bp->b_lblkno + ioff) * fs->lfs_bsize;
					printf("ino %d lbn %" PRId64
					       " entry %d off %" PRIx64 "\n",
					       VTOI(bp->b_vp)->i_number,
					       bp->b_lblkno, ioff, doff);
					if (bp->b_vp->v_type == VREG) {
						/*
						 * What is up with this page?
						 */
						struct vm_page *pg;

						for (; doff / fs->lfs_bsize ==
						     (-bp->b_lblkno + ioff);
						     doff += PAGE_SIZE) {
							pg = uvm_pagelookup(
							    &bp->b_vp->v_uobj,
							    doff);
							if (pg == NULL)
								printf(" page at %" PRIx64 " is NULL\n", doff);
							else
								printf(" page at %" PRIx64
								       " flags 0x%x pqflags 0x%x\n",
								       doff, pg->flags, pg->pqflags);
						}
					}
#endif /* DEBUG_LFS */
					++changed;
					*daddrp = 0;
				}
			}
			/*
			 * Get rid of the old buffer.  Don't mark it clean,
			 * though, if it still has dirty data on it.
			 */
			if (changed) {
#ifdef DEBUG_LFS
				printf("lfs_writeseg: replacing UNWRITTEN(%d):"
				       " bp = %p newbp = %p\n", changed, bp,
				       newbp);
#endif
				*bpp = newbp;
				bp->b_flags &= ~(B_ERROR | B_GATHERED);
				if (bp->b_flags & B_CALL) {
					printf("lfs_writeseg: "
					       "indir bp should not be B_CALL\n");
					s = splbio();
					biodone(bp);
					splx(s);
					bp = NULL;
				} else {
					/* Still on free list, leave it there */
					s = splbio();
					bp->b_flags &= ~B_BUSY;
					if (bp->b_flags & B_WANTED)
						wakeup(bp);
					splx(s);
					/*
					 * We have to re-decrement lfs_avail
					 * since this block is going to come
					 * back around to us in the next
					 * segment.
					 */
					fs->lfs_avail -=
						btofsb(fs, bp->b_bcount);
				}
			} else {
				lfs_freebuf(fs, newbp);
			}
		}
	}
	/*
	 * Compute checksum across data and then across summary; the first
	 * block (the summary block) is skipped.  Set the create time here
	 * so that it's guaranteed to be later than the inode mod times.
	 *
	 * XXX
	 * Fix this to do it inline, instead of malloc/copy.
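	 *
	 * (Note that the loop below copies only the first el_size bytes
	 * of each block into datap, so ss_datasum covers one sample word
	 * per block rather than each block's full contents.)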
1846 */
1847 datap = dp = pool_get(&fs->lfs_bpppool, PR_WAITOK);
1848 if (fs->lfs_version == 1)
1849 el_size = sizeof(u_long);
1850 else
1851 el_size = sizeof(u_int32_t);
1852 for (bpp = sp->bpp, i = nblocks - 1; i--; ) {
1853 ++bpp;
1854 /* Loop through gop_write cluster blocks */
1855 for (byteoffset = 0; byteoffset < (*bpp)->b_bcount;
1856 byteoffset += fs->lfs_bsize) {
1857 #ifdef LFS_USE_B_INVAL
1858 if (((*bpp)->b_flags & (B_CALL | B_INVAL)) ==
1859 (B_CALL | B_INVAL)) {
1860 if (copyin((caddr_t)(*bpp)->b_saveaddr +
1861 byteoffset, dp, el_size)) {
1862 panic("lfs_writeseg: copyin failed [1]:"
1863 " ino %d blk %" PRId64,
1864 VTOI((*bpp)->b_vp)->i_number,
1865 (*bpp)->b_lblkno);
1866 }
1867 } else
1868 #endif /* LFS_USE_B_INVAL */
1869 {
1870 memcpy(dp, (*bpp)->b_data + byteoffset,
1871 el_size);
1872 }
1873 dp += el_size;
1874 }
1875 }
1876 if (fs->lfs_version == 1)
1877 ssp->ss_ocreate = time.tv_sec;
1878 else {
1879 ssp->ss_create = time.tv_sec;
1880 ssp->ss_serial = ++fs->lfs_serial;
1881 ssp->ss_ident = fs->lfs_ident;
1882 }
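	/*
	 * ss_sumsum covers the summary block from ss_datasum to the end,
	 * so ss_datasum must be computed first.
	 */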
1883 ssp->ss_datasum = cksum(datap, dp - datap);
1884 ssp->ss_sumsum =
1885 cksum(&ssp->ss_datasum, fs->lfs_sumsize - sizeof(ssp->ss_sumsum));
1886 pool_put(&fs->lfs_bpppool, datap);
1887 datap = dp = NULL;
1888 #ifdef DIAGNOSTIC
1889 if (fs->lfs_bfree <
1890 btofsb(fs, ninos * fs->lfs_ibsize) + btofsb(fs, fs->lfs_sumsize))
1891 panic("lfs_writeseg: No diskspace for summary");
1892 #endif
1893 fs->lfs_bfree -= (btofsb(fs, ninos * fs->lfs_ibsize) +
1894 btofsb(fs, fs->lfs_sumsize));
1895
1896 /*
1897 * When we simply write the blocks we lose a rotation for every block
1898 * written. To avoid this problem, we cluster the buffers into a
1899 	 * chunk and write the chunk.  MAXPHYS is the largest I/O size the
1900 	 * devices can handle, so we use that as the chunk size.
1901 *
1902 	 * Blocks that are already clusters (from GOP_WRITE), however, are
1903 	 * not copied into a staging chunk; they are written as they are.
1904 */
1905
1906 #define CHUNKSIZE MAXPHYS
1907
1908 if (devvp == NULL)
1909 panic("devvp is NULL");
1910 for (bpp = sp->bpp, i = nblocks; i;) {
1911 cbp = lfs_newclusterbuf(fs, devvp, (*bpp)->b_blkno, i);
1912 cl = cbp->b_private;
1913
1914 cbp->b_flags |= B_ASYNC | B_BUSY;
1915 cbp->b_bcount = 0;
1916
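		/*
		 * A "real" bpp overwrite means we have more block addresses
		 * than the segment summary can describe; a "theoretical" one
		 * means more than the segment could hold even at one
		 * fragment per address.
		 */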
1917 #if defined(DEBUG) && defined(DIAGNOSTIC)
1918 if (bpp - sp->bpp > (fs->lfs_sumsize - SEGSUM_SIZE(fs))
1919 / sizeof(int32_t)) {
1920 panic("lfs_writeseg: real bpp overwrite");
1921 }
1922 if (bpp - sp->bpp > segsize(fs) / fs->lfs_fsize) {
1923 panic("lfs_writeseg: theoretical bpp overwrite");
1924 }
1925 #endif
1926
1927 /*
1928 * Construct the cluster.
1929 */
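		/*
		 * lfs_iocount counts pending segment i/o; lfs_cluster_aiodone
		 * decrements it and wakes any waiters as it drains.
		 */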
1930 ++fs->lfs_iocount;
1931 while (i && cbp->b_bcount < CHUNKSIZE) {
1932 bp = *bpp;
1933
1934 if (bp->b_bcount > (CHUNKSIZE - cbp->b_bcount))
1935 break;
1936 if (cbp->b_bcount > 0 && !(cl->flags & LFS_CL_MALLOC))
1937 break;
1938
1939 /* Clusters from GOP_WRITE are expedited */
1940 if (bp->b_bcount > fs->lfs_bsize) {
1941 if (cbp->b_bcount > 0)
1942 /* Put in its own buffer */
1943 break;
1944 else {
1945 cbp->b_data = bp->b_data;
1946 }
1947 } else if (cbp->b_bcount == 0) {
1948 p = cbp->b_data = lfs_malloc(fs, CHUNKSIZE,
1949 LFS_NB_CLUSTER);
1950 cl->flags |= LFS_CL_MALLOC;
1951 }
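			/*
			 * At this point cbp->b_data either aliases a
			 * GOP_WRITE cluster directly or points at our own
			 * CHUNKSIZE staging area (LFS_CL_MALLOC).
			 */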
1952 #ifdef DIAGNOSTIC
1953 if (dtosn(fs, dbtofsb(fs, bp->b_blkno +
1954 btodb(bp->b_bcount - 1))) !=
1955 sp->seg_number) {
1956 printf("blk size %ld daddr %" PRIx64
1957 " not in seg %d\n",
1958 bp->b_bcount, bp->b_blkno,
1959 sp->seg_number);
1960 panic("segment overwrite");
1961 }
1962 #endif
1963
1964 #ifdef LFS_USE_B_INVAL
1965 /*
1966 * Fake buffers from the cleaner are marked as B_INVAL.
1967 * We need to copy the data from user space rather than
1968 * from the buffer indicated.
1969 			 * XXX - what do I do on an error?
1970 */
1971 if ((bp->b_flags & (B_CALL|B_INVAL)) ==
1972 (B_CALL|B_INVAL)) {
1973 if (copyin(bp->b_saveaddr, p, bp->b_bcount))
1974 panic("lfs_writeseg: "
1975 "copyin failed [2]");
1976 } else
1977 #endif /* LFS_USE_B_INVAL */
1978 if (cl->flags & LFS_CL_MALLOC) {
1979 /* copy data into our cluster. */
1980 memcpy(p, bp->b_data, bp->b_bcount);
1981 p += bp->b_bcount;
1982 }
1983
1984 cbp->b_bcount += bp->b_bcount;
1985 cl->bufsize += bp->b_bcount;
1986
1987 bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI | B_DONE);
1988 cl->bpp[cl->bufcount++] = bp;
1989 vp = bp->b_vp;
1990 s = splbio();
1991 reassignbuf(bp, vp);
1992 V_INCR_NUMOUTPUT(vp);
1993 splx(s);
1994
1995 bpp++;
1996 i--;
1997 }
1998 if (fs->lfs_sp->seg_flags & SEGM_SYNC)
1999 BIO_SETPRIO(cbp, BPRIO_TIMECRITICAL);
2000 else
2001 BIO_SETPRIO(cbp, BPRIO_TIMELIMITED);
2002 s = splbio();
2003 V_INCR_NUMOUTPUT(devvp);
2004 splx(s);
2005 VOP_STRATEGY(devvp, cbp);
2006 curproc->p_stats->p_ru.ru_oublock++;
2007 }
2008
2009 if (lfs_dostats) {
2010 ++lfs_stats.psegwrites;
2011 lfs_stats.blocktot += nblocks - 1;
2012 if (fs->lfs_sp->seg_flags & SEGM_SYNC)
2013 ++lfs_stats.psyncwrites;
2014 if (fs->lfs_sp->seg_flags & SEGM_CLEAN) {
2015 ++lfs_stats.pcleanwrites;
2016 lfs_stats.cleanblocks += nblocks - 1;
2017 }
2018 }
2019 return (lfs_initseg(fs) || do_again);
2020 }
2021
2022 void
2023 lfs_writesuper(struct lfs *fs, daddr_t daddr)
2024 {
2025 struct buf *bp;
2026 int s;
2027 struct vnode *devvp = VTOI(fs->lfs_ivnode)->i_devvp;
2028
2029 /*
2030 	 * Writing one superblock while another superblock write is still in
2031 	 * progress would risk leaving us without a complete checkpoint if we
2032 	 * crash.  So, block here until any superblock write in progress completes.
2033 */
2034 s = splbio();
2035 while (fs->lfs_sbactive) {
2036 tsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs sb", 0);
2037 }
2038 fs->lfs_sbactive = daddr;
2039 splx(s);
2040
2041 /* Set timestamp of this version of the superblock */
2042 if (fs->lfs_version == 1)
2043 fs->lfs_otstamp = time.tv_sec;
2044 fs->lfs_tstamp = time.tv_sec;
2045
2046 /* Checksum the superblock and copy it into a buffer. */
2047 fs->lfs_cksum = lfs_sb_cksum(&(fs->lfs_dlfs));
2048 bp = lfs_newbuf(fs, devvp,
2049 fsbtodb(fs, daddr), LFS_SBPAD, LFS_NB_SBLOCK);
2050 memset(bp->b_data + sizeof(struct dlfs), 0,
2051 LFS_SBPAD - sizeof(struct dlfs));
2052 *(struct dlfs *)bp->b_data = fs->lfs_dlfs;
2053
2054 bp->b_flags |= B_BUSY | B_CALL | B_ASYNC;
2055 bp->b_flags &= ~(B_DONE | B_ERROR | B_READ | B_DELWRI);
2056 bp->b_iodone = lfs_supercallback;
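	/*
	 * lfs_supercallback defers to lfs_super_aiodone, which clears
	 * lfs_sbactive and wakes anyone waiting to write a superblock.
	 */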
2057
2058 if (fs->lfs_sp != NULL && fs->lfs_sp->seg_flags & SEGM_SYNC)
2059 BIO_SETPRIO(bp, BPRIO_TIMECRITICAL);
2060 else
2061 BIO_SETPRIO(bp, BPRIO_TIMELIMITED);
2062 curproc->p_stats->p_ru.ru_oublock++;
2063 s = splbio();
2064 V_INCR_NUMOUTPUT(bp->b_vp);
2065 splx(s);
2066 ++fs->lfs_iocount;
2067 VOP_STRATEGY(devvp, bp);
2068 }
2069
2070 /*
2071 * Logical block number match routines used when traversing the dirty block
2072 * chain.
2073 */
2074 int
2075 lfs_match_fake(struct lfs *fs, struct buf *bp)
2076 {
2077
2078 return LFS_IS_MALLOC_BUF(bp);
2079 }
2080
2081 #if 0
2082 int
2083 lfs_match_real(struct lfs *fs, struct buf *bp)
2084 {
2085
2086 return (lfs_match_data(fs, bp) && !lfs_match_fake(fs, bp));
2087 }
2088 #endif
2089
2090 int
2091 lfs_match_data(struct lfs *fs, struct buf *bp)
2092 {
2093
2094 return (bp->b_lblkno >= 0);
2095 }
2096
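/*
 * Indirect blocks live at negative logical block numbers; the routines
 * below classify them by (-lbn - NDADDR) % NINDIR(fs), which is 0, 1 or 2
 * for single, double and triple indirect blocks respectively.
 */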
2097 int
2098 lfs_match_indir(struct lfs *fs, struct buf *bp)
2099 {
2100 daddr_t lbn;
2101
2102 lbn = bp->b_lblkno;
2103 return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 0);
2104 }
2105
2106 int
2107 lfs_match_dindir(struct lfs *fs, struct buf *bp)
2108 {
2109 daddr_t lbn;
2110
2111 lbn = bp->b_lblkno;
2112 return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 1);
2113 }
2114
2115 int
2116 lfs_match_tindir(struct lfs *fs, struct buf *bp)
2117 {
2118 daddr_t lbn;
2119
2120 lbn = bp->b_lblkno;
2121 return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 2);
2122 }
2123
2124 /*
2125  * XXX - The only buffers that are going to hit these functions are the
2126  * segment write blocks, the segment summaries, and the superblocks.
2127 *
2128 * All of the above are created by lfs_newbuf, and so do not need to be
2129 * released via brelse.
2130 */
2131 void
2132 lfs_callback(struct buf *bp)
2133 {
2134 struct lfs *fs;
2135
2136 fs = bp->b_private;
2137 lfs_freebuf(fs, bp);
2138 }
2139
2140 static void
2141 lfs_super_aiodone(struct buf *bp)
2142 {
2143 struct lfs *fs;
2144
2145 fs = bp->b_private;
2146 fs->lfs_sbactive = 0;
2147 wakeup(&fs->lfs_sbactive);
2148 if (--fs->lfs_iocount <= 1)
2149 wakeup(&fs->lfs_iocount);
2150 lfs_freebuf(fs, bp);
2151 }
2152
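/*
 * A cluster write has completed: propagate any error to the component
 * buffers, release or re-queue each of them, free the cluster structures,
 * and note the i/o as done.
 */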
2153 static void
2154 lfs_cluster_aiodone(struct buf *bp)
2155 {
2156 struct lfs_cluster *cl;
2157 struct lfs *fs;
2158 struct buf *tbp, *fbp;
2159 struct vnode *vp, *devvp;
2160 struct inode *ip;
2161 int s, error=0;
2162
2163 if (bp->b_flags & B_ERROR)
2164 error = bp->b_error;
2165
2166 cl = bp->b_private;
2167 fs = cl->fs;
2168 devvp = VTOI(fs->lfs_ivnode)->i_devvp;
2169
2170 /* Put the pages back, and release the buffer */
2171 while (cl->bufcount--) {
2172 tbp = cl->bpp[cl->bufcount];
2173 if (error) {
2174 tbp->b_flags |= B_ERROR;
2175 tbp->b_error = error;
2176 }
2177
2178 /*
2179 * We're done with tbp. If it has not been re-dirtied since
2180 * the cluster was written, free it. Otherwise, keep it on
2181 * the locked list to be written again.
2182 */
2183 vp = tbp->b_vp;
2184
2185 tbp->b_flags &= ~B_GATHERED;
2186
2187 LFS_BCLEAN_LOG(fs, tbp);
2188
2189 if (!(tbp->b_flags & B_CALL)) {
2190 KASSERT(tbp->b_flags & B_LOCKED);
2191 s = splbio();
2192 simple_lock(&bqueue_slock);
2193 bremfree(tbp);
2194 simple_unlock(&bqueue_slock);
2195 if (vp)
2196 reassignbuf(tbp, vp);
2197 splx(s);
2198 tbp->b_flags |= B_ASYNC; /* for biodone */
2199 }
2200
2201 if ((tbp->b_flags & (B_LOCKED | B_DELWRI)) == B_LOCKED)
2202 LFS_UNLOCK_BUF(tbp);
2203
2204 #ifdef DIAGNOSTIC
2205 if (tbp->b_flags & B_DONE) {
2206 printf("blk %d biodone already (flags %lx)\n",
2207 cl->bufcount, (long)tbp->b_flags);
2208 }
2209 #endif
2210 if (tbp->b_flags & (B_BUSY | B_CALL)) {
2211 if ((tbp->b_flags & B_CALL) &&
2212 !LFS_IS_MALLOC_BUF(tbp)) {
2213 /* printf("flags 0x%lx\n", tbp->b_flags); */
2214 /*
2215 * A buffer from the page daemon.
2216 * We use the same iodone as it does,
2217 * so we must manually disassociate its
2218 * buffers from the vp.
2219 */
2220 if (tbp->b_vp) {
2221 /* This is just silly */
2222 s = splbio();
2223 brelvp(tbp);
2224 tbp->b_vp = vp;
2225 splx(s);
2226 }
2227 /* Put it back the way it was */
2228 tbp->b_flags |= B_ASYNC;
2229 /* Master buffers have B_AGE */
2230 if (tbp->b_private == tbp)
2231 tbp->b_flags |= B_AGE;
2232 }
2233 s = splbio();
2234 biodone(tbp);
2235
2236 /*
2237 * If this is the last block for this vnode, but
2238 * there are other blocks on its dirty list,
2239 * set IN_MODIFIED/IN_CLEANING depending on what
2240 * sort of block. Only do this for our mount point,
2241 * not for, e.g., inode blocks that are attached to
2242 * the devvp.
2243 * XXX KS - Shouldn't we set *both* if both types
2244 * of blocks are present (traverse the dirty list?)
2245 */
2246 simple_lock(&global_v_numoutput_slock);
2247 if (vp != devvp && vp->v_numoutput == 0 &&
2248 (fbp = LIST_FIRST(&vp->v_dirtyblkhd)) != NULL) {
2249 ip = VTOI(vp);
2250 #ifdef DEBUG_LFS
2251 printf("lfs_cluster_aiodone: marking ino %d\n",
2252 ip->i_number);
2253 #endif
2254 if (LFS_IS_MALLOC_BUF(fbp))
2255 LFS_SET_UINO(ip, IN_CLEANING);
2256 else
2257 LFS_SET_UINO(ip, IN_MODIFIED);
2258 }
2259 simple_unlock(&global_v_numoutput_slock);
2260 splx(s);
2261 wakeup(vp);
2262 }
2263 }
2264
2265 /* Fix up the cluster buffer, and release it */
2266 if (cl->flags & LFS_CL_MALLOC)
2267 lfs_free(fs, bp->b_data, LFS_NB_CLUSTER);
2268 s = splbio();
2269 pool_put(&bufpool, bp); /* XXX should use lfs_free? */
2270 splx(s);
2271
2272 /* Note i/o done */
2273 if (cl->flags & LFS_CL_SYNC) {
2274 if (--cl->seg->seg_iocount == 0)
2275 wakeup(&cl->seg->seg_iocount);
2276 /* printf("- %x => %d\n", cl->seg, cl->seg->seg_iocount); */
2277 }
2278 #ifdef DIAGNOSTIC
2279 if (fs->lfs_iocount == 0)
2280 panic("lfs_cluster_aiodone: zero iocount");
2281 #endif
2282 if (--fs->lfs_iocount <= 1)
2283 wakeup(&fs->lfs_iocount);
2284
2285 pool_put(&fs->lfs_bpppool, cl->bpp);
2286 cl->bpp = NULL;
2287 pool_put(&fs->lfs_clpool, cl);
2288 }
2289
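/*
 * Interrupt-time callback: queue the buffer on uvm.aio_done and wake the
 * aiodone daemon, which calls the real aiodone routine in thread context.
 */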
2290 static void
2291 lfs_generic_callback(struct buf *bp, void (*aiodone)(struct buf *))
2292 {
2293 /* reset b_iodone for when this is a single-buf i/o. */
2294 bp->b_iodone = aiodone;
2295
2296 simple_lock(&uvm.aiodoned_lock); /* locks uvm.aio_done */
2297 TAILQ_INSERT_TAIL(&uvm.aio_done, bp, b_freelist);
2298 wakeup(&uvm.aiodoned);
2299 simple_unlock(&uvm.aiodoned_lock);
2300 }
2301
2302 static void
2303 lfs_cluster_callback(struct buf *bp)
2304 {
2305
2306 lfs_generic_callback(bp, lfs_cluster_aiodone);
2307 }
2308
2309 void
2310 lfs_supercallback(struct buf *bp)
2311 {
2312
2313 lfs_generic_callback(bp, lfs_super_aiodone);
2314 }
2315
2316 /*
2317 * Shellsort (diminishing increment sort) from Data Structures and
2318  * Algorithms, Aho, Hopcroft and Ullman, 1983 Edition, page 290;
2319 * see also Knuth Vol. 3, page 84. The increments are selected from
2320 * formula (8), page 95. Roughly O(N^3/2).
2321 */
2322 /*
2323 * This is our own private copy of shellsort because we want to sort
2324 * two parallel arrays (the array of buffer pointers and the array of
2325 * logical block numbers) simultaneously. Note that we cast the array
2326  * of logical block numbers to an unsigned type in this routine so that
2327  * the negative block numbers (metadata blocks) sort AFTER the data blocks.
2328 */
2329
2330 void
2331 lfs_shellsort(struct buf **bp_array, int32_t *lb_array, int nmemb, int size)
2332 {
2333 static int __rsshell_increments[] = { 4, 1, 0 };
2334 int incr, *incrp, t1, t2;
2335 struct buf *bp_temp;
2336
2337 #ifdef DEBUG
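	/* Verify that lb_array is consistent with bp_array before sorting. */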
2338 incr = 0;
2339 for (t1 = 0; t1 < nmemb; t1++) {
2340 for (t2 = 0; t2 * size < bp_array[t1]->b_bcount; t2++) {
2341 if (lb_array[incr++] != bp_array[t1]->b_lblkno + t2) {
2342 /* dump before panic */
2343 printf("lfs_shellsort: nmemb=%d, size=%d\n",
2344 nmemb, size);
2345 incr = 0;
2346 for (t1 = 0; t1 < nmemb; t1++) {
2347 const struct buf *bp = bp_array[t1];
2348
2349 printf("bp[%d]: lbn=%" PRIu64 ", size=%"
2350 PRIu64 "\n", t1,
2351 					       (uint64_t)bp->b_lblkno,
2352 					       (uint64_t)bp->b_bcount);
2353 printf("lbns:");
2354 for (t2 = 0; t2 * size < bp->b_bcount;
2355 t2++) {
2356 printf(" %" PRId32,
2357 lb_array[incr++]);
2358 }
2359 printf("\n");
2360 }
2361 panic("lfs_shellsort: inconsistent input");
2362 }
2363 }
2364 }
2365 #endif
2366
2367 for (incrp = __rsshell_increments; (incr = *incrp++) != 0;)
2368 for (t1 = incr; t1 < nmemb; ++t1)
2369 for (t2 = t1 - incr; t2 >= 0;)
2370 if ((u_int32_t)bp_array[t2]->b_lblkno >
2371 (u_int32_t)bp_array[t2 + incr]->b_lblkno) {
2372 bp_temp = bp_array[t2];
2373 bp_array[t2] = bp_array[t2 + incr];
2374 bp_array[t2 + incr] = bp_temp;
2375 t2 -= incr;
2376 } else
2377 break;
2378
2379 /* Reform the list of logical blocks */
2380 incr = 0;
2381 for (t1 = 0; t1 < nmemb; t1++) {
2382 for (t2 = 0; t2 * size < bp_array[t1]->b_bcount; t2++) {
2383 lb_array[incr++] = bp_array[t1]->b_lblkno + t2;
2384 }
2385 }
2386 }
2387
2388 /*
2389 * Check VXLOCK. Return 1 if the vnode is locked. Otherwise, vget it.
2390 */
2391 int
2392 lfs_vref(struct vnode *vp)
2393 {
2394 /*
2395 * If we return 1 here during a flush, we risk vinvalbuf() not
2396 * being able to flush all of the pages from this vnode, which
2397 * will cause it to panic. So, return 0 if a flush is in progress.
2398 */
2399 if (vp->v_flag & VXLOCK) {
2400 if (IS_FLUSHING(VTOI(vp)->i_lfs,vp)) {
2401 return 0;
2402 }
2403 return (1);
2404 }
2405 return (vget(vp, 0));
2406 }
2407
2408 /*
2409 * This is vrele except that we do not want to VOP_INACTIVE this vnode. We
2410 * inline vrele here to avoid the vn_lock and VOP_INACTIVE call at the end.
2411 */
2412 void
2413 lfs_vunref(struct vnode *vp)
2414 {
2415 /*
2416 	 * As with lfs_vref: if the vnode is being flushed, fake the release.
2417 */
2418 if ((vp->v_flag & VXLOCK) && IS_FLUSHING(VTOI(vp)->i_lfs,vp)) {
2419 return;
2420 }
2421
2422 simple_lock(&vp->v_interlock);
2423 #ifdef DIAGNOSTIC
2424 if (vp->v_usecount <= 0) {
2425 printf("lfs_vunref: inum is %d\n", VTOI(vp)->i_number);
2426 printf("lfs_vunref: flags are 0x%lx\n", (u_long)vp->v_flag);
2427 printf("lfs_vunref: usecount = %ld\n", (long)vp->v_usecount);
2428 panic("lfs_vunref: v_usecount<0");
2429 }
2430 #endif
2431 vp->v_usecount--;
2432 if (vp->v_usecount > 0) {
2433 simple_unlock(&vp->v_interlock);
2434 return;
2435 }
2436 /*
2437 * insert at tail of LRU list
2438 */
2439 simple_lock(&vnode_free_list_slock);
2440 if (vp->v_holdcnt > 0)
2441 TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
2442 else
2443 TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
2444 simple_unlock(&vnode_free_list_slock);
2445 simple_unlock(&vp->v_interlock);
2446 }
2447
2448 /*
2449 * We use this when we have vnodes that were loaded in solely for cleaning.
2450 * There is no reason to believe that these vnodes will be referenced again
2451 * soon, since the cleaning process is unrelated to normal filesystem
2452  * activity.  Putting cleaned vnodes at the tail of the list would
2453  * flush more recently used vnodes out of the LRU.  So, put vnodes that
2454  * were loaded only for cleaning at the head of the list, instead.
2455 */
2456 void
2457 lfs_vunref_head(struct vnode *vp)
2458 {
2459
2460 simple_lock(&vp->v_interlock);
2461 #ifdef DIAGNOSTIC
2462 if (vp->v_usecount == 0) {
2463 		panic("lfs_vunref_head: v_usecount is already 0");
2464 }
2465 #endif
2466 vp->v_usecount--;
2467 if (vp->v_usecount > 0) {
2468 simple_unlock(&vp->v_interlock);
2469 return;
2470 }
2471 /*
2472 	 * insert at head of the free list (held vnodes still go to the hold list tail)
2473 */
2474 simple_lock(&vnode_free_list_slock);
2475 if (vp->v_holdcnt > 0)
2476 TAILQ_INSERT_TAIL(&vnode_hold_list, vp, v_freelist);
2477 else
2478 TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
2479 simple_unlock(&vnode_free_list_slock);
2480 simple_unlock(&vp->v_interlock);
2481 }
2482
2483