/*	$NetBSD: lfs_segment.c,v 1.33 1999/11/09 02:21:06 perseant Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant (at) hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the University of
 *        California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      @(#)lfs_segment.c       8.10 (Berkeley) 6/10/95
 */

#define ivndebug(vp,str) printf("ino %d: %s\n",VTOI(vp)->i_number,(str))

#include "opt_ddb.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

extern int count_lock_queue __P((void));
extern struct simplelock vnode_free_list_slock;         /* XXX */
extern TAILQ_HEAD(freelst, vnode) vnode_free_list;      /* XXX */

/*
 * Determine if it's OK to start a partial in this segment, or if we need
 * to go on to a new segment.
 */
#define LFS_PARTIAL_FITS(fs) \
        ((fs)->lfs_dbpseg - ((fs)->lfs_offset - (fs)->lfs_curseg) > \
        1 << (fs)->lfs_fsbtodb)
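/*
 * Worked example (illustrative only, values are hypothetical): with
 * lfs_dbpseg = 2048 disk blocks per segment, lfs_offset - lfs_curseg =
 * 2040 blocks already consumed, and lfs_fsbtodb = 1 (so one fs block is
 * 1 << 1 = 2 disk blocks), 2048 - 2040 = 8 > 2 and a partial segment
 * still fits here; had 2047 blocks been consumed, 1 > 2 would be false
 * and lfs_initseg() would advance to a new segment instead.
 */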
113
114 void lfs_callback __P((struct buf *));
115 int lfs_gather __P((struct lfs *, struct segment *,
116 struct vnode *, int (*) __P((struct lfs *, struct buf *))));
117 int lfs_gatherblock __P((struct segment *, struct buf *, int *));
118 void lfs_iset __P((struct inode *, ufs_daddr_t, time_t));
119 int lfs_match_fake __P((struct lfs *, struct buf *));
120 int lfs_match_data __P((struct lfs *, struct buf *));
121 int lfs_match_dindir __P((struct lfs *, struct buf *));
122 int lfs_match_indir __P((struct lfs *, struct buf *));
123 int lfs_match_tindir __P((struct lfs *, struct buf *));
124 void lfs_newseg __P((struct lfs *));
125 void lfs_shellsort __P((struct buf **, ufs_daddr_t *, register int));
126 void lfs_supercallback __P((struct buf *));
127 void lfs_updatemeta __P((struct segment *));
128 int lfs_vref __P((struct vnode *));
129 void lfs_vunref __P((struct vnode *));
130 void lfs_writefile __P((struct lfs *, struct segment *, struct vnode *));
131 int lfs_writeinode __P((struct lfs *, struct segment *, struct inode *));
132 int lfs_writeseg __P((struct lfs *, struct segment *));
133 void lfs_writesuper __P((struct lfs *, daddr_t));
134 int lfs_writevnodes __P((struct lfs *fs, struct mount *mp,
135 struct segment *sp, int dirops));
136
137 int lfs_allclean_wakeup; /* Cleaner wakeup address. */
138 int lfs_writeindir = 1; /* whether to flush indir on non-ckp */
139 int lfs_clean_vnhead = 0; /* Allow freeing to head of vn list */
140 int lfs_dirvcount = 0; /* # active dirops */
141
142 /* Statistics Counters */
143 int lfs_dostats = 1;
144 struct lfs_stats lfs_stats;
145
146 /* op values to lfs_writevnodes */
147 #define VN_REG 0
148 #define VN_DIROP 1
149 #define VN_EMPTY 2
150 #define VN_CLEAN 3
151
152 #define LFS_MAX_ACTIVE 10
153
154 /*
155 * XXX KS - Set modification time on the Ifile, so the cleaner can
156 * read the fs mod time off of it. We don't set IN_UPDATE here,
157 * since we don't really need this to be flushed to disk (and in any
158 * case that wouldn't happen to the Ifile until we checkpoint).
159 */
160 void
161 lfs_imtime(fs)
162 struct lfs *fs;
163 {
164 struct timespec ts;
165 struct inode *ip;
166
167 TIMEVAL_TO_TIMESPEC(&time, &ts);
168 ip = VTOI(fs->lfs_ivnode);
169 ip->i_ffs_mtime = ts.tv_sec;
170 ip->i_ffs_mtimensec = ts.tv_nsec;
171 }
172
173 /*
174 * Ifile and meta data blocks are not marked busy, so segment writes MUST be
175 * single threaded. Currently, there are two paths into lfs_segwrite, sync()
176 * and getnewbuf(). They both mark the file system busy. Lfs_vflush()
177 * explicitly marks the file system busy. So lfs_segwrite is safe. I think.
178 */
179
180 #define SET_FLUSHING(fs,vp) (fs)->lfs_flushvp = (vp)
181 #define IS_FLUSHING(fs,vp) ((fs)->lfs_flushvp == (vp))
182 #define CLR_FLUSHING(fs,vp) (fs)->lfs_flushvp = NULL
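
/*
 * Sketch of the intended flushing protocol (an illustrative condensation
 * of lfs_vflush() below, not additional code):
 *
 *        lfs_seglock(fs, SEGM_SYNC);
 *        SET_FLUSHING(fs, vp);
 *        error = lfs_segwrite(vp->v_mount, SEGM_SYNC | SEGM_CKP);
 *        CLR_FLUSHING(fs, vp);
 *        lfs_segunlock(fs);
 *
 * While lfs_flushvp is set, lfs_vref()/lfs_vunref() special-case the
 * VXLOCKed vnode being flushed so that vinvalbuf() cannot deadlock.
 */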

int
lfs_vflush(vp)
        struct vnode *vp;
{
        struct inode *ip;
        struct lfs *fs;
        struct segment *sp;
        struct buf *bp, *nbp;
        int error, s;

        ip = VTOI(vp);
        fs = VFSTOUFS(vp->v_mount)->um_lfs;

        if(ip->i_flag & IN_CLEANING) {
#ifdef DEBUG_LFS
                ivndebug(vp,"vflush/in_cleaning");
#endif
                ip->i_flag &= ~IN_CLEANING;
                if(ip->i_flag & IN_MODIFIED) {
                        fs->lfs_uinodes--;
                } else
                        ip->i_flag |= IN_MODIFIED;
        }

        /* If the node is being written, wait until that is done */
        if(WRITEINPROG(vp)) {
#ifdef DEBUG_LFS
                ivndebug(vp,"vflush/writeinprog");
#endif
                tsleep(vp, PRIBIO+1, "lfs_vw", 0);
        }

        /* Protect against VXLOCK deadlock in vinvalbuf() */
        lfs_seglock(fs, SEGM_SYNC);

        /* If we're supposed to flush a freed inode, just toss it */
        /* XXX - seglock, so these buffers can't be gathered, right? */
        if(ip->i_ffs_mode == 0) {
                printf("lfs_vflush: ino %d is freed, not flushing\n",
                       ip->i_number);
                s = splbio();
                for(bp = vp->v_dirtyblkhd.lh_first; bp; bp = nbp) {
                        nbp = bp->b_vnbufs.le_next;
                        /* Copied from lfs_writeseg */
                        if (bp->b_flags & B_CALL) {
                                /* if B_CALL, it was created with newbuf */
                                lfs_freebuf(bp);
                        } else {
                                bremfree(bp);
                                bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
                                                 B_LOCKED | B_GATHERED);
                                bp->b_flags |= B_DONE;
                                reassignbuf(bp, vp);
                                brelse(bp);
                        }
                }
                splx(s);
                if(ip->i_flag & IN_CLEANING)
                        fs->lfs_uinodes--;
                if(ip->i_flag & IN_MODIFIED)
                        fs->lfs_uinodes--;
                ip->i_flag &= ~(IN_MODIFIED|IN_UPDATE|IN_ACCESS|IN_CHANGE|IN_CLEANING);
                printf("lfs_vflush: done not flushing ino %d\n",
                       ip->i_number);
                lfs_segunlock(fs);
                return 0;
        }

        SET_FLUSHING(fs,vp);
        if (fs->lfs_nactive > LFS_MAX_ACTIVE) {
                error = lfs_segwrite(vp->v_mount, SEGM_SYNC|SEGM_CKP);
                CLR_FLUSHING(fs,vp);
                lfs_segunlock(fs);
                return error;
        }
        sp = fs->lfs_sp;

        if (vp->v_dirtyblkhd.lh_first == NULL) {
                lfs_writevnodes(fs, vp->v_mount, sp, VN_EMPTY);
        } else if((ip->i_flag & IN_CLEANING) &&
                  (fs->lfs_sp->seg_flags & SEGM_CLEAN)) {
#ifdef DEBUG_LFS
                ivndebug(vp,"vflush/clean");
#endif
                lfs_writevnodes(fs, vp->v_mount, sp, VN_CLEAN);
        } else if(lfs_dostats) {
                if(vp->v_dirtyblkhd.lh_first ||
                   (VTOI(vp)->i_flag & (IN_MODIFIED|IN_UPDATE|IN_ACCESS|
                                        IN_CHANGE|IN_CLEANING)))
                        ++lfs_stats.vflush_invoked;
#ifdef DEBUG_LFS
                ivndebug(vp,"vflush");
#endif
        }

#ifdef DIAGNOSTIC
        /* XXX KS This actually can happen right now, though it shouldn't(?) */
        if(vp->v_flag & VDIROP) {
                printf("lfs_vflush: flushing VDIROP, this shouldn't be\n");
                /* panic("VDIROP being flushed...this can't happen"); */
        }
        if(vp->v_usecount < 0) {
                printf("usecount=%ld\n", vp->v_usecount);
                panic("lfs_vflush: usecount<0");
        }
#endif

        do {
                do {
                        if (vp->v_dirtyblkhd.lh_first != NULL)
                                lfs_writefile(fs, sp, vp);
                } while (lfs_writeinode(fs, sp, ip));
        } while (lfs_writeseg(fs, sp) && ip->i_number == LFS_IFILE_INUM);

        if(lfs_dostats) {
                ++lfs_stats.nwrites;
                if (sp->seg_flags & SEGM_SYNC)
                        ++lfs_stats.nsync_writes;
                if (sp->seg_flags & SEGM_CKP)
                        ++lfs_stats.ncheckpoints;
        }
        lfs_segunlock(fs);

        CLR_FLUSHING(fs,vp);
        return (0);
}

#ifdef DEBUG_LFS_VERBOSE
# define vndebug(vp,str) if(VTOI(vp)->i_flag & IN_CLEANING) printf("not writing ino %d because %s (op %d)\n",VTOI(vp)->i_number,(str),op)
#else
# define vndebug(vp,str)
#endif

int
lfs_writevnodes(fs, mp, sp, op)
        struct lfs *fs;
        struct mount *mp;
        struct segment *sp;
        int op;
{
        struct inode *ip;
        struct vnode *vp;
        int inodes_written = 0, only_cleaning;

#ifndef LFS_NO_BACKVP_HACK
        /* BEGIN HACK */
#define VN_OFFSET (((caddr_t)&vp->v_mntvnodes.le_next) - (caddr_t)vp)
#define BACK_VP(VP) ((struct vnode *)(((caddr_t)VP->v_mntvnodes.le_prev) - VN_OFFSET))
#define BEG_OF_VLIST ((struct vnode *)(((caddr_t)&mp->mnt_vnodelist.lh_first) - VN_OFFSET))
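/*
 * Illustration of the hack (explanatory, not part of the original source):
 * in a LIST, v_mntvnodes.le_prev points at the PREVIOUS element's le_next
 * field (or at mnt_vnodelist.lh_first for the head element), so
 * subtracting the offset of le_next within struct vnode recovers the
 * previous vnode itself.  BEG_OF_VLIST applies the same arithmetic to the
 * list head, producing the sentinel that terminates the backward walk
 * below.
 */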

        /* Find last vnode. */
loop:   for (vp = mp->mnt_vnodelist.lh_first;
             vp && vp->v_mntvnodes.le_next != NULL;
             vp = vp->v_mntvnodes.le_next);
        for (; vp && vp != BEG_OF_VLIST; vp = BACK_VP(vp)) {
#else
loop:
        for (vp = mp->mnt_vnodelist.lh_first;
             vp != NULL;
             vp = vp->v_mntvnodes.le_next) {
#endif
                /*
                 * If the vnode that we are about to sync is no longer
                 * associated with this mount point, start over.
                 */
                if (vp->v_mount != mp)
                        goto loop;

                ip = VTOI(vp);
                if ((op == VN_DIROP && !(vp->v_flag & VDIROP)) ||
                    (op != VN_DIROP && op != VN_CLEAN && (vp->v_flag & VDIROP))) {
                        vndebug(vp,"dirop");
                        continue;
                }

                if (op == VN_EMPTY && vp->v_dirtyblkhd.lh_first) {
                        vndebug(vp,"empty");
                        continue;
                }

                if (vp->v_type == VNON) {
                        continue;
                }

                if(op == VN_CLEAN && ip->i_number != LFS_IFILE_INUM
                   && !(ip->i_flag & IN_CLEANING)) {
                        vndebug(vp,"cleaning");
                        continue;
                }

                if (lfs_vref(vp)) {
                        vndebug(vp,"vref");
                        continue;
                }

#if 0 /* XXX KS - if we skip the ifile, things could go badly for us. */
                if(WRITEINPROG(vp)) {
                        lfs_vunref(vp);
#ifdef DEBUG_LFS
                        ivndebug(vp,"writevnodes/writeinprog");
#endif
                        continue;
                }
#endif
                only_cleaning = 0;
                /*
                 * Write the inode/file if dirty and it's not the IFILE.
                 */
                if ((ip->i_flag &
                     (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE | IN_CLEANING) ||
                     vp->v_dirtyblkhd.lh_first != NULL))
                {
                        only_cleaning = ((ip->i_flag & (IN_ACCESS|IN_CHANGE|IN_MODIFIED|IN_UPDATE|IN_CLEANING)) == IN_CLEANING);

                        if(ip->i_number != LFS_IFILE_INUM
                           && vp->v_dirtyblkhd.lh_first != NULL)
                        {
                                lfs_writefile(fs, sp, vp);
                        }
                        if(vp->v_dirtyblkhd.lh_first != NULL) {
                                if(WRITEINPROG(vp)) {
#ifdef DEBUG_LFS
                                        ivndebug(vp,"writevnodes/write2");
#endif
                                } else if(!(ip->i_flag & (IN_ACCESS|IN_CHANGE|
                                            IN_MODIFIED|IN_UPDATE|IN_CLEANING))) {
#ifdef DEBUG_LFS
                                        printf("<%d>", ip->i_number);
#endif
                                        ip->i_flag |= IN_MODIFIED;
                                        ++fs->lfs_uinodes;
                                }
                        }
                        (void) lfs_writeinode(fs, sp, ip);
                        inodes_written++;
                }

                if(vp->v_flag & VDIROP) {
                        --lfs_dirvcount;
                        vp->v_flag &= ~VDIROP;
                        wakeup(&lfs_dirvcount);
                        lfs_vunref(vp);
                }

                if(lfs_clean_vnhead && only_cleaning)
                        lfs_vunref_head(vp);
                else
                        lfs_vunref(vp);
        }
        return inodes_written;
}

int
lfs_segwrite(mp, flags)
        struct mount *mp;
        int flags;                      /* Do a checkpoint. */
{
        struct buf *bp;
        struct inode *ip;
        struct lfs *fs;
        struct segment *sp;
        struct vnode *vp;
        SEGUSE *segusep;
        ufs_daddr_t ibno;
        int do_ckp, error, i;
        int writer_set = 0;
        int need_unlock = 0;

        fs = VFSTOUFS(mp)->um_lfs;

        lfs_imtime(fs);

        /*
         * If we are not the cleaner, and we have fewer than MIN_FREE_SEGS
         * clean segments, wait until cleaner writes.
         */
        if(!(flags & SEGM_CLEAN)
           && (!fs->lfs_seglock || !(fs->lfs_sp->seg_flags & SEGM_CLEAN)))
        {
                do {
                        if (fs->lfs_nclean <= MIN_FREE_SEGS
                            || fs->lfs_avail <= 0)
                        {
                                wakeup(&lfs_allclean_wakeup);
                                wakeup(&fs->lfs_nextseg);
                                error = tsleep(&fs->lfs_avail, PRIBIO + 1,
                                               "lfs_avail", 0);
                                if (error) {
                                        return (error);
                                }
                        }
                } while (fs->lfs_nclean <= MIN_FREE_SEGS || fs->lfs_avail <= 0);
        }

        /*
         * Allocate a segment structure and enough space to hold pointers to
         * the maximum possible number of buffers which can be described in a
         * single summary block.
         */
        do_ckp = (flags & SEGM_CKP) || fs->lfs_nactive > LFS_MAX_ACTIVE;
        lfs_seglock(fs, flags | (do_ckp ? SEGM_CKP : 0));
        sp = fs->lfs_sp;

        /*
         * If lfs_flushvp is non-NULL, we are called from lfs_vflush,
         * in which case we have to flush *all* buffers off of this vnode.
         */
        if((sp->seg_flags & SEGM_CLEAN) && !(fs->lfs_flushvp))
                lfs_writevnodes(fs, mp, sp, VN_CLEAN);
        else {
                lfs_writevnodes(fs, mp, sp, VN_REG);
                /*
                 * XXX KS - If we're cleaning, we can't wait for dirops,
                 * because they might be waiting on us.  The downside of this
                 * is that, if we write anything besides cleaning blocks
                 * while cleaning, the checkpoint is not completely
                 * consistent.
                 */
                if(!(sp->seg_flags & SEGM_CLEAN)) {
                        while(fs->lfs_dirops)
                                if((error = tsleep(&fs->lfs_writer, PRIBIO + 1,
                                                   "lfs writer", 0)))
                                {
                                        free(sp->bpp, M_SEGMENT);
                                        free(sp, M_SEGMENT);
                                        return (error);
                                }
                        fs->lfs_writer++;
                        writer_set = 1;
                        lfs_writevnodes(fs, mp, sp, VN_DIROP);
                        ((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
                }
        }

        /*
         * If we are doing a checkpoint, mark everything since the
         * last checkpoint as no longer ACTIVE.
         */
        if (do_ckp) {
                for (ibno = fs->lfs_cleansz + fs->lfs_segtabsz;
                     --ibno >= fs->lfs_cleansz; ) {
                        if (bread(fs->lfs_ivnode, ibno, fs->lfs_bsize, NOCRED, &bp))
                                panic("lfs_segwrite: ifile read");
                        segusep = (SEGUSE *)bp->b_data;
                        for (i = fs->lfs_sepb; i--; segusep++)
                                segusep->su_flags &= ~SEGUSE_ACTIVE;

                        /* But the current segment is still ACTIVE */
                        if (fs->lfs_curseg/fs->lfs_sepb == (ibno - fs->lfs_cleansz))
                                ((SEGUSE *)(bp->b_data))[fs->lfs_curseg%fs->lfs_sepb].su_flags |= SEGUSE_ACTIVE;
                        error = VOP_BWRITE(bp);
                }
        }

        if (do_ckp || fs->lfs_doifile) {
redo:
                vp = fs->lfs_ivnode;
                /*
                 * Depending on the circumstances of our calling, the ifile
                 * inode might be locked.  If it is, and if it is locked by
                 * us, we should VREF instead of vget here.
                 */
                need_unlock = 0;
                if(VOP_ISLOCKED(vp)
                   && vp->v_lock.lk_lockholder == curproc->p_pid) {
                        VREF(vp);
                } else {
                        while (vget(vp, LK_EXCLUSIVE))
                                continue;
                        need_unlock = 1;
                }
                ip = VTOI(vp);
                if (vp->v_dirtyblkhd.lh_first != NULL)
                        lfs_writefile(fs, sp, vp);
                (void)lfs_writeinode(fs, sp, ip);

                /* Only vput if we used vget() above. */
                if(need_unlock)
                        vput(vp);
                else
                        vrele(vp);

                if (lfs_writeseg(fs, sp) && do_ckp)
                        goto redo;
        } else {
                (void) lfs_writeseg(fs, sp);
        }

        /*
         * If the I/O count is non-zero, sleep until it reaches zero.
         * At the moment, the user's process hangs around so we can
         * sleep.
         */
        fs->lfs_doifile = 0;
        if(writer_set && --fs->lfs_writer == 0)
                wakeup(&fs->lfs_dirops);

        if(lfs_dostats) {
                ++lfs_stats.nwrites;
                if (sp->seg_flags & SEGM_SYNC)
                        ++lfs_stats.nsync_writes;
                if (sp->seg_flags & SEGM_CKP)
                        ++lfs_stats.ncheckpoints;
        }
        lfs_segunlock(fs);
        return (0);
}

/*
 * Write the dirty blocks associated with a vnode.
 */
void
lfs_writefile(fs, sp, vp)
        struct lfs *fs;
        struct segment *sp;
        struct vnode *vp;
{
        struct buf *bp;
        struct finfo *fip;
        IFILE *ifp;

        if (sp->seg_bytes_left < fs->lfs_bsize ||
            sp->sum_bytes_left < sizeof(struct finfo))
                (void) lfs_writeseg(fs, sp);

        sp->sum_bytes_left -= sizeof(struct finfo) - sizeof(ufs_daddr_t);
        ++((SEGSUM *)(sp->segsum))->ss_nfinfo;

        if(vp->v_flag & VDIROP)
                ((SEGSUM *)(sp->segsum))->ss_flags |= (SS_DIROP|SS_CONT);

        fip = sp->fip;
        fip->fi_nblocks = 0;
        fip->fi_ino = VTOI(vp)->i_number;
        LFS_IENTRY(ifp, fs, fip->fi_ino, bp);
        fip->fi_version = ifp->if_version;
        brelse(bp);

        /*
         * It may not be necessary to write the meta-data blocks at this point,
         * as the roll-forward recovery code should be able to reconstruct the
         * list.
         *
         * We have to write them anyway, though, under two conditions: (1) the
         * vnode is being flushed (for reuse by vinvalbuf); or (2) we are
         * checkpointing.
         */
        if((sp->seg_flags & SEGM_CLEAN)
           && VTOI(vp)->i_number != LFS_IFILE_INUM
           && !IS_FLUSHING(fs,vp))
        {
                lfs_gather(fs, sp, vp, lfs_match_fake);
        } else
                lfs_gather(fs, sp, vp, lfs_match_data);

        if(lfs_writeindir
           || IS_FLUSHING(fs,vp)
           || (sp->seg_flags & SEGM_CKP))
        {
                lfs_gather(fs, sp, vp, lfs_match_indir);
                lfs_gather(fs, sp, vp, lfs_match_dindir);
                /* XXX KS - when is TRIPLE not true? */ /* #ifdef TRIPLE */
                lfs_gather(fs, sp, vp, lfs_match_tindir);
                /* #endif */
        }
        fip = sp->fip;
        if (fip->fi_nblocks != 0) {
                sp->fip = (FINFO*)((caddr_t)fip + sizeof(struct finfo) +
                                   sizeof(ufs_daddr_t) * (fip->fi_nblocks - 1));
                sp->start_lbp = &sp->fip->fi_blocks[0];
        } else {
                sp->sum_bytes_left += sizeof(FINFO) - sizeof(ufs_daddr_t);
                --((SEGSUM *)(sp->segsum))->ss_nfinfo;
        }
}

int
lfs_writeinode(fs, sp, ip)
        struct lfs *fs;
        struct segment *sp;
        struct inode *ip;
{
        struct buf *bp, *ibp;
        IFILE *ifp;
        SEGUSE *sup;
        ufs_daddr_t daddr;
        ino_t ino;
        int error, i, ndx;
        int redo_ifile = 0;
        struct timespec ts;
        int gotblk = 0;

        if (!(ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE | IN_CLEANING)))
                return(0);

        /* Allocate a new inode block if necessary. */
        if ((ip->i_number != LFS_IFILE_INUM || sp->idp == NULL) && sp->ibp == NULL) {
                /* Allocate a new segment if necessary. */
                if (sp->seg_bytes_left < fs->lfs_bsize ||
                    sp->sum_bytes_left < sizeof(ufs_daddr_t))
                        (void) lfs_writeseg(fs, sp);

                /* Get next inode block. */
                daddr = fs->lfs_offset;
                fs->lfs_offset += fsbtodb(fs, 1);
                sp->ibp = *sp->cbpp++ =
                        getblk(VTOI(fs->lfs_ivnode)->i_devvp, daddr, fs->lfs_bsize, 0, 0);
                gotblk++;

                /* Zero out inode numbers */
                for (i = 0; i < INOPB(fs); ++i)
                        ((struct dinode *)sp->ibp->b_data)[i].di_inumber = 0;

                ++sp->start_bpp;
                fs->lfs_avail -= fsbtodb(fs, 1);
                /* Set remaining space counters. */
                sp->seg_bytes_left -= fs->lfs_bsize;
                sp->sum_bytes_left -= sizeof(ufs_daddr_t);
                ndx = LFS_SUMMARY_SIZE / sizeof(ufs_daddr_t) -
                        sp->ninodes / INOPB(fs) - 1;
                ((ufs_daddr_t *)(sp->segsum))[ndx] = daddr;
        }
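
        /*
         * Illustration of the ndx computation above (explanatory only,
         * assuming LFS_SUMMARY_SIZE of 512 bytes and 4-byte ufs_daddr_t
         * entries): inode-block addresses are stored at the tail of the
         * summary block, growing downward.  The first inode block's daddr
         * goes in slot 512/4 - 0 - 1 = 127, the second in slot 126, and
         * so on, while FINFO entries grow upward from just past the
         * SEGSUM header.
         */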

        /* Update the inode times and copy the inode onto the inode page. */
        if (ip->i_flag & (IN_CLEANING|IN_MODIFIED))
                --fs->lfs_uinodes;
        TIMEVAL_TO_TIMESPEC(&time, &ts);
        LFS_ITIMES(ip, &ts, &ts, &ts);

        if(ip->i_flag & IN_CLEANING)
                ip->i_flag &= ~IN_CLEANING;
        else
                ip->i_flag &= ~(IN_ACCESS|IN_CHANGE|IN_MODIFIED|IN_UPDATE);

        /*
         * If this is the Ifile, and we've already written the Ifile in this
         * partial segment, just overwrite it (it's not on disk yet) and
         * continue.
         *
         * XXX we know that the bp that we get the second time around has
         * already been gathered.
         */
        if(ip->i_number == LFS_IFILE_INUM && sp->idp) {
                *(sp->idp) = ip->i_din.ffs_din;
                return 0;
        }

        bp = sp->ibp;
        ((struct dinode *)bp->b_data)[sp->ninodes % INOPB(fs)] =
                ip->i_din.ffs_din;

        if(ip->i_number == LFS_IFILE_INUM) /* We know sp->idp == NULL */
                sp->idp = ((struct dinode *)bp->b_data) +
                        (sp->ninodes % INOPB(fs));
        if(gotblk) {
                bp->b_flags |= B_LOCKED;
                brelse(bp);
        }

        /* Increment inode count in segment summary block. */
        ++((SEGSUM *)(sp->segsum))->ss_ninos;

        /* If this page is full, set flag to allocate a new page. */
        if (++sp->ninodes % INOPB(fs) == 0)
                sp->ibp = NULL;

        /*
         * If updating the ifile, update the super-block.  Update the disk
         * address and access times for this inode in the ifile.
         */
        ino = ip->i_number;
        if (ino == LFS_IFILE_INUM) {
                daddr = fs->lfs_idaddr;
                fs->lfs_idaddr = bp->b_blkno;
        } else {
                LFS_IENTRY(ifp, fs, ino, ibp);
                daddr = ifp->if_daddr;
                ifp->if_daddr = bp->b_blkno;
#ifdef LFS_DEBUG_NEXTFREE
                if(ino > 3 && ifp->if_nextfree) {
                        vprint("lfs_writeinode",ITOV(ip));
                        printf("lfs_writeinode: updating free ino %d\n",
                               ip->i_number);
                }
#endif
                error = VOP_BWRITE(ibp);
        }

        /*
         * No need to update segment usage if there was no former inode address
         * or if the last inode address is in the current partial segment.
         */
        if (daddr > 0 && !(daddr >= fs->lfs_lastpseg && daddr <= bp->b_blkno)) {
                LFS_SEGENTRY(sup, fs, datosn(fs, daddr), bp);
#ifdef DIAGNOSTIC
                if (sup->su_nbytes < DINODE_SIZE) {
                        /* XXX -- Change to a panic. */
                        printf("lfs_writeinode: negative bytes (segment %d short by %d)\n",
                               datosn(fs, daddr), (int)DINODE_SIZE - sup->su_nbytes);
                        panic("lfs_writeinode: negative bytes");
                        sup->su_nbytes = DINODE_SIZE;
                }
#endif
                sup->su_nbytes -= DINODE_SIZE;
                redo_ifile =
                        (ino == LFS_IFILE_INUM && !(bp->b_flags & B_GATHERED));
                error = VOP_BWRITE(bp);
        }
        return (redo_ifile);
}

int
lfs_gatherblock(sp, bp, sptr)
        struct segment *sp;
        struct buf *bp;
        int *sptr;
{
        struct lfs *fs;
        int version;

        /*
         * If full, finish this segment.  We may be doing I/O, so
         * release and reacquire the splbio().
         */
#ifdef DIAGNOSTIC
        if (sp->vp == NULL)
                panic ("lfs_gatherblock: Null vp in segment");
#endif
        fs = sp->fs;
        if (sp->sum_bytes_left < sizeof(ufs_daddr_t) ||
            sp->seg_bytes_left < bp->b_bcount) {
                if (sptr)
                        splx(*sptr);
                lfs_updatemeta(sp);

                version = sp->fip->fi_version;
                (void) lfs_writeseg(fs, sp);

                sp->fip->fi_version = version;
                sp->fip->fi_ino = VTOI(sp->vp)->i_number;
                /* Add the current file to the segment summary. */
                ++((SEGSUM *)(sp->segsum))->ss_nfinfo;
                sp->sum_bytes_left -=
                        sizeof(struct finfo) - sizeof(ufs_daddr_t);

                if (sptr)
                        *sptr = splbio();
                return(1);
        }

#ifdef DEBUG
        if(bp->b_flags & B_GATHERED) {
                printf("lfs_gatherblock: already gathered! Ino %d, lbn %d\n",
                       sp->fip->fi_ino, bp->b_lblkno);
                return(0);
        }
#endif
        /* Insert into the buffer list, update the FINFO block. */
        bp->b_flags |= B_GATHERED;
        *sp->cbpp++ = bp;
        sp->fip->fi_blocks[sp->fip->fi_nblocks++] = bp->b_lblkno;

        sp->sum_bytes_left -= sizeof(ufs_daddr_t);
        sp->seg_bytes_left -= bp->b_bcount;
        return(0);
}

int
lfs_gather(fs, sp, vp, match)
        struct lfs *fs;
        struct segment *sp;
        struct vnode *vp;
        int (*match) __P((struct lfs *, struct buf *));
{
        struct buf *bp;
        int s, count = 0;

        sp->vp = vp;
        s = splbio();

#ifndef LFS_NO_BACKBUF_HACK
loop:   for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = bp->b_vnbufs.le_next) {
#else /* LFS_NO_BACKBUF_HACK */
/* This is a hack to see if ordering the blocks in LFS makes a difference. */
# define BUF_OFFSET (((void *)&bp->b_vnbufs.le_next) - (void *)bp)
# define BACK_BUF(BP) ((struct buf *)(((void *)BP->b_vnbufs.le_prev) - BUF_OFFSET))
# define BEG_OF_LIST ((struct buf *)(((void *)&vp->v_dirtyblkhd.lh_first) - BUF_OFFSET))
/* Find last buffer. */
loop:   for (bp = vp->v_dirtyblkhd.lh_first; bp && bp->b_vnbufs.le_next != NULL;
             bp = bp->b_vnbufs.le_next);
        for (; bp && bp != BEG_OF_LIST; bp = BACK_BUF(bp)) {
#endif /* LFS_NO_BACKBUF_HACK */
                if ((bp->b_flags & (B_BUSY|B_GATHERED)) || !match(fs, bp))
                        continue;
                if(vp->v_type == VBLK) {
                        /* For block devices, just write the blocks. */
                        /* XXX Do we really need to even do this? */
#ifdef DEBUG_LFS
                        if(count == 0)
                                printf("BLK(");
                        printf(".");
#endif
                        /* Get the block before bwrite, so we don't corrupt the free list */
                        bp->b_flags |= B_BUSY;
                        bremfree(bp);
                        bwrite(bp);
                } else {
#ifdef DIAGNOSTIC
                        if (!(bp->b_flags & B_DELWRI))
                                panic("lfs_gather: bp not B_DELWRI");
                        if (!(bp->b_flags & B_LOCKED)) {
                                printf("lfs_gather: lbn %d blk %d not B_LOCKED\n",
                                       bp->b_lblkno, bp->b_blkno);
                                VOP_PRINT(bp->b_vp);
                                panic("lfs_gather: bp not B_LOCKED");
                        }
#endif
                        if (lfs_gatherblock(sp, bp, &s)) {
                                goto loop;
                        }
                }
                count++;
        }
        splx(s);
#ifdef DEBUG_LFS
        if(vp->v_type == VBLK && count)
                printf(")\n");
#endif
        lfs_updatemeta(sp);
        sp->vp = NULL;
        return count;
}

/*
 * Update the metadata that points to the blocks listed in the FINFO
 * array.
 */
void
lfs_updatemeta(sp)
        struct segment *sp;
{
        SEGUSE *sup;
        struct buf *bp;
        struct lfs *fs;
        struct vnode *vp;
        struct indir a[NIADDR + 2], *ap;
        struct inode *ip;
        ufs_daddr_t daddr, lbn, off;
        int error, i, nblocks, num;

        vp = sp->vp;
        nblocks = &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp;
        if (nblocks < 0)
                panic("lfs_updatemeta: negative nblocks");
        if (vp == NULL || nblocks == 0)
                return;

        /* Sort the blocks. */
        /*
         * XXX KS - We have to sort even if the blocks come from the
         * cleaner, because there might be other pending blocks on the
         * same inode...and if we don't sort, and there are fragments
         * present, blocks may be written in the wrong place.
         */
        /* if (!(sp->seg_flags & SEGM_CLEAN)) */
        lfs_shellsort(sp->start_bpp, sp->start_lbp, nblocks);

        /*
         * Record the length of the last block in case it's a fragment.
         * If there are indirect blocks present, they sort last.  An
         * indirect block will be lfs_bsize and its presence indicates
         * that you cannot have fragments.
         */
        sp->fip->fi_lastlength = sp->start_bpp[nblocks - 1]->b_bcount;

        /*
         * Assign disk addresses, and update references to the logical
         * block and the segment usage information.
         */
        fs = sp->fs;
        for (i = nblocks; i--; ++sp->start_bpp) {
                lbn = *sp->start_lbp++;

                (*sp->start_bpp)->b_blkno = off = fs->lfs_offset;
                if((*sp->start_bpp)->b_blkno == (*sp->start_bpp)->b_lblkno) {
                        printf("lfs_updatemeta: ino %d blk %d has same lbn and daddr\n",
                               VTOI(vp)->i_number, off);
                }
                fs->lfs_offset +=
                        fragstodb(fs, numfrags(fs, (*sp->start_bpp)->b_bcount));
                error = ufs_bmaparray(vp, lbn, &daddr, a, &num, NULL);
                if (error)
                        panic("lfs_updatemeta: ufs_bmaparray %d", error);
                ip = VTOI(vp);
                switch (num) {
                case 0:
                        ip->i_ffs_db[lbn] = off;
                        break;
                case 1:
                        ip->i_ffs_ib[a[0].in_off] = off;
                        break;
                default:
                        ap = &a[num - 1];
                        if (bread(vp, ap->in_lbn, fs->lfs_bsize, NOCRED, &bp))
                                panic("lfs_updatemeta: bread bno %d",
                                      ap->in_lbn);
                        /*
                         * Bread may create a new (indirect) block which needs
                         * to get counted for the inode.
                         */
                        if (/* bp->b_blkno == -1 && */
                            !(bp->b_flags & (B_DELWRI|B_DONE))) {
                                ip->i_ffs_blocks += fsbtodb(fs, 1);
                                fs->lfs_bfree -= fragstodb(fs, fs->lfs_frag);
                        }
                        ((ufs_daddr_t *)bp->b_data)[ap->in_off] = off;
                        VOP_BWRITE(bp);
                }
                /* Update segment usage information. */
                if (daddr != UNASSIGNED &&
                    !(daddr >= fs->lfs_lastpseg && daddr <= off)) {
                        LFS_SEGENTRY(sup, fs, datosn(fs, daddr), bp);
#ifdef DIAGNOSTIC
                        if (sup->su_nbytes < (*sp->start_bpp)->b_bcount) {
                                /* XXX -- Change to a panic. */
                                printf("lfs_updatemeta: negative bytes (segment %d short by %ld)\n",
                                       datosn(fs, daddr),
                                       (*sp->start_bpp)->b_bcount - sup->su_nbytes);
                                printf("lfs_updatemeta: ino %d, lbn %d, addr = %x\n",
                                       VTOI(sp->vp)->i_number,
                                       (*sp->start_bpp)->b_lblkno, daddr);
                                panic("lfs_updatemeta: negative bytes");
                                sup->su_nbytes = (*sp->start_bpp)->b_bcount;
                        }
#endif
                        sup->su_nbytes -= (*sp->start_bpp)->b_bcount;
                        error = VOP_BWRITE(bp);
                }
        }
}

/*
 * Start a new segment.
 */
int
lfs_initseg(fs)
        struct lfs *fs;
{
        struct segment *sp;
        SEGUSE *sup;
        SEGSUM *ssp;
        struct buf *bp;
        int repeat;

        sp = fs->lfs_sp;

        repeat = 0;
        /* Advance to the next segment. */
        if (!LFS_PARTIAL_FITS(fs)) {
                /* Wake up any cleaning procs waiting on this file system. */
                wakeup(&lfs_allclean_wakeup);
                wakeup(&fs->lfs_nextseg);
                lfs_newseg(fs);
                repeat = 1;
                fs->lfs_offset = fs->lfs_curseg;
                sp->seg_number = datosn(fs, fs->lfs_curseg);
                sp->seg_bytes_left = fs->lfs_dbpseg * DEV_BSIZE;
                /*
                 * If the segment contains a superblock, update the offset
                 * and summary address to skip over it.
                 */
                LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
                if (sup->su_flags & SEGUSE_SUPERBLOCK) {
                        fs->lfs_offset += LFS_SBPAD / DEV_BSIZE;
                        sp->seg_bytes_left -= LFS_SBPAD;
                }
                brelse(bp);
        } else {
                sp->seg_number = datosn(fs, fs->lfs_curseg);
                sp->seg_bytes_left = (fs->lfs_dbpseg -
                                      (fs->lfs_offset - fs->lfs_curseg)) * DEV_BSIZE;
        }
        fs->lfs_lastpseg = fs->lfs_offset;

        sp->fs = fs;
        sp->ibp = NULL;
        sp->idp = NULL;
        sp->ninodes = 0;

        /* Get a new buffer for SEGSUM and enter it into the buffer list. */
        sp->cbpp = sp->bpp;
        *sp->cbpp = lfs_newbuf(VTOI(fs->lfs_ivnode)->i_devvp,
                               fs->lfs_offset, LFS_SUMMARY_SIZE);
        sp->segsum = (*sp->cbpp)->b_data;
        bzero(sp->segsum, LFS_SUMMARY_SIZE);
        sp->start_bpp = ++sp->cbpp;
        fs->lfs_offset += LFS_SUMMARY_SIZE / DEV_BSIZE;

        /* Set point to SEGSUM, initialize it. */
        ssp = sp->segsum;
        ssp->ss_next = fs->lfs_nextseg;
        ssp->ss_nfinfo = ssp->ss_ninos = 0;
        ssp->ss_magic = SS_MAGIC;

        /* Set pointer to first FINFO, initialize it. */
        sp->fip = (struct finfo *)((caddr_t)sp->segsum + sizeof(SEGSUM));
        sp->fip->fi_nblocks = 0;
        sp->start_lbp = &sp->fip->fi_blocks[0];
        sp->fip->fi_lastlength = 0;

        sp->seg_bytes_left -= LFS_SUMMARY_SIZE;
        sp->sum_bytes_left = LFS_SUMMARY_SIZE - sizeof(SEGSUM);

        return(repeat);
}

/*
 * Return the next segment to write.
 */
void
lfs_newseg(fs)
        struct lfs *fs;
{
        CLEANERINFO *cip;
        SEGUSE *sup;
        struct buf *bp;
        int curseg, isdirty, sn;

        LFS_SEGENTRY(sup, fs, datosn(fs, fs->lfs_nextseg), bp);
        sup->su_flags |= SEGUSE_DIRTY | SEGUSE_ACTIVE;
        sup->su_nbytes = 0;
        sup->su_nsums = 0;
        sup->su_ninos = 0;
        (void) VOP_BWRITE(bp);

        LFS_CLEANERINFO(cip, fs, bp);
        --cip->clean;
        ++cip->dirty;
        fs->lfs_nclean = cip->clean;
        (void) VOP_BWRITE(bp);

        fs->lfs_lastseg = fs->lfs_curseg;
        fs->lfs_curseg = fs->lfs_nextseg;
        for (sn = curseg = datosn(fs, fs->lfs_curseg);;) {
                sn = (sn + 1) % fs->lfs_nseg;
                if (sn == curseg)
                        panic("lfs_nextseg: no clean segments");
                LFS_SEGENTRY(sup, fs, sn, bp);
                isdirty = sup->su_flags & SEGUSE_DIRTY;
                brelse(bp);
                if (!isdirty)
                        break;
        }

        ++fs->lfs_nactive;
        fs->lfs_nextseg = sntoda(fs, sn);
        if(lfs_dostats) {
                ++lfs_stats.segsused;
        }
}

int
lfs_writeseg(fs, sp)
        struct lfs *fs;
        struct segment *sp;
{
        extern int locked_queue_count;
        extern long locked_queue_bytes;
        struct buf **bpp, *bp, *cbp;
        SEGUSE *sup;
        SEGSUM *ssp;
        dev_t i_dev;
        u_long *datap, *dp;
        int do_again, i, nblocks, s;
#ifdef LFS_TRACK_IOS
        int j;
#endif
        int (*strategy)__P((void *));
        struct vop_strategy_args vop_strategy_a;
        u_short ninos;
        struct vnode *devvp;
        char *p;
        struct vnode *vn;
        struct inode *ip;
#if defined(DEBUG) && defined(LFS_PROPELLER)
        static int propeller;
        char propstring[4] = "-\\|/";

        printf("%c\b", propstring[propeller++]);
        if(propeller == 4)
                propeller = 0;
#endif

        /*
         * If there are no buffers other than the segment summary to write
         * and it is not a checkpoint, don't do anything.  On a checkpoint,
         * even if there aren't any buffers, you need to write the superblock.
         */
        if ((nblocks = sp->cbpp - sp->bpp) == 1)
                return (0);

#ifdef DEBUG_LFS
        lfs_check_bpp(fs, sp, __FILE__, __LINE__);
#endif
        i_dev = VTOI(fs->lfs_ivnode)->i_dev;
        devvp = VTOI(fs->lfs_ivnode)->i_devvp;

        /* Update the segment usage information. */
        LFS_SEGENTRY(sup, fs, sp->seg_number, bp);

        /* Loop through all blocks, except the segment summary. */
        for (bpp = sp->bpp; ++bpp < sp->cbpp; ) {
                if((*bpp)->b_vp != devvp)
                        sup->su_nbytes += (*bpp)->b_bcount;
        }

        ssp = (SEGSUM *)sp->segsum;

        ninos = (ssp->ss_ninos + INOPB(fs) - 1) / INOPB(fs);
        sup->su_nbytes += ssp->ss_ninos * DINODE_SIZE;
        /* sup->su_nbytes += LFS_SUMMARY_SIZE; */
        sup->su_lastmod = time.tv_sec;
        sup->su_ninos += ninos;
        ++sup->su_nsums;

        do_again = !(bp->b_flags & B_GATHERED);
        (void)VOP_BWRITE(bp);
        /*
         * Compute checksum across data and then across summary; the first
         * block (the summary block) is skipped.  Set the create time here
         * so that it's guaranteed to be later than the inode mod times.
         *
         * XXX
         * Fix this to do it inline, instead of malloc/copy.
         */
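        /*
         * Layout sketch (explanatory only): datap collects the first
         * u_long of each of the (nblocks - 1) non-summary blocks;
         * ss_datasum is cksum() over that array, and ss_sumsum is then
         * computed over the summary block starting at ss_datasum, so the
         * ss_sumsum field itself is excluded from both checksums.
         */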
        datap = dp = malloc(nblocks * sizeof(u_long), M_SEGMENT, M_WAITOK);
        for (bpp = sp->bpp, i = nblocks - 1; i--;) {
                if (((*++bpp)->b_flags & (B_CALL|B_INVAL)) == (B_CALL|B_INVAL)) {
                        if (copyin((*bpp)->b_saveaddr, dp++, sizeof(u_long)))
                                panic("lfs_writeseg: copyin failed [1]: ino %d blk %d",
                                      VTOI((*bpp)->b_vp)->i_number, (*bpp)->b_lblkno);
                } else {
                        if( !((*bpp)->b_flags & B_CALL) ) {
                                /*
                                 * Before we record data for a checksum,
                                 * make sure the data won't change in between
                                 * the checksum calculation and the write,
                                 * by marking the buffer B_BUSY.  It will
                                 * be freed later by brelse().
                                 */
                        again:
                                s = splbio();
                                if((*bpp)->b_flags & B_BUSY) {
#ifdef DEBUG
                                        printf("lfs_writeseg: avoiding potential data summary corruption for ino %d, lbn %d\n",
                                               VTOI((*bpp)->b_vp)->i_number,
                                               bp->b_lblkno);
#endif
                                        (*bpp)->b_flags |= B_WANTED;
                                        tsleep((*bpp), (PRIBIO + 1),
                                               "lfs_writeseg", 0);
                                        splx(s);
                                        goto again;
                                }
                                (*bpp)->b_flags |= B_BUSY;
                                splx(s);
                        }
                        *dp++ = ((u_long *)(*bpp)->b_data)[0];
                }
        }
        ssp->ss_create = time.tv_sec;
        ssp->ss_datasum = cksum(datap, (nblocks - 1) * sizeof(u_long));
        ssp->ss_sumsum =
                cksum(&ssp->ss_datasum, LFS_SUMMARY_SIZE - sizeof(ssp->ss_sumsum));
        free(datap, M_SEGMENT);
#ifdef DIAGNOSTIC
        if (fs->lfs_bfree < fsbtodb(fs, ninos) + LFS_SUMMARY_SIZE / DEV_BSIZE)
                panic("lfs_writeseg: No diskspace for summary");
#endif
        fs->lfs_bfree -= (fsbtodb(fs, ninos) + LFS_SUMMARY_SIZE / DEV_BSIZE);

        strategy = devvp->v_op[VOFFSET(vop_strategy)];

        /*
         * When we simply write the blocks we lose a rotation for every block
         * written.  To avoid this problem, we allocate memory in chunks, copy
         * the buffers into the chunk and write the chunk.  CHUNKSIZE is the
         * largest size I/O devices can handle.
         * When the data is copied to the chunk, turn off the B_LOCKED bit
         * and brelse the buffer (which will move them to the LRU list).  Add
         * the B_CALL flag to the buffer header so we can count I/O's for the
         * checkpoints and so we can release the allocated memory.
         *
         * XXX
         * This should be removed if the new virtual memory system allows us to
         * easily make the buffers contiguous in kernel memory and if that's
         * fast enough.
         */

#define CHUNKSIZE MAXPHYS
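
/*
 * Example (illustrative only; assumes MAXPHYS is 64k and an 8k file
 * system block size): up to eight gathered buffers are bcopy'd into a
 * single cbp chunk and handed to the device with one strategy call,
 * instead of eight separate, rotationally expensive writes.
 */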

        if(devvp == NULL)
                panic("devvp is NULL");
        for (bpp = sp->bpp, i = nblocks; i;) {
                cbp = lfs_newbuf(devvp, (*bpp)->b_blkno, CHUNKSIZE);
                cbp->b_dev = i_dev;
                cbp->b_flags |= B_ASYNC | B_BUSY;
                cbp->b_bcount = 0;

#ifdef DIAGNOSTIC
                if(datosn(fs, (*bpp)->b_blkno + ((*bpp)->b_bcount - 1)/DEV_BSIZE) !=
                   datosn(fs, cbp->b_blkno)) {
                        panic("lfs_writeseg: Segment overwrite");
                }
#endif

                if(fs->lfs_iocount >= LFS_THROTTLE) {
                        tsleep(&fs->lfs_iocount, PRIBIO+1, "lfs throttle", 0);
                }
                s = splbio();
                ++fs->lfs_iocount;
#ifdef LFS_TRACK_IOS
                for(j = 0; j < LFS_THROTTLE; j++) {
                        if(fs->lfs_pending[j] == LFS_UNUSED_DADDR) {
                                fs->lfs_pending[j] = cbp->b_blkno;
                                break;
                        }
                }
#endif /* LFS_TRACK_IOS */
                for (p = cbp->b_data; i && cbp->b_bcount < CHUNKSIZE; i--) {
                        bp = *bpp;

                        if (bp->b_bcount > (CHUNKSIZE - cbp->b_bcount))
                                break;

                        /*
                         * Fake buffers from the cleaner are marked as B_INVAL.
                         * We need to copy the data from user space rather than
                         * from the buffer indicated.
                         * XXX - what do I do on an error?
                         */
                        if ((bp->b_flags & (B_CALL|B_INVAL)) == (B_CALL|B_INVAL)) {
                                if (copyin(bp->b_saveaddr, p, bp->b_bcount))
                                        panic("lfs_writeseg: copyin failed [2]");
                        } else
                                bcopy(bp->b_data, p, bp->b_bcount);
                        p += bp->b_bcount;
                        cbp->b_bcount += bp->b_bcount;
                        if (bp->b_flags & B_LOCKED) {
                                --locked_queue_count;
                                locked_queue_bytes -= bp->b_bufsize;
                        }
                        bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
                                         B_LOCKED | B_GATHERED);
                        vn = bp->b_vp;
                        /*
                         * Check B_NEEDCOMMIT before the buffer is freed
                         * or released below.
                         */
                        if(bp->b_flags & B_NEEDCOMMIT) { /* XXX */
                                bp->b_flags &= ~B_NEEDCOMMIT;
                                wakeup(bp);
                        }
                        if (bp->b_flags & B_CALL) {
                                /* if B_CALL, it was created with newbuf */
                                lfs_freebuf(bp);
                        } else {
                                bremfree(bp);
                                bp->b_flags |= B_DONE;
                                if(vn)
                                        reassignbuf(bp, vn);
                                brelse(bp);
                        }

                        bpp++;

                        /*
                         * If this is the last block for this vnode, but
                         * there are other blocks on its dirty list,
                         * set IN_MODIFIED/IN_CLEANING depending on what
                         * sort of block.  Only do this for our mount point,
                         * not for, e.g., inode blocks that are attached to
                         * the devvp.
                         */
                        if(i > 1 && vn && *bpp && (*bpp)->b_vp != vn
                           && (*bpp)->b_vp && (bp = vn->v_dirtyblkhd.lh_first) != NULL &&
                           vn->v_mount == fs->lfs_ivnode->v_mount)
                        {
                                ip = VTOI(vn);
#ifdef DEBUG_LFS
                                printf("lfs_writeseg: marking ino %d\n", ip->i_number);
#endif
                                if(!(ip->i_flag & (IN_CLEANING|IN_MODIFIED))) {
                                        fs->lfs_uinodes++;
                                        if(bp->b_flags & B_CALL)
                                                ip->i_flag |= IN_CLEANING;
                                        else
                                                ip->i_flag |= IN_MODIFIED;
                                }
                        }
                        /* if(vn->v_dirtyblkhd.lh_first == NULL) */
                        wakeup(vn);
                }
                ++cbp->b_vp->v_numoutput;
                splx(s);
                /*
                 * XXXX This is a gross and disgusting hack.  Since these
                 * buffers are physically addressed, they hang off the
                 * device vnode (devvp).  As a result, they have no way
                 * of getting to the LFS superblock or lfs structure to
                 * keep track of the number of I/O's pending.  So, I am
                 * going to stuff the fs into the saveaddr field of
                 * the buffer (yuk).
                 */
                cbp->b_saveaddr = (caddr_t)fs;
                vop_strategy_a.a_desc = VDESC(vop_strategy);
                vop_strategy_a.a_bp = cbp;
                (strategy)(&vop_strategy_a);
        }
        /*
         * XXX
         * Vinvalbuf can move locked buffers off the locked queue
         * and we have no way of knowing about this.  So, after
         * doing a big write, we recalculate how many buffers are
         * really still left on the locked queue.
         */
        lfs_countlocked(&locked_queue_count, &locked_queue_bytes);
        wakeup(&locked_queue_count);
        if(lfs_dostats) {
                ++lfs_stats.psegwrites;
                lfs_stats.blocktot += nblocks - 1;
                if (fs->lfs_sp->seg_flags & SEGM_SYNC)
                        ++lfs_stats.psyncwrites;
                if (fs->lfs_sp->seg_flags & SEGM_CLEAN) {
                        ++lfs_stats.pcleanwrites;
                        lfs_stats.cleanblocks += nblocks - 1;
                }
        }
        return (lfs_initseg(fs) || do_again);
}

void
lfs_writesuper(fs, daddr)
        struct lfs *fs;
        daddr_t daddr;
{
        struct buf *bp;
        dev_t i_dev;
        int (*strategy) __P((void *));
        int s;
        struct vop_strategy_args vop_strategy_a;

#ifdef LFS_CANNOT_ROLLFW
        /*
         * If we can write one superblock while another is in
         * progress, we risk not having a complete checkpoint if we crash.
         * So, block here if a superblock write is in progress.
         *
         * XXX - should be a proper lock, not this hack
         */
        while(fs->lfs_sbactive) {
                tsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs sb", 0);
        }
        fs->lfs_sbactive = daddr;
#endif
        i_dev = VTOI(fs->lfs_ivnode)->i_dev;
        strategy = VTOI(fs->lfs_ivnode)->i_devvp->v_op[VOFFSET(vop_strategy)];

        /* Set timestamp of this version of the superblock */
        fs->lfs_tstamp = time.tv_sec;

        /* Checksum the superblock and copy it into a buffer. */
        fs->lfs_cksum = lfs_sb_cksum(&(fs->lfs_dlfs));
        bp = lfs_newbuf(VTOI(fs->lfs_ivnode)->i_devvp, daddr, LFS_SBPAD);
        *(struct dlfs *)bp->b_data = fs->lfs_dlfs;

        bp->b_dev = i_dev;
        bp->b_flags |= B_BUSY | B_CALL | B_ASYNC;
        bp->b_flags &= ~(B_DONE | B_ERROR | B_READ | B_DELWRI);
        bp->b_iodone = lfs_supercallback;
        /* XXX KS - same nasty hack as above */
        bp->b_saveaddr = (caddr_t)fs;

        vop_strategy_a.a_desc = VDESC(vop_strategy);
        vop_strategy_a.a_bp = bp;
        s = splbio();
        ++bp->b_vp->v_numoutput;
        splx(s);
        (strategy)(&vop_strategy_a);
}

/*
 * Logical block number match routines used when traversing the dirty block
 * chain.
 */
int
lfs_match_fake(fs, bp)
        struct lfs *fs;
        struct buf *bp;
{
        return (bp->b_flags & B_CALL);
}

int
lfs_match_data(fs, bp)
        struct lfs *fs;
        struct buf *bp;
{
        return (bp->b_lblkno >= 0);
}

int
lfs_match_indir(fs, bp)
        struct lfs *fs;
        struct buf *bp;
{
        int lbn;

        lbn = bp->b_lblkno;
        return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 0);
}

int
lfs_match_dindir(fs, bp)
        struct lfs *fs;
        struct buf *bp;
{
        int lbn;

        lbn = bp->b_lblkno;
        return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 1);
}

int
lfs_match_tindir(fs, bp)
        struct lfs *fs;
        struct buf *bp;
{
        int lbn;

        lbn = bp->b_lblkno;
        return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 2);
}
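
/*
 * Worked example for the match routines (illustrative only; assumes
 * NDADDR = 12 and NINDIR(fs) = 2048): metadata blocks carry negative
 * lbns, so lbn -12 gives (-lbn - NDADDR) % 2048 == 0 and matches
 * lfs_match_indir; lbn -13 gives 1 (lfs_match_dindir); lbn -14 gives 2
 * (lfs_match_tindir).  The pattern repeats every NINDIR(fs) entries.
 */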

/*
 * XXX - The only buffers that are going to hit these functions are the
 * segment write blocks, or the segment summaries, or the superblocks.
 *
 * All of the above are created by lfs_newbuf, and so do not need to be
 * released via brelse.
 */
void
lfs_callback(bp)
        struct buf *bp;
{
        struct lfs *fs;
#ifdef LFS_TRACK_IOS
        int j;
#endif

        fs = (struct lfs *)bp->b_saveaddr;
#ifdef DIAGNOSTIC
        if (fs->lfs_iocount == 0)
                panic("lfs_callback: zero iocount");
#endif
        if (--fs->lfs_iocount < LFS_THROTTLE)
                wakeup(&fs->lfs_iocount);
#ifdef LFS_TRACK_IOS
        for(j = 0; j < LFS_THROTTLE; j++) {
                if(fs->lfs_pending[j] == bp->b_blkno) {
                        fs->lfs_pending[j] = LFS_UNUSED_DADDR;
                        wakeup(&(fs->lfs_pending[j]));
                        break;
                }
        }
#endif /* LFS_TRACK_IOS */

        lfs_freebuf(bp);
}

void
lfs_supercallback(bp)
        struct buf *bp;
{
#ifdef LFS_CANNOT_ROLLFW
        struct lfs *fs;

        fs = (struct lfs *)bp->b_saveaddr;
        fs->lfs_sbactive = NULL;
        wakeup(&fs->lfs_sbactive);
#endif
        lfs_freebuf(bp);
}

/*
 * Shellsort (diminishing increment sort) from Data Structures and
 * Algorithms, Aho, Hopcroft and Ullman, 1983 Edition, page 290;
 * see also Knuth Vol. 3, page 84.  The increments are selected from
 * formula (8), page 95.  Roughly O(N^3/2).
 */
/*
 * This is our own private copy of shellsort because we want to sort
 * two parallel arrays (the array of buffer pointers and the array of
 * logical block numbers) simultaneously.  Note that we cast the array
 * of logical block numbers to an unsigned in this routine so that the
 * negative block numbers (meta data blocks) sort AFTER the data blocks.
 */
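/*
 * Example (illustrative only): given lb_array = { 8, 4, 12 } and
 * bp_array = { b8, b4, b12 }, the routine swaps entries of the two
 * arrays in lockstep, yielding lb_array = { 4, 8, 12 } and bp_array =
 * { b4, b8, b12 }, so lfs_updatemeta() can walk them in parallel.
 */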

void
lfs_shellsort(bp_array, lb_array, nmemb)
        struct buf **bp_array;
        ufs_daddr_t *lb_array;
        register int nmemb;
{
        static int __rsshell_increments[] = { 4, 1, 0 };
        register int incr, *incrp, t1, t2;
        struct buf *bp_temp;
        u_long lb_temp;

        for (incrp = __rsshell_increments; (incr = *incrp++) != 0;)
                for (t1 = incr; t1 < nmemb; ++t1)
                        for (t2 = t1 - incr; t2 >= 0;)
                                if (lb_array[t2] > lb_array[t2 + incr]) {
                                        lb_temp = lb_array[t2];
                                        lb_array[t2] = lb_array[t2 + incr];
                                        lb_array[t2 + incr] = lb_temp;
                                        bp_temp = bp_array[t2];
                                        bp_array[t2] = bp_array[t2 + incr];
                                        bp_array[t2 + incr] = bp_temp;
                                        t2 -= incr;
                                } else
                                        break;
}

/*
 * Check VXLOCK.  Return 1 if the vnode is locked.  Otherwise, vget it.
 */
int
lfs_vref(vp)
        register struct vnode *vp;
{
        /*
         * If we return 1 here during a flush, we risk vinvalbuf() not
         * being able to flush all of the pages from this vnode, which
         * will cause it to panic.  So, return 0 if a flush is in progress.
         */
        if (vp->v_flag & VXLOCK) {
                if(IS_FLUSHING(VTOI(vp)->i_lfs, vp)) {
                        return 0;
                }
                return(1);
        }
        return (vget(vp, 0));
}

/*
 * This is vrele except that we do not want to VOP_INACTIVE this vnode.  We
 * inline vrele here to avoid the vn_lock and VOP_INACTIVE call at the end.
 */
void
lfs_vunref(vp)
        register struct vnode *vp;
{
        /*
         * Analogous to lfs_vref, if the node is flushing, fake it.
         */
        if((vp->v_flag & VXLOCK) && IS_FLUSHING(VTOI(vp)->i_lfs, vp)) {
                return;
        }

        simple_lock(&vp->v_interlock);
#ifdef DIAGNOSTIC
        if(vp->v_usecount <= 0) {
                printf("lfs_vunref: flags are 0x%lx\n", vp->v_flag);
                printf("lfs_vunref: usecount = %ld\n", vp->v_usecount);
                panic("lfs_vunref: v_usecount<=0");
        }
#endif
        vp->v_usecount--;
        if (vp->v_usecount > 0) {
                simple_unlock(&vp->v_interlock);
                return;
        }
#ifdef DIAGNOSTIC
        if(VOP_ISLOCKED(vp))
                panic("lfs_vunref: vnode locked");
#endif
        /*
         * insert at tail of LRU list
         */
        simple_lock(&vnode_free_list_slock);
        TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
        simple_unlock(&vnode_free_list_slock);
        simple_unlock(&vp->v_interlock);
}

/*
 * We use this when we have vnodes that were loaded in solely for cleaning.
 * There is no reason to believe that these vnodes will be referenced again
 * soon, since the cleaning process is unrelated to normal filesystem
 * activity.  Putting cleaned vnodes at the tail of the list has the effect
 * of flushing the vnode LRU.  So, put vnodes that were loaded only for
 * cleaning at the head of the list, instead.
 */
void
lfs_vunref_head(vp)
        register struct vnode *vp;
{
        simple_lock(&vp->v_interlock);
#ifdef DIAGNOSTIC
        if(vp->v_usecount <= 0) {
                panic("lfs_vunref_head: v_usecount<=0");
        }
#endif
        vp->v_usecount--;
        if (vp->v_usecount > 0) {
                simple_unlock(&vp->v_interlock);
                return;
        }
#ifdef DIAGNOSTIC
        if(VOP_ISLOCKED(vp))
                panic("lfs_vunref_head: vnode locked");
#endif
        /*
         * insert at head of LRU list
         */
        simple_lock(&vnode_free_list_slock);
        TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
        simple_unlock(&vnode_free_list_slock);
        simple_unlock(&vp->v_interlock);
}
