/*	$NetBSD: lfs_segment.c,v 1.31 1999/10/01 22:07:42 mycroft Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant (at) hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_segment.c	8.10 (Berkeley) 6/10/95
 */

#define ivndebug(vp,str) printf("ino %d: %s\n",VTOI(vp)->i_number,(str))

#include "opt_ddb.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

extern int count_lock_queue __P((void));
extern struct simplelock vnode_free_list_slock;		/* XXX */
extern TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* XXX */

/*
 * Determine if it's OK to start a partial in this segment, or if we need
 * to go on to a new segment.
 */
#define	LFS_PARTIAL_FITS(fs) \
	((fs)->lfs_dbpseg - ((fs)->lfs_offset - (fs)->lfs_curseg) > \
	1 << (fs)->lfs_fsbtodb)
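/*
 * That is, of the lfs_dbpseg disk blocks in the segment, the number not
 * yet consumed (lfs_offset - lfs_curseg is how far we have written into
 * it) must still exceed one filesystem block (1 << lfs_fsbtodb disk
 * blocks) for a partial to fit.
 */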

void	 lfs_callback __P((struct buf *));
int	 lfs_gather __P((struct lfs *, struct segment *,
	     struct vnode *, int (*) __P((struct lfs *, struct buf *))));
int	 lfs_gatherblock __P((struct segment *, struct buf *, int *));
void	 lfs_iset __P((struct inode *, ufs_daddr_t, time_t));
int	 lfs_match_fake __P((struct lfs *, struct buf *));
int	 lfs_match_data __P((struct lfs *, struct buf *));
int	 lfs_match_dindir __P((struct lfs *, struct buf *));
int	 lfs_match_indir __P((struct lfs *, struct buf *));
int	 lfs_match_tindir __P((struct lfs *, struct buf *));
void	 lfs_newseg __P((struct lfs *));
void	 lfs_shellsort __P((struct buf **, ufs_daddr_t *, register int));
void	 lfs_supercallback __P((struct buf *));
void	 lfs_updatemeta __P((struct segment *));
int	 lfs_vref __P((struct vnode *));
void	 lfs_vunref __P((struct vnode *));
void	 lfs_writefile __P((struct lfs *, struct segment *, struct vnode *));
int	 lfs_writeinode __P((struct lfs *, struct segment *, struct inode *));
int	 lfs_writeseg __P((struct lfs *, struct segment *));
void	 lfs_writesuper __P((struct lfs *, daddr_t));
int	 lfs_writevnodes __P((struct lfs *fs, struct mount *mp,
	     struct segment *sp, int dirops));

int	lfs_allclean_wakeup;		/* Cleaner wakeup address. */
int	lfs_writeindir = 1;		/* whether to flush indir on non-ckp */
int	lfs_clean_vnhead = 0;		/* Allow freeing to head of vn list */

/* Statistics Counters */
int lfs_dostats = 1;
struct lfs_stats lfs_stats;

/* op values to lfs_writevnodes */
#define	VN_REG		0
#define	VN_DIROP	1
#define	VN_EMPTY	2
#define	VN_CLEAN	3
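/*
 * VN_REG writes ordinary dirty vnodes; VN_DIROP, only those flagged
 * VDIROP; VN_EMPTY, the inodes of vnodes with no dirty blocks; and
 * VN_CLEAN, vnodes holding blocks handed to us by the cleaner
 * (IN_CLEANING).
 */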

#define	LFS_MAX_ACTIVE	10
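/*
 * Number of segments allowed to be active (written since the last
 * checkpoint) before lfs_segwrite and lfs_vflush force a checkpoint.
 */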

/*
 * XXX KS - Set modification time on the Ifile, so the cleaner can
 * read the fs mod time off of it.  We don't set IN_UPDATE here,
 * since we don't really need this to be flushed to disk (and in any
 * case that wouldn't happen to the Ifile until we checkpoint).
 */
void
lfs_imtime(fs)
	struct lfs *fs;
{
	struct timespec ts;
	struct inode *ip;

	TIMEVAL_TO_TIMESPEC(&time, &ts);
	ip = VTOI(fs->lfs_ivnode);
	ip->i_ffs_mtime = ts.tv_sec;
	ip->i_ffs_mtimensec = ts.tv_nsec;
}

/*
 * Ifile and meta data blocks are not marked busy, so segment writes MUST be
 * single threaded.  Currently, there are two paths into lfs_segwrite, sync()
 * and getnewbuf().  They both mark the file system busy.  Lfs_vflush()
 * explicitly marks the file system busy.  So lfs_segwrite is safe.  I think.
 */

#define	SET_FLUSHING(fs,vp)	(fs)->lfs_flushvp = (vp)
#define	IS_FLUSHING(fs,vp)	((fs)->lfs_flushvp == (vp))
#define	CLR_FLUSHING(fs,vp)	(fs)->lfs_flushvp = NULL
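/*
 * lfs_flushvp records the vnode currently being flushed, so that
 * lfs_writefile, lfs_vref and lfs_vunref can recognize a flush in
 * progress and avoid deadlocking against it (see the IS_FLUSHING
 * tests below).
 */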

int
lfs_vflush(vp)
	struct vnode *vp;
{
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	struct buf *bp, *nbp;
	int error, s;

	ip = VTOI(vp);
	fs = VFSTOUFS(vp->v_mount)->um_lfs;

	if(ip->i_flag & IN_CLEANING) {
#ifdef DEBUG_LFS
		ivndebug(vp,"vflush/in_cleaning");
#endif
		ip->i_flag &= ~IN_CLEANING;
		if(ip->i_flag & IN_MODIFIED) {
			fs->lfs_uinodes--;
		} else
			ip->i_flag |= IN_MODIFIED;
	}

	/* If the node is being written, wait until that is done */
	if(WRITEINPROG(vp)) {
#ifdef DEBUG_LFS
		ivndebug(vp,"vflush/writeinprog");
#endif
		tsleep(vp, PRIBIO+1, "lfs_vw", 0);
	}

	/* Protect against VXLOCK deadlock in vinvalbuf() */
	lfs_seglock(fs, SEGM_SYNC);

	/* If we're supposed to flush a freed inode, just toss it */
	/* XXX - seglock, so these buffers can't be gathered, right? */
	if(ip->i_ffs_mode == 0) {
		printf("lfs_vflush: ino %d is freed, not flushing\n",
		       ip->i_number);
		s = splbio();
		for(bp=vp->v_dirtyblkhd.lh_first; bp; bp=nbp) {
			nbp = bp->b_vnbufs.le_next;
			/* Copied from lfs_writeseg */
			if (bp->b_flags & B_CALL) {
				/* if B_CALL, it was created with newbuf */
				lfs_freebuf(bp);
			} else {
				bremfree(bp);
				bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
						 B_LOCKED | B_GATHERED);
				bp->b_flags |= B_DONE;
				reassignbuf(bp, vp);
				brelse(bp);
			}
		}
		splx(s);
		if(ip->i_flag & IN_CLEANING)
			fs->lfs_uinodes--;
		if(ip->i_flag & IN_MODIFIED)
			fs->lfs_uinodes--;
		ip->i_flag &= ~(IN_MODIFIED|IN_UPDATE|IN_ACCESS|IN_CHANGE|IN_CLEANING);
		printf("lfs_vflush: done not flushing ino %d\n",
		       ip->i_number);
		lfs_segunlock(fs);
		return 0;
	}

	SET_FLUSHING(fs,vp);
	if (fs->lfs_nactive > LFS_MAX_ACTIVE) {
		error = lfs_segwrite(vp->v_mount, SEGM_SYNC|SEGM_CKP);
		CLR_FLUSHING(fs,vp);
		lfs_segunlock(fs);
		return error;
	}
	sp = fs->lfs_sp;

	if (vp->v_dirtyblkhd.lh_first == NULL) {
		lfs_writevnodes(fs, vp->v_mount, sp, VN_EMPTY);
	} else if((ip->i_flag & IN_CLEANING) &&
		  (fs->lfs_sp->seg_flags & SEGM_CLEAN)) {
#ifdef DEBUG_LFS
		ivndebug(vp,"vflush/clean");
#endif
		lfs_writevnodes(fs, vp->v_mount, sp, VN_CLEAN);
	} else if(lfs_dostats) {
		if(vp->v_dirtyblkhd.lh_first ||
		   (VTOI(vp)->i_flag & (IN_MODIFIED|IN_UPDATE|IN_ACCESS|IN_CHANGE|IN_CLEANING)))
			++lfs_stats.vflush_invoked;
#ifdef DEBUG_LFS
		ivndebug(vp,"vflush");
#endif
	}

#ifdef DIAGNOSTIC
	/* XXX KS This actually can happen right now, though it shouldn't(?) */
	if(vp->v_flag & VDIROP) {
		printf("lfs_vflush: flushing VDIROP, this shouldn't be\n");
		/* panic("VDIROP being flushed...this can't happen"); */
	}
	if(vp->v_usecount<0) {
		printf("usecount=%ld\n",vp->v_usecount);
		panic("lfs_vflush: usecount<0");
	}
#endif

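	/*
	 * Write the file and its inode until the inode no longer needs
	 * rewriting.  The outer loop matters only for the Ifile, whose
	 * own inode write re-dirties Ifile blocks; repeat until
	 * lfs_writeseg says nothing further needs to go out.
	 */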
	do {
		do {
			if (vp->v_dirtyblkhd.lh_first != NULL)
				lfs_writefile(fs, sp, vp);
		} while (lfs_writeinode(fs, sp, ip));
	} while (lfs_writeseg(fs, sp) && ip->i_number == LFS_IFILE_INUM);

	if(lfs_dostats) {
		++lfs_stats.nwrites;
		if (sp->seg_flags & SEGM_SYNC)
			++lfs_stats.nsync_writes;
		if (sp->seg_flags & SEGM_CKP)
			++lfs_stats.ncheckpoints;
	}
	lfs_segunlock(fs);

	CLR_FLUSHING(fs,vp);
	return (0);
}

#ifdef DEBUG_LFS_VERBOSE
# define vndebug(vp,str) if(VTOI(vp)->i_flag & IN_CLEANING) printf("not writing ino %d because %s (op %d)\n",VTOI(vp)->i_number,(str),op)
#else
# define vndebug(vp,str)
#endif

int
lfs_writevnodes(fs, mp, sp, op)
	struct lfs *fs;
	struct mount *mp;
	struct segment *sp;
	int op;
{
	struct inode *ip;
	struct vnode *vp;
	int inodes_written=0, only_cleaning;

#ifndef LFS_NO_BACKVP_HACK
	/* BEGIN HACK */
#define	VN_OFFSET	(((caddr_t)&vp->v_mntvnodes.le_next) - (caddr_t)vp)
#define	BACK_VP(VP)	((struct vnode *)(((caddr_t)VP->v_mntvnodes.le_prev) - VN_OFFSET))
#define	BEG_OF_VLIST	((struct vnode *)(((caddr_t)&mp->mnt_vnodelist.lh_first) - VN_OFFSET))
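	/*
	 * The LIST macros only link forward, but le_prev points at the
	 * previous element's le_next field; subtracting VN_OFFSET (the
	 * offset of that field within a vnode) turns it back into a
	 * vnode pointer, letting us walk the mount's vnode list in
	 * reverse.  BEG_OF_VLIST is the fake "vnode" whose le_next field
	 * is the list head, marking where the backward walk stops.
	 */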

	/* Find last vnode. */
loop:	for (vp = mp->mnt_vnodelist.lh_first;
	     vp && vp->v_mntvnodes.le_next != NULL;
	     vp = vp->v_mntvnodes.le_next);
	for (; vp && vp != BEG_OF_VLIST; vp = BACK_VP(vp)) {
#else
loop:
	for (vp = mp->mnt_vnodelist.lh_first;
	     vp != NULL;
	     vp = vp->v_mntvnodes.le_next) {
#endif
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;

		ip = VTOI(vp);
		if ((op == VN_DIROP && !(vp->v_flag & VDIROP)) ||
		    (op != VN_DIROP && op != VN_CLEAN && (vp->v_flag & VDIROP))) {
			vndebug(vp,"dirop");
			continue;
		}

		if (op == VN_EMPTY && vp->v_dirtyblkhd.lh_first) {
			vndebug(vp,"empty");
			continue;
		}

		if (vp->v_type == VNON) {
			continue;
		}

		if(op == VN_CLEAN && ip->i_number != LFS_IFILE_INUM
		   && !(ip->i_flag & IN_CLEANING)) {
			vndebug(vp,"cleaning");
			continue;
		}

		if (lfs_vref(vp)) {
			vndebug(vp,"vref");
			continue;
		}

#if 0	/* XXX KS - if we skip the ifile, things could go badly for us. */
		if(WRITEINPROG(vp)) {
			lfs_vunref(vp);
#ifdef DEBUG_LFS
			ivndebug(vp,"writevnodes/writeinprog");
#endif
			continue;
		}
#endif
		only_cleaning = 0;
		/*
		 * Write the inode/file if dirty and it's not the
		 * IFILE.
		 */
		if ((ip->i_flag &
		     (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE | IN_CLEANING) ||
		     vp->v_dirtyblkhd.lh_first != NULL))
		{
			only_cleaning = ((ip->i_flag &
			    (IN_ACCESS|IN_CHANGE|IN_MODIFIED|IN_UPDATE|IN_CLEANING))
			    == IN_CLEANING);

			if(ip->i_number != LFS_IFILE_INUM
			   && vp->v_dirtyblkhd.lh_first != NULL)
			{
				lfs_writefile(fs, sp, vp);
			}
			if(vp->v_dirtyblkhd.lh_first != NULL) {
				if(WRITEINPROG(vp)) {
#ifdef DEBUG_LFS
					ivndebug(vp,"writevnodes/write2");
#endif
				} else if(!(ip->i_flag &
				    (IN_ACCESS|IN_CHANGE|IN_MODIFIED|IN_UPDATE|IN_CLEANING))) {
#ifdef DEBUG_LFS
					printf("<%d>",ip->i_number);
#endif
					ip->i_flag |= IN_MODIFIED;
					++fs->lfs_uinodes;
				}
			}
			(void) lfs_writeinode(fs, sp, ip);
			inodes_written++;
		}

		if(vp->v_flag & VDIROP) {
			--fs->lfs_dirvcount;
			vp->v_flag &= ~VDIROP;
			wakeup(&fs->lfs_dirvcount);
			lfs_vunref(vp);
		}

		if(lfs_clean_vnhead && only_cleaning)
			lfs_vunref_head(vp);
		else
			lfs_vunref(vp);
	}
	return inodes_written;
}

int
lfs_segwrite(mp, flags)
	struct mount *mp;
	int flags;			/* Do a checkpoint. */
{
	struct buf *bp;
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	struct vnode *vp;
	SEGUSE *segusep;
	ufs_daddr_t ibno;
	int do_ckp, error, i;
	int writer_set = 0;
	int need_unlock = 0;

	fs = VFSTOUFS(mp)->um_lfs;

	lfs_imtime(fs);

	/*
	 * If we are not the cleaner, and we have fewer than MIN_FREE_SEGS
	 * clean segments, wait until cleaner writes.
	 */
	if(!(flags & SEGM_CLEAN)
	   && (!fs->lfs_seglock || !(fs->lfs_sp->seg_flags & SEGM_CLEAN)))
	{
		do {
			if (fs->lfs_nclean <= MIN_FREE_SEGS
			    || fs->lfs_avail <= 0)
			{
				wakeup(&lfs_allclean_wakeup);
				wakeup(&fs->lfs_nextseg);
				error = tsleep(&fs->lfs_avail, PRIBIO + 1,
					       "lfs_avail", 0);
				if (error) {
					return (error);
				}
			}
		} while (fs->lfs_nclean <= MIN_FREE_SEGS || fs->lfs_avail <= 0);
	}

	/*
	 * Allocate a segment structure and enough space to hold pointers to
	 * the maximum possible number of buffers which can be described in a
	 * single summary block.
	 */
	do_ckp = (flags & SEGM_CKP) || fs->lfs_nactive > LFS_MAX_ACTIVE;
	lfs_seglock(fs, flags | (do_ckp ? SEGM_CKP : 0));
	sp = fs->lfs_sp;

	/*
	 * If lfs_flushvp is non-NULL, we are called from lfs_vflush,
	 * in which case we have to flush *all* buffers off of this vnode.
	 */
	if((sp->seg_flags & SEGM_CLEAN) && !(fs->lfs_flushvp))
		lfs_writevnodes(fs, mp, sp, VN_CLEAN);
	else {
		lfs_writevnodes(fs, mp, sp, VN_REG);
		/*
		 * XXX KS - If we're cleaning, we can't wait for dirops,
		 * because they might be waiting on us.  The downside of this
		 * is that, if we write anything besides cleaning blocks
		 * while cleaning, the checkpoint is not completely
		 * consistent.
		 */
		if(!(sp->seg_flags & SEGM_CLEAN)) {
			while(fs->lfs_dirops)
				if((error = tsleep(&fs->lfs_writer, PRIBIO + 1,
						   "lfs writer", 0)))
				{
					free(sp->bpp, M_SEGMENT);
					free(sp, M_SEGMENT);
					return (error);
				}
			fs->lfs_writer++;
			writer_set=1;
			lfs_writevnodes(fs, mp, sp, VN_DIROP);
			((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
		}
	}

	/*
	 * If we are doing a checkpoint, mark everything since the
	 * last checkpoint as no longer ACTIVE.
	 */
	if (do_ckp) {
		for (ibno = fs->lfs_cleansz + fs->lfs_segtabsz;
		     --ibno >= fs->lfs_cleansz; ) {
			if (bread(fs->lfs_ivnode, ibno, fs->lfs_bsize, NOCRED, &bp))
				panic("lfs_segwrite: ifile read");
			segusep = (SEGUSE *)bp->b_data;
			for (i = fs->lfs_sepb; i--; segusep++)
				segusep->su_flags &= ~SEGUSE_ACTIVE;

			/* But the current segment is still ACTIVE */
			if (fs->lfs_curseg/fs->lfs_sepb == (ibno-fs->lfs_cleansz))
				((SEGUSE *)(bp->b_data))[fs->lfs_curseg%fs->lfs_sepb].su_flags |= SEGUSE_ACTIVE;
			error = VOP_BWRITE(bp);
		}
	}

	if (do_ckp || fs->lfs_doifile) {
	redo:
		vp = fs->lfs_ivnode;
		/*
		 * Depending on the circumstances of our calling, the ifile
		 * inode might be locked.  If it is, and if it is locked by
		 * us, we should VREF instead of vget here.
		 */
		need_unlock = 0;
		if(VOP_ISLOCKED(vp)
		   && vp->v_lock.lk_lockholder == curproc->p_pid) {
			VREF(vp);
		} else {
			while (vget(vp, LK_EXCLUSIVE))
				continue;
			need_unlock = 1;
		}
		ip = VTOI(vp);
		if (vp->v_dirtyblkhd.lh_first != NULL)
			lfs_writefile(fs, sp, vp);
		(void)lfs_writeinode(fs, sp, ip);

		/* Only vput if we used vget() above. */
		if(need_unlock)
			vput(vp);
		else
			vrele(vp);

		if (lfs_writeseg(fs, sp) && do_ckp)
			goto redo;
	} else {
		(void) lfs_writeseg(fs, sp);
	}

	/*
	 * If the I/O count is non-zero, sleep until it reaches zero.
	 * At the moment, the user's process hangs around so we can
	 * sleep.
	 */
	fs->lfs_doifile = 0;
	if(writer_set && --fs->lfs_writer == 0)
		wakeup(&fs->lfs_dirops);

	if(lfs_dostats) {
		++lfs_stats.nwrites;
		if (sp->seg_flags & SEGM_SYNC)
			++lfs_stats.nsync_writes;
		if (sp->seg_flags & SEGM_CKP)
			++lfs_stats.ncheckpoints;
	}
	lfs_segunlock(fs);
	return (0);
}

/*
 * Write the dirty blocks associated with a vnode.
 */
void
lfs_writefile(fs, sp, vp)
	struct lfs *fs;
	struct segment *sp;
	struct vnode *vp;
{
	struct buf *bp;
	struct finfo *fip;
	IFILE *ifp;

	if (sp->seg_bytes_left < fs->lfs_bsize ||
	    sp->sum_bytes_left < sizeof(struct finfo))
		(void) lfs_writeseg(fs, sp);

	sp->sum_bytes_left -= sizeof(struct finfo) - sizeof(ufs_daddr_t);
	++((SEGSUM *)(sp->segsum))->ss_nfinfo;

	if(vp->v_flag & VDIROP)
		((SEGSUM *)(sp->segsum))->ss_flags |= (SS_DIROP|SS_CONT);

	fip = sp->fip;
	fip->fi_nblocks = 0;
	fip->fi_ino = VTOI(vp)->i_number;
	LFS_IENTRY(ifp, fs, fip->fi_ino, bp);
	fip->fi_version = ifp->if_version;
	brelse(bp);

	/*
	 * It may not be necessary to write the meta-data blocks at this point,
	 * as the roll-forward recovery code should be able to reconstruct the
	 * list.
	 *
	 * We have to write them anyway, though, under two conditions: (1) the
	 * vnode is being flushed (for reuse by vinvalbuf); or (2) we are
	 * checkpointing.
	 */
	if((sp->seg_flags & SEGM_CLEAN)
	   && VTOI(vp)->i_number != LFS_IFILE_INUM
	   && !IS_FLUSHING(fs,vp))
	{
		lfs_gather(fs, sp, vp, lfs_match_fake);
	} else
		lfs_gather(fs, sp, vp, lfs_match_data);

	if(lfs_writeindir
	   || IS_FLUSHING(fs,vp)
	   || (sp->seg_flags & SEGM_CKP))
	{
		lfs_gather(fs, sp, vp, lfs_match_indir);
		lfs_gather(fs, sp, vp, lfs_match_dindir);
		/* XXX KS - when is TRIPLE not true? */	/* #ifdef TRIPLE */
		lfs_gather(fs, sp, vp, lfs_match_tindir);
		/* #endif */
	}
	fip = sp->fip;
	if (fip->fi_nblocks != 0) {
		sp->fip = (FINFO*)((caddr_t)fip + sizeof(struct finfo) +
				   sizeof(ufs_daddr_t) * (fip->fi_nblocks-1));
		sp->start_lbp = &sp->fip->fi_blocks[0];
	} else {
		sp->sum_bytes_left += sizeof(FINFO) - sizeof(ufs_daddr_t);
		--((SEGSUM *)(sp->segsum))->ss_nfinfo;
	}
}

int
lfs_writeinode(fs, sp, ip)
	struct lfs *fs;
	struct segment *sp;
	struct inode *ip;
{
	struct buf *bp, *ibp;
	IFILE *ifp;
	SEGUSE *sup;
	ufs_daddr_t daddr;
	ino_t ino;
	int error, i, ndx;
	int redo_ifile = 0;
	struct timespec ts;
	int gotblk=0;

	if (!(ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE | IN_CLEANING)))
		return(0);

	/* Allocate a new inode block if necessary. */
	if ((ip->i_number != LFS_IFILE_INUM || sp->idp == NULL) && sp->ibp == NULL) {
		/* Allocate a new segment if necessary. */
		if (sp->seg_bytes_left < fs->lfs_bsize ||
		    sp->sum_bytes_left < sizeof(ufs_daddr_t))
			(void) lfs_writeseg(fs, sp);

		/* Get next inode block. */
		daddr = fs->lfs_offset;
		fs->lfs_offset += fsbtodb(fs, 1);
		sp->ibp = *sp->cbpp++ =
			getblk(VTOI(fs->lfs_ivnode)->i_devvp, daddr,
			       fs->lfs_bsize, 0, 0);
		gotblk++;

		/* Zero out inode numbers */
		for (i = 0; i < INOPB(fs); ++i)
			((struct dinode *)sp->ibp->b_data)[i].di_inumber = 0;

		++sp->start_bpp;
		fs->lfs_avail -= fsbtodb(fs, 1);
		/* Set remaining space counters. */
		sp->seg_bytes_left -= fs->lfs_bsize;
		sp->sum_bytes_left -= sizeof(ufs_daddr_t);
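		/*
		 * Record the disk address of this inode block in the
		 * summary; inode-block addresses form a table that grows
		 * backward from the end of the summary block.
		 */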
		ndx = LFS_SUMMARY_SIZE / sizeof(ufs_daddr_t) -
			sp->ninodes / INOPB(fs) - 1;
		((ufs_daddr_t *)(sp->segsum))[ndx] = daddr;
	}

	/* Update the inode times and copy the inode onto the inode page. */
	if (ip->i_flag & (IN_CLEANING|IN_MODIFIED))
		--fs->lfs_uinodes;
	TIMEVAL_TO_TIMESPEC(&time, &ts);
	LFS_ITIMES(ip, &ts, &ts, &ts);

	if(ip->i_flag & IN_CLEANING)
		ip->i_flag &= ~IN_CLEANING;
	else
		ip->i_flag &= ~(IN_ACCESS|IN_CHANGE|IN_MODIFIED|IN_UPDATE);

	/*
	 * If this is the Ifile, and we've already written the Ifile in this
	 * partial segment, just overwrite it (it's not on disk yet) and
	 * continue.
	 *
	 * XXX we know that the bp that we get the second time around has
	 * already been gathered.
	 */
	if(ip->i_number == LFS_IFILE_INUM && sp->idp) {
		*(sp->idp) = ip->i_din.ffs_din;
		return 0;
	}

	bp = sp->ibp;
	((struct dinode *)bp->b_data)[sp->ninodes % INOPB(fs)] =
		ip->i_din.ffs_din;

	if(ip->i_number == LFS_IFILE_INUM) /* We know sp->idp == NULL */
		sp->idp = ((struct dinode *)bp->b_data) +
			(sp->ninodes % INOPB(fs));
	if(gotblk) {
		bp->b_flags |= B_LOCKED;
		brelse(bp);
	}

	/* Increment inode count in segment summary block. */
	++((SEGSUM *)(sp->segsum))->ss_ninos;

	/* If this page is full, set flag to allocate a new page. */
	if (++sp->ninodes % INOPB(fs) == 0)
		sp->ibp = NULL;

	/*
	 * If updating the ifile, update the super-block.  Update the disk
	 * address and access times for this inode in the ifile.
	 */
	ino = ip->i_number;
	if (ino == LFS_IFILE_INUM) {
		daddr = fs->lfs_idaddr;
		fs->lfs_idaddr = bp->b_blkno;
	} else {
		LFS_IENTRY(ifp, fs, ino, ibp);
		daddr = ifp->if_daddr;
		ifp->if_daddr = bp->b_blkno;
#ifdef LFS_DEBUG_NEXTFREE
		if(ino > 3 && ifp->if_nextfree) {
			vprint("lfs_writeinode",ITOV(ip));
			printf("lfs_writeinode: updating free ino %d\n",
			       ip->i_number);
		}
#endif
		error = VOP_BWRITE(ibp);
	}

	/*
	 * No need to update segment usage if there was no former inode address
	 * or if the last inode address is in the current partial segment.
	 */
	if (daddr != LFS_UNUSED_DADDR &&
	    !(daddr >= fs->lfs_lastpseg && daddr <= bp->b_blkno)) {
		LFS_SEGENTRY(sup, fs, datosn(fs, daddr), bp);
#ifdef DIAGNOSTIC
		if (sup->su_nbytes < DINODE_SIZE) {
			/* XXX -- Change to a panic. */
			printf("lfs_writeinode: negative bytes (segment %d short by %d)\n",
			       datosn(fs, daddr), (int)DINODE_SIZE - sup->su_nbytes);
			panic("lfs_writeinode: negative bytes");
			sup->su_nbytes = DINODE_SIZE;
		}
#endif
		sup->su_nbytes -= DINODE_SIZE;
		redo_ifile =
			(ino == LFS_IFILE_INUM && !(bp->b_flags & B_GATHERED));
		error = VOP_BWRITE(bp);
	}
	return (redo_ifile);
}

int
lfs_gatherblock(sp, bp, sptr)
	struct segment *sp;
	struct buf *bp;
	int *sptr;
{
	struct lfs *fs;
	int version;

	/*
	 * If full, finish this segment.  We may be doing I/O, so
	 * release and reacquire the splbio().
	 */
#ifdef DIAGNOSTIC
	if (sp->vp == NULL)
		panic("lfs_gatherblock: Null vp in segment");
#endif
	fs = sp->fs;
	if (sp->sum_bytes_left < sizeof(ufs_daddr_t) ||
	    sp->seg_bytes_left < bp->b_bcount) {
		if (sptr)
			splx(*sptr);
		lfs_updatemeta(sp);

		version = sp->fip->fi_version;
		(void) lfs_writeseg(fs, sp);

		sp->fip->fi_version = version;
		sp->fip->fi_ino = VTOI(sp->vp)->i_number;
		/* Add the current file to the segment summary. */
		++((SEGSUM *)(sp->segsum))->ss_nfinfo;
		sp->sum_bytes_left -=
			sizeof(struct finfo) - sizeof(ufs_daddr_t);

		if (sptr)
			*sptr = splbio();
		return(1);
	}

#ifdef DEBUG
	if(bp->b_flags & B_GATHERED) {
		printf("lfs_gatherblock: already gathered! Ino %d, lbn %d\n",
		       sp->fip->fi_ino, bp->b_lblkno);
		return(0);
	}
#endif
	/* Insert into the buffer list, update the FINFO block. */
	bp->b_flags |= B_GATHERED;
	*sp->cbpp++ = bp;
	sp->fip->fi_blocks[sp->fip->fi_nblocks++] = bp->b_lblkno;

	sp->sum_bytes_left -= sizeof(ufs_daddr_t);
	sp->seg_bytes_left -= bp->b_bcount;
	return(0);
}

int
lfs_gather(fs, sp, vp, match)
	struct lfs *fs;
	struct segment *sp;
	struct vnode *vp;
	int (*match) __P((struct lfs *, struct buf *));
{
	struct buf *bp;
	int s, count=0;

	sp->vp = vp;
	s = splbio();

#ifndef LFS_NO_BACKBUF_HACK
loop:	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = bp->b_vnbufs.le_next) {
#else /* LFS_NO_BACKBUF_HACK */
/* This is a hack to see if ordering the blocks in LFS makes a difference. */
# define	BUF_OFFSET	(((void *)&bp->b_vnbufs.le_next) - (void *)bp)
# define	BACK_BUF(BP)	((struct buf *)(((void *)BP->b_vnbufs.le_prev) - BUF_OFFSET))
# define	BEG_OF_LIST	((struct buf *)(((void *)&vp->v_dirtyblkhd.lh_first) - BUF_OFFSET))
	/* Find last buffer. */
loop:	for (bp = vp->v_dirtyblkhd.lh_first; bp && bp->b_vnbufs.le_next != NULL;
	     bp = bp->b_vnbufs.le_next);
	for (; bp && bp != BEG_OF_LIST; bp = BACK_BUF(bp)) {
#endif /* LFS_NO_BACKBUF_HACK */
		if ((bp->b_flags & (B_BUSY|B_GATHERED)) || !match(fs, bp))
			continue;
		if(vp->v_type == VBLK) {
			/* For block devices, just write the blocks. */
			/* XXX Do we really need to even do this? */
#ifdef DEBUG_LFS
			if(count==0)
				printf("BLK(");
			printf(".");
#endif
			/* Get the block before bwrite, so we don't corrupt the free list */
			bp->b_flags |= B_BUSY;
			bremfree(bp);
			bwrite(bp);
		} else {
#ifdef DIAGNOSTIC
			if (!(bp->b_flags & B_DELWRI))
				panic("lfs_gather: bp not B_DELWRI");
			if (!(bp->b_flags & B_LOCKED)) {
				printf("lfs_gather: lbn %d blk %d not B_LOCKED\n",
				       bp->b_lblkno, bp->b_blkno);
				VOP_PRINT(bp->b_vp);
				panic("lfs_gather: bp not B_LOCKED");
			}
#endif
			if (lfs_gatherblock(sp, bp, &s)) {
				goto loop;
			}
		}
		count++;
	}
	splx(s);
#ifdef DEBUG_LFS
	if(vp->v_type == VBLK && count)
		printf(")\n");
#endif
	lfs_updatemeta(sp);
	sp->vp = NULL;
	return count;
}

/*
 * Update the metadata that points to the blocks listed in the FINFO
 * array.
 */
void
lfs_updatemeta(sp)
	struct segment *sp;
{
	SEGUSE *sup;
	struct buf *bp;
	struct lfs *fs;
	struct vnode *vp;
	struct indir a[NIADDR + 2], *ap;
	struct inode *ip;
	ufs_daddr_t daddr, lbn, off;
	int error, i, nblocks, num;

	vp = sp->vp;
	nblocks = &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp;
	if (nblocks < 0)
		panic("lfs_updatemeta: negative nblocks");
	if (vp == NULL || nblocks == 0)
		return;

	/* Sort the blocks. */
	/*
	 * XXX KS - We have to sort even if the blocks come from the
	 * cleaner, because there might be other pending blocks on the
	 * same inode...and if we don't sort, and there are fragments
	 * present, blocks may be written in the wrong place.
	 */
	/* if (!(sp->seg_flags & SEGM_CLEAN)) */
	lfs_shellsort(sp->start_bpp, sp->start_lbp, nblocks);

	/*
	 * Record the length of the last block in case it's a fragment.
	 * If there are indirect blocks present, they sort last.  An
	 * indirect block will be lfs_bsize and its presence indicates
	 * that you cannot have fragments.
	 */
	sp->fip->fi_lastlength = sp->start_bpp[nblocks - 1]->b_bcount;

	/*
	 * Assign disk addresses, and update references to the logical
	 * block and the segment usage information.
	 */
	fs = sp->fs;
	for (i = nblocks; i--; ++sp->start_bpp) {
		lbn = *sp->start_lbp++;

		(*sp->start_bpp)->b_blkno = off = fs->lfs_offset;
		if((*sp->start_bpp)->b_blkno == (*sp->start_bpp)->b_lblkno) {
			printf("lfs_updatemeta: ino %d blk %d has same lbn and daddr\n",
			       VTOI(vp)->i_number, off);
		}
		fs->lfs_offset +=
			fragstodb(fs, numfrags(fs, (*sp->start_bpp)->b_bcount));
		error = ufs_bmaparray(vp, lbn, &daddr, a, &num, NULL);
		if (error)
			panic("lfs_updatemeta: ufs_bmaparray %d", error);
		ip = VTOI(vp);
		switch (num) {
		case 0:
			ip->i_ffs_db[lbn] = off;
			break;
		case 1:
			ip->i_ffs_ib[a[0].in_off] = off;
			break;
		default:
			ap = &a[num - 1];
			if (bread(vp, ap->in_lbn, fs->lfs_bsize, NOCRED, &bp))
				panic("lfs_updatemeta: bread bno %d",
				      ap->in_lbn);
			/*
			 * Bread may create a new (indirect) block which needs
			 * to get counted for the inode.
			 */
			if (/* bp->b_blkno == -1 && */
			    !(bp->b_flags & (B_DELWRI|B_DONE))) {
				ip->i_ffs_blocks += fsbtodb(fs, 1);
				fs->lfs_bfree -= fragstodb(fs, fs->lfs_frag);
			}
			((ufs_daddr_t *)bp->b_data)[ap->in_off] = off;
			VOP_BWRITE(bp);
		}
		/* Update segment usage information. */
		if (daddr != UNASSIGNED &&
		    !(daddr >= fs->lfs_lastpseg && daddr <= off)) {
			LFS_SEGENTRY(sup, fs, datosn(fs, daddr), bp);
#ifdef DIAGNOSTIC
			if (sup->su_nbytes < (*sp->start_bpp)->b_bcount) {
				/* XXX -- Change to a panic. */
				printf("lfs_updatemeta: negative bytes (segment %d short by %ld)\n",
				       datosn(fs, daddr),
				       (*sp->start_bpp)->b_bcount - sup->su_nbytes);
				printf("lfs_updatemeta: ino %d, lbn %d, addr = %x\n",
				       VTOI(sp->vp)->i_number,
				       (*sp->start_bpp)->b_lblkno, daddr);
				panic("lfs_updatemeta: negative bytes");
				sup->su_nbytes = (*sp->start_bpp)->b_bcount;
			}
#endif
			sup->su_nbytes -= (*sp->start_bpp)->b_bcount;
			error = VOP_BWRITE(bp);
		}
	}
}

/*
 * Start a new segment.
 */
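/*
 * Returns nonzero if we had to advance to a fresh segment, so that
 * lfs_writeseg's callers (e.g. the checkpoint "redo" loop in
 * lfs_segwrite) know to write again.
 */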
int
lfs_initseg(fs)
	struct lfs *fs;
{
	struct segment *sp;
	SEGUSE *sup;
	SEGSUM *ssp;
	struct buf *bp;
	int repeat;

	sp = fs->lfs_sp;

	repeat = 0;
	/* Advance to the next segment. */
	if (!LFS_PARTIAL_FITS(fs)) {
		/* Wake up any cleaning procs waiting on this file system. */
		wakeup(&lfs_allclean_wakeup);
		wakeup(&fs->lfs_nextseg);
		lfs_newseg(fs);
		repeat = 1;
		fs->lfs_offset = fs->lfs_curseg;
		sp->seg_number = datosn(fs, fs->lfs_curseg);
		sp->seg_bytes_left = fs->lfs_dbpseg * DEV_BSIZE;
		/*
		 * If the segment contains a superblock, update the offset
		 * and summary address to skip over it.
		 */
		LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
		if (sup->su_flags & SEGUSE_SUPERBLOCK) {
			fs->lfs_offset += LFS_SBPAD / DEV_BSIZE;
			sp->seg_bytes_left -= LFS_SBPAD;
		}
		brelse(bp);
	} else {
		sp->seg_number = datosn(fs, fs->lfs_curseg);
		sp->seg_bytes_left = (fs->lfs_dbpseg -
				      (fs->lfs_offset - fs->lfs_curseg)) * DEV_BSIZE;
	}
	fs->lfs_lastpseg = fs->lfs_offset;

	sp->fs = fs;
	sp->ibp = NULL;
	sp->idp = NULL;
	sp->ninodes = 0;

	/* Get a new buffer for SEGSUM and enter it into the buffer list. */
	sp->cbpp = sp->bpp;
	*sp->cbpp = lfs_newbuf(VTOI(fs->lfs_ivnode)->i_devvp,
			       fs->lfs_offset, LFS_SUMMARY_SIZE);
	sp->segsum = (*sp->cbpp)->b_data;
	bzero(sp->segsum, LFS_SUMMARY_SIZE);
	sp->start_bpp = ++sp->cbpp;
	fs->lfs_offset += LFS_SUMMARY_SIZE / DEV_BSIZE;

	/* Set pointer to SEGSUM, initialize it. */
	ssp = sp->segsum;
	ssp->ss_next = fs->lfs_nextseg;
	ssp->ss_nfinfo = ssp->ss_ninos = 0;
	ssp->ss_magic = SS_MAGIC;

	/* Set pointer to first FINFO, initialize it. */
	sp->fip = (struct finfo *)((caddr_t)sp->segsum + sizeof(SEGSUM));
	sp->fip->fi_nblocks = 0;
	sp->start_lbp = &sp->fip->fi_blocks[0];
	sp->fip->fi_lastlength = 0;

	sp->seg_bytes_left -= LFS_SUMMARY_SIZE;
	sp->sum_bytes_left = LFS_SUMMARY_SIZE - sizeof(SEGSUM);

	return(repeat);
}

/*
 * Return the next segment to write.
 */
void
lfs_newseg(fs)
	struct lfs *fs;
{
	CLEANERINFO *cip;
	SEGUSE *sup;
	struct buf *bp;
	int curseg, isdirty, sn;

	LFS_SEGENTRY(sup, fs, datosn(fs, fs->lfs_nextseg), bp);
	sup->su_flags |= SEGUSE_DIRTY | SEGUSE_ACTIVE;
	sup->su_nbytes = 0;
	sup->su_nsums = 0;
	sup->su_ninos = 0;
	(void) VOP_BWRITE(bp);

	LFS_CLEANERINFO(cip, fs, bp);
	--cip->clean;
	++cip->dirty;
	fs->lfs_nclean = cip->clean;
	(void) VOP_BWRITE(bp);

	fs->lfs_lastseg = fs->lfs_curseg;
	fs->lfs_curseg = fs->lfs_nextseg;
	for (sn = curseg = datosn(fs, fs->lfs_curseg);;) {
		sn = (sn + 1) % fs->lfs_nseg;
		if (sn == curseg)
			panic("lfs_newseg: no clean segments");
		LFS_SEGENTRY(sup, fs, sn, bp);
		isdirty = sup->su_flags & SEGUSE_DIRTY;
		brelse(bp);
		if (!isdirty)
			break;
	}

	++fs->lfs_nactive;
	fs->lfs_nextseg = sntoda(fs, sn);
	if(lfs_dostats) {
		++lfs_stats.segsused;
	}
}

int
lfs_writeseg(fs, sp)
	struct lfs *fs;
	struct segment *sp;
{
	extern int locked_queue_count;
	extern long locked_queue_bytes;
	struct buf **bpp, *bp, *cbp;
	SEGUSE *sup;
	SEGSUM *ssp;
	dev_t i_dev;
	u_long *datap, *dp;
	int do_again, i, nblocks, s;
#ifdef LFS_TRACK_IOS
	int j;
#endif
	int (*strategy)__P((void *));
	struct vop_strategy_args vop_strategy_a;
	u_short ninos;
	struct vnode *devvp;
	char *p;
	struct vnode *vn;
	struct inode *ip;
#if defined(DEBUG) && defined(LFS_PROPELLER)
	static int propeller;
	char propstring[4] = "-\\|/";

	printf("%c\b",propstring[propeller++]);
	if(propeller==4)
		propeller = 0;
#endif

	/*
	 * If there are no buffers other than the segment summary to write
	 * and it is not a checkpoint, don't do anything.  On a checkpoint,
	 * even if there aren't any buffers, you need to write the superblock.
	 */
	if ((nblocks = sp->cbpp - sp->bpp) == 1)
		return (0);

#ifdef DEBUG_LFS
	lfs_check_bpp(fs,sp,__FILE__,__LINE__);
#endif
	i_dev = VTOI(fs->lfs_ivnode)->i_dev;
	devvp = VTOI(fs->lfs_ivnode)->i_devvp;

	/* Update the segment usage information. */
	LFS_SEGENTRY(sup, fs, sp->seg_number, bp);

	/* Loop through all blocks, except the segment summary. */
	for (bpp = sp->bpp; ++bpp < sp->cbpp; ) {
		if((*bpp)->b_vp != devvp)
			sup->su_nbytes += (*bpp)->b_bcount;
	}

	ssp = (SEGSUM *)sp->segsum;

	ninos = (ssp->ss_ninos + INOPB(fs) - 1) / INOPB(fs);
	sup->su_nbytes += ssp->ss_ninos * DINODE_SIZE;
	/* sup->su_nbytes += LFS_SUMMARY_SIZE; */
	sup->su_lastmod = time.tv_sec;
	sup->su_ninos += ninos;
	++sup->su_nsums;

	do_again = !(bp->b_flags & B_GATHERED);
	(void)VOP_BWRITE(bp);
	/*
	 * Compute checksum across data and then across summary; the first
	 * block (the summary block) is skipped.  Set the create time here
	 * so that it's guaranteed to be later than the inode mod times.
	 *
	 * XXX
	 * Fix this to do it inline, instead of malloc/copy.
	 */
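	/*
	 * Note that ss_datasum covers only the first u_long of each data
	 * block, not the blocks' full contents.
	 */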
	datap = dp = malloc(nblocks * sizeof(u_long), M_SEGMENT, M_WAITOK);
	for (bpp = sp->bpp, i = nblocks - 1; i--;) {
		if (((*++bpp)->b_flags & (B_CALL|B_INVAL)) == (B_CALL|B_INVAL)) {
			if (copyin((*bpp)->b_saveaddr, dp++, sizeof(u_long)))
				panic("lfs_writeseg: copyin failed [1]: ino %d blk %d",
				      VTOI((*bpp)->b_vp)->i_number, (*bpp)->b_lblkno);
		} else {
			if( !((*bpp)->b_flags & B_CALL) ) {
				/*
				 * Before we record data for a checksum,
				 * make sure the data won't change in between
				 * the checksum calculation and the write,
				 * by marking the buffer B_BUSY.  It will
				 * be freed later by brelse().
				 */
			again:
				s = splbio();
				if((*bpp)->b_flags & B_BUSY) {
#ifdef DEBUG
					printf("lfs_writeseg: avoiding potential data summary corruption for ino %d, lbn %d\n",
					       VTOI((*bpp)->b_vp)->i_number,
					       (*bpp)->b_lblkno);
#endif
					(*bpp)->b_flags |= B_WANTED;
					tsleep((*bpp), (PRIBIO + 1),
					       "lfs_writeseg", 0);
					splx(s);
					goto again;
				}
				(*bpp)->b_flags |= B_BUSY;
				splx(s);
			}
			*dp++ = ((u_long *)(*bpp)->b_data)[0];
		}
	}
	ssp->ss_create = time.tv_sec;
	ssp->ss_datasum = cksum(datap, (nblocks - 1) * sizeof(u_long));
	ssp->ss_sumsum =
		cksum(&ssp->ss_datasum, LFS_SUMMARY_SIZE - sizeof(ssp->ss_sumsum));
	free(datap, M_SEGMENT);
#ifdef DIAGNOSTIC
	if (fs->lfs_bfree < fsbtodb(fs, ninos) + LFS_SUMMARY_SIZE / DEV_BSIZE)
		panic("lfs_writeseg: No diskspace for summary");
#endif
	fs->lfs_bfree -= (fsbtodb(fs, ninos) + LFS_SUMMARY_SIZE / DEV_BSIZE);

	strategy = devvp->v_op[VOFFSET(vop_strategy)];

	/*
	 * When we simply write the blocks we lose a rotation for every block
	 * written.  To avoid this problem, we allocate memory in chunks, copy
	 * the buffers into the chunk and write the chunk.  CHUNKSIZE is the
	 * largest size I/O devices can handle.
	 * When the data is copied to the chunk, turn off the B_LOCKED bit
	 * and brelse the buffer (which will move them to the LRU list).  Add
	 * the B_CALL flag to the buffer header so we can count I/O's for the
	 * checkpoints and so we can release the allocated memory.
	 *
	 * XXX
	 * This should be removed if the new virtual memory system allows us to
	 * easily make the buffers contiguous in kernel memory and if that's
	 * fast enough.
	 */

#define	CHUNKSIZE	MAXPHYS

	if(devvp==NULL)
		panic("devvp is NULL");
	for (bpp = sp->bpp, i = nblocks; i;) {
		cbp = lfs_newbuf(devvp, (*bpp)->b_blkno, CHUNKSIZE);
		cbp->b_dev = i_dev;
		cbp->b_flags |= B_ASYNC | B_BUSY;
		cbp->b_bcount = 0;

#ifdef DIAGNOSTIC
		if(datosn(fs, (*bpp)->b_blkno +
			  ((*bpp)->b_bcount - 1)/DEV_BSIZE) !=
		   datosn(fs, cbp->b_blkno)) {
			panic("lfs_writeseg: Segment overwrite");
		}
#endif

		if(fs->lfs_iocount >= LFS_THROTTLE) {
			tsleep(&fs->lfs_iocount, PRIBIO+1, "lfs throttle", 0);
		}
		s = splbio();
		++fs->lfs_iocount;
#ifdef LFS_TRACK_IOS
		for(j=0;j<LFS_THROTTLE;j++) {
			if(fs->lfs_pending[j]==LFS_UNUSED_DADDR) {
				fs->lfs_pending[j] = cbp->b_blkno;
				break;
			}
		}
#endif /* LFS_TRACK_IOS */
		for (p = cbp->b_data; i && cbp->b_bcount < CHUNKSIZE; i--) {
			bp = *bpp;

			if (bp->b_bcount > (CHUNKSIZE - cbp->b_bcount))
				break;

			/*
			 * Fake buffers from the cleaner are marked as B_INVAL.
			 * We need to copy the data from user space rather than
			 * from the buffer indicated.
			 * XXX == what do I do on an error?
			 */
			if ((bp->b_flags & (B_CALL|B_INVAL)) == (B_CALL|B_INVAL)) {
				if (copyin(bp->b_saveaddr, p, bp->b_bcount))
					panic("lfs_writeseg: copyin failed [2]");
			} else
				bcopy(bp->b_data, p, bp->b_bcount);
			p += bp->b_bcount;
			cbp->b_bcount += bp->b_bcount;
			if (bp->b_flags & B_LOCKED) {
				--locked_queue_count;
				locked_queue_bytes -= bp->b_bufsize;
			}
			bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
					 B_LOCKED | B_GATHERED);
			/* Do this before the buffer is released below. */
			if(bp->b_flags & B_NEEDCOMMIT) { /* XXX */
				bp->b_flags &= ~B_NEEDCOMMIT;
				wakeup(bp);
			}
			vn = bp->b_vp;
			if (bp->b_flags & B_CALL) {
				/* if B_CALL, it was created with newbuf */
				lfs_freebuf(bp);
			} else {
				bremfree(bp);
				bp->b_flags |= B_DONE;
				if(vn)
					reassignbuf(bp, vn);
				brelse(bp);
			}

			bpp++;

			/*
			 * If this is the last block for this vnode, but
			 * there are other blocks on its dirty list,
			 * set IN_MODIFIED/IN_CLEANING depending on what
			 * sort of block.  Only do this for our mount point,
			 * not for, e.g., inode blocks that are attached to
			 * the devvp.
			 */
			if(i>1 && vn && *bpp && (*bpp)->b_vp != vn
			   && (*bpp)->b_vp && (bp=vn->v_dirtyblkhd.lh_first) != NULL &&
			   vn->v_mount == fs->lfs_ivnode->v_mount)
			{
				ip = VTOI(vn);
#ifdef DEBUG_LFS
				printf("lfs_writeseg: marking ino %d\n",
				       ip->i_number);
#endif
				if(!(ip->i_flag & (IN_CLEANING|IN_MODIFIED))) {
					fs->lfs_uinodes++;
					if(bp->b_flags & B_CALL)
						ip->i_flag |= IN_CLEANING;
					else
						ip->i_flag |= IN_MODIFIED;
				}
			}
			/* if(vn->v_dirtyblkhd.lh_first == NULL) */
			wakeup(vn);
		}
		++cbp->b_vp->v_numoutput;
		splx(s);
		/*
		 * XXXX This is a gross and disgusting hack.  Since these
		 * buffers are physically addressed, they hang off the
		 * device vnode (devvp).  As a result, they have no way
		 * of getting to the LFS superblock or lfs structure to
		 * keep track of the number of I/O's pending.  So, I am
		 * going to stuff the fs into the saveaddr field of
		 * the buffer (yuk).
		 */
		cbp->b_saveaddr = (caddr_t)fs;
		vop_strategy_a.a_desc = VDESC(vop_strategy);
		vop_strategy_a.a_bp = cbp;
		(strategy)(&vop_strategy_a);
	}
	/*
	 * XXX
	 * Vinvalbuf can move locked buffers off the locked queue
	 * and we have no way of knowing about this.  So, after
	 * doing a big write, we recalculate how many buffers are
	 * really still left on the locked queue.
	 */
	lfs_countlocked(&locked_queue_count, &locked_queue_bytes);
	wakeup(&locked_queue_count);
	if(lfs_dostats) {
		++lfs_stats.psegwrites;
		lfs_stats.blocktot += nblocks - 1;
		if (fs->lfs_sp->seg_flags & SEGM_SYNC)
			++lfs_stats.psyncwrites;
		if (fs->lfs_sp->seg_flags & SEGM_CLEAN) {
			++lfs_stats.pcleanwrites;
			lfs_stats.cleanblocks += nblocks - 1;
		}
	}
	return (lfs_initseg(fs) || do_again);
}

void
lfs_writesuper(fs, daddr)
	struct lfs *fs;
	daddr_t daddr;
{
	struct buf *bp;
	dev_t i_dev;
	int (*strategy) __P((void *));
	int s;
	struct vop_strategy_args vop_strategy_a;

#ifdef LFS_CANNOT_ROLLFW
	/*
	 * If we can write one superblock while another is in
	 * progress, we risk not having a complete checkpoint if we crash.
	 * So, block here if a superblock write is in progress.
	 *
	 * XXX - should be a proper lock, not this hack
	 */
	while(fs->lfs_sbactive) {
		tsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs sb", 0);
	}
	fs->lfs_sbactive = daddr;
#endif
	i_dev = VTOI(fs->lfs_ivnode)->i_dev;
	strategy = VTOI(fs->lfs_ivnode)->i_devvp->v_op[VOFFSET(vop_strategy)];

	/* Set timestamp of this version of the superblock */
	fs->lfs_tstamp = time.tv_sec;

	/* Checksum the superblock and copy it into a buffer. */
	fs->lfs_cksum = lfs_sb_cksum(&(fs->lfs_dlfs));
	bp = lfs_newbuf(VTOI(fs->lfs_ivnode)->i_devvp, daddr, LFS_SBPAD);
	*(struct dlfs *)bp->b_data = fs->lfs_dlfs;

	bp->b_dev = i_dev;
	bp->b_flags |= B_BUSY | B_CALL | B_ASYNC;
	bp->b_flags &= ~(B_DONE | B_ERROR | B_READ | B_DELWRI);
	bp->b_iodone = lfs_supercallback;
	/* XXX KS - same nasty hack as above */
	bp->b_saveaddr = (caddr_t)fs;

	vop_strategy_a.a_desc = VDESC(vop_strategy);
	vop_strategy_a.a_bp = bp;
	s = splbio();
	++bp->b_vp->v_numoutput;
	splx(s);
	(strategy)(&vop_strategy_a);
}

/*
 * Logical block number match routines used when traversing the dirty block
 * chain.
 */
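/*
 * Indirect blocks hang on the vnode at negative logical block numbers,
 * offset by NDADDR; (-lbn - NDADDR) % NINDIR(fs) is 0, 1, or 2 for
 * single, double, and triple indirect blocks respectively, which is
 * what the match routines below test.  lfs_match_fake picks out the
 * cleaner's fake buffers, which carry B_CALL.
 */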
int
lfs_match_fake(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	return (bp->b_flags & B_CALL);
}

int
lfs_match_data(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	return (bp->b_lblkno >= 0);
}

int
lfs_match_indir(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	int lbn;

	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 0);
}

int
lfs_match_dindir(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	int lbn;

	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 1);
}

int
lfs_match_tindir(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	int lbn;

	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 2);
}

/*
 * XXX - The only buffers that are going to hit these functions are the
 * segment write blocks, or the segment summaries, or the superblocks.
 *
 * All of the above are created by lfs_newbuf, and so do not need to be
 * released via brelse.
 */
void
lfs_callback(bp)
	struct buf *bp;
{
	struct lfs *fs;
#ifdef LFS_TRACK_IOS
	int j;
#endif

	fs = (struct lfs *)bp->b_saveaddr;
#ifdef DIAGNOSTIC
	if (fs->lfs_iocount == 0)
		panic("lfs_callback: zero iocount");
#endif
	if (--fs->lfs_iocount < LFS_THROTTLE)
		wakeup(&fs->lfs_iocount);
#ifdef LFS_TRACK_IOS
	for(j=0;j<LFS_THROTTLE;j++) {
		if(fs->lfs_pending[j]==bp->b_blkno) {
			fs->lfs_pending[j] = LFS_UNUSED_DADDR;
			wakeup(&(fs->lfs_pending[j]));
			break;
		}
	}
#endif /* LFS_TRACK_IOS */

	lfs_freebuf(bp);
}

void
lfs_supercallback(bp)
	struct buf *bp;
{
#ifdef LFS_CANNOT_ROLLFW
	struct lfs *fs;

	fs = (struct lfs *)bp->b_saveaddr;
	fs->lfs_sbactive = 0;
	wakeup(&fs->lfs_sbactive);
#endif
	lfs_freebuf(bp);
}

/*
 * Shellsort (diminishing increment sort) from Data Structures and
 * Algorithms, Aho, Hopcroft and Ullman, 1983 Edition, page 290;
 * see also Knuth Vol. 3, page 84.  The increments are selected from
 * formula (8), page 95.  Roughly O(N^3/2).
 */
/*
 * This is our own private copy of shellsort because we want to sort
 * two parallel arrays (the array of buffer pointers and the array of
 * logical block numbers) simultaneously.  Note that we cast the array
 * of logical block numbers to an unsigned in this routine so that the
 * negative block numbers (meta data blocks) sort AFTER the data blocks.
 */

void
lfs_shellsort(bp_array, lb_array, nmemb)
	struct buf **bp_array;
	ufs_daddr_t *lb_array;
	register int nmemb;
{
	static int __rsshell_increments[] = { 4, 1, 0 };
	register int incr, *incrp, t1, t2;
	struct buf *bp_temp;
	u_long lb_temp;

	for (incrp = __rsshell_increments; (incr = *incrp++) != 0;)
		for (t1 = incr; t1 < nmemb; ++t1)
			for (t2 = t1 - incr; t2 >= 0;)
				if ((u_long)lb_array[t2] >
				    (u_long)lb_array[t2 + incr]) {
					lb_temp = lb_array[t2];
					lb_array[t2] = lb_array[t2 + incr];
					lb_array[t2 + incr] = lb_temp;
					bp_temp = bp_array[t2];
					bp_array[t2] = bp_array[t2 + incr];
					bp_array[t2 + incr] = bp_temp;
					t2 -= incr;
				} else
					break;
}

/*
 * Check VXLOCK.  Return 1 if the vnode is locked.  Otherwise, vget it.
 */
int
lfs_vref(vp)
	register struct vnode *vp;
{
	/*
	 * If we return 1 here during a flush, we risk vinvalbuf() not
	 * being able to flush all of the pages from this vnode, which
	 * will cause it to panic.  So, return 0 if a flush is in progress.
	 */
	if (vp->v_flag & VXLOCK) {
		if(IS_FLUSHING(VTOI(vp)->i_lfs, vp)) {
			return 0;
		}
		return(1);
	}
	return (vget(vp, 0));
}

/*
 * This is vrele except that we do not want to VOP_INACTIVE this vnode. We
 * inline vrele here to avoid the vn_lock and VOP_INACTIVE call at the end.
 */
void
lfs_vunref(vp)
	register struct vnode *vp;
{
	/*
	 * Analogous to lfs_vref, if the node is flushing, fake it.
	 */
	if((vp->v_flag & VXLOCK) && IS_FLUSHING(VTOI(vp)->i_lfs, vp)) {
		return;
	}

	simple_lock(&vp->v_interlock);
#ifdef DIAGNOSTIC
	if(vp->v_usecount <= 0) {
		printf("lfs_vunref: flags are 0x%lx\n", vp->v_flag);
		printf("lfs_vunref: usecount = %ld\n", vp->v_usecount);
		panic("lfs_vunref: v_usecount<=0");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}
#ifdef DIAGNOSTIC
	if(VOP_ISLOCKED(vp))
		panic("lfs_vunref: vnode locked");
#endif
	/*
	 * insert at tail of LRU list
	 */
	simple_lock(&vnode_free_list_slock);
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	simple_unlock(&vp->v_interlock);
}

/*
 * We use this when we have vnodes that were loaded in solely for cleaning.
 * There is no reason to believe that these vnodes will be referenced again
 * soon, since the cleaning process is unrelated to normal filesystem
 * activity.  Putting cleaned vnodes at the tail of the list has the effect
 * of flushing the vnode LRU.  So, put vnodes that were loaded only for
 * cleaning at the head of the list, instead.
 */
void
lfs_vunref_head(vp)
	register struct vnode *vp;
{
	simple_lock(&vp->v_interlock);
#ifdef DIAGNOSTIC
	if(vp->v_usecount == 0) {
		panic("lfs_vunref_head: v_usecount == 0");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}
#ifdef DIAGNOSTIC
	if(VOP_ISLOCKED(vp))
		panic("lfs_vunref_head: vnode locked");
#endif
	/*
	 * insert at head of LRU list
	 */
	simple_lock(&vnode_free_list_slock);
	TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	simple_unlock(&vp->v_interlock);
}