/*	$NetBSD: lfs_segment.c,v 1.25 1999/04/12 00:11:01 perseant Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant (at) hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_segment.c	8.10 (Berkeley) 6/10/95
 */

#define ivndebug(vp,str) printf("ino %d: %s\n",VTOI(vp)->i_number,(str))

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/namei.h>
#include <sys/kernel.h>
#include <sys/resourcevar.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/malloc.h>
#include <sys/mount.h>

#include <miscfs/specfs/specdev.h>
#include <miscfs/fifofs/fifo.h>

#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

extern int count_lock_queue __P((void));
extern struct simplelock vnode_free_list_slock;		/* XXX */
extern TAILQ_HEAD(freelst, vnode) vnode_free_list;	/* XXX */

/*
 * Determine if it's OK to start a partial in this segment, or if we need
 * to go on to a new segment.
 */
#define	LFS_PARTIAL_FITS(fs) \
	((fs)->lfs_dbpseg - ((fs)->lfs_offset - (fs)->lfs_curseg) > \
	1 << (fs)->lfs_fsbtodb)
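
/*
 * Note: lfs_offset - lfs_curseg is the number of disk blocks already
 * consumed in the current segment, so the macro above asks whether more
 * than one file-system block's worth of disk blocks (1 << lfs_fsbtodb)
 * remains free there; presumably anything smaller is not worth starting
 * a partial segment for.
 */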

void	lfs_callback __P((struct buf *));
int	lfs_gather __P((struct lfs *, struct segment *,
	    struct vnode *, int (*) __P((struct lfs *, struct buf *))));
int	lfs_gatherblock __P((struct segment *, struct buf *, int *));
void	lfs_iset __P((struct inode *, ufs_daddr_t, time_t));
int	lfs_match_fake __P((struct lfs *, struct buf *));
int	lfs_match_data __P((struct lfs *, struct buf *));
int	lfs_match_dindir __P((struct lfs *, struct buf *));
int	lfs_match_indir __P((struct lfs *, struct buf *));
int	lfs_match_tindir __P((struct lfs *, struct buf *));
void	lfs_newseg __P((struct lfs *));
void	lfs_shellsort __P((struct buf **, ufs_daddr_t *, register int));
void	lfs_supercallback __P((struct buf *));
void	lfs_updatemeta __P((struct segment *));
int	lfs_vref __P((struct vnode *));
void	lfs_vunref __P((struct vnode *));
void	lfs_writefile __P((struct lfs *, struct segment *, struct vnode *));
int	lfs_writeinode __P((struct lfs *, struct segment *, struct inode *));
int	lfs_writeseg __P((struct lfs *, struct segment *));
void	lfs_writesuper __P((struct lfs *, daddr_t));
int	lfs_writevnodes __P((struct lfs *fs, struct mount *mp,
	    struct segment *sp, int dirops));

int	lfs_allclean_wakeup;	/* Cleaner wakeup address. */
int	lfs_writeindir = 1;	/* whether to flush indir on non-ckp */
int	lfs_clean_vnhead = 0;	/* Allow freeing to head of vn list */

/* Statistics Counters */
int lfs_dostats = 1;
struct lfs_stats lfs_stats;

/* op values to lfs_writevnodes */
#define	VN_REG		0
#define	VN_DIROP	1
#define	VN_EMPTY	2
#define	VN_CLEAN	3
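
/*
 * Apparently the number of partial-segment writes that may accumulate
 * before a checkpoint is forced; see the lfs_nactive tests in lfs_vflush()
 * and lfs_segwrite() below.
 */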
#define LFS_MAX_ACTIVE	10

/*
 * XXX KS - Set modification time on the Ifile, so the cleaner can
 * read the fs mod time off of it.  We don't set IN_UPDATE here,
 * since we don't really need this to be flushed to disk (and in any
 * case that wouldn't happen to the Ifile until we checkpoint).
 */
void
lfs_imtime(fs)
	struct lfs *fs;
{
	struct timespec ts;
	struct inode *ip;

	TIMEVAL_TO_TIMESPEC(&time, &ts);
	ip = VTOI(fs->lfs_ivnode);
	ip->i_ffs_mtime = ts.tv_sec;
	ip->i_ffs_mtimensec = ts.tv_nsec;
}

/*
 * Ifile and meta data blocks are not marked busy, so segment writes MUST be
 * single threaded.  Currently, there are two paths into lfs_segwrite, sync()
 * and getnewbuf().  They both mark the file system busy.  Lfs_vflush()
 * explicitly marks the file system busy.  So lfs_segwrite is safe.  I think.
 */

#define SET_FLUSHING(fs,vp) (fs)->lfs_flushvp = (vp)
#define IS_FLUSHING(fs,vp)  ((fs)->lfs_flushvp == (vp))
#define CLR_FLUSHING(fs,vp) (fs)->lfs_flushvp = NULL

int
lfs_vflush(vp)
	struct vnode *vp;
{
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	int error;

	ip = VTOI(vp);
	fs = VFSTOUFS(vp->v_mount)->um_lfs;

	if (ip->i_flag & IN_CLEANING) {
#ifdef DEBUG_LFS
		ivndebug(vp,"vflush/in_cleaning");
#endif
		ip->i_flag &= ~IN_CLEANING;
		if (ip->i_flag & IN_MODIFIED) {
			fs->lfs_uinodes--;
		} else
			ip->i_flag |= IN_MODIFIED;
	}

	/* If the node is being written, wait until that is done */
	if (WRITEINPROG(vp)) {
#ifdef DEBUG_LFS
		ivndebug(vp,"vflush/writeinprog");
#endif
		tsleep(vp, PRIBIO+1, "lfs_vw", 0);
	}

	/* Protect against VXLOCK deadlock in vinvalbuf() */
	lfs_seglock(fs, SEGM_SYNC);
	SET_FLUSHING(fs,vp);
	if (fs->lfs_nactive > LFS_MAX_ACTIVE) {
		error = lfs_segwrite(vp->v_mount, SEGM_SYNC|SEGM_CKP);
		CLR_FLUSHING(fs,vp);
		lfs_segunlock(fs);
		return error;
	}
	sp = fs->lfs_sp;

	if (vp->v_dirtyblkhd.lh_first == NULL) {
		lfs_writevnodes(fs, vp->v_mount, sp, VN_EMPTY);
	} else if ((ip->i_flag & IN_CLEANING) &&
		   (fs->lfs_sp->seg_flags & SEGM_CLEAN)) {
#ifdef DEBUG_LFS
		ivndebug(vp,"vflush/clean");
#endif
		lfs_writevnodes(fs, vp->v_mount, sp, VN_CLEAN);
	} else if (lfs_dostats) {
		if (vp->v_dirtyblkhd.lh_first ||
		    (VTOI(vp)->i_flag &
		     (IN_MODIFIED|IN_UPDATE|IN_ACCESS|IN_CHANGE|IN_CLEANING)))
			++lfs_stats.vflush_invoked;
#ifdef DEBUG_LFS
		ivndebug(vp,"vflush");
#endif
	}

#ifdef DIAGNOSTIC
	/* XXX KS This actually can happen right now, though it shouldn't(?) */
	if (vp->v_flag & VDIROP) {
		printf("lfs_vflush: flushing VDIROP, this shouldn't be\n");
		/* panic("VDIROP being flushed...this can't happen"); */
	}
	if (vp->v_usecount < 0) {
		printf("usecount=%d\n", vp->v_usecount);
		panic("lfs_vflush: usecount<0");
	}
#endif
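
	/*
	 * Write the file's dirty blocks, then its inode; the inner loop
	 * repeats while lfs_writeinode() reports that the Ifile must be
	 * redone, and the outer loop repeats the whole sequence while
	 * lfs_writeseg() wants another pass and the vnode being flushed
	 * is itself the Ifile.
	 */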
	do {
		do {
			if (vp->v_dirtyblkhd.lh_first != NULL)
				lfs_writefile(fs, sp, vp);
		} while (lfs_writeinode(fs, sp, ip));
	} while (lfs_writeseg(fs, sp) && ip->i_number == LFS_IFILE_INUM);

	if (lfs_dostats) {
		++lfs_stats.nwrites;
		if (sp->seg_flags & SEGM_SYNC)
			++lfs_stats.nsync_writes;
		if (sp->seg_flags & SEGM_CKP)
			++lfs_stats.ncheckpoints;
	}
	lfs_segunlock(fs);

	CLR_FLUSHING(fs,vp);
	return (0);
}

#ifdef DEBUG_LFS_VERBOSE
# define vndebug(vp,str) if(VTOI(vp)->i_flag & IN_CLEANING) printf("not writing ino %d because %s (op %d)\n",VTOI(vp)->i_number,(str),op)
#else
# define vndebug(vp,str)
#endif

int
lfs_writevnodes(fs, mp, sp, op)
	struct lfs *fs;
	struct mount *mp;
	struct segment *sp;
	int op;
{
	struct inode *ip;
	struct vnode *vp;
	int inodes_written = 0, only_cleaning;

#ifndef LFS_NO_BACKVP_HACK
	/* BEGIN HACK */
#define	VN_OFFSET (((caddr_t)&vp->v_mntvnodes.le_next) - (caddr_t)vp)
#define	BACK_VP(VP) ((struct vnode *)(((caddr_t)VP->v_mntvnodes.le_prev) - VN_OFFSET))
#define	BEG_OF_VLIST ((struct vnode *)(((caddr_t)&mp->mnt_vnodelist.lh_first) - VN_OFFSET))
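
	/*
	 * With the hack macros above, the loop below walks the mount's
	 * vnode list from the tail toward the head by treating le_prev
	 * (a pointer to the previous element's le_next field) as if it
	 * pointed into a struct vnode; presumably this visits vnodes in
	 * roughly the order they were attached to the mount point.
	 */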

	/* Find last vnode. */
loop:	for (vp = mp->mnt_vnodelist.lh_first;
	     vp && vp->v_mntvnodes.le_next != NULL;
	     vp = vp->v_mntvnodes.le_next);
	for (; vp && vp != BEG_OF_VLIST; vp = BACK_VP(vp)) {
#else
loop:
	for (vp = mp->mnt_vnodelist.lh_first;
	     vp != NULL;
	     vp = vp->v_mntvnodes.le_next) {
#endif
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;

		ip = VTOI(vp);
		if ((op == VN_DIROP && !(vp->v_flag & VDIROP)) ||
		    (op != VN_DIROP && op != VN_CLEAN &&
		     (vp->v_flag & VDIROP))) {
			vndebug(vp,"dirop");
			continue;
		}

		if (op == VN_EMPTY && vp->v_dirtyblkhd.lh_first) {
			vndebug(vp,"empty");
			continue;
		}

		if (vp->v_type == VNON) {
			continue;
		}

		if (op == VN_CLEAN && ip->i_number != LFS_IFILE_INUM &&
		    !(ip->i_flag & IN_CLEANING)) {
			vndebug(vp,"cleaning");
			continue;
		}

		if (lfs_vref(vp)) {
			vndebug(vp,"vref");
			continue;
		}

#if 0	/* XXX KS - if we skip the ifile, things could go badly for us. */
		if (WRITEINPROG(vp)) {
			lfs_vunref(vp);
#ifdef DEBUG_LFS
			ivndebug(vp,"writevnodes/writeinprog");
#endif
			continue;
		}
#endif
		only_cleaning = 0;
		/*
		 * Write the inode/file if dirty and it's not the IFILE.
		 */
		if ((ip->i_flag &
		     (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE |
		      IN_CLEANING)) ||
		    vp->v_dirtyblkhd.lh_first != NULL) {
			only_cleaning = ((ip->i_flag &
			    (IN_ACCESS|IN_CHANGE|IN_MODIFIED|IN_UPDATE|
			     IN_CLEANING)) == IN_CLEANING);

			if (ip->i_number != LFS_IFILE_INUM &&
			    vp->v_dirtyblkhd.lh_first != NULL) {
				lfs_writefile(fs, sp, vp);
			}
			if (vp->v_dirtyblkhd.lh_first != NULL) {
				if (WRITEINPROG(vp)) {
#ifdef DEBUG_LFS
					ivndebug(vp,"writevnodes/write2");
#endif
				} else if (!(ip->i_flag &
				    (IN_ACCESS|IN_CHANGE|IN_MODIFIED|
				     IN_UPDATE|IN_CLEANING))) {
#ifdef DEBUG_LFS
					printf("<%d>", ip->i_number);
#endif
					ip->i_flag |= IN_MODIFIED;
					++fs->lfs_uinodes;
				}
			}
			(void) lfs_writeinode(fs, sp, ip);
			inodes_written++;
		}

		if (vp->v_flag & VDIROP) {
			--fs->lfs_dirvcount;
			vp->v_flag &= ~VDIROP;
			wakeup(&fs->lfs_dirvcount);
			lfs_vunref(vp);
		}

		if (lfs_clean_vnhead && only_cleaning)
			lfs_vunref_head(vp);
		else
			lfs_vunref(vp);
	}
	return inodes_written;
}

int
lfs_segwrite(mp, flags)
	struct mount *mp;
	int flags;			/* Do a checkpoint. */
{
	struct buf *bp;
	struct inode *ip;
	struct lfs *fs;
	struct segment *sp;
	struct vnode *vp;
	SEGUSE *segusep;
	ufs_daddr_t ibno;
	int do_ckp, error, i;
	int writer_set = 0;
	int need_unlock = 0;

	fs = VFSTOUFS(mp)->um_lfs;

	lfs_imtime(fs);

	/*
	 * If we are not the cleaner, and there are fewer than MIN_FREE_SEGS
	 * clean segments (or no space available), wait until the cleaner
	 * has written something.
	 */
	if (!(flags & SEGM_CLEAN) &&
	    (!fs->lfs_seglock || !(fs->lfs_sp->seg_flags & SEGM_CLEAN))) {
		do {
			if (fs->lfs_nclean <= MIN_FREE_SEGS ||
			    fs->lfs_avail <= 0) {
				wakeup(&lfs_allclean_wakeup);
				wakeup(&fs->lfs_nextseg);
				error = tsleep(&fs->lfs_avail, PRIBIO + 1,
					       "lfs_avail", 0);
				if (error) {
					return (error);
				}
			}
		} while (fs->lfs_nclean <= MIN_FREE_SEGS || fs->lfs_avail <= 0);
	}

	/*
	 * Allocate a segment structure and enough space to hold pointers to
	 * the maximum possible number of buffers which can be described in a
	 * single summary block.
	 */
	do_ckp = (flags & SEGM_CKP) || fs->lfs_nactive > LFS_MAX_ACTIVE;
	lfs_seglock(fs, flags | (do_ckp ? SEGM_CKP : 0));
	sp = fs->lfs_sp;

	/*
	 * If lfs_flushvp is non-NULL, we are called from lfs_vflush,
	 * in which case we have to flush *all* buffers off of this vnode.
	 */
	if ((sp->seg_flags & SEGM_CLEAN) && !(fs->lfs_flushvp))
		lfs_writevnodes(fs, mp, sp, VN_CLEAN);
	else {
		lfs_writevnodes(fs, mp, sp, VN_REG);
		/*
		 * XXX KS - If we're cleaning, we can't wait for dirops,
		 * because they might be waiting on us.  The downside of this
		 * is that, if we write anything besides cleaning blocks
		 * while cleaning, the checkpoint is not completely
		 * consistent.
		 */
		if (!(sp->seg_flags & SEGM_CLEAN)) {
			while (fs->lfs_dirops)
				if ((error = tsleep(&fs->lfs_writer,
				    PRIBIO + 1, "lfs writer", 0))) {
					free(sp->bpp, M_SEGMENT);
					free(sp, M_SEGMENT);
					return (error);
				}
			fs->lfs_writer++;
			writer_set = 1;
			lfs_writevnodes(fs, mp, sp, VN_DIROP);
			((SEGSUM *)(sp->segsum))->ss_flags &= ~(SS_CONT);
		}
	}

	/*
	 * If we are doing a checkpoint, mark everything since the
	 * last checkpoint as no longer ACTIVE.
	 */
	if (do_ckp) {
		for (ibno = fs->lfs_cleansz + fs->lfs_segtabsz;
		     --ibno >= fs->lfs_cleansz; ) {
			if (bread(fs->lfs_ivnode, ibno, fs->lfs_bsize,
			    NOCRED, &bp))
				panic("lfs_segwrite: ifile read");
			segusep = (SEGUSE *)bp->b_data;
			for (i = fs->lfs_sepb; i--; segusep++)
				segusep->su_flags &= ~SEGUSE_ACTIVE;

			/* But the current segment is still ACTIVE */
			if (fs->lfs_curseg / fs->lfs_sepb ==
			    (ibno - fs->lfs_cleansz))
				((SEGUSE *)(bp->b_data))[fs->lfs_curseg %
				    fs->lfs_sepb].su_flags |= SEGUSE_ACTIVE;
			error = VOP_BWRITE(bp);
		}
	}

	if (do_ckp || fs->lfs_doifile) {
redo:
		vp = fs->lfs_ivnode;
		/*
		 * Depending on the circumstances of our calling, the ifile
		 * inode might be locked.  If it is, and if it is locked by
		 * us, we should VREF instead of vget here.
		 */
		need_unlock = 0;
		if (VOP_ISLOCKED(vp) &&
		    VTOI(vp)->i_lock.lk_lockholder == curproc->p_pid) {
			VREF(vp);
		} else {
			while (vget(vp, LK_EXCLUSIVE))
				continue;
			need_unlock = 1;
		}
		ip = VTOI(vp);
		if (vp->v_dirtyblkhd.lh_first != NULL)
			lfs_writefile(fs, sp, vp);
		(void)lfs_writeinode(fs, sp, ip);

		/* Only vput if we used vget() above. */
		if (need_unlock)
			vput(vp);
		else
			vrele(vp);

		if (lfs_writeseg(fs, sp) && do_ckp)
			goto redo;
	} else {
		(void) lfs_writeseg(fs, sp);
	}

	/*
	 * If the I/O count is non-zero, sleep until it reaches zero.
	 * At the moment, the user's process hangs around so we can
	 * sleep.
	 */
	fs->lfs_doifile = 0;
	if (writer_set && --fs->lfs_writer == 0)
		wakeup(&fs->lfs_dirops);

	if (lfs_dostats) {
		++lfs_stats.nwrites;
		if (sp->seg_flags & SEGM_SYNC)
			++lfs_stats.nsync_writes;
		if (sp->seg_flags & SEGM_CKP)
			++lfs_stats.ncheckpoints;
	}
	lfs_segunlock(fs);
	return (0);
}

/*
 * Write the dirty blocks associated with a vnode.
 */
void
lfs_writefile(fs, sp, vp)
	struct lfs *fs;
	struct segment *sp;
	struct vnode *vp;
{
	struct buf *bp;
	struct finfo *fip;
	IFILE *ifp;

	if (sp->seg_bytes_left < fs->lfs_bsize ||
	    sp->sum_bytes_left < sizeof(struct finfo))
		(void) lfs_writeseg(fs, sp);
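
	/*
	 * Charge the summary block for a new FINFO header.  Note that
	 * struct finfo already contains room for one block entry
	 * (fi_blocks[0]), which is presumably why one ufs_daddr_t is
	 * subtracted back out here and restored in the empty case at the
	 * end of this function.
	 */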
	sp->sum_bytes_left -= sizeof(struct finfo) - sizeof(ufs_daddr_t);
	++((SEGSUM *)(sp->segsum))->ss_nfinfo;

	if (vp->v_flag & VDIROP)
		((SEGSUM *)(sp->segsum))->ss_flags |= (SS_DIROP|SS_CONT);

	fip = sp->fip;
	fip->fi_nblocks = 0;
	fip->fi_ino = VTOI(vp)->i_number;
	LFS_IENTRY(ifp, fs, fip->fi_ino, bp);
	fip->fi_version = ifp->if_version;
	brelse(bp);

	/*
	 * It may not be necessary to write the meta-data blocks at this point,
	 * as the roll-forward recovery code should be able to reconstruct the
	 * list.
	 *
	 * We have to write them anyway, though, under two conditions: (1) the
	 * vnode is being flushed (for reuse by vinvalbuf); or (2) we are
	 * checkpointing.
	 */
	if ((sp->seg_flags & SEGM_CLEAN) &&
	    VTOI(vp)->i_number != LFS_IFILE_INUM &&
	    !IS_FLUSHING(fs,vp)) {
		lfs_gather(fs, sp, vp, lfs_match_fake);
	} else
		lfs_gather(fs, sp, vp, lfs_match_data);

	if (lfs_writeindir ||
	    IS_FLUSHING(fs,vp) ||
	    (sp->seg_flags & SEGM_CKP)) {
		lfs_gather(fs, sp, vp, lfs_match_indir);
		lfs_gather(fs, sp, vp, lfs_match_dindir);
		/* XXX KS - when is TRIPLE not true? */	/* #ifdef TRIPLE */
		lfs_gather(fs, sp, vp, lfs_match_tindir);
		/* #endif */
	}
	fip = sp->fip;
	if (fip->fi_nblocks != 0) {
		sp->fip = (FINFO*)((caddr_t)fip + sizeof(struct finfo) +
		    sizeof(ufs_daddr_t) * (fip->fi_nblocks - 1));
		sp->start_lbp = &sp->fip->fi_blocks[0];
	} else {
		sp->sum_bytes_left += sizeof(FINFO) - sizeof(ufs_daddr_t);
		--((SEGSUM *)(sp->segsum))->ss_nfinfo;
	}
}

int
lfs_writeinode(fs, sp, ip)
	struct lfs *fs;
	struct segment *sp;
	struct inode *ip;
{
	struct buf *bp, *ibp;
	IFILE *ifp;
	SEGUSE *sup;
	ufs_daddr_t daddr;
	ino_t ino;
	int error, i, ndx;
	int redo_ifile = 0;
	struct timespec ts;
	int gotblk = 0;

	if (!(ip->i_flag &
	    (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE | IN_CLEANING)))
		return (0);

	/* Allocate a new inode block if necessary. */
	if (sp->ibp == NULL) {
		/* Allocate a new segment if necessary. */
		if (sp->seg_bytes_left < fs->lfs_bsize ||
		    sp->sum_bytes_left < sizeof(ufs_daddr_t))
			(void) lfs_writeseg(fs, sp);

		/* Get next inode block. */
		daddr = fs->lfs_offset;
		fs->lfs_offset += fsbtodb(fs, 1);
		sp->ibp = *sp->cbpp++ =
		    getblk(VTOI(fs->lfs_ivnode)->i_devvp, daddr,
			   fs->lfs_bsize, 0, 0);
		gotblk++;

		/* Zero out inode numbers */
		for (i = 0; i < INOPB(fs); ++i)
			((struct dinode *)sp->ibp->b_data)[i].di_inumber = 0;

		++sp->start_bpp;
		fs->lfs_avail -= fsbtodb(fs, 1);
		/* Set remaining space counters. */
		sp->seg_bytes_left -= fs->lfs_bsize;
		sp->sum_bytes_left -= sizeof(ufs_daddr_t);
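		/*
		 * Record this inode block's disk address in the summary.
		 * The addresses of inode blocks are apparently stored as
		 * an array growing backward from the end of the summary
		 * block, one ufs_daddr_t per inode block, which is what
		 * the index arithmetic below computes.
		 */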
		ndx = LFS_SUMMARY_SIZE / sizeof(ufs_daddr_t) -
		    sp->ninodes / INOPB(fs) - 1;
		((ufs_daddr_t *)(sp->segsum))[ndx] = daddr;
	}

	/* Update the inode times and copy the inode onto the inode page. */
	if (ip->i_flag & (IN_CLEANING|IN_MODIFIED))
		--fs->lfs_uinodes;
	TIMEVAL_TO_TIMESPEC(&time, &ts);
	LFS_ITIMES(ip, &ts, &ts, &ts);

	if (ip->i_flag & IN_CLEANING)
		ip->i_flag &= ~IN_CLEANING;
	else
		ip->i_flag &= ~(IN_ACCESS|IN_CHANGE|IN_MODIFIED|IN_UPDATE);

	bp = sp->ibp;
	((struct dinode *)bp->b_data)[sp->ninodes % INOPB(fs)] =
	    ip->i_din.ffs_din;
	if (gotblk) {
		bp->b_flags |= B_LOCKED;
		brelse(bp);
	}

	/* Increment inode count in segment summary block. */
	++((SEGSUM *)(sp->segsum))->ss_ninos;

	/* If this page is full, set flag to allocate a new page. */
	if (++sp->ninodes % INOPB(fs) == 0)
		sp->ibp = NULL;

	/*
	 * If updating the ifile, update the super-block.  Update the disk
	 * address and access times for this inode in the ifile.
	 */
	ino = ip->i_number;
	if (ino == LFS_IFILE_INUM) {
		daddr = fs->lfs_idaddr;
		fs->lfs_idaddr = bp->b_blkno;
	} else {
		LFS_IENTRY(ifp, fs, ino, ibp);
		daddr = ifp->if_daddr;
		ifp->if_daddr = bp->b_blkno;
		error = VOP_BWRITE(ibp);
	}

	/*
	 * No need to update segment usage if there was no former inode address
	 * or if the last inode address is in the current partial segment.
	 */
	if (daddr != LFS_UNUSED_DADDR &&
	    !(daddr >= fs->lfs_lastpseg && daddr <= bp->b_blkno)) {
		LFS_SEGENTRY(sup, fs, datosn(fs, daddr), bp);
#ifdef DIAGNOSTIC
		if (sup->su_nbytes < DINODE_SIZE) {
			/* XXX -- Change to a panic. */
			printf("lfs_writeinode: negative bytes (segment %d)\n",
			       datosn(fs, daddr));
			panic("negative bytes");
		}
#endif
		sup->su_nbytes -= DINODE_SIZE;
		redo_ifile =
		    (ino == LFS_IFILE_INUM && !(bp->b_flags & B_GATHERED));
		error = VOP_BWRITE(bp);
	}
	return (redo_ifile);
}

int
lfs_gatherblock(sp, bp, sptr)
	struct segment *sp;
	struct buf *bp;
	int *sptr;
{
	struct lfs *fs;
	int version;

	/*
	 * If full, finish this segment.  We may be doing I/O, so
	 * release and reacquire the splbio().
	 */
#ifdef DIAGNOSTIC
	if (sp->vp == NULL)
		panic("lfs_gatherblock: Null vp in segment");
#endif
	fs = sp->fs;
	if (sp->sum_bytes_left < sizeof(ufs_daddr_t) ||
	    sp->seg_bytes_left < bp->b_bcount) {
		if (sptr)
			splx(*sptr);
		lfs_updatemeta(sp);

		version = sp->fip->fi_version;
		(void) lfs_writeseg(fs, sp);

		sp->fip->fi_version = version;
		sp->fip->fi_ino = VTOI(sp->vp)->i_number;
		/* Add the current file to the segment summary. */
		++((SEGSUM *)(sp->segsum))->ss_nfinfo;
		sp->sum_bytes_left -=
		    sizeof(struct finfo) - sizeof(ufs_daddr_t);

		if (sptr)
			*sptr = splbio();
		return (1);
	}

#ifdef DEBUG
	if (bp->b_flags & B_GATHERED) {
		printf("lfs_gatherblock: already gathered! Ino %d, lbn %d\n",
		       sp->fip->fi_ino, bp->b_lblkno);
		return (0);
	}
#endif
	/* Insert into the buffer list, update the FINFO block. */
	bp->b_flags |= B_GATHERED;
	*sp->cbpp++ = bp;
	sp->fip->fi_blocks[sp->fip->fi_nblocks++] = bp->b_lblkno;

	sp->sum_bytes_left -= sizeof(ufs_daddr_t);
	sp->seg_bytes_left -= bp->b_bcount;
	return (0);
}

int
lfs_gather(fs, sp, vp, match)
	struct lfs *fs;
	struct segment *sp;
	struct vnode *vp;
	int (*match) __P((struct lfs *, struct buf *));
{
	struct buf *bp;
	int s, count = 0;

	sp->vp = vp;
	s = splbio();

#ifndef LFS_NO_BACKBUF_HACK
loop:	for (bp = vp->v_dirtyblkhd.lh_first; bp; bp = bp->b_vnbufs.le_next) {
#else /* LFS_NO_BACKBUF_HACK */
/* This is a hack to see if ordering the blocks in LFS makes a difference. */
# define	BUF_OFFSET	(((void *)&bp->b_vnbufs.le_next) - (void *)bp)
# define	BACK_BUF(BP)	((struct buf *)(((void *)BP->b_vnbufs.le_prev) - BUF_OFFSET))
# define	BEG_OF_LIST	((struct buf *)(((void *)&vp->v_dirtyblkhd.lh_first) - BUF_OFFSET))
	/* Find last buffer. */
loop:	for (bp = vp->v_dirtyblkhd.lh_first;
	     bp && bp->b_vnbufs.le_next != NULL;
	     bp = bp->b_vnbufs.le_next);
	for (; bp && bp != BEG_OF_LIST; bp = BACK_BUF(bp)) {
#endif /* LFS_NO_BACKBUF_HACK */
		if ((bp->b_flags & (B_BUSY|B_GATHERED)) || !match(fs, bp))
			continue;
#ifdef DIAGNOSTIC
		if (!(bp->b_flags & B_DELWRI))
			panic("lfs_gather: bp not B_DELWRI");
		if (!(bp->b_flags & B_LOCKED))
			panic("lfs_gather: bp not B_LOCKED");
#endif
		count++;
		if (lfs_gatherblock(sp, bp, &s)) {
			goto loop;
		}
	}
	splx(s);
	lfs_updatemeta(sp);
	sp->vp = NULL;
	return count;
}

/*
 * Update the metadata that points to the blocks listed in the FINFO
 * array.
 */
void
lfs_updatemeta(sp)
	struct segment *sp;
{
	SEGUSE *sup;
	struct buf *bp;
	struct lfs *fs;
	struct vnode *vp;
	struct indir a[NIADDR + 2], *ap;
	struct inode *ip;
	ufs_daddr_t daddr, lbn, off;
	int error, i, nblocks, num;

	vp = sp->vp;
	nblocks = &sp->fip->fi_blocks[sp->fip->fi_nblocks] - sp->start_lbp;
	if (nblocks < 0)
		panic("lfs_updatemeta: negative nblocks");
	if (vp == NULL || nblocks == 0)
		return;

	/* Sort the blocks. */
	/*
	 * XXX KS - We have to sort even if the blocks come from the
	 * cleaner, because there might be other pending blocks on the
	 * same inode...and if we don't sort, and there are fragments
	 * present, blocks may be written in the wrong place.
	 */
	/* if (!(sp->seg_flags & SEGM_CLEAN)) */
	lfs_shellsort(sp->start_bpp, sp->start_lbp, nblocks);

	/*
	 * Record the length of the last block in case it's a fragment.
	 * If there are indirect blocks present, they sort last.  An
	 * indirect block will be lfs_bsize and its presence indicates
	 * that you cannot have fragments.
	 */
	sp->fip->fi_lastlength = sp->start_bpp[nblocks - 1]->b_bcount;

	/*
	 * Assign disk addresses, and update references to the logical
	 * block and the segment usage information.
	 */
	fs = sp->fs;
	for (i = nblocks; i--; ++sp->start_bpp) {
		lbn = *sp->start_lbp++;

		(*sp->start_bpp)->b_blkno = off = fs->lfs_offset;
		if ((*sp->start_bpp)->b_blkno == (*sp->start_bpp)->b_lblkno) {
			printf("lfs_updatemeta: ino %d blk %d has same lbn and daddr\n",
			       VTOI(vp)->i_number, off);
		}
		fs->lfs_offset +=
		    fragstodb(fs, numfrags(fs, (*sp->start_bpp)->b_bcount));
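
		/*
		 * Look up the block's current place in the inode's block
		 * map; num is the depth of indirection (0 for a direct
		 * block), and daddr is the old disk address, needed below
		 * to credit the segment the block used to live in.
		 */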
		error = ufs_bmaparray(vp, lbn, &daddr, a, &num, NULL);
		if (error)
			panic("lfs_updatemeta: ufs_bmaparray %d", error);
		ip = VTOI(vp);
		switch (num) {
		case 0:
			ip->i_ffs_db[lbn] = off;
			break;
		case 1:
			ip->i_ffs_ib[a[0].in_off] = off;
			break;
		default:
			ap = &a[num - 1];
			if (bread(vp, ap->in_lbn, fs->lfs_bsize, NOCRED, &bp))
				panic("lfs_updatemeta: bread bno %d",
				      ap->in_lbn);
			/*
			 * Bread may create a new (indirect) block which needs
			 * to get counted for the inode.
			 */
			if (/* bp->b_blkno == -1 && */
			    !(bp->b_flags & (B_DELWRI|B_DONE))) {
				ip->i_ffs_blocks += fsbtodb(fs, 1);
				fs->lfs_bfree -= fragstodb(fs, fs->lfs_frag);
			}
			((ufs_daddr_t *)bp->b_data)[ap->in_off] = off;
			VOP_BWRITE(bp);
		}
		/* Update segment usage information. */
		if (daddr != UNASSIGNED &&
		    !(daddr >= fs->lfs_lastpseg && daddr <= off)) {
			LFS_SEGENTRY(sup, fs, datosn(fs, daddr), bp);
#ifdef DIAGNOSTIC
			if (sup->su_nbytes < (*sp->start_bpp)->b_bcount) {
				/* XXX -- Change to a panic. */
				printf("lfs_updatemeta: negative bytes (segment %d)\n",
				       datosn(fs, daddr));
				printf("lfs_updatemeta: bp = 0x%p, addr = 0x%p\n",
				       bp, bp->b_un.b_addr);
				/* panic ("Negative Bytes"); */
			}
#endif
			sup->su_nbytes -= (*sp->start_bpp)->b_bcount;
			error = VOP_BWRITE(bp);
		}
	}
}

/*
 * Start a new segment.
 */
int
lfs_initseg(fs)
	struct lfs *fs;
{
	struct segment *sp;
	SEGUSE *sup;
	SEGSUM *ssp;
	struct buf *bp;
	int repeat;

	sp = fs->lfs_sp;

	repeat = 0;
	/* Advance to the next segment. */
	if (!LFS_PARTIAL_FITS(fs)) {
		/* Wake up any cleaning procs waiting on this file system. */
		wakeup(&lfs_allclean_wakeup);
		wakeup(&fs->lfs_nextseg);
		lfs_newseg(fs);
		repeat = 1;
		fs->lfs_offset = fs->lfs_curseg;
		sp->seg_number = datosn(fs, fs->lfs_curseg);
		sp->seg_bytes_left = fs->lfs_dbpseg * DEV_BSIZE;
		/*
		 * If the segment contains a superblock, update the offset
		 * and summary address to skip over it.
		 */
		LFS_SEGENTRY(sup, fs, sp->seg_number, bp);
		if (sup->su_flags & SEGUSE_SUPERBLOCK) {
			fs->lfs_offset += LFS_SBPAD / DEV_BSIZE;
			sp->seg_bytes_left -= LFS_SBPAD;
		}
		brelse(bp);
	} else {
		sp->seg_number = datosn(fs, fs->lfs_curseg);
		sp->seg_bytes_left = (fs->lfs_dbpseg -
		    (fs->lfs_offset - fs->lfs_curseg)) * DEV_BSIZE;
	}
	fs->lfs_lastpseg = fs->lfs_offset;

	sp->fs = fs;
	sp->ibp = NULL;
	sp->ninodes = 0;

	/* Get a new buffer for SEGSUM and enter it into the buffer list. */
	sp->cbpp = sp->bpp;
	*sp->cbpp = lfs_newbuf(VTOI(fs->lfs_ivnode)->i_devvp,
	    fs->lfs_offset, LFS_SUMMARY_SIZE);
	sp->segsum = (*sp->cbpp)->b_data;
	bzero(sp->segsum, LFS_SUMMARY_SIZE);
	sp->start_bpp = ++sp->cbpp;
	fs->lfs_offset += LFS_SUMMARY_SIZE / DEV_BSIZE;

	/* Set point to SEGSUM, initialize it. */
	ssp = sp->segsum;
	ssp->ss_next = fs->lfs_nextseg;
	ssp->ss_nfinfo = ssp->ss_ninos = 0;
	ssp->ss_magic = SS_MAGIC;

	/* Set pointer to first FINFO, initialize it. */
	sp->fip = (struct finfo *)((caddr_t)sp->segsum + sizeof(SEGSUM));
	sp->fip->fi_nblocks = 0;
	sp->start_lbp = &sp->fip->fi_blocks[0];
	sp->fip->fi_lastlength = 0;
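
	/*
	 * The summary block itself occupies segment space, and the room
	 * left inside it after the SEGSUM header is what sum_bytes_left
	 * meters out to FINFO entries and inode-block addresses.
	 */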
	sp->seg_bytes_left -= LFS_SUMMARY_SIZE;
	sp->sum_bytes_left = LFS_SUMMARY_SIZE - sizeof(SEGSUM);

	return (repeat);
}

/*
 * Return the next segment to write.
 */
void
lfs_newseg(fs)
	struct lfs *fs;
{
	CLEANERINFO *cip;
	SEGUSE *sup;
	struct buf *bp;
	int curseg, isdirty, sn;

	LFS_SEGENTRY(sup, fs, datosn(fs, fs->lfs_nextseg), bp);
	sup->su_flags |= SEGUSE_DIRTY | SEGUSE_ACTIVE;
	sup->su_nbytes = 0;
	sup->su_nsums = 0;
	sup->su_ninos = 0;
	(void) VOP_BWRITE(bp);

	LFS_CLEANERINFO(cip, fs, bp);
	--cip->clean;
	++cip->dirty;
	fs->lfs_nclean = cip->clean;
	(void) VOP_BWRITE(bp);

	fs->lfs_lastseg = fs->lfs_curseg;
	fs->lfs_curseg = fs->lfs_nextseg;
	for (sn = curseg = datosn(fs, fs->lfs_curseg);;) {
		sn = (sn + 1) % fs->lfs_nseg;
		if (sn == curseg)
			panic("lfs_nextseg: no clean segments");
		LFS_SEGENTRY(sup, fs, sn, bp);
		isdirty = sup->su_flags & SEGUSE_DIRTY;
		brelse(bp);
		if (!isdirty)
			break;
	}

	++fs->lfs_nactive;
	fs->lfs_nextseg = sntoda(fs, sn);
	if (lfs_dostats) {
		++lfs_stats.segsused;
	}
}

int
lfs_writeseg(fs, sp)
	struct lfs *fs;
	struct segment *sp;
{
	extern int locked_queue_count;
	extern long locked_queue_bytes;
	struct buf **bpp, *bp, *cbp;
	SEGUSE *sup;
	SEGSUM *ssp;
	dev_t i_dev;
	u_long *datap, *dp;
	int do_again, i, nblocks, s;
#ifdef LFS_TRACK_IOS
	int j;
#endif
	int (*strategy)__P((void *));
	struct vop_strategy_args vop_strategy_a;
	u_short ninos;
	struct vnode *devvp;
	char *p;
	struct vnode *vn;
#if defined(DEBUG) && defined(LFS_PROPELLER)
	static int propeller;
	char propstring[4] = "-\\|/";

	printf("%c\b", propstring[propeller++]);
	if (propeller == 4)
		propeller = 0;
#endif

	/*
	 * If there are no buffers other than the segment summary to write
	 * and it is not a checkpoint, don't do anything.  On a checkpoint,
	 * even if there aren't any buffers, you need to write the superblock.
	 */
	if ((nblocks = sp->cbpp - sp->bpp) == 1)
		return (0);

#ifdef DEBUG_LFS
	lfs_check_bpp(fs, sp, __FILE__, __LINE__);
#endif

	/* Update the segment usage information. */
	LFS_SEGENTRY(sup, fs, sp->seg_number, bp);

	/* Loop through all blocks, except the segment summary. */
	for (bpp = sp->bpp; ++bpp < sp->cbpp; )
		sup->su_nbytes += (*bpp)->b_bcount;

	ssp = (SEGSUM *)sp->segsum;

	ninos = (ssp->ss_ninos + INOPB(fs) - 1) / INOPB(fs);
	/* sup->su_nbytes += ssp->ss_ninos * DINODE_SIZE; */
	sup->su_nbytes += LFS_SUMMARY_SIZE;
	sup->su_lastmod = time.tv_sec;
	sup->su_ninos += ninos;
	++sup->su_nsums;

	do_again = !(bp->b_flags & B_GATHERED);
	(void)VOP_BWRITE(bp);
	/*
	 * Compute checksum across data and then across summary; the first
	 * block (the summary block) is skipped.  Set the create time here
	 * so that it's guaranteed to be later than the inode mod times.
	 *
	 * XXX
	 * Fix this to do it inline, instead of malloc/copy.
	 */
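	/*
	 * Note that only the first u_long of each data block goes into
	 * the array being checksummed, so ss_datasum apparently covers
	 * one word per block rather than the blocks' full contents.
	 */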
	datap = dp = malloc(nblocks * sizeof(u_long), M_SEGMENT, M_WAITOK);
	for (bpp = sp->bpp, i = nblocks - 1; i--;) {
		if (((*++bpp)->b_flags & (B_CALL|B_INVAL)) == (B_CALL|B_INVAL)) {
			if (copyin((*bpp)->b_saveaddr, dp++, sizeof(u_long)))
				panic("lfs_writeseg: copyin failed [1]: ino %d blk %d",
				      VTOI((*bpp)->b_vp)->i_number,
				      (*bpp)->b_lblkno);
		} else {
			if (!((*bpp)->b_flags & B_CALL)) {
				/*
				 * Before we record data for a checksum,
				 * make sure the data won't change in between
				 * the checksum calculation and the write,
				 * by marking the buffer B_BUSY.  It will
				 * be freed later by brelse().
				 */
			again:
				s = splbio();
				if ((*bpp)->b_flags & B_BUSY) {
#ifdef DEBUG
					printf("lfs_writeseg: avoiding potential data summary corruption for ino %d, lbn %d\n",
					       VTOI((*bpp)->b_vp)->i_number,
					       (*bpp)->b_lblkno);
#endif
					(*bpp)->b_flags |= B_WANTED;
					tsleep((*bpp), (PRIBIO + 1),
					       "lfs_writeseg", 0);
					splx(s);
					goto again;
				}
				(*bpp)->b_flags |= B_BUSY;
				splx(s);
			}
			*dp++ = ((u_long *)(*bpp)->b_data)[0];
		}
	}
	ssp->ss_create = time.tv_sec;
	ssp->ss_datasum = cksum(datap, (nblocks - 1) * sizeof(u_long));
	ssp->ss_sumsum =
	    cksum(&ssp->ss_datasum, LFS_SUMMARY_SIZE - sizeof(ssp->ss_sumsum));
	free(datap, M_SEGMENT);
#ifdef DIAGNOSTIC
	if (fs->lfs_bfree < fsbtodb(fs, ninos) + LFS_SUMMARY_SIZE / DEV_BSIZE)
		panic("lfs_writeseg: No diskspace for summary");
#endif
	fs->lfs_bfree -= (fsbtodb(fs, ninos) + LFS_SUMMARY_SIZE / DEV_BSIZE);

	i_dev = VTOI(fs->lfs_ivnode)->i_dev;
	devvp = VTOI(fs->lfs_ivnode)->i_devvp;
	strategy = devvp->v_op[VOFFSET(vop_strategy)];

	/*
	 * When we simply write the blocks we lose a rotation for every block
	 * written.  To avoid this problem, we allocate memory in chunks, copy
	 * the buffers into the chunk and write the chunk.  CHUNKSIZE is the
	 * largest size I/O devices can handle.
	 * When the data is copied to the chunk, turn off the B_LOCKED bit
	 * and brelse the buffer (which will move them to the LRU list).  Add
	 * the B_CALL flag to the buffer header so we can count I/O's for the
	 * checkpoints and so we can release the allocated memory.
	 *
	 * XXX
	 * This should be removed if the new virtual memory system allows us to
	 * easily make the buffers contiguous in kernel memory and if that's
	 * fast enough.
	 */

#define CHUNKSIZE MAXPHYS

	if (devvp == NULL)
		panic("devvp is NULL");
	for (bpp = sp->bpp, i = nblocks; i;) {
		cbp = lfs_newbuf(devvp, (*bpp)->b_blkno, CHUNKSIZE);
		cbp->b_dev = i_dev;
		cbp->b_flags |= B_ASYNC | B_BUSY;
		cbp->b_bcount = 0;

#ifdef DIAGNOSTIC
		if (datosn(fs, (*bpp)->b_blkno +
		    ((*bpp)->b_bcount - 1) / DEV_BSIZE) !=
		    datosn(fs, cbp->b_blkno)) {
			panic("lfs_writeseg: Segment overwrite");
		}
#endif

		if (fs->lfs_iocount >= LFS_THROTTLE) {
			tsleep(&fs->lfs_iocount, PRIBIO+1, "lfs throttle", 0);
		}
		s = splbio();
		++fs->lfs_iocount;
#ifdef LFS_TRACK_IOS
		for (j = 0; j < LFS_THROTTLE; j++) {
			if (fs->lfs_pending[j] == LFS_UNUSED_DADDR) {
				fs->lfs_pending[j] = cbp->b_blkno;
				break;
			}
		}
#endif /* LFS_TRACK_IOS */
		for (p = cbp->b_data; i && cbp->b_bcount < CHUNKSIZE; i--) {
			bp = *bpp;

			if (bp->b_bcount > (CHUNKSIZE - cbp->b_bcount))
				break;

			/*
			 * Fake buffers from the cleaner are marked as B_INVAL.
			 * We need to copy the data from user space rather than
			 * from the buffer indicated.
			 * XXX == what do I do on an error?
			 */
			if ((bp->b_flags & (B_CALL|B_INVAL)) ==
			    (B_CALL|B_INVAL)) {
				if (copyin(bp->b_saveaddr, p, bp->b_bcount))
					panic("lfs_writeseg: copyin failed [2]");
			} else
				bcopy(bp->b_data, p, bp->b_bcount);
			p += bp->b_bcount;
			cbp->b_bcount += bp->b_bcount;
			if (bp->b_flags & B_LOCKED) {
				--locked_queue_count;
				locked_queue_bytes -= bp->b_bufsize;
			}
			bp->b_flags &= ~(B_ERROR | B_READ | B_DELWRI |
					 B_LOCKED | B_GATHERED);
			vn = bp->b_vp;
			if (bp->b_flags & B_CALL) {
				/* if B_CALL, it was created with newbuf */
				lfs_freebuf(bp);
			} else {
				bremfree(bp);
				bp->b_flags |= B_DONE;
				if (vn)
					reassignbuf(bp, vn);
				brelse(bp);
			}
			if (bp->b_flags & B_NEEDCOMMIT) {	/* XXX */
				bp->b_flags &= ~B_NEEDCOMMIT;
				wakeup(bp);
			}
			/* if (vn->v_dirtyblkhd.lh_first == NULL) */
			wakeup(vn);
			bpp++;
		}
		++cbp->b_vp->v_numoutput;
		splx(s);
		/*
		 * XXXX This is a gross and disgusting hack.  Since these
		 * buffers are physically addressed, they hang off the
		 * device vnode (devvp).  As a result, they have no way
		 * of getting to the LFS superblock or lfs structure to
		 * keep track of the number of I/O's pending.  So, I am
		 * going to stuff the fs into the saveaddr field of
		 * the buffer (yuk).
		 */
		cbp->b_saveaddr = (caddr_t)fs;
		vop_strategy_a.a_desc = VDESC(vop_strategy);
		vop_strategy_a.a_bp = cbp;
		(strategy)(&vop_strategy_a);
	}
	/*
	 * XXX
	 * Vinvalbuf can move locked buffers off the locked queue
	 * and we have no way of knowing about this.  So, after
	 * doing a big write, we recalculate how many buffers are
	 * really still left on the locked queue.
	 */
	lfs_countlocked(&locked_queue_count, &locked_queue_bytes);
	wakeup(&locked_queue_count);
	if (lfs_dostats) {
		++lfs_stats.psegwrites;
		lfs_stats.blocktot += nblocks - 1;
		if (fs->lfs_sp->seg_flags & SEGM_SYNC)
			++lfs_stats.psyncwrites;
		if (fs->lfs_sp->seg_flags & SEGM_CLEAN) {
			++lfs_stats.pcleanwrites;
			lfs_stats.cleanblocks += nblocks - 1;
		}
	}
	return (lfs_initseg(fs) || do_again);
}

void
lfs_writesuper(fs, daddr)
	struct lfs *fs;
	daddr_t daddr;
{
	struct buf *bp;
	dev_t i_dev;
	int (*strategy) __P((void *));
	int s;
	struct vop_strategy_args vop_strategy_a;

#ifdef LFS_CANNOT_ROLLFW
	/*
	 * If we can write one superblock while another is in
	 * progress, we risk not having a complete checkpoint if we crash.
	 * So, block here if a superblock write is in progress.
	 *
	 * XXX - should be a proper lock, not this hack
	 */
	while (fs->lfs_sbactive) {
		tsleep(&fs->lfs_sbactive, PRIBIO+1, "lfs sb", 0);
	}
	fs->lfs_sbactive = daddr;
#endif
	i_dev = VTOI(fs->lfs_ivnode)->i_dev;
	strategy = VTOI(fs->lfs_ivnode)->i_devvp->v_op[VOFFSET(vop_strategy)];

	/* Set timestamp of this version of the superblock */
	fs->lfs_tstamp = time.tv_sec;

	/* Checksum the superblock and copy it into a buffer. */
	fs->lfs_cksum = lfs_sb_cksum(&(fs->lfs_dlfs));
	bp = lfs_newbuf(VTOI(fs->lfs_ivnode)->i_devvp, daddr, LFS_SBPAD);
	*(struct dlfs *)bp->b_data = fs->lfs_dlfs;

	bp->b_dev = i_dev;
	bp->b_flags |= B_BUSY | B_CALL | B_ASYNC;
	bp->b_flags &= ~(B_DONE | B_ERROR | B_READ | B_DELWRI);
	bp->b_iodone = lfs_supercallback;
	/* XXX KS - same nasty hack as above */
	bp->b_saveaddr = (caddr_t)fs;

	vop_strategy_a.a_desc = VDESC(vop_strategy);
	vop_strategy_a.a_bp = bp;
	s = splbio();
	++bp->b_vp->v_numoutput;
	splx(s);
	(strategy)(&vop_strategy_a);
}

/*
 * Logical block number match routines used when traversing the dirty block
 * chain.
 */
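/*
 * Negative logical block numbers name indirect blocks here: for such an
 * lbn, (-lbn - NDADDR) % NINDIR(fs) is apparently 0 for a single indirect
 * block, 1 for a double indirect, and 2 for a triple indirect, which is
 * the convention the three match routines below test against.
 */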
int
lfs_match_fake(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	return (bp->b_flags & B_CALL);
}

int
lfs_match_data(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	return (bp->b_lblkno >= 0);
}

int
lfs_match_indir(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	int lbn;

	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 0);
}

int
lfs_match_dindir(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	int lbn;

	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 1);
}

int
lfs_match_tindir(fs, bp)
	struct lfs *fs;
	struct buf *bp;
{
	int lbn;

	lbn = bp->b_lblkno;
	return (lbn < 0 && (-lbn - NDADDR) % NINDIR(fs) == 2);
}

/*
 * XXX - The only buffers that are going to hit these functions are the
 * segment write blocks, or the segment summaries, or the superblocks.
 *
 * All of the above are created by lfs_newbuf, and so do not need to be
 * released via brelse.
 */
void
lfs_callback(bp)
	struct buf *bp;
{
	struct lfs *fs;
#ifdef LFS_TRACK_IOS
	int j;
#endif

	fs = (struct lfs *)bp->b_saveaddr;
#ifdef DIAGNOSTIC
	if (fs->lfs_iocount == 0)
		panic("lfs_callback: zero iocount");
#endif
	if (--fs->lfs_iocount < LFS_THROTTLE)
		wakeup(&fs->lfs_iocount);
#ifdef LFS_TRACK_IOS
	for (j = 0; j < LFS_THROTTLE; j++) {
		if (fs->lfs_pending[j] == bp->b_blkno) {
			fs->lfs_pending[j] = LFS_UNUSED_DADDR;
			wakeup(&(fs->lfs_pending[j]));
			break;
		}
	}
#endif /* LFS_TRACK_IOS */

	lfs_freebuf(bp);
}

void
lfs_supercallback(bp)
	struct buf *bp;
{
#ifdef LFS_CANNOT_ROLLFW
	struct lfs *fs;

	fs = (struct lfs *)bp->b_saveaddr;
	fs->lfs_sbactive = 0;	/* lfs_sbactive holds a disk address, not a pointer */
	wakeup(&fs->lfs_sbactive);
#endif
	lfs_freebuf(bp);
}

/*
 * Shellsort (diminishing increment sort) from Data Structures and
 * Algorithms, Aho, Hopcroft and Ullman, 1983 Edition, page 290;
 * see also Knuth Vol. 3, page 84.  The increments are selected from
 * formula (8), page 95.  Roughly O(N^3/2).
 */
/*
 * This is our own private copy of shellsort because we want to sort
 * two parallel arrays (the array of buffer pointers and the array of
 * logical block numbers) simultaneously.  Note that we cast the array
 * of logical block numbers to an unsigned in this routine so that the
 * negative block numbers (meta data blocks) sort AFTER the data blocks.
 */

void
lfs_shellsort(bp_array, lb_array, nmemb)
	struct buf **bp_array;
	ufs_daddr_t *lb_array;
	register int nmemb;
{
	static int __rsshell_increments[] = { 4, 1, 0 };
	register int incr, *incrp, t1, t2;
	struct buf *bp_temp;
	u_long lb_temp;

	for (incrp = __rsshell_increments; (incr = *incrp++) != 0;)
		for (t1 = incr; t1 < nmemb; ++t1)
			for (t2 = t1 - incr; t2 >= 0;)
				if (lb_array[t2] > lb_array[t2 + incr]) {
					lb_temp = lb_array[t2];
					lb_array[t2] = lb_array[t2 + incr];
					lb_array[t2 + incr] = lb_temp;
					bp_temp = bp_array[t2];
					bp_array[t2] = bp_array[t2 + incr];
					bp_array[t2 + incr] = bp_temp;
					t2 -= incr;
				} else
					break;
}

/*
 * Check VXLOCK.  Return 1 if the vnode is locked.  Otherwise, vget it.
 */
int
lfs_vref(vp)
	register struct vnode *vp;
{
	/*
	 * If we return 1 here during a flush, we risk vinvalbuf() not
	 * being able to flush all of the pages from this vnode, which
	 * will cause it to panic.  So, return 0 if a flush is in progress.
	 */
	if (vp->v_flag & VXLOCK) {
		if (IS_FLUSHING(VTOI(vp)->i_lfs, vp)) {
			return 0;
		}
		return (1);
	}
	return (vget(vp, 0));
}

/*
 * This is vrele except that we do not want to VOP_INACTIVE this vnode.  We
 * inline vrele here to avoid the vn_lock and VOP_INACTIVE call at the end.
 */
void
lfs_vunref(vp)
	register struct vnode *vp;
{
	/*
	 * Analogous to lfs_vref, if the node is flushing, fake it.
	 */
	if ((vp->v_flag & VXLOCK) && IS_FLUSHING(VTOI(vp)->i_lfs, vp)) {
		return;
	}

	simple_lock(&vp->v_interlock);
#ifdef DIAGNOSTIC
	if (vp->v_usecount <= 0) {
		printf("lfs_vunref: flags are 0x%lx\n", vp->v_flag);
		printf("lfs_vunref: usecount = %d\n", vp->v_usecount);
		panic("lfs_vunref: v_usecount<=0");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}
#ifdef DIAGNOSTIC
	if (VOP_ISLOCKED(vp))
		panic("lfs_vunref: vnode locked");
#endif
	/*
	 * insert at tail of LRU list
	 */
	simple_lock(&vnode_free_list_slock);
	TAILQ_INSERT_TAIL(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	simple_unlock(&vp->v_interlock);
}

/*
 * We use this when we have vnodes that were loaded in solely for cleaning.
 * There is no reason to believe that these vnodes will be referenced again
 * soon, since the cleaning process is unrelated to normal filesystem
 * activity.  Putting cleaned vnodes at the tail of the list has the effect
 * of flushing the vnode LRU.  So, put vnodes that were loaded only for
 * cleaning at the head of the list, instead.
 */
void
lfs_vunref_head(vp)
	register struct vnode *vp;
{
	simple_lock(&vp->v_interlock);
#ifdef DIAGNOSTIC
	if (vp->v_usecount == 0) {
		panic("lfs_vunref_head: v_usecount == 0");
	}
#endif
	vp->v_usecount--;
	if (vp->v_usecount > 0) {
		simple_unlock(&vp->v_interlock);
		return;
	}
#ifdef DIAGNOSTIC
	if (VOP_ISLOCKED(vp))
		panic("lfs_vunref_head: vnode locked");
#endif
	/*
	 * insert at head of LRU list
	 */
	simple_lock(&vnode_free_list_slock);
	TAILQ_INSERT_HEAD(&vnode_free_list, vp, v_freelist);
	simple_unlock(&vnode_free_list_slock);
	simple_unlock(&vp->v_interlock);
}