/*	$NetBSD: lfs_bio.c,v 1.51 2002/12/26 13:37:18 yamt Exp $	*/

/*-
 * Copyright (c) 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)lfs_bio.c	8.10 (Berkeley) 6/10/95
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: lfs_bio.c,v 1.51 2002/12/26 13:37:18 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/resourcevar.h>
#include <sys/mount.h>
#include <sys/kernel.h>

#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ufs/ufs_extern.h>

#include <sys/malloc.h>
#include <ufs/lfs/lfs.h>
#include <ufs/lfs/lfs_extern.h>

/* Macros to clear/set/test flags. */
# define	SET(t, f)	(t) |= (f)
# define	CLR(t, f)	(t) &= ~(f)
# define	ISSET(t, f)	((t) & (f))
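
/*
 * Usage sketch (illustrative only, not part of the original source):
 * the flag helpers above read as, e.g.,
 *
 *	SET(bp->b_flags, B_DELWRI);		   set a flag
 *	CLR(bp->b_flags, B_READ | B_DONE);	   clear flags
 *	if (ISSET(bp->b_flags, B_LOCKED))	   test a flag
 *		...
 */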

/*
 * LFS block write function.
 *
 * XXX
 * No write cost accounting is done.
 * This is almost certainly wrong for synchronous operations and NFS.
 */
int	locked_queue_count   = 0;	/* XXX Count of locked-down buffers. */
long	locked_queue_bytes   = 0L;	/* XXX Total size of locked buffers. */
int	lfs_writing          = 0;	/* Set if already kicked off a writer
					   because of buffer space */
extern int lfs_dostats;

/*
 * reserved number/bytes of locked buffers
 */
int locked_queue_rcount = 0;
long locked_queue_rbytes = 0L;

int lfs_fits_buf(struct lfs *, int, int);
int lfs_reservebuf(struct lfs *, int, int);
int lfs_reserveavail(struct lfs *, struct vnode *, int);

int
lfs_fits_buf(struct lfs *fs, int n, int bytes)
{
	int count_fit =
	    (locked_queue_count + locked_queue_rcount + n < LFS_WAIT_BUFS);
	int bytes_fit =
	    (locked_queue_bytes + locked_queue_rbytes + bytes < LFS_WAIT_BYTES);

#ifdef DEBUG_LFS
	if (!count_fit) {
		printf("lfs_fits_buf: no fit count: %d + %d + %d >= %d\n",
			locked_queue_count, locked_queue_rcount,
			n, LFS_WAIT_BUFS);
	}
	if (!bytes_fit) {
		printf("lfs_fits_buf: no fit bytes: %ld + %ld + %d >= %d\n",
			locked_queue_bytes, locked_queue_rbytes,
			bytes, LFS_WAIT_BYTES);
	}
#endif /* DEBUG_LFS */

	return (count_fit && bytes_fit);
}

int
lfs_reservebuf(struct lfs *fs, int n, int bytes)
{
	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	while (n > 0 && !lfs_fits_buf(fs, n, bytes)) {
		int error;

		++fs->lfs_writer;
		lfs_flush(fs, 0);
		if (--fs->lfs_writer == 0)
			wakeup(&fs->lfs_dirops);

		error = tsleep(&locked_queue_count, PCATCH | PUSER,
		    "lfsresbuf", hz * LFS_BUFWAIT);
		if (error && error != EWOULDBLOCK)
			return error;
	}

	locked_queue_rcount += n;
	locked_queue_rbytes += bytes;

	KASSERT(locked_queue_rcount >= 0);
	KASSERT(locked_queue_rbytes >= 0);

	return 0;
}

/*
 * Try to reserve some blocks, prior to performing a sensitive operation that
 * requires the vnode lock to be honored.  If there is not enough space, give
 * up the vnode lock temporarily and wait for the space to become available.
 *
 * Called with vp locked.  (Note however that if fsb < 0, vp is ignored.)
 *
 * XXX YAMT - it isn't safe to unlock vp here
 * because the node might be modified while we sleep.
 * (eg. cached states like i_offset might be stale,
 *  the vnode might be truncated, etc..)
 * maybe we should have a way to restart the vnode op. (EVOPRESTART?)
 *
 * XXX YAMT - we unlock the vnode so that the cleaner can lock it.
 * but it isn't enough.  eg. for VOP_REMOVE, we should also unlock the
 * vnode that is going to be removed.
 */
int
lfs_reserveavail(struct lfs *fs, struct vnode *vp, int fsb)
{
	CLEANERINFO *cip;
	struct buf *bp;
	int error, slept;

	slept = 0;
	while (fsb > 0 && !lfs_fits(fs, fsb + fs->lfs_ravail) &&
	    vp != fs->lfs_unlockvp) {
#if 0
		/*
		 * XXX ideally, we should unlock vnodes here
		 * because we might sleep for a very long time.
		 */
		VOP_UNLOCK(vp, 0);
#else
		/*
		 * XXX since we sleep waiting for the cleaner while holding
		 * the vnode lock, a deadlock will occur if the cleaner
		 * tries to acquire the vnode lock.
		 * (eg. lfs_markv -> lfs_fastvget -> getnewvnode -> vclean)
		 */
#endif

		if (!slept) {
#ifdef DEBUG
			printf("lfs_reserve: waiting for %ld (bfree = %d,"
			       " est_bfree = %d)\n",
			       (long)(fsb + fs->lfs_ravail), fs->lfs_bfree,
			       LFS_EST_BFREE(fs));
#endif
		}
		++slept;

		/* Wake up the cleaner */
		LFS_CLEANERINFO(cip, fs, bp);
		LFS_SYNC_CLEANERINFO(cip, fs, bp, 0);
		wakeup(&lfs_allclean_wakeup);
		wakeup(&fs->lfs_nextseg);

		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "lfs_reserve",
		    0);
#if 0
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY); /* XXX use lockstatus */
#endif
		if (error)
			return error;
	}
#ifdef DEBUG
	if (slept)
		printf("lfs_reserve: woke up\n");
#endif
	fs->lfs_ravail += fsb;

	return 0;
}

#ifdef DIAGNOSTIC
int lfs_rescount;
int lfs_rescountdirop;
#endif

int
lfs_reserve(struct lfs *fs, struct vnode *vp, int fsb)
{
	int error;
	int cantwait;

	cantwait = (VTOI(vp)->i_flag & IN_ADIROP);
#ifdef DIAGNOSTIC
	if (cantwait) {
		if (fsb > 0)
			lfs_rescountdirop++;
		else if (fsb < 0)
			lfs_rescountdirop--;
		if (lfs_rescountdirop < 0)
			panic("lfs_rescountdirop");
	} else {
		if (fsb > 0)
			lfs_rescount++;
		else if (fsb < 0)
			lfs_rescount--;
		if (lfs_rescount < 0)
			panic("lfs_rescount");
	}
#endif
	if (cantwait)
		return 0;

	error = lfs_reserveavail(fs, vp, fsb);
	if (error)
		return error;

	/*
	 * XXX just a guess.  should be more precise.
	 */
	error = lfs_reservebuf(fs, fragstoblks(fs, fsb), fsbtob(fs, fsb));
	if (error)
		lfs_reserveavail(fs, vp, -fsb);

	return error;
}
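
/*
 * Usage sketch (illustrative, not from the original file): callers
 * bracket an operation with a positive reservation and a matching
 * negative one, e.g.
 *
 *	error = lfs_reserve(fs, vp, btofsb(fs, NIADDR * fs->lfs_bsize));
 *	if (error)
 *		return error;
 *	... operation that may dirty up to that many blocks ...
 *	lfs_reserve(fs, vp, -btofsb(fs, NIADDR * fs->lfs_bsize));
 *
 * The size argument here is only an example; real callers size the
 * reservation to the worst case of the operation they perform.
 */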

int
lfs_bwrite(void *v)
{
	struct vop_bwrite_args /* {
		struct buf *a_bp;
	} */ *ap = v;
	struct buf *bp = ap->a_bp;

#ifdef DIAGNOSTIC
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly == 0 && (bp->b_flags & B_ASYNC)) {
		panic("bawrite LFS buffer");
	}
#endif /* DIAGNOSTIC */
	return lfs_bwrite_ext(bp, 0);
}

/*
 * Determine if there is enough room currently available to write fsb
 * blocks.  We need enough blocks for the new blocks, the current
 * inode blocks (including potentially the ifile inode), a summary block,
 * and the segment usage table, plus an ifile block.
 */
int
lfs_fits(struct lfs *fs, int fsb)
{
	int needed;

	needed = fsb + btofsb(fs, fs->lfs_sumsize) +
		 ((howmany(fs->lfs_uinodes + 1, INOPB(fs)) + fs->lfs_segtabsz +
		   1) << (fs->lfs_blktodb - fs->lfs_fsbtodb));

	if (needed >= fs->lfs_avail) {
#ifdef DEBUG
		printf("lfs_fits: no fit: fsb = %d, uinodes = %d, "
		       "needed = %d, avail = %d\n",
		       fsb, fs->lfs_uinodes, needed, fs->lfs_avail);
#endif
		return 0;
	}
	return 1;
}
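
/*
 * Worked example (hypothetical geometry, for illustration only):
 * with 8KB blocks and 8KB fragments (so lfs_blktodb == lfs_fsbtodb and
 * the shift is 0), an 8KB summary block (btofsb == 1), 64 inodes per
 * block, lfs_uinodes == 63 and lfs_segtabsz == 3, a request for
 * fsb == 10 needs
 *
 *	needed = 10 + 1 + (howmany(63 + 1, 64) + 3 + 1)
 *	       = 10 + 1 + 5 = 16 fsb
 *
 * and fits only if lfs_avail is strictly greater than 16.
 */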

int
lfs_availwait(struct lfs *fs, int fsb)
{
	int error;
	CLEANERINFO *cip;
	struct buf *cbp;

	while (!lfs_fits(fs, fsb)) {
		/*
		 * Out of space, need cleaner to run.
		 * Update the cleaner info, then wake it up.
		 * Note the cleanerinfo block is on the ifile
		 * so it CANT_WAIT.
		 */
		LFS_CLEANERINFO(cip, fs, cbp);
		LFS_SYNC_CLEANERINFO(cip, fs, cbp, 0);

		printf("lfs_availwait: out of available space, "
		       "waiting on cleaner\n");

		wakeup(&lfs_allclean_wakeup);
		wakeup(&fs->lfs_nextseg);
#ifdef DIAGNOSTIC
		if (fs->lfs_seglock && fs->lfs_lockpid == curproc->p_pid)
			panic("lfs_availwait: deadlock");
#endif
		error = tsleep(&fs->lfs_avail, PCATCH | PUSER, "cleaner", 0);
		if (error)
			return (error);
	}
	return 0;
}

int
lfs_bwrite_ext(struct buf *bp, int flags)
{
	struct lfs *fs;
	struct inode *ip;
	int fsb, s;

	KASSERT(bp->b_flags & B_BUSY);
	KASSERT((flags & BW_CLEAN) || !(bp->b_flags & B_CALL));

	/*
	 * Don't write *any* blocks if we're mounted read-only.
	 * In particular the cleaner can't write blocks either.
	 */
	if (VTOI(bp->b_vp)->i_lfs->lfs_ronly) {
		bp->b_flags &= ~(B_DELWRI | B_READ | B_ERROR);
		LFS_UNLOCK_BUF(bp);
		if (bp->b_flags & B_CALL)
			bp->b_flags &= ~B_BUSY;
		else
			brelse(bp);
		return EROFS;
	}

	/*
	 * Set the delayed write flag and use reassignbuf to move the buffer
	 * from the clean list to the dirty one.
	 *
	 * Set the B_LOCKED flag and unlock the buffer, causing brelse to move
	 * the buffer onto the LOCKED free list.  This is necessary, otherwise
	 * getnewbuf() would try to reclaim the buffers using bawrite, which
	 * isn't going to work.
	 *
	 * XXX we don't let meta-data writes run out of space because they can
	 * come from the segment writer.  We need to make sure that there is
	 * enough space reserved so that there's room to write meta-data
	 * blocks.
	 */
	if (!(bp->b_flags & B_LOCKED)) {
		fs = VFSTOUFS(bp->b_vp->v_mount)->um_lfs;
		fsb = fragstofsb(fs, numfrags(fs, bp->b_bcount));

		ip = VTOI(bp->b_vp);
		if (flags & BW_CLEAN) {
			LFS_SET_UINO(ip, IN_CLEANING);
		} else {
			LFS_SET_UINO(ip, IN_MODIFIED);
			if (bp->b_lblkno >= 0)
				LFS_SET_UINO(ip, IN_UPDATE);
		}
		fs->lfs_avail -= fsb;
		bp->b_flags |= B_DELWRI;

		LFS_LOCK_BUF(bp);
		bp->b_flags &= ~(B_READ | B_DONE | B_ERROR);
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}

	if (bp->b_flags & B_CALL)
		bp->b_flags &= ~B_BUSY;
	else
		brelse(bp);

	return (0);
}
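
/*
 * Call-path sketch (illustrative, not from the original file): normal
 * writers reach this function through the vnode interface, while the
 * cleaner passes BW_CLEAN for buffers it owns:
 *
 *	VOP_BWRITE(bp);			   ordinary delayed write; on an
 *					   LFS vnode this lands in lfs_bwrite()
 *	lfs_bwrite_ext(bp, BW_CLEAN);	   cleaner-owned (B_CALL) buffer,
 *					   marks the inode IN_CLEANING
 */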

void
lfs_flush_fs(struct lfs *fs, int flags)
{
	if (fs->lfs_ronly == 0 && fs->lfs_dirops == 0)
	{
		/* disallow dirops during flush */
		fs->lfs_writer++;

		/*
		 * We set the queue to 0 here because we
		 * are about to write all the dirty
		 * buffers we have.  If more come in
		 * while we're writing the segment, they
		 * may not get written, so we want the
		 * count to reflect these new writes
		 * after the segwrite completes.
		 */
		if (lfs_dostats)
			++lfs_stats.flush_invoked;
		lfs_segwrite(fs->lfs_ivnode->v_mount, flags);

		/* XXX KS - allow dirops again */
		if (--fs->lfs_writer == 0)
			wakeup(&fs->lfs_dirops);
	}
}

/*
 * XXX
 * This routine flushes buffers out of the B_LOCKED queue when LFS has too
 * many locked down.  Eventually the pageout daemon will simply call LFS
 * when pages need to be reclaimed.  Note, we have one static count of locked
 * buffers, so we can't have more than a single file system.  To make this
 * work for multiple file systems, put the count into the mount structure.
 */
void
lfs_flush(struct lfs *fs, int flags)
{
	struct mount *mp, *nmp;

	if (lfs_dostats)
		++lfs_stats.write_exceeded;
	if (lfs_writing && flags == 0) {	/* XXX flags */
#ifdef DEBUG_LFS
		printf("lfs_flush: not flushing because another flush is active\n");
#endif
		return;
	}
	lfs_writing = 1;

	simple_lock(&mountlist_slock);
	for (mp = mountlist.cqh_first; mp != (void *)&mountlist; mp = nmp) {
		if (vfs_busy(mp, LK_NOWAIT, &mountlist_slock)) {
			nmp = mp->mnt_list.cqe_next;
			continue;
		}
		if (strncmp(&mp->mnt_stat.f_fstypename[0], MOUNT_LFS, MFSNAMELEN) == 0)
			lfs_flush_fs(((struct ufsmount *)mp->mnt_data)->ufsmount_u.lfs, flags);
		simple_lock(&mountlist_slock);
		nmp = mp->mnt_list.cqe_next;
		vfs_unbusy(mp);
	}
	simple_unlock(&mountlist_slock);

	LFS_DEBUG_COUNTLOCKED("flush");

	lfs_writing = 0;
}
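
/*
 * Sketch of the fix the XXX comment above suggests (hypothetical, not
 * implemented here): moving the counters into the per-filesystem
 * structure would look roughly like
 *
 *	struct lfs {
 *		...
 *		int	lfs_locked_count;	locked bufs on this fs
 *		long	lfs_locked_bytes;	bytes thereof
 *	};
 *
 * with lfs_check() and lfs_countlocked() taking their totals from the
 * fs argument instead of the global locked_queue_* variables.
 */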

#define INOCOUNT(fs) howmany((fs)->lfs_uinodes, INOPB(fs))
#define INOBYTES(fs) ((fs)->lfs_uinodes * DINODE_SIZE)
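
/*
 * Worked example (hypothetical values): with 128-byte dinodes and 8KB
 * blocks, INOPB(fs) == 64; 100 dirty inodes then account for
 * INOCOUNT == howmany(100, 64) == 2 buffers and
 * INOBYTES == 100 * 128 == 12800 bytes against the locked-queue limits.
 */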

int
lfs_check(struct vnode *vp, ufs_daddr_t blkno, int flags)
{
	int error;
	struct lfs *fs;
	struct inode *ip;
	extern int lfs_dirvcount;

	error = 0;
	ip = VTOI(vp);

	/* If out of buffers, wait on writer */
	/* XXX KS - if it's the Ifile, we're probably the cleaner! */
	if (ip->i_number == LFS_IFILE_INUM)
		return 0;
	/* If we're being called from inside a dirop, don't sleep */
	if (ip->i_flag & IN_ADIROP)
		return 0;

	fs = ip->i_lfs;

	/*
	 * If we would flush below, but dirops are active, sleep.
	 * Note that a dirop cannot ever reach this code!
	 */
	while (fs->lfs_dirops > 0 &&
	       (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
		lfs_dirvcount > LFS_MAXDIROP || fs->lfs_diropwait > 0))
	{
		++fs->lfs_diropwait;
		tsleep(&fs->lfs_writer, PRIBIO+1, "bufdirop", 0);
		--fs->lfs_diropwait;
	}

	if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
	    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES ||
	    lfs_dirvcount > LFS_MAXDIROP || fs->lfs_diropwait > 0)
	{
		++fs->lfs_writer;
		lfs_flush(fs, flags);
		if (--fs->lfs_writer == 0)
			wakeup(&fs->lfs_dirops);
	}

	while (locked_queue_count + INOCOUNT(fs) > LFS_WAIT_BUFS
	       || locked_queue_bytes + INOBYTES(fs) > LFS_WAIT_BYTES)
	{
		if (lfs_dostats)
			++lfs_stats.wait_exceeded;
#ifdef DEBUG_LFS
		printf("lfs_check: waiting: count=%d, bytes=%ld\n",
			locked_queue_count, locked_queue_bytes);
#endif
		error = tsleep(&locked_queue_count, PCATCH | PUSER,
			       "buffers", hz * LFS_BUFWAIT);
		if (error != EWOULDBLOCK)
			break;
		/*
		 * lfs_flush might not flush all the buffers, if some of the
		 * inodes were locked or if most of them were Ifile blocks
		 * and we weren't asked to checkpoint.  Try flushing again
		 * to keep us from blocking indefinitely.
		 */
		if (locked_queue_count + INOCOUNT(fs) > LFS_MAX_BUFS ||
		    locked_queue_bytes + INOBYTES(fs) > LFS_MAX_BYTES)
		{
			++fs->lfs_writer;
			lfs_flush(fs, flags | SEGM_CKP);
			if (--fs->lfs_writer == 0)
				wakeup(&fs->lfs_dirops);
		}
	}
	return (error);
}

/*
 * Allocate a new buffer header.
 */
#ifdef MALLOCLOG
# define DOMALLOC(S, T, F) _malloc((S), (T), (F), file, line)
struct buf *
lfs_newbuf_malloclog(struct lfs *fs, struct vnode *vp, ufs_daddr_t daddr,
    size_t size, char *file, int line)
#else
# define DOMALLOC(S, T, F) malloc((S), (T), (F))
struct buf *
lfs_newbuf(struct lfs *fs, struct vnode *vp, ufs_daddr_t daddr, size_t size)
#endif
{
	struct buf *bp;
	size_t nbytes;
	int s;

	nbytes = roundup(size, fsbtob(fs, 1));

	bp = DOMALLOC(sizeof(struct buf), M_SEGMENT, M_WAITOK);
	bzero(bp, sizeof(struct buf));
	if (nbytes) {
		bp->b_data = DOMALLOC(nbytes, M_SEGMENT, M_WAITOK);
		bzero(bp->b_data, nbytes);
	}
#ifdef DIAGNOSTIC
	if (vp == NULL)
		panic("vp is NULL in lfs_newbuf");
	if (bp == NULL)
		panic("bp is NULL after malloc in lfs_newbuf");
#endif
	s = splbio();
	bgetvp(vp, bp);
	splx(s);

	bp->b_saveaddr = (caddr_t)fs;
	bp->b_bufsize = size;
	bp->b_bcount = size;
	bp->b_lblkno = daddr;
	bp->b_blkno = daddr;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_iodone = lfs_callback;
	bp->b_flags |= B_BUSY | B_CALL | B_NOCACHE;

	return (bp);
}
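
/*
 * Usage sketch (illustrative only): these malloced, B_CALL buffers are
 * created for segment metadata and released with lfs_freebuf() rather
 * than brelse(), e.g.
 *
 *	bp = lfs_newbuf(fs, fs->lfs_ivnode, daddr, fs->lfs_sumsize);
 *	... fill in the summary block ...
 *	lfs_freebuf(bp);		when no I/O was started on it
 *
 * Buffers that do go through I/O are instead freed from lfs_callback()
 * once the write completes.
 */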

#ifdef MALLOCLOG
# define DOFREE(A, T) _free((A), (T), file, line)
void
lfs_freebuf_malloclog(struct buf *bp, char *file, int line)
#else
# define DOFREE(A, T) free((A), (T))
void
lfs_freebuf(struct buf *bp)
#endif
{
	int s;

	s = splbio();
	if (bp->b_vp)
		brelvp(bp);
	splx(s);
	if (!(bp->b_flags & B_INVAL)) { /* B_INVAL indicates a "fake" buffer */
		DOFREE(bp->b_data, M_SEGMENT);
		bp->b_data = NULL;
	}
	DOFREE(bp, M_SEGMENT);
}

/*
 * Definitions for the buffer free lists.
 */
#define BQUEUES		4		/* number of free buffer queues */

#define BQ_LOCKED	0		/* super-blocks &c */
#define BQ_LRU		1		/* lru, useful buffers */
#define BQ_AGE		2		/* rubbish */
#define BQ_EMPTY	3		/* buffer headers with no memory */

extern TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];

/*
 * Return a count of buffers on the "locked" queue.
 * Don't count malloced buffers, since they don't detract from the total.
 */
void
lfs_countlocked(int *count, long *bytes, char *msg)
{
	struct buf *bp;
	int n = 0;
	long int size = 0L;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next) {
		if (bp->b_flags & B_CALL) /* Malloced buffer */
			continue;
		n++;
		size += bp->b_bufsize;
#ifdef DEBUG_LOCKED_LIST
		if (n > nbuf)
			panic("lfs_countlocked: this can't happen: more"
			      " buffers locked than exist");
#endif
	}
#ifdef DEBUG_LOCKED_LIST
	/* Theoretically this function never really does anything */
	if (n != *count)
		printf("lfs_countlocked: %s: adjusted buf count from %d to %d\n",
		       msg, *count, n);
	if (size != *bytes)
		printf("lfs_countlocked: %s: adjusted byte count from %ld to %ld\n",
		       msg, *bytes, size);
#endif
	*count = n;
	*bytes = size;
	return;
}
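
/*
 * Usage sketch (assumption: this is roughly how the LFS_DEBUG_COUNTLOCKED
 * macro used in lfs_flush() is defined in lfs.h when DEBUG_LOCKED_LIST
 * is enabled; otherwise it expands to nothing):
 *
 *	#ifdef DEBUG_LOCKED_LIST
 *	# define LFS_DEBUG_COUNTLOCKED(m)				\
 *		lfs_countlocked(&locked_queue_count,			\
 *		    &locked_queue_bytes, (m))
 *	#else
 *	# define LFS_DEBUG_COUNTLOCKED(m)
 *	#endif
 */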