/*	$NetBSD: vfs_bio.c,v 1.36 1995/06/20 10:42:33 cgd Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register struct buf *bp;
	struct bqueues *dp;
	register int i;
	int base, residual;

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero((char *)bp, sizeof *bp);
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}

__inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	register struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * If the buffer does not have valid data, start a read.
	 * Note that if the buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer (keeping credentials). */
		SET(bp->b_flags, B_READ | async);
		if (cred != NOCRED && bp->b_rcred == NOCRED) {
			crhold(cred);
			bp->b_rcred = cred;
		}
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		curproc->p_stats->p_ru.ru_inblock++;	/* XXX */
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}
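
/*
 * Usage sketch (hypothetical caller): a typical synchronous, cached
 * read.  Note that a buffer is returned even on error and must still
 * be brelse()'d.  "vp", "bn" and "bsize" are placeholders.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if ((error = bread(vp, bn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine bp->b_data ...
 *	brelse(bp);
 */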

/*
 * Read-ahead multiple disk blocks.  The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block. */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Wait for the first (synchronous) read to complete. */
	return (biowait(bp));
}
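
/*
 * Usage sketch (hypothetical caller): read one block synchronously while
 * prefetching the next one asynchronously.  "vp", "bn" and "bsize" are
 * placeholders.
 *
 *	daddr_t rablk = bn + 1;
 *	int rasize = bsize;
 *	struct buf *bp;
 *	int error;
 *
 *	error = breadn(vp, bn, bsize, &rablk, &rasize, 1, NOCRED, &bp);
 */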

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed;

	/* Remember buffer type, to switch on it later. */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	if (!sync) {
		/*
		 * If not synchronous, pay for the I/O operation and make
		 * sure the buf is on the correct vnode queue.  We have
		 * to do this now, because if we don't, the vnode may not
		 * be properly notified that its I/O has completed.
		 */
		if (wasdelayed)
			reassignbuf(bp, bp->b_vp);
		else
			curproc->p_stats->p_ru.ru_oublock++;
	}

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	SET(bp->b_flags, B_WRITEINPROG);
	bp->b_vp->v_numoutput++;
	VOP_STRATEGY(bp);

	if (sync) {
		/*
		 * If I/O was synchronous, wait for it to complete.
		 */
		rv = biowait(bp);

		/*
		 * Pay for the I/O operation, if it's not been paid for, and
		 * make sure it's on the correct vnode queue.  (Async
		 * operations were paid for above.)
		 */
		if (wasdelayed)
			reassignbuf(bp, bp->b_vp);
		else
			curproc->p_stats->p_ru.ru_oublock++;

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(ap)
	struct vop_bwrite_args *ap;
{

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		curproc->p_stats->p_ru.ru_oublock++;	/* XXX */
		reassignbuf(bp, bp->b_vp);
	}

	/* If this is a tape block, write the block now. */
	if (bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bwrite(bp);
		return;
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	SET(bp->b_flags, B_DONE);
	brelse(bp);
}
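
/*
 * Usage sketch (hypothetical caller): read a block, modify part of it,
 * and mark it dirty without starting I/O, in the expectation that it
 * will be written again soon.  "vp", "bn", "bsize", "data", "off" and
 * "len" are placeholders.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if ((error = bread(vp, bn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	bcopy(data, bp->b_data + off, len);
 *	bdwrite(bp);
 */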

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp);
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}

	/* Block disk interrupts. */
	s = splbio();

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (bp->b_vp)
			brelvp(bp);
		CLR(bp->b_flags, B_DELWRI);
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (ISSET(bp->b_flags, B_AGE))
			/* stale but valid data */
			bufq = &bufqueues[BQ_AGE];
		else
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		binstailfree(bp, bufq);
	}

	/* Unlock the buffer. */
	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_BUSY | B_NOCACHE));

	/* Allow disk interrupts. */
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there and not
 * marked invalid, return a pointer to it; otherwise return NULL.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	bp = BUFHASH(vp, blkno)->lh_first;

	/* Search hash chain. */
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	register struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct buf *bp;
	int s, err;

start:
	s = splbio();
	if (bp = incore(vp, blkno)) {	/* XXX NFS VOP_BWRITE foolishness */
		if (ISSET(bp->b_flags, B_BUSY)) {
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
			    slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
		SET(bp->b_flags, (B_BUSY | B_CACHE));
		bremfree(bp);
		splx(s);
		allocbuf(bp, size);
	} else {
		splx(s);
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL)
			goto start;
		binshash(bp, BUFHASH(vp, blkno));
		allocbuf(bp, size);
		bp->b_blkno = bp->b_lblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	}
	return (bp);
}
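
/*
 * Usage sketch (hypothetical caller): grab a cache buffer for a block
 * that will be overwritten in its entirety, so no read is needed, then
 * write it out synchronously.  bwrite() releases the buffer when done.
 * "vp", "bn" and "bsize" are placeholders.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	bp = getblk(vp, bn, bsize, 0, 0);
 *	bzero(bp->b_data, bsize);
 *	... fill in bp->b_data ...
 *	error = bwrite(bp);
 */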

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0)) == 0)
		;
	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	allocbuf(bp, size);

	return (bp);
}
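
/*
 * Usage sketch (hypothetical caller): borrow a buffer that belongs to
 * no vnode, e.g. as scratch space for a one-shot transfer, and release
 * it when done.  "bsize" is a placeholder.
 *
 *	struct buf *bp;
 *
 *	bp = geteblk(bsize);
 *	... use bp->b_data as scratch space ...
 *	brelse(bp);
 */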

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf *nbp;
	vm_size_t desired_size;
	int s;

	desired_size = roundup(size, CLBYTES);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;
		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
		    bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif

		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move the excess pages onto it, and put it on front of the
	 * AGE queue.  If there are no free buffer headers, leave the
	 * buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) {
			/* No free buffer head */
			splx(s);
			goto out;
		}
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY);
		splx(s);

		/* move the pages to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;
		SET(nbp->b_flags, B_INVAL);

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	register struct buf *bp;
	int s;

start:
	s = splbio();
	if ((bp = bufqueues[BQ_AGE].tqh_first) != NULL ||
	    (bp = bufqueues[BQ_LRU].tqh_first) != NULL) {
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
		splx(s);
		return (0);
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);
	splx(s);

	/* If buffer was a delayed write, start it, and go back to the top. */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		bawrite(bp);
		goto start;
	}

	/* disassociate us from our vnode, if we had one... */
	s = splbio();
	if (bp->b_vp)
		brelvp(bp);
	splx(s);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;

	/* nuke any credentials we were holding */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	bremhash(bp);
	return (bp);
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	while (!ISSET(bp->b_flags, B_DONE))
		tsleep(bp, PRIBIO + 1, "biowait", 0);
	splx(s);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		return (EINTR);
	} else if (ISSET(bp->b_flags, B_ERROR))
		return (bp->b_error ? bp->b_error : EIO);
	else
		return (0);
}
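
/*
 * Usage sketch (hypothetical caller): hand a prepared, busy buffer to
 * the driver and wait for completion, as bread() and bwrite() do
 * internally.
 *
 *	SET(bp->b_flags, B_READ);
 *	VOP_STRATEGY(bp);
 *	error = biowait(bp);
 */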

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (!ISSET(bp->b_flags, B_READ))	/* a write: notify the vnode */
		vwakeup(bp);

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else if (ISSET(bp->b_flags, B_ASYNC))	/* if async, release it */
		brelse(bp);
	else {					/* or just wakeup the buffer */
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}
}
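
/*
 * Driver-side sketch (hypothetical): a disk driver's interrupt handler
 * completes a transfer by recording the residual count and any error,
 * then calling biodone() to wake biowait()ers, release an async buffer,
 * or fire the B_CALL callout.  "hard_error" is a placeholder.
 *
 *	bp->b_resid = 0;
 *	if (hard_error) {
 *		bp->b_error = EIO;
 *		SET(bp->b_flags, B_ERROR);
 *	}
 *	biodone(bp);
 */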

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	register struct buf *bp;
	register int n = 0;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next)
		n++;
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	register struct buf *bp;
	register struct bqueues *dp;
	int counts[MAXBSIZE/CLBYTES+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */