/*	$NetBSD: vfs_bio.c,v 1.45 1996/06/17 22:21:31 pk Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include <vm/vm.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4	/* number of free buffer queues */

#define	BQ_LOCKED	0	/* super-blocks &c */
#define	BQ_LRU		1	/* lru, useful buffers */
#define	BQ_AGE		2	/* rubbish */
#define	BQ_EMPTY	3	/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

static __inline struct buf *bio_doread __P((struct vnode *, daddr_t, int,
					    struct ucred *, int));
int count_lock_queue __P((void));

/*
 * Remove a buffer from whichever free list it is currently on.
 */
void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}
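
/*
 * Illustrative sketch, not compiled into the kernel: the tail-recovery
 * trick above relies on the <sys/queue.h> TAILQ invariant that, for the
 * last element on a queue, the head's tqh_last points at that element's
 * tqe_next field.  A hypothetical user-level check of that invariant:
 */
#if 0
#include <assert.h>
#include <sys/queue.h>

struct elem {
	TAILQ_ENTRY(elem) e_link;
};
TAILQ_HEAD(elemhead, elem);

void
check_tailq_tail()
{
	struct elemhead head;
	struct elem a, b;

	TAILQ_INIT(&head);
	TAILQ_INSERT_TAIL(&head, &a, e_link);
	TAILQ_INSERT_TAIL(&head, &b, e_link);

	/* b is last: its tqe_next is NULL and the head points at it. */
	assert(b.e_link.tqe_next == NULL);
	assert(head.tqh_last == &b.e_link.tqe_next);
}
#endif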

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register struct buf *bp;
	struct bqueues *dp;
	register int i;
	int base, residual;

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero((char *)bp, sizeof *bp);
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}
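
/*
 * Worked example of the distribution above (illustrative numbers only):
 * with bufpages = 10 and nbuf = 4, base = 2 and residual = 2, so buffers
 * 0 and 1 each get (2 + 1) * CLBYTES of memory and buffers 2 and 3 each
 * get 2 * CLBYTES; all ten clusters are accounted for (3 + 3 + 2 + 2).
 */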

static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	register struct buf *bp;

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * If the buffer does not have valid data, start a read.
	 * Note that if the buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer (keeping credentials). */
		SET(bp->b_flags, B_READ | async);
		if (cred != NOCRED && bp->b_rcred == NOCRED) {
			crhold(cred);
			bp->b_rcred = cred;
		}
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		curproc->p_stats->p_ru.ru_inblock++;	/* XXX */
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p. 54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}
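
/*
 * Hypothetical caller sketch (not part of this file; 'lbn' and 'bsize'
 * are illustrative): a file system typically reads a logical block and
 * releases the buffer on error, since bread() returns a valid buffer
 * even when the I/O fails:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine bp->b_data, then brelse(bp) ...
 */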

/*
 * Read-ahead multiple disk blocks.  The first is synchronous,
 * the rest asynchronous.  Trivial modification to the breada
 * algorithm presented in Bach (p. 55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block. */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Wait until the block read synchronously above is valid. */
	return (biowait(bp));
}
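
/*
 * Hypothetical caller sketch: read block 'lbn' synchronously and start
 * asynchronous read-ahead on the next two logical blocks ('lbn' and
 * 'bsize' are illustrative):
 *
 *	daddr_t rablks[2];
 *	int rasizes[2];
 *
 *	rablks[0] = lbn + 1; rasizes[0] = bsize;
 *	rablks[1] = lbn + 2; rasizes[1] = bsize;
 *	error = breadn(vp, lbn, bsize, rablks, rasizes, 2, NOCRED, &bp);
 */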

/*
 * Read with single-block read-ahead.  Defined in Bach (p. 55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p. 56).
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed, s;

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && bp->b_vp && bp->b_vp->v_mount &&
	    ISSET(bp->b_vp->v_mount->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}
	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	s = splbio();
	if (!sync) {
		/*
		 * If not synchronous, pay for the I/O operation and make
		 * sure the buf is on the correct vnode queue.  We have
		 * to do this now, because if we don't, the vnode may not
		 * be properly notified that its I/O has completed.
		 */
		if (wasdelayed)
			reassignbuf(bp, bp->b_vp);
		else
			curproc->p_stats->p_ru.ru_oublock++;
	}

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	bp->b_vp->v_numoutput++;
	splx(s);
	SET(bp->b_flags, B_WRITEINPROG);
	VOP_STRATEGY(bp);

	if (sync) {
		/*
		 * If I/O was synchronous, wait for it to complete.
		 */
		rv = biowait(bp);

		/*
		 * Pay for the I/O operation, if it's not been paid for, and
		 * make sure it's on the correct vnode queue.  (Asynchronous
		 * operations were paid for above.)
		 */
		s = splbio();
		if (wasdelayed)
			reassignbuf(bp, bp->b_vp);
		else
			curproc->p_stats->p_ru.ru_oublock++;
		splx(s);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

/*
 * Generic vnode bwrite operation; simply calls bwrite() on the buffer.
 */
int
vn_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	int s;

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		curproc->p_stats->p_ru.ru_oublock++;	/* XXX */
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}

	/* If this is a tape block, write the block now. */
	if (bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_NEEDCOMMIT);
	SET(bp->b_flags, B_DONE);
	brelse(bp);
}
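
/*
 * Hypothetical caller sketch: a small update that is likely to be
 * extended soon is marked dirty with bdwrite(), so that several such
 * updates can be batched into one physical write ('off', 'len', and
 * 'data' are illustrative):
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error == 0) {
 *		bcopy(data, (char *)bp->b_data + off, len);
 *		bdwrite(bp);
 *	}
 */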

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp);
}

/*
 * Release a buffer onto the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}

	/* Block disk interrupts. */
	s = splbio();

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put it on the head of the appropriate queue.
		 */
		if (bp->b_vp)
			brelvp(bp);
		CLR(bp->b_flags, B_DELWRI);
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (ISSET(bp->b_flags, B_AGE))
			/* stale but valid data */
			bufq = &bufqueues[BQ_AGE];
		else
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		binstailfree(bp, bufq);
	}

	/* Unlock the buffer. */
	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_BUSY | B_NOCACHE));

	/* Allow disk interrupts. */
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look at what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	bp = BUFHASH(vp, blkno)->lh_first;

	/* Search hash chain */
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (0);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	register struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct bufhashhdr *bh;
	struct buf *bp;
	int s, err;

	/*
	 * XXX
	 * The following is an inlined version of 'incore()', but with
	 * the 'invalid' test moved to after the 'busy' test.  It's
	 * necessary because there are some cases in which the NFS
	 * code sets B_INVAL prior to writing data to the server, but
	 * in which the buffers actually contain valid data.  In this
	 * case, we can't allow the system to allocate a new buffer for
	 * the block until the write is finished.
	 */
	bh = BUFHASH(vp, blkno);
start:
	bp = bh->lh_first;
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;

		s = splbio();
		if (ISSET(bp->b_flags, B_BUSY)) {
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
			    slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}

		if (!ISSET(bp->b_flags, B_INVAL)) {
			SET(bp->b_flags, (B_BUSY | B_CACHE));
			bremfree(bp);
			splx(s);
			break;
		}
		splx(s);
	}

	if (bp == NULL) {
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL)
			goto start;
		binshash(bp, bh);
		bp->b_blkno = bp->b_lblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	}
	allocbuf(bp, size);
	return (bp);
}
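
/*
 * Hypothetical caller sketch: to overwrite a whole block without
 * reading its old contents first, get the buffer, fill it, and
 * write it ('lbn' and 'bsize' are illustrative):
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	bzero(bp->b_data, bsize);
 *	... fill in bp->b_data ...
 *	error = bwrite(bp);
 */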

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0)) == 0)
		;
	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	allocbuf(bp, size);

	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf *nbp;
	vm_size_t desired_size;
	int s;

	desired_size = roundup(size, CLBYTES);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * memory from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;
		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
		    bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif

		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move the excess pages onto it, and put it on the front of
	 * the AGE queue.  If there are no free buffer headers, leave
	 * the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) {
			/* No free buffer head */
			splx(s);
			goto out;
		}
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY);
		splx(s);

		/* move the excess pages to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;
		SET(nbp->b_flags, B_INVAL);

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}
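
/*
 * Worked example (illustrative numbers): with CLBYTES = 4096, a call
 * of allocbuf(bp, 12000) on a one-cluster buffer computes desired_size
 * = roundup(12000, 4096) = 12288, steals 8192 bytes of pages from
 * victim buffers in the loop above, and finally sets b_bcount to the
 * caller's 12000 while b_bufsize stays at 12288.
 */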

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is given to the AGE list, then the LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	register struct buf *bp;
	int s;

start:
	s = splbio();
	if ((bp = bufqueues[BQ_AGE].tqh_first) != NULL ||
	    (bp = bufqueues[BQ_LRU].tqh_first) != NULL) {
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
		splx(s);
		return (0);
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/* If buffer was a delayed write, start it, and go back to the top. */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		splx(s);
		bawrite(bp);
		goto start;
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);
	splx(s);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;

	/* nuke any credentials we were holding */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	bremhash(bp);
	return (bp);
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	while (!ISSET(bp->b_flags, B_DONE))
		tsleep(bp, PRIBIO + 1, "biowait", 0);
	splx(s);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		return (EINTR);
	} else if (ISSET(bp->b_flags, B_ERROR))
		return (bp->b_error ? bp->b_error : EIO);
	else
		return (0);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. by the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., say on p. 247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async I/O too, and doesn't want the buffer brelse()'d.
 * (For the swap pager, that puts swap buffers on the free lists (!!!);
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (!ISSET(bp->b_flags, B_READ))	/* notify vnode of write done */
		vwakeup(bp);

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else if (ISSET(bp->b_flags, B_ASYNC))	/* if async, release it */
		brelse(bp);
	else {					/* or just wakeup the buffer */
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}
}
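
/*
 * Hypothetical caller sketch: a system process that wants a callback
 * at interrupt time instead of a wakeup sets b_iodone and B_CALL
 * before starting the I/O ('mydone' is an illustrative handler):
 *
 *	bp->b_iodone = mydone;
 *	SET(bp->b_flags, B_CALL | B_ASYNC);
 *	VOP_STRATEGY(bp);
 */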

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	register struct buf *bp;
	register int n = 0;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next)
		n++;
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	register struct buf *bp;
	register struct bqueues *dp;
	int counts[MAXBSIZE/CLBYTES+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */