/*	$NetBSD: vfs_bio.c,v 1.55 1998/08/04 04:03:18 perry Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include <vm/vm.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
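/* mask for BUFHASH(): hash table size minus one, set up by hashinit() */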
u_long	bufhash;

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
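/* nonzero while a process sleeps in getnewbuf() waiting for any free buffer */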
int needbuffer;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

static __inline struct buf *bio_doread __P((struct vnode *, daddr_t, int,
					    struct ucred *, int));
int count_lock_queue __P((void));

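/*
 * Remove a buffer from the free list it is on.
 */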
void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register struct buf *bp;
	struct bqueues *dp;
	register int i;
	int base, residual;

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, M_WAITOK, &bufhash);
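	/*
	 * Spread bufpages as evenly as possible over the nbuf buffer
	 * headers: every buffer gets `base' pages, and the first
	 * `residual' buffers each get one page extra.
	 */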
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		memset((char *)bp, 0, sizeof(*bp));
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}

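/*
 * Common code for bread() and breadn(): get a buffer for the block with
 * getblk() and, if its contents are not yet valid, start a read for it.
 */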
static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	register struct buf *bp;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * If the buffer does not have valid data, start a read.
	 * Note that if the buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer (keeping credentials). */
		SET(bp->b_flags, B_READ | async);
		if (cred != NOCRED && bp->b_rcred == NOCRED) {
			crhold(cred);
			bp->b_rcred = cred;
		}
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * Delayed write buffers are found in the cache and have
	 * valid contents.  Also, B_ERROR is not set, otherwise
	 * getblk() would not have returned them.
	 */
	if (ISSET(bp->b_flags, B_DELWRI))
		return (0);

	/*
	 * Otherwise, we had to start a read for it; wait until
	 * it's valid and return the result.
	 */
	return (biowait(bp));
}

/*
 * Read-ahead multiple disk blocks.  The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/*
	 * Delayed write buffers are found in the cache and have
	 * valid contents.  Also, B_ERROR is not set, otherwise
	 * getblk() would not have returned them.  Mark such a buffer
	 * done, so that the biowait() below returns immediately.
	 */
	if (ISSET(bp->b_flags, B_DELWRI))
		SET(bp->b_flags, B_DONE);

	/*
	 * Otherwise, we had to start a read for it; wait until
	 * it's valid and return the result.
	 */
	return (biowait(bp));
}

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56).
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed, s;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && bp->b_vp && bp->b_vp->v_mount &&
	    ISSET(bp->b_vp->v_mount->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	s = splbio();

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	bp->b_vp->v_numoutput++;
	splx(s);

	SET(bp->b_flags, B_WRITEINPROG);
	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

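/*
 * Generic vnode bwrite operation: unpack the VOP arguments and
 * hand the buffer to bwrite().
 */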
int
vn_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	int s;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */

	/* If this is a tape block, write the block now. */
	/* XXX NOTE: the memory filesystem usurps major device */
	/* XXX number 255, which is a bad idea. */
	if (bp->b_dev != NODEV &&
	    major(bp->b_dev) != 255 &&	/* XXX - MFS buffers! */
	    bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}

	/* The "write" is now done, so mark and release the buffer. */
	CLR(bp->b_flags, B_NEEDCOMMIT);
	SET(bp->b_flags, B_DONE);
	brelse(bp);
}

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp);
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}

	/* Block disk interrupts. */
	s = splbio();

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE))
			goto already_queued;
		else
			bremfree(bp);
	}

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (bp->b_vp)
			brelvp(bp);
		CLR(bp->b_flags, B_DELWRI);
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (ISSET(bp->b_flags, B_AGE))
			/* stale but valid data */
			bufq = &bufqueues[BQ_AGE];
		else
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_BUSY | B_NOCACHE));

	/* Allow disk interrupts. */
	splx(s);
}


/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid; invalid buffers are
 * never returned by this routine.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	bp = BUFHASH(vp, blkno)->lh_first;

	/* Search hash chain */
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (0);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	register struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct bufhashhdr *bh;
	struct buf *bp;
	int s, err;

	/*
	 * XXX
	 * The following is an inlined version of 'incore()', but with
	 * the 'invalid' test moved to after the 'busy' test.  It's
	 * necessary because there are some cases in which the NFS
	 * code sets B_INVAL prior to writing data to the server, but
	 * in which the buffers actually contain valid data.  In this
	 * case, we can't allow the system to allocate a new buffer for
	 * the block until the write is finished.
	 */
	bh = BUFHASH(vp, blkno);
start:
	bp = bh->lh_first;
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;

		s = splbio();
		if (ISSET(bp->b_flags, B_BUSY)) {
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
			    slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}

		if (!ISSET(bp->b_flags, B_INVAL)) {
			SET(bp->b_flags, (B_BUSY | B_CACHE));
			bremfree(bp);
			splx(s);
			break;
		}
		splx(s);
	}

	if (bp == NULL) {
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL)
			goto start;
		binshash(bp, bh);
		bp->b_blkno = bp->b_lblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	}
	allocbuf(bp, size);
	return (bp);
}

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;

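	/*
	 * getnewbuf() sleeps and returns NULL when it had to wait for
	 * a buffer to be freed, so simply retry until one is obtained.
	 */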
	while ((bp = getnewbuf(0, 0)) == 0)
		;
	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	allocbuf(bp, size);

	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf *nbp;
	vm_size_t desired_size;
	int s;

	desired_size = roundup(size, CLBYTES);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;
		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
		    bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif

		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) {
			/* No free buffer head */
			splx(s);
			goto out;
		}
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY);
		splx(s);

		/* move the page to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;
		SET(nbp->b_flags, B_INVAL);

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is given to the AGE list, then the LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	register struct buf *bp;
	int s;

start:
	s = splbio();
	if ((bp = bufqueues[BQ_AGE].tqh_first) != NULL ||
	    (bp = bufqueues[BQ_LRU].tqh_first) != NULL) {
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
		splx(s);
		return (0);
	}

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		splx(s);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/* If buffer was a delayed write, start it, and go back to the top. */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		splx(s);
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		bawrite(bp);
		goto start;
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);
	splx(s);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;

	/* nuke any credentials we were holding */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	bremhash(bp);
	return (bp);
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	while (!ISSET(bp->b_flags, B_DONE))
		tsleep(bp, PRIBIO + 1, "biowait", 0);
	splx(s);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		return (EINTR);
	} else if (ISSET(bp->b_flags, B_ERROR))
		return (bp->b_error ? bp->b_error : EIO);
	else
		return (0);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., say on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (!ISSET(bp->b_flags, B_READ))	/* wake up vnode write waiters */
		vwakeup(bp);

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else if (ISSET(bp->b_flags, B_ASYNC))	/* if async, release it */
		brelse(bp);
	else {					/* or just wakeup the buffer */
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}
}

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	register struct buf *bp;
	register int n = 0;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next)
		n++;
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	register struct buf *bp;
	register struct bqueues *dp;
	int counts[MAXBSIZE/CLBYTES+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */