/*	$NetBSD: vfs_bio.c,v 1.63 2000/01/21 23:22:24 thorpej Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include <vm/vm.h>

#include <miscfs/specfs/specdev.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
struct bio_ops bioops;	/* I/O operation notification */
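
/*
 * Example (illustrative only): the hash chain that may hold the buffer
 * for a given vnode and logical block number is located with
 *
 *	struct bufhashhdr *bh = BUFHASH(vp, blkno);
 *
 * and then searched, as incore() below does.
 */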

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

static __inline struct buf *bio_doread __P((struct vnode *, daddr_t, int,
					    struct ucred *, int));
int count_lock_queue __P((void));

void
bremfree(bp)
	struct buf *bp;
{
	int s = splbio();
	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
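	/*
	 * For reference, a sketch of the <sys/queue.h> tail queue layout
	 * this relies on: the queue head keeps tqh_last pointing at the
	 * tqe_next field of the final element, so for the last buffer on
	 * queue dp,
	 *
	 *	dp->tqh_last == &bp->b_freelist.tqe_next
	 *
	 * which is exactly the test the loop below uses to find bp's queue.
	 */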
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);

	splx(s);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register struct buf *bp;
	struct bqueues *dp;
	register int i;
	int base, residual;

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, M_WAITOK, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
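	/*
	 * Example: with bufpages = 205 and nbuf = 100, base = 2 and
	 * residual = 5, so the loop below gives the first 5 buffers
	 * three pages of memory apiece and the other 95 two pages each.
	 */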
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		memset((char *)bp, 0, sizeof(*bp));
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
		LIST_INIT(&bp->b_dep);
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * NBPG;
		else
			bp->b_bufsize = base * NBPG;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}

static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	register struct buf *bp;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * If the buffer does not have valid data, start a read.
	 * Note that if the buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer (keeping credentials). */
		SET(bp->b_flags, B_READ | async);
		if (cred != NOCRED && bp->b_rcred == NOCRED) {
			crhold(cred);
			bp->b_rcred = cred;
		}
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * Delayed write buffers are found in the cache and have
	 * valid contents.  Also, B_ERROR is not set, otherwise
	 * getblk() would not have returned them.
	 */
	if (ISSET(bp->b_flags, B_DONE|B_DELWRI))
		return (0);

	/*
	 * Otherwise, we had to start a read for it; wait until
	 * it's valid and return the result.
	 */
	return (biowait(bp));
}
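
/*
 * Example caller (an illustrative sketch, not part of this file): a
 * file system read path would typically use bread() like this, where
 * "lbn" and "bsize" stand in for the caller's block number and size:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... use bp->b_data ...
 *	brelse(bp);
 */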

/*
 * Read-ahead multiple disk blocks.  The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/*
	 * Delayed write buffers are found in the cache and have
	 * valid contents.  Also, B_ERROR is not set, otherwise
	 * getblk() would not have returned them.
	 */
	if (ISSET(bp->b_flags, B_DONE|B_DELWRI))
		return (0);

	/*
	 * Otherwise, we had to start a read for it; wait until
	 * it's valid and return the result.
	 */
	return (biowait(bp));
}
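
/*
 * Example caller (illustrative sketch): read block "lbn" and kick off
 * asynchronous read-ahead for the two blocks that follow it; all of
 * the names here are hypothetical:
 *
 *	daddr_t ralbns[2] = { lbn + 1, lbn + 2 };
 *	int rasizes[2] = { bsize, bsize };
 *
 *	error = breadn(vp, lbn, bsize, ralbns, rasizes, 2, NOCRED, &bp);
 */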

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed, s;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	struct vnode *vp;
	struct mount *mp;

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && bp->b_vp && bp->b_vp->v_mount &&
	    ISSET(bp->b_vp->v_mount->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if ((vp = bp->b_vp) != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
		if (mp != NULL) {
			if (sync)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	s = splbio();

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	bp->b_vp->v_numoutput++;
	splx(s);

	SET(bp->b_flags, B_WRITEINPROG);
	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}
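
/*
 * Example caller (illustrative sketch): read, modify, and synchronously
 * rewrite a block; on the synchronous path bwrite() releases the buffer
 * itself, so no brelse() is needed afterward.  "lbn" and "bsize" are
 * hypothetical:
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... modify bp->b_data ...
 *	error = bwrite(bp);
 */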

int
vn_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	int s;

	/* If this is a tape block, write the block now. */
	/* XXX NOTE: the memory filesystem usurps major device */
	/* XXX number 255, which is a bad idea. */
	if (bp->b_dev != NODEV &&
	    major(bp->b_dev) != 255 &&	/* XXX - MFS buffers! */
	    bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_NEEDCOMMIT|B_DONE);
	splx(s);

	brelse(bp);
}

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp);
}

/*
 * Ordered block write; asynchronous, but I/O will occur in order queued.
 */
void
bowrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC | B_ORDERED);
	VOP_BWRITE(bp);
}
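
/*
 * Summary of the write interfaces above, for reference:
 *
 *	bwrite()	start the write; if synchronous, wait for it to
 *			complete and release the buffer
 *	bdwrite()	mark the buffer dirty and release it without
 *			starting any I/O (tapes excepted)
 *	bawrite()	start the write asynchronously; the buffer is
 *			released by biodone() when the I/O completes
 *	bowrite()	like bawrite(), but the I/O must occur in the
 *			order queued
 */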

/*
 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
 */
void
bdirty(bp)
	struct buf *bp;
{
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	int s;

	s = splbio();

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	splx(s);
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
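/*
 * Queue placement, in brief: buffers with no memory go on the head of
 * BQ_EMPTY; invalid buffers go on the head of BQ_AGE; valid buffers go
 * on the tail of BQ_LOCKED (if B_LOCKED), BQ_AGE (if B_AGE), or BQ_LRU.
 */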
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Block disk interrupts. */
	s = splbio();

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE))
			goto already_queued;
		else
			bremfree(bp);
	}

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (ISSET(bp->b_flags, B_AGE))
			/* stale but valid data */
			bufq = &bufqueues[BQ_AGE];
		else
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE|B_ORDERED);

	/* Allow disk interrupts. */
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid; buffers marked invalid
 * are never returned.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	bp = BUFHASH(vp, blkno)->lh_first;

	/* Search hash chain */
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (0);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	register struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct bufhashhdr *bh;
	struct buf *bp;
	int s, err;

	/*
	 * XXX
	 * The following is an inlined version of 'incore()', but with
	 * the 'invalid' test moved to after the 'busy' test.  It's
	 * necessary because there are some cases in which the NFS
	 * code sets B_INVAL prior to writing data to the server, but
	 * in which the buffers actually contain valid data.  In this
	 * case, we can't allow the system to allocate a new buffer for
	 * the block until the write is finished.
	 */
	bh = BUFHASH(vp, blkno);
start:
	bp = bh->lh_first;
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;

		s = splbio();
		if (ISSET(bp->b_flags, B_BUSY)) {
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
			    slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}

		if (!ISSET(bp->b_flags, B_INVAL)) {
#ifdef DIAGNOSTIC
			if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
			    bp->b_bcount < size)
				panic("getblk: block size invariant failed");
#endif
			SET(bp->b_flags, B_BUSY);
			bremfree(bp);
			splx(s);
			break;
		}
		splx(s);
	}

	if (bp == NULL) {
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL)
			goto start;
		binshash(bp, bh);
		bp->b_blkno = bp->b_lblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	}
	allocbuf(bp, size);
	return (bp);
}
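
/*
 * Example caller (illustrative sketch): when a block is about to be
 * completely overwritten there is no need to read it first, so a file
 * system allocates it with getblk() and fills it directly ("lbn" and
 * "bsize" are hypothetical):
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	... fill all of bp->b_data ...
 *	bawrite(bp);
 */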

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0)) == 0)
		;
	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	allocbuf(bp, size);

	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
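/*
 * Example: with NBPG = 4096, allocbuf(bp, 6000) rounds the allocation
 * up to 8192 bytes (two pages), stealing pages from other buffers via
 * getnewbuf() if the buffer must grow, and sets b_bcount to 6000.
 */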
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf *nbp;
	vsize_t desired_size;
	int s;

	desired_size = roundup(size, NBPG);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;
		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
		    bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif

		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) {
			/* No free buffer head */
			splx(s);
			goto out;
		}
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY);
		splx(s);

		/* move the pages to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;
		SET(nbp->b_flags, B_INVAL);

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	register struct buf *bp;
	int s;

start:
	s = splbio();
	if ((bp = bufqueues[BQ_AGE].tqh_first) != NULL ||
	    (bp = bufqueues[BQ_LRU].tqh_first) != NULL) {
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
		splx(s);
		return (0);
	}

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		splx(s);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/* If buffer was a delayed write, start it, and go back to the top. */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		splx(s);
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		bawrite(bp);
		goto start;
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);
	splx(s);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;

	/* nuke any credentials we were holding */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	bremhash(bp);
	return (bp);
}
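
/*
 * Note for callers: getnewbuf() may return NULL (the sleep may have
 * been interrupted, or timed out), so callers either retry or bail
 * out; e.g. geteblk() above simply loops:
 *
 *	while ((bp = getnewbuf(0, 0)) == NULL)
 *		;
 */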

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	while (!ISSET(bp->b_flags, B_DONE))
		tsleep(bp, PRIBIO + 1, "biowait", 0);
	splx(s);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		return (EINTR);
	} else if (ISSET(bp->b_flags, B_ERROR))
		return (bp->b_error ? bp->b_error : EIO);
	else
		return (0);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	int s = splbio();

	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* on a write, note completion */
		vwakeup(bp);

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC))	/* if async, release */
			brelse(bp);
		else {				/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
		}
	}

	splx(s);
}
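
/*
 * Example caller (hypothetical driver code): a disk driver's interrupt
 * handler finishes a transfer roughly like this, where "hw_error" is a
 * stand-in for the driver's own error check:
 *
 *	bp->b_resid = 0;
 *	if (hw_error) {
 *		bp->b_error = EIO;
 *		SET(bp->b_flags, B_ERROR);
 *	}
 *	biodone(bp);
 */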

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	register struct buf *bp;
	register int n = 0;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next)
		n++;
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	register struct buf *bp;
	register struct bqueues *dp;
	int counts[MAXBSIZE/NBPG+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/NBPG; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/NBPG]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/NBPG; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * NBPG, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */