/*	$NetBSD: vfs_bio.c,v 1.78 2002/02/10 23:14:18 chs Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison Wesley, 1989)
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.78 2002/02/10 23:14:18 chs Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

/* Macros to clear/set/test flags. */
#define SET(t, f)       (t) |= (f)
#define CLR(t, f)       (t) &= ~(f)
#define ISSET(t, f)     ((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define BUFHASH(dvp, lbn) \
        (&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
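/*
 * The hash key mixes the vnode pointer (shifted right to discard the
 * low-order address bits, which carry little variation) with the
 * logical block number; "bufhash", returned by hashinit() below, is a
 * power-of-two table size minus one, used here as a mask.
 */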
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long  bufhash;
struct bio_ops bioops;  /* I/O operation notification */

/*
 * Insq/Remq for the buffer hash lists.
 */
#define binshash(bp, dp)        LIST_INSERT_HEAD(dp, bp, b_hash)
#define bremhash(bp)            LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define BQUEUES         4               /* number of free buffer queues */

#define BQ_LOCKED       0               /* super-blocks &c */
#define BQ_LRU          1               /* lru, useful buffers */
#define BQ_AGE          2               /* rubbish */
#define BQ_EMPTY        3               /* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/*
 * Insq/Remq for the buffer free lists.
 */
#define binsheadfree(bp, dp)    TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define binstailfree(bp, dp)    TAILQ_INSERT_TAIL(dp, bp, b_freelist)

static __inline struct buf *bio_doread __P((struct vnode *, daddr_t, int,
    struct ucred *, int));
int count_lock_queue __P((void));

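/*
 * Remove a buffer from the free list it is on.
 */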
void
bremfree(bp)
        struct buf *bp;
{
        int s = splbio();

        struct bqueues *dp = NULL;

        /*
         * We only calculate the head of the freelist when removing
         * the last element of the list as that is the only time that
         * it is needed (e.g. to reset the tail pointer).
         *
         * NB: This makes an assumption about how tailq's are implemented.
         */
        if (bp->b_freelist.tqe_next == NULL) {
                for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
                        if (dp->tqh_last == &bp->b_freelist.tqe_next)
                                break;
                if (dp == &bufqueues[BQUEUES])
                        panic("bremfree: lost tail");
        }
        TAILQ_REMOVE(dp, bp, b_freelist);
        splx(s);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
        struct buf *bp;
        struct bqueues *dp;
        int i;
        int base, residual;

        /*
         * Initialize the buffer pool.  This pool is used for buffers
         * which are strictly I/O control blocks, not buffer cache
         * buffers.
         */
        pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", 0,
            NULL, NULL, M_DEVBUF);

        for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
                TAILQ_INIT(dp);
        bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
        base = bufpages / nbuf;
        residual = bufpages % nbuf;
        for (i = 0; i < nbuf; i++) {
                bp = &buf[i];
                memset((char *)bp, 0, sizeof(*bp));
                bp->b_dev = NODEV;
                bp->b_vnbufs.le_next = NOLIST;
                LIST_INIT(&bp->b_dep);
                bp->b_data = buffers + i * MAXBSIZE;
                if (i < residual)
                        bp->b_bufsize = (base + 1) * PAGE_SIZE;
                else
                        bp->b_bufsize = base * PAGE_SIZE;
                bp->b_flags = B_INVAL;
                dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
                binsheadfree(bp, dp);
                binshash(bp, &invalhash);
        }
}

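/*
 * Common code for bread() and breadn(): look the block up via getblk()
 * and start a read for it if its contents are not already valid.
 */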
static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
        struct vnode *vp;
        daddr_t blkno;
        int size;
        struct ucred *cred;
        int async;
{
        struct buf *bp;
        struct proc *p = (curproc != NULL ? curproc : &proc0);  /* XXX */

        bp = getblk(vp, blkno, size, 0, 0);

        /*
         * If the buffer does not have valid data, start a read.
         * Note that if the buffer is B_INVAL, getblk() won't return it.
         * Therefore, it's valid if its I/O has completed or been delayed.
         */
        if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
                /* Start I/O for the buffer. */
                SET(bp->b_flags, B_READ | async);
                VOP_STRATEGY(bp);

                /* Pay for the read. */
                p->p_stats->p_ru.ru_inblock++;
        } else if (async) {
                brelse(bp);
        }

        return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
        struct vnode *vp;
        daddr_t blkno;
        int size;
        struct ucred *cred;
        struct buf **bpp;
{
        struct buf *bp;

        /* Get buffer for block. */
        bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

        /*
         * Delayed write buffers are found in the cache and have
         * valid contents.  Also, B_ERROR is not set, otherwise
         * getblk() would not have returned them.
         */
        if (ISSET(bp->b_flags, B_DONE|B_DELWRI))
                return (0);

        /*
         * Otherwise, we had to start a read for it; wait until
         * it's valid and return the result.
         */
        return (biowait(bp));
}
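/*
 * A sketch of typical use (illustrative only; "vp", "lbn" and "size"
 * stand for whatever the caller has at hand):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, size, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... inspect bp->b_data ...
 *	brelse(bp);
 *
 * The buffer comes back busy; the caller must release it with brelse()
 * (or hand it to one of the write routines) when done.
 */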

/*
 * Read-ahead multiple disk blocks.  The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
        struct vnode *vp;
        daddr_t blkno; int size;
        daddr_t rablks[]; int rasizes[];
        int nrablks;
        struct ucred *cred;
        struct buf **bpp;
{
        struct buf *bp;
        int i;

        bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

        /*
         * For each of the read-ahead blocks, start a read, if necessary.
         */
        for (i = 0; i < nrablks; i++) {
                /* If it's in the cache, just go on to next one. */
                if (incore(vp, rablks[i]))
                        continue;

                /* Get a buffer for the read-ahead block */
                (void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
        }

        /*
         * Delayed write buffers are found in the cache and have
         * valid contents.  Also, B_ERROR is not set, otherwise
         * getblk() would not have returned them.
         */
        if (ISSET(bp->b_flags, B_DONE|B_DELWRI))
                return (0);

        /*
         * Otherwise, we had to start a read for it; wait until
         * it's valid and return the result.
         */
        return (biowait(bp));
}

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
        struct vnode *vp;
        daddr_t blkno; int size;
        daddr_t rablkno; int rabsize;
        struct ucred *cred;
        struct buf **bpp;
{

        return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(bp)
        struct buf *bp;
{
        int rv, sync, wasdelayed, s;
        struct proc *p = (curproc != NULL ? curproc : &proc0);  /* XXX */
        struct vnode *vp;
        struct mount *mp;

        vp = bp->b_vp;
        if (vp != NULL) {
                if (vp->v_type == VBLK)
                        mp = vp->v_specmountpoint;
                else
                        mp = vp->v_mount;
        } else {
                mp = NULL;
        }

        /*
         * Remember buffer type, to switch on it later.  If the write was
         * synchronous, but the file system was mounted with MNT_ASYNC,
         * convert it to a delayed write.
         * XXX note that this relies on delayed tape writes being converted
         * to async, not sync writes (which is safe, but ugly).
         */
        sync = !ISSET(bp->b_flags, B_ASYNC);
        if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
                bdwrite(bp);
                return (0);
        }

        /*
         * Collect statistics on synchronous and asynchronous writes.
         * Writes to block devices are charged to their associated
         * filesystem (if any).
         */
        if (mp != NULL) {
                if (sync)
                        mp->mnt_stat.f_syncwrites++;
                else
                        mp->mnt_stat.f_asyncwrites++;
        }

        wasdelayed = ISSET(bp->b_flags, B_DELWRI);

        s = splbio();

        CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

        /*
         * Pay for the I/O operation and make sure the buf is on the correct
         * vnode queue.
         */
        if (wasdelayed)
                reassignbuf(bp, bp->b_vp);
        else
                p->p_stats->p_ru.ru_oublock++;

        /* Initiate disk write.  Make sure the appropriate party is charged. */
        bp->b_vp->v_numoutput++;
        splx(s);

        VOP_STRATEGY(bp);

        if (sync) {
                /* If I/O was synchronous, wait for it to complete. */
                rv = biowait(bp);

                /* Release the buffer. */
                brelse(bp);

                return (rv);
        } else {
                return (0);
        }
}

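/*
 * Generic VOP_BWRITE() implementation: unwrap the vnode-op argument
 * structure and hand the buffer to bwrite().
 */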
int
vn_bwrite(v)
        void *v;
{
        struct vop_bwrite_args *ap = v;

        return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
        struct buf *bp;
{
        struct proc *p = (curproc != NULL ? curproc : &proc0);  /* XXX */
        int s;

        /* If this is a tape block, write the block now. */
        /* XXX NOTE: the memory filesystem usurps major device */
        /* XXX number 255, which is a bad idea. */
        if (bp->b_dev != NODEV &&
            major(bp->b_dev) != 255 &&  /* XXX - MFS buffers! */
            bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
                bawrite(bp);
                return;
        }

        /*
         * If the block hasn't been seen before:
         *      (1) Mark it as having been seen,
         *      (2) Charge for the write,
         *      (3) Make sure it's on its vnode's correct block list.
         */
        s = splbio();

        if (!ISSET(bp->b_flags, B_DELWRI)) {
                SET(bp->b_flags, B_DELWRI);
                p->p_stats->p_ru.ru_oublock++;
                reassignbuf(bp, bp->b_vp);
        }

        /* Otherwise, the "write" is done, so mark and release the buffer. */
        CLR(bp->b_flags, B_NEEDCOMMIT|B_DONE);
        splx(s);

        brelse(bp);
}

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
        struct buf *bp;
{

        SET(bp->b_flags, B_ASYNC);
        VOP_BWRITE(bp);
}

/*
 * Ordered block write; asynchronous, but I/O will occur in order queued.
 */
void
bowrite(bp)
        struct buf *bp;
{

        SET(bp->b_flags, B_ASYNC | B_ORDERED);
        VOP_BWRITE(bp);
}

/*
 * Same as the first half of bdwrite(): mark the buffer dirty, but do
 * not release it.
 */
void
bdirty(bp)
        struct buf *bp;
{
        struct proc *p = (curproc != NULL ? curproc : &proc0);  /* XXX */
        int s;

        s = splbio();

        CLR(bp->b_flags, B_AGE);

        if (!ISSET(bp->b_flags, B_DELWRI)) {
                SET(bp->b_flags, B_DELWRI);
                p->p_stats->p_ru.ru_oublock++;
                reassignbuf(bp, bp->b_vp);
        }

        splx(s);
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
        struct buf *bp;
{
        struct bqueues *bufq;
        int s;

        KASSERT(ISSET(bp->b_flags, B_BUSY));

        /* Wake up any processes waiting for any buffer to become free. */
        if (needbuffer) {
                needbuffer = 0;
                wakeup(&needbuffer);
        }

        /* Block disk interrupts. */
        s = splbio();

        /* Wake up any processes waiting for _this_ buffer to become free. */
        if (ISSET(bp->b_flags, B_WANTED)) {
                CLR(bp->b_flags, B_WANTED|B_AGE);
                wakeup(bp);
        }

        /*
         * Determine which queue the buffer should be on, then put it there.
         */

        /* If it's locked, don't report an error; try again later. */
        if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
                CLR(bp->b_flags, B_ERROR);

        /* If it's not cacheable, or an error, mark it invalid. */
        if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
                SET(bp->b_flags, B_INVAL);

        if (ISSET(bp->b_flags, B_VFLUSH)) {
                /*
                 * This is a delayed write buffer that was just flushed to
                 * disk.  It is still on the LRU queue.  If it's become
                 * invalid, then we need to move it to a different queue;
                 * otherwise leave it in its current position.
                 */
                CLR(bp->b_flags, B_VFLUSH);
                if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE))
                        goto already_queued;
                else
                        bremfree(bp);
        }

        if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
                /*
                 * If it's invalid or empty, dissociate it from its vnode
                 * and put on the head of the appropriate queue.
                 */
                if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
                        (*bioops.io_deallocate)(bp);
                CLR(bp->b_flags, B_DONE|B_DELWRI);
                if (bp->b_vp) {
                        reassignbuf(bp, bp->b_vp);
                        brelvp(bp);
                }
                if (bp->b_bufsize <= 0)
                        /* no data */
                        bufq = &bufqueues[BQ_EMPTY];
                else
                        /* invalid data */
                        bufq = &bufqueues[BQ_AGE];
                binsheadfree(bp, bufq);
        } else {
                /*
                 * It has valid data.  Put it on the end of the appropriate
                 * queue, so that it'll stick around for as long as possible.
                 * If the buf is AGE but has dependencies, we must put it on
                 * the last bufqueue to be scanned, i.e. LRU.  This protects
                 * against the livelock where BQ_AGE only has buffers with
                 * dependencies, and we thus never get to the dependent
                 * buffers in BQ_LRU.
                 */
                if (ISSET(bp->b_flags, B_LOCKED))
                        /* locked in core */
                        bufq = &bufqueues[BQ_LOCKED];
                else if (!ISSET(bp->b_flags, B_AGE))
                        /* valid data */
                        bufq = &bufqueues[BQ_LRU];
                else {
                        /* stale but valid data */
                        int has_deps;

                        if (LIST_FIRST(&bp->b_dep) != NULL &&
                            bioops.io_countdeps)
                                has_deps = (*bioops.io_countdeps)(bp, 0);
                        else
                                has_deps = 0;
                        bufq = has_deps ? &bufqueues[BQ_LRU] :
                            &bufqueues[BQ_AGE];
                }
                binstailfree(bp, bufq);
        }

already_queued:
        /* Unlock the buffer. */
        CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE|B_ORDERED);
        SET(bp->b_flags, B_CACHE);

        /* Allow disk interrupts. */
        splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just search what would be its hash chain.  If the block is there,
 * return a pointer to it, unless it's marked invalid; invalid buffers
 * are never returned.
 */
struct buf *
incore(vp, blkno)
        struct vnode *vp;
        daddr_t blkno;
{
        struct buf *bp;

        bp = BUFHASH(vp, blkno)->lh_first;

        /* Search hash chain */
        for (; bp != NULL; bp = bp->b_hash.le_next) {
                if (bp->b_lblkno == blkno && bp->b_vp == vp &&
                    !ISSET(bp->b_flags, B_INVAL))
                        return (bp);
        }

        return (NULL);
}
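/*
 * Note that the buffer is neither busied nor locked by this lookup,
 * so the answer is only advisory; see the read-ahead check in breadn()
 * above for a typical use.
 */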

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
        struct vnode *vp;
        daddr_t blkno;
        int size, slpflag, slptimeo;
{
        struct buf *bp;
        int s, err;

start:
        bp = incore(vp, blkno);
        if (bp != NULL) {
                s = splbio();
                if (ISSET(bp->b_flags, B_BUSY)) {
                        if (curproc == uvm.pagedaemon_proc) {
                                splx(s);
                                return NULL;
                        }
                        SET(bp->b_flags, B_WANTED);
                        err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
                            slptimeo);
                        splx(s);
                        if (err)
                                return (NULL);
                        goto start;
                }
#ifdef DIAGNOSTIC
                if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
                    bp->b_bcount < size && vp->v_type != VBLK)
                        panic("getblk: block size invariant failed");
#endif
                SET(bp->b_flags, B_BUSY);
                bremfree(bp);
                splx(s);
        } else {
                if ((bp = getnewbuf(slpflag, slptimeo)) == NULL)
                        goto start;

                binshash(bp, BUFHASH(vp, blkno));
                bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
                s = splbio();
                bgetvp(vp, bp);
                splx(s);
        }
        allocbuf(bp, size);
        return (bp);
}
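/*
 * A sketch of the common write path through getblk() (illustrative
 * only; "vp", "lbn" and "size" are whatever the caller has at hand):
 *
 *	bp = getblk(vp, lbn, size, 0, 0);
 *	... fill in bp->b_data ...
 *	bdwrite(bp);		// or bwrite(bp) for a synchronous write
 *
 * The buffer is returned busy; the caller must eventually release it
 * via one of the write routines or brelse().
 */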

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
        int size;
{
        struct buf *bp;

        while ((bp = getnewbuf(0, 0)) == 0)
                ;
        SET(bp->b_flags, B_INVAL);
        binshash(bp, &invalhash);
        allocbuf(bp, size);
        return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
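/*
 * For example (assuming 4 KB pages): a request for 6000 bytes rounds
 * desired_size up to 8192, so the buffer ends up with b_bufsize 8192
 * while b_bcount is set to the requested 6000.
 */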
void
allocbuf(bp, size)
        struct buf *bp;
        int size;
{
        struct buf *nbp;
        vsize_t desired_size;
        int s;

        desired_size = round_page((vsize_t)size);
        if (desired_size > MAXBSIZE)
                panic("allocbuf: buffer larger than MAXBSIZE requested");

        if (bp->b_bufsize == desired_size)
                goto out;

        /*
         * If the buffer is smaller than the desired size, we need to snarf
         * it from other buffers.  Get buffers (via getnewbuf()), and
         * steal their pages.
         */
        while (bp->b_bufsize < desired_size) {
                int amt;

                /* find a buffer */
                while ((nbp = getnewbuf(0, 0)) == NULL)
                        ;

                SET(nbp->b_flags, B_INVAL);
                binshash(nbp, &invalhash);

                /* and steal its pages, up to the amount we need */
                amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
                pagemove((nbp->b_data + nbp->b_bufsize - amt),
                    bp->b_data + bp->b_bufsize, amt);
                bp->b_bufsize += amt;
                nbp->b_bufsize -= amt;

                /* reduce transfer count if we stole some data */
                if (nbp->b_bcount > nbp->b_bufsize)
                        nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
                if (nbp->b_bufsize < 0)
                        panic("allocbuf: negative bufsize");
#endif

                brelse(nbp);
        }

        /*
         * If we want a buffer smaller than the current size,
         * shrink this buffer.  Grab a buf head from the EMPTY queue,
         * move a page onto it, and put it on front of the AGE queue.
         * If there are no free buffer headers, leave the buffer alone.
         */
        if (bp->b_bufsize > desired_size) {
                s = splbio();
                if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) {
                        /* No free buffer head */
                        splx(s);
                        goto out;
                }
                bremfree(nbp);
                SET(nbp->b_flags, B_BUSY);
                splx(s);

                /* move the page to it and note this change */
                pagemove(bp->b_data + desired_size,
                    nbp->b_data, bp->b_bufsize - desired_size);
                nbp->b_bufsize = bp->b_bufsize - desired_size;
                bp->b_bufsize = desired_size;
                nbp->b_bcount = 0;
                SET(nbp->b_flags, B_INVAL);

                /* release the newly-filled buffer and leave */
                brelse(nbp);
        }

out:
        bp->b_bcount = size;
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
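/*
 * Note that getnewbuf() does not retry internally: if it has to sleep
 * for a buffer, or starts a delayed write, it returns NULL and the
 * caller is expected to loop (see geteblk() and allocbuf() above).
 */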
struct buf *
getnewbuf(slpflag, slptimeo)
        int slpflag, slptimeo;
{
        struct buf *bp;
        int s;

start:
        s = splbio();
        if ((bp = bufqueues[BQ_AGE].tqh_first) != NULL ||
            (bp = bufqueues[BQ_LRU].tqh_first) != NULL) {
                bremfree(bp);
        } else {
                /* wait for a free buffer of any kind */
                needbuffer = 1;
                tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
                splx(s);
                return (NULL);
        }

        if (ISSET(bp->b_flags, B_VFLUSH)) {
                /*
                 * This is a delayed write buffer being flushed to disk.  Make
                 * sure it gets aged out of the queue when it's finished, and
                 * leave it off the LRU queue.
                 */
                CLR(bp->b_flags, B_VFLUSH);
                SET(bp->b_flags, B_AGE);
                splx(s);
                goto start;
        }

        /* Buffer is no longer on free lists. */
        SET(bp->b_flags, B_BUSY);

        /*
         * If buffer was a delayed write, start it and return NULL
         * (since we might sleep while starting the write).
         */
        if (ISSET(bp->b_flags, B_DELWRI)) {
                splx(s);
                /*
                 * This buffer has gone through the LRU, so make sure it gets
                 * reused ASAP.
                 */
                SET(bp->b_flags, B_AGE);
                bawrite(bp);
                return (NULL);
        }

        /* disassociate us from our vnode, if we had one... */
        if (bp->b_vp)
                brelvp(bp);
        splx(s);

        if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
                (*bioops.io_deallocate)(bp);

        /* clear out various other fields */
        bp->b_flags = B_BUSY;
        bp->b_dev = NODEV;
        bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
        bp->b_iodone = 0;
        bp->b_error = 0;
        bp->b_resid = 0;
        bp->b_bcount = 0;

        bremhash(bp);
        return (bp);
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
        struct buf *bp;
{
        int s;

        s = splbio();
        while (!ISSET(bp->b_flags, B_DONE))
                tsleep(bp, PRIBIO + 1, "biowait", 0);
        splx(s);

        /* check for interruption of I/O (e.g. via NFS), then errors. */
        if (ISSET(bp->b_flags, B_EINTR)) {
                CLR(bp->b_flags, B_EINTR);
                return (EINTR);
        } else if (ISSET(bp->b_flags, B_ERROR))
                return (bp->b_error ? bp->b_error : EIO);
        else
                return (0);
}
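/*
 * biowait() is the synchronous half of the split-phase I/O model:
 * VOP_STRATEGY() queues the transfer, and this routine sleeps until
 * biodone() (below) marks the buffer complete.
 */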

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
        struct buf *bp;
{
        int s = splbio();

        if (ISSET(bp->b_flags, B_DONE))
                panic("biodone already");
        SET(bp->b_flags, B_DONE);               /* note that it's done */

        if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
                (*bioops.io_complete)(bp);

        if (!ISSET(bp->b_flags, B_READ))        /* on a write, note output done */
                vwakeup(bp);

        if (ISSET(bp->b_flags, B_CALL)) {       /* if necessary, call out */
                CLR(bp->b_flags, B_CALL);       /* but note callout done */
                (*bp->b_iodone)(bp);
        } else {
                if (ISSET(bp->b_flags, B_ASYNC))        /* if async, release */
                        brelse(bp);
                else {                          /* or just wakeup the buffer */
                        CLR(bp->b_flags, B_WANTED);
                        wakeup(bp);
                }
        }

        splx(s);
}

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
        struct buf *bp;
        int n = 0;

        for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
            bp = bp->b_freelist.tqe_next)
                n++;
        return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
        int s, i, j, count;
        struct buf *bp;
        struct bqueues *dp;
        int counts[(MAXBSIZE / PAGE_SIZE) + 1];
        static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

        for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
                count = 0;
                for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
                        counts[j] = 0;
                s = splbio();
                for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
                        counts[bp->b_bufsize/PAGE_SIZE]++;
                        count++;
                }
                splx(s);
                printf("%s: total-%d", bname[i], count);
                for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
                        if (counts[j] != 0)
                                printf(", %d-%d", j * PAGE_SIZE, counts[j]);
                printf("\n");
        }
}
#endif /* DEBUG */