/*	$NetBSD: vfs_bio.c,v 1.69 2000/11/08 05:53:10 chs Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include <miscfs/specfs/specdev.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
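
/*
 * The hash key mixes the vnode pointer (scaled down by the size of
 * the object it points at) with the logical block number; 'bufhash'
 * below is the mask handed back by hashinit() in bufinit().
 */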
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
struct bio_ops bioops;	/* I/O operation notification */

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */
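/*
 * Note on reclaim order: getnewbuf() takes victims from BQ_AGE first,
 * then BQ_LRU; buffers on BQ_LOCKED are never reclaimed.
 */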

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

static __inline struct buf *bio_doread __P((struct vnode *, daddr_t, int,
					    struct ucred *, int));
int count_lock_queue __P((void));

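/*
 * Remove a buffer from the free list it's currently on.
 */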
void
bremfree(bp)
	struct buf *bp;
{
	int s = splbio();

	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);

	splx(s);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	struct buf *bp;
	struct bqueues *dp;
	int i;
	int base, residual;

	/*
	 * Initialize the buffer pool.  This pool is used for buffers
	 * which are strictly I/O control blocks, not buffer cache
	 * buffers.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", 0,
	    NULL, NULL, M_DEVBUF);

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, M_WAITOK, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		memset((char *)bp, 0, sizeof(*bp));
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
		LIST_INIT(&bp->b_dep);
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * NBPG;
		else
			bp->b_bufsize = base * NBPG;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}

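/*
 * Common code for bread() and breadn(): look the block up via
 * getblk() and start a read if its contents are not already valid.
 * The read is started asynchronously when 'async' is set.
 */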
static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	struct buf *bp;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * If buffer does not have data valid, start a read.
	 * Note that if buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer (keeping credentials). */
		SET(bp->b_flags, B_READ | async);
		if (cred != NOCRED && bp->b_rcred == NOCRED) {
			crhold(cred);
			bp->b_rcred = cred;
		}
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * Delayed write buffers are found in the cache and have
	 * valid contents.  Also, B_ERROR is not set, otherwise
	 * getblk() would not have returned them.
	 */
	if (ISSET(bp->b_flags, B_DONE|B_DELWRI))
		return (0);

	/*
	 * Otherwise, we had to start a read for it; wait until
	 * it's valid and return the result.
	 */
	return (biowait(bp));
}
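
/*
 * Illustrative sketch (not part of the original file): the typical
 * bread() calling pattern in a file system read path.  'vp', 'lbn'
 * and 'bsize' stand in for values a real caller would supply.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...examine bp->b_data...
 *	brelse(bp);
 */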

/*
 * Read-ahead multiple disk blocks.  The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/*
	 * Delayed write buffers are found in the cache and have
	 * valid contents.  Also, B_ERROR is not set, otherwise
	 * getblk() would not have returned them.
	 */
	if (ISSET(bp->b_flags, B_DONE|B_DELWRI))
		return (0);

	/*
	 * Otherwise, we had to start a read for it; wait until
	 * it's valid and return the result.
	 */
	return (biowait(bp));
}

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56).
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed, s;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	struct vnode *vp;
	struct mount *mp;

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && bp->b_vp && bp->b_vp->v_mount &&
	    ISSET(bp->b_vp->v_mount->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if ((vp = bp->b_vp) != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
		if (mp != NULL) {
			if (sync)
				mp->mnt_stat.f_syncwrites++;
			else
				mp->mnt_stat.f_asyncwrites++;
		}
	}

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	s = splbio();

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	bp->b_vp->v_numoutput++;
	splx(s);

	SET(bp->b_flags, B_WRITEINPROG);
	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

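/*
 * vn_bwrite() is the generic VOP_BWRITE() implementation; it simply
 * unpacks the vop_bwrite_args and hands the buffer to bwrite().
 */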
int
vn_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	int s;

	/* If this is a tape block, write the block now. */
	/* XXX NOTE: the memory filesystem usurps major device */
	/* XXX number 255, which is a bad idea. */
	if (bp->b_dev != NODEV &&
	    major(bp->b_dev) != 255 &&	/* XXX - MFS buffers! */
	    bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_NEEDCOMMIT|B_DONE);
	splx(s);

	brelse(bp);
}

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp);
}

/*
 * Ordered block write; asynchronous, but I/O will occur in order queued.
 */
void
bowrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC | B_ORDERED);
	VOP_BWRITE(bp);
}

/*
 * Same as the first half of bdwrite(): mark the buffer dirty, but do not release it.
 */
void
bdirty(bp)
	struct buf *bp;
{
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	int s;

	s = splbio();

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	splx(s);
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Block disk interrupts. */
	s = splbio();

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE))
			goto already_queued;
		else
			bremfree(bp);
	}

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If buf is AGE, but has dependencies, must put it on last
		 * bufqueue to be scanned, ie LRU.  This protects against the
		 * livelock where BQ_AGE only has buffers with dependencies,
		 * and we thus never get to the dependent buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE|B_ORDERED);

	/* Allow disk interrupts. */
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there and not
 * marked invalid, return a pointer to it; buffers marked B_INVAL are
 * never returned.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	bp = BUFHASH(vp, blkno)->lh_first;

	/* Search hash chain */
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct bufhashhdr *bh;
	struct buf *bp;
	int s, err;

	/*
	 * XXX
	 * The following is an inlined version of 'incore()', but with
	 * the 'invalid' test moved to after the 'busy' test.  It's
	 * necessary because there are some cases in which the NFS
	 * code sets B_INVAL prior to writing data to the server, but
	 * in which the buffers actually contain valid data.  In this
	 * case, we can't allow the system to allocate a new buffer for
	 * the block until the write is finished.
	 */
	bh = BUFHASH(vp, blkno);
start:
	bp = bh->lh_first;
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;

		s = splbio();
		if (ISSET(bp->b_flags, B_BUSY)) {
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
			    slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}

		if (!ISSET(bp->b_flags, B_INVAL)) {
#ifdef DIAGNOSTIC
			if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
			    bp->b_bcount < size)
				panic("getblk: block size invariant failed");
#endif
			SET(bp->b_flags, B_BUSY);
			bremfree(bp);
			splx(s);
			break;
		}
		splx(s);
	}

	if (bp == NULL) {
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL)
			goto start;
		binshash(bp, bh);
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	}
	allocbuf(bp, size);
	return (bp);
}
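
/*
 * Illustrative sketch (not part of the original file): using getblk()
 * to obtain a block that will be overwritten in full, so no read is
 * needed beforehand.  'vp', 'lbn', 'bsize' and 'src' stand in for
 * values a real caller would supply.
 *
 *	struct buf *bp;
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	memcpy(bp->b_data, src, bsize);
 *	bdwrite(bp);		(marks it dirty; the write is deferred)
 */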

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0)) == NULL)
		;
	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	allocbuf(bp, size);

	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf *nbp;
	vsize_t desired_size;
	int s;

	desired_size = round_page((vsize_t)size);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;
		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
		    bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif

		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) {
			/* No free buffer head */
			splx(s);
			goto out;
		}
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY);
		splx(s);

		/* move the page to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;
		SET(nbp->b_flags, B_INVAL);

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	struct buf *bp;
	int s;

start:
	s = splbio();
	if ((bp = bufqueues[BQ_AGE].tqh_first) != NULL ||
	    (bp = bufqueues[BQ_LRU].tqh_first) != NULL) {
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
		splx(s);
		return (NULL);
	}

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		splx(s);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/* If buffer was a delayed write, start it, and go back to the top. */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		splx(s);
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		bawrite(bp);
		goto start;
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);
	splx(s);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;

	/* nuke any credentials we were holding */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	bremhash(bp);
	return (bp);
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	while (!ISSET(bp->b_flags, B_DONE))
		tsleep(bp, PRIBIO + 1, "biowait", 0);
	splx(s);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		return (EINTR);
	} else if (ISSET(bp->b_flags, B_ERROR))
		return (bp->b_error ? bp->b_error : EIO);
	else
		return (0);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	int s = splbio();

	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* a write: note completion on vnode */
		vwakeup(bp);

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC))	/* if async, release */
			brelse(bp);
		else {				/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
		}
	}

	splx(s);
}
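
/*
 * Illustrative sketch (not part of the original file): how an async
 * consumer can arrange a completion callback through B_CALL, so that
 * biodone() invokes its handler instead of brelse()ing the buffer.
 * The handler name 'xyz_iodone' is hypothetical.
 *
 *	bp->b_iodone = xyz_iodone;
 *	SET(bp->b_flags, B_CALL | B_ASYNC);
 *	VOP_STRATEGY(bp);
 */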

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	struct buf *bp;
	int n = 0;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next)
		n++;
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[MAXBSIZE/NBPG+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/NBPG; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/NBPG]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/NBPG; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * NBPG, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */