/*	$NetBSD: vfs_bio.c,v 1.51 1997/07/08 21:42:59 pk Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/trace.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include <vm/vm.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[((long)(dvp) / sizeof(*(dvp)) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

static __inline struct buf *bio_doread __P((struct vnode *, daddr_t, int,
					    struct ucred *, int));
int count_lock_queue __P((void));

void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	register struct buf *bp;
	struct bqueues *dp;
	register int i;
	int base, residual;

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero((char *)bp, sizeof *bp);
		bp->b_dev = NODEV;
		bp->b_rcred = NOCRED;
		bp->b_wcred = NOCRED;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * CLBYTES;
		else
			bp->b_bufsize = base * CLBYTES;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}

static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	register struct buf *bp;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * If buffer does not have data valid, start a read.
	 * Note that if buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer (keeping credentials). */
		SET(bp->b_flags, B_READ | async);
		if (cred != NOCRED && bp->b_rcred == NOCRED) {
			crhold(cred);
			bp->b_rcred = cred;
		}
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * Delayed write buffers are found in the cache and have
	 * valid contents. Also, B_ERROR is not set, otherwise
	 * getblk() would not have returned them.
	 */
	if (ISSET(bp->b_flags, B_DELWRI))
		return (0);

	/*
	 * Otherwise, we had to start a read for it; wait until
	 * it's valid and return the result.
	 */
	return (biowait(bp));
}
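
/*
 * Illustrative usage sketch for bread(): a file system consumer reads
 * a block, uses its contents, and releases it.  The names `vp', `lbn',
 * and `bsize' are hypothetical, assumed to be supplied by the caller.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine bp->b_data ...
 *	brelse(bp);
 */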

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	register struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/*
	 * Delayed write buffers are found in the cache and have valid
	 * contents, so mark them done; the biowait() below will then
	 * return immediately.  (B_ERROR is not set, otherwise getblk()
	 * would not have returned them.)
	 */
	if (ISSET(bp->b_flags, B_DELWRI))
		SET(bp->b_flags, B_DONE);

	/*
	 * Otherwise, we had to start a read for it; wait until
	 * it's valid and return the result.
	 */
	return (biowait(bp));
}
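
/*
 * Illustrative usage sketch for breadn(): read `lbn' synchronously and
 * start an asynchronous read-ahead of the block after it.  The names
 * `vp', `lbn', and `bsize' are hypothetical, assumed to be supplied by
 * the caller.
 *
 *	struct buf *bp;
 *	daddr_t rablk = lbn + 1;
 *	int rasize = bsize, error;
 *
 *	error = breadn(vp, lbn, bsize, &rablk, &rasize, 1, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... use bp->b_data, then brelse(bp) ...
 */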

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed, s;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && bp->b_vp && bp->b_vp->v_mount &&
	    ISSET(bp->b_vp->v_mount->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	s = splbio();

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	bp->b_vp->v_numoutput++;
	splx(s);

	SET(bp->b_flags, B_WRITEINPROG);
	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	int s;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */

	/* If this is a tape block, write the block now. */
	if (bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		s = splbio();
		reassignbuf(bp, bp->b_vp);
		splx(s);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_NEEDCOMMIT);
	SET(bp->b_flags, B_DONE);
	brelse(bp);
}
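
/*
 * Illustrative usage sketch contrasting the write flavours: after a
 * small in-core modification, a delayed write keeps the buffer cached,
 * where bwrite() or bawrite() would push it to disk at once (sync or
 * async, respectively).  The names `vp', `lbn', and `bsize' are
 * hypothetical, assumed to be supplied by the caller.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... modify a few bytes of bp->b_data ...
 *	bdwrite(bp);		(releases bp; the disk write happens later)
 */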

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp);
}

/*
 * Release a buffer onto the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}

	/* Block disk interrupts. */
	s = splbio();

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE))
			goto already_queued;
		else
			bremfree(bp);
	}

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (bp->b_vp)
			brelvp(bp);
		CLR(bp->b_flags, B_DELWRI);
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (ISSET(bp->b_flags, B_AGE))
			/* stale but valid data */
			bufq = &bufqueues[BQ_AGE];
		else
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_BUSY | B_NOCACHE));

	/* Allow disk interrupts. */
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	bp = BUFHASH(vp, blkno)->lh_first;

	/* Search hash chain */
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (0);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	register struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct bufhashhdr *bh;
	struct buf *bp;
	int s, err;

	/*
	 * XXX
	 * The following is an inlined version of 'incore()', but with
	 * the 'invalid' test moved to after the 'busy' test.  It's
	 * necessary because there are some cases in which the NFS
	 * code sets B_INVAL prior to writing data to the server, but
	 * in which the buffers actually contain valid data.  In this
	 * case, we can't allow the system to allocate a new buffer for
	 * the block until the write is finished.
	 */
	bh = BUFHASH(vp, blkno);
start:
	bp = bh->lh_first;
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno != blkno || bp->b_vp != vp)
			continue;

		s = splbio();
		if (ISSET(bp->b_flags, B_BUSY)) {
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
			    slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}

		if (!ISSET(bp->b_flags, B_INVAL)) {
			SET(bp->b_flags, (B_BUSY | B_CACHE));
			bremfree(bp);
			splx(s);
			break;
		}
		splx(s);
	}

	if (bp == NULL) {
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL)
			goto start;
		binshash(bp, bh);
		bp->b_blkno = bp->b_lblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	}
	allocbuf(bp, size);
	return (bp);
}
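
/*
 * Illustrative usage sketch for getblk(): when a block is to be
 * completely overwritten, the caller can skip the read and fill the
 * buffer directly.  The names `vp', `lbn', and `bsize' are
 * hypothetical, assumed to be supplied by the caller.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	... fill all of bp->b_data ...
 *	error = bwrite(bp);		(synchronous; releases bp)
 */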

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0)) == 0)
		;
	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	allocbuf(bp, size);

	return (bp);
}
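
/*
 * Illustrative usage sketch for geteblk(): an anonymous buffer used as
 * scratch space for a raw transfer and then released.  `xfersize' is a
 * hypothetical caller-supplied value.
 *
 *	struct buf *bp;
 *
 *	bp = geteblk(xfersize);
 *	... use bp->b_data ...
 *	brelse(bp);
 */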

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf	*nbp;
	vm_size_t	desired_size;
	int		s;

	desired_size = roundup(size, CLBYTES);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;
		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
			 bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif

		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) {
			/* No free buffer head */
			splx(s);
			goto out;
		}
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY);
		splx(s);

		/* move the page to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;
		SET(nbp->b_flags, B_INVAL);

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	register struct buf *bp;
	int s;

start:
	s = splbio();
	if ((bp = bufqueues[BQ_AGE].tqh_first) != NULL ||
	    (bp = bufqueues[BQ_LRU].tqh_first) != NULL) {
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
		splx(s);
		return (0);
	}

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		splx(s);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/* If buffer was a delayed write, start it, and go back to the top. */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		splx(s);
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		bawrite(bp);
		goto start;
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);
	splx(s);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;

	/* nuke any credentials we were holding */
	if (bp->b_rcred != NOCRED) {
		crfree(bp->b_rcred);
		bp->b_rcred = NOCRED;
	}
	if (bp->b_wcred != NOCRED) {
		crfree(bp->b_wcred);
		bp->b_wcred = NOCRED;
	}

	bremhash(bp);
	return (bp);
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	while (!ISSET(bp->b_flags, B_DONE))
		tsleep(bp, PRIBIO + 1, "biowait", 0);
	splx(s);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		return (EINTR);
	} else if (ISSET(bp->b_flags, B_ERROR))
		return (bp->b_error ? bp->b_error : EIO);
	else
		return (0);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., say on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (!ISSET(bp->b_flags, B_READ))	/* for writes, notify vnode */
		vwakeup(bp);

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else if (ISSET(bp->b_flags, B_ASYNC))	/* if async, release it */
		brelse(bp);
	else {					/* or just wakeup the buffer */
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}
}
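
/*
 * Illustrative usage sketch of the B_CALL mechanism: a subsystem that
 * wants completion notification instead of a brelse() installs an
 * I/O-done handler before starting the transfer; biodone() then calls
 * it at completion time, and the handler disposes of the buffer
 * itself.  `my_iodone' is a hypothetical function.
 *
 *	void
 *	my_iodone(bp)
 *		struct buf *bp;
 *	{
 *		... check B_ERROR and bp->b_error ...
 *		brelse(bp);
 *	}
 *
 *	SET(bp->b_flags, B_CALL);
 *	bp->b_iodone = my_iodone;
 *	VOP_STRATEGY(bp);
 */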

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	register struct buf *bp;
	register int n = 0;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next)
		n++;
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	register struct buf *bp;
	register struct bqueues *dp;
	int counts[MAXBSIZE/CLBYTES+1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/CLBYTES]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/CLBYTES; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * CLBYTES, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */