/*	$NetBSD: vfs_bio.c,v 1.81 2002/05/12 23:06:27 matt Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include "opt_softdep.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.81 2002/05/12 23:06:27 matt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
#ifndef SOFTDEP
struct bio_ops bioops;	/* I/O operation notification */
#endif

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

static __inline struct buf *bio_doread __P((struct vnode *, daddr_t, int,
					    struct ucred *, int));
int count_lock_queue __P((void));

void
bremfree(bp)
	struct buf *bp;
{
	int s = splbio();

	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
	splx(s);
}
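
/*
 * Illustrative note on the TAILQ assumption above (not from the
 * original source): for the last element of a tail queue, tqe_next is
 * NULL and the queue head's tqh_last points at that element's tqe_next
 * field, which is how the scan over bufqueues[] recognizes the queue
 * the buffer is on.  For any other element, TAILQ_REMOVE() never
 * dereferences the head argument, so leaving dp == NULL is harmless.
 */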

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	struct buf *bp;
	struct bqueues *dp;
	int i;
	int base, residual;

	/*
	 * Initialize the buffer pool.  This pool is used for buffers
	 * which are strictly I/O control blocks, not buffer cache
	 * buffers.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		memset((char *)bp, 0, sizeof(*bp));
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		LIST_INIT(&bp->b_dep);
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * PAGE_SIZE;
		else
			bp->b_bufsize = base * PAGE_SIZE;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}
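
/*
 * Worked example of the carve-up above (illustrative numbers): with
 * bufpages = 100 and nbuf = 30, base = 3 and residual = 10, so the
 * first 10 buffer headers start out with 4 pages of memory each and
 * the other 20 with 3 pages each, accounting for all
 * 10*4 + 20*3 = 100 pages.
 */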

static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	struct buf *bp;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * If the buffer does not have valid data, start a read.
	 * Note that if the buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}
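
/*
 * Usage sketch (illustrative, not part of this file): a typical file
 * system read path calls bread() and releases the buffer when done.
 * "vp", "lbn" and "bsize" stand for a caller-supplied vnode, logical
 * block number and block size:
 *
 *	struct buf *bp;
 *	int error;
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... examine bp->b_data ...
 *	brelse(bp);
 */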

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Wait until the first block, read synchronously above, is valid. */
	return (biowait(bp));
}
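
/*
 * Usage sketch (illustrative): read block "lbn" synchronously and
 * prefetch the two following blocks asynchronously; only the first
 * buffer is returned to the caller:
 *
 *	daddr_t rablks[2] = { lbn + 1, lbn + 2 };
 *	int rasizes[2] = { bsize, bsize };
 *
 *	error = breadn(vp, lbn, bsize, rablks, rasizes, 2, NOCRED, &bp);
 */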

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed, s;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	struct vnode *vp;
	struct mount *mp;

	vp = bp->b_vp;
	if (vp != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	s = splbio();

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	bp->b_vp->v_numoutput++;
	splx(s);

	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}
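
/*
 * Usage sketch (illustrative): read-modify-write a block synchronously.
 * Note that bwrite() consumes the buffer; on the synchronous path it
 * brelse()s the buffer itself after biowait():
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	memcpy((char *)bp->b_data + off, src, len);
 *	error = bwrite(bp);
 */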

int
vn_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	int s;

	/* If this is a tape block, write the block now. */
	/* XXX NOTE: the memory filesystem usurps major device */
	/* XXX       number 255, which is a bad idea.		*/
	if (bp->b_dev != NODEV &&
	    major(bp->b_dev) != 255 &&	/* XXX - MFS buffers! */
	    bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_NEEDCOMMIT|B_DONE);
	splx(s);

	brelse(bp);
}
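
/*
 * Usage sketch (illustrative): a small write that only partially fills
 * a block (len < bsize) is the typical bdwrite() customer; the data
 * reaches disk later, when the buffer is reclaimed or flushed:
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	memcpy((char *)bp->b_data + off, src, len);
 *	bdwrite(bp);
 */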

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp);
}

/*
 * Ordered block write; asynchronous, but I/O will occur in order queued.
 */
void
bowrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC | B_ORDERED);
	VOP_BWRITE(bp);
}

/*
 * Same as the first half of bdwrite(): mark the buffer dirty, but do
 * not release it.
 */
void
bdirty(bp)
	struct buf *bp;
{
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	int s;

	s = splbio();

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	splx(s);
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Block disk interrupts. */
	s = splbio();

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE))
			goto already_queued;
		else
			bremfree(bp);
	}

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If the buf is AGE but has dependencies, it must go on the
		 * last bufqueue to be scanned, i.e. LRU.  This protects
		 * against a livelock where BQ_AGE only has buffers with
		 * dependencies, and we thus never get to the dependent
		 * buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE|B_ORDERED);
	SET(bp->b_flags, B_CACHE);

	/* Allow disk interrupts. */
	splx(s);
}
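
/*
 * Summary of the queue selection in brelse() (derived from the code
 * above):
 *
 *	no memory (b_bufsize == 0)	-> head of BQ_EMPTY
 *	invalid data			-> head of BQ_AGE
 *	B_LOCKED			-> tail of BQ_LOCKED
 *	valid, !B_AGE			-> tail of BQ_LRU
 *	B_AGE, no dependencies		-> tail of BQ_AGE
 *	B_AGE, has dependencies		-> tail of BQ_LRU
 */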

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	bp = BUFHASH(vp, blkno)->lh_first;

	/* Search hash chain */
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct buf *bp;
	int s, err;

start:
	bp = incore(vp, blkno);
	if (bp != NULL) {
		s = splbio();
		if (ISSET(bp->b_flags, B_BUSY)) {
			if (curproc == uvm.pagedaemon_proc) {
				splx(s);
				return (NULL);
			}
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
				     slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
		    bp->b_bcount < size && vp->v_type != VBLK)
			panic("getblk: block size invariant failed");
#endif
		SET(bp->b_flags, B_BUSY);
		bremfree(bp);
		splx(s);
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL)
			goto start;

		binshash(bp, BUFHASH(vp, blkno));
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	}
	allocbuf(bp, size);
	return (bp);
}
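
/*
 * Usage sketch (illustrative): when an entire block will be
 * overwritten there is no need to read it first; getblk() plus a
 * clearing of the data suffices before the (delayed) write:
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	memset(bp->b_data, 0, (size_t)bp->b_bcount);
 *	... fill bp->b_data ...
 *	bdwrite(bp);
 */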

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0)) == NULL)
		;
	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	allocbuf(bp, size);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it is the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf *nbp;
	vsize_t desired_size;
	int s;

	desired_size = round_page((vsize_t)size);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;

		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
			 bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif

		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) {
			/* No free buffer head */
			splx(s);
			goto out;
		}
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY);
		splx(s);

		/* move the page to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;
		SET(nbp->b_flags, B_INVAL);

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}
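
/*
 * Worked example for allocbuf() (illustrative, assuming
 * PAGE_SIZE = 4096): a request for size = 6000 rounds up to
 * desired_size = 8192, so the buffer ends up with two pages of storage
 * (b_bufsize = 8192) while b_bcount is set to the exact 6000 bytes the
 * caller asked for.
 */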

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	struct buf *bp;
	int s;

start:
	s = splbio();
	if ((bp = bufqueues[BQ_AGE].tqh_first) != NULL ||
	    (bp = bufqueues[BQ_LRU].tqh_first) != NULL) {
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
		splx(s);
		return (NULL);
	}

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		splx(s);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		splx(s);
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		bawrite(bp);
		return (NULL);
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);
	splx(s);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	bremhash(bp);
	return (bp);
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
		tsleep(bp, PRIBIO + 1, "biowait", 0);
	splx(s);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		return (EINTR);
	} else if (ISSET(bp->b_flags, B_ERROR))
		return (bp->b_error ? bp->b_error : EIO);
	else
		return (0);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
    893  * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	int s = splbio();

	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* note completed write */
		vwakeup(bp);

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC))	/* if async, release */
			brelse(bp);
		else {				/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
		}
	}

	splx(s);
}
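
/*
 * Usage sketch (illustrative): a subsystem that cannot sleep can ask
 * biodone() to invoke a callback instead of waking a sleeper.
 * "my_iodone" is a hypothetical completion routine:
 *
 *	bp->b_flags |= B_CALL | B_READ;
 *	bp->b_iodone = my_iodone;
 *	VOP_STRATEGY(bp);
 *
 * When the transfer completes, biodone() clears B_CALL and calls
 * (*my_iodone)(bp) at splbio(); the callback is then responsible for
 * disposing of the buffer.
 */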

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	struct buf *bp;
	int n = 0;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next)
		n++;
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */