/*	$NetBSD: vfs_bio.c,v 1.76 2001/04/01 16:16:56 chs Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
struct bio_ops bioops;	/* I/O operation notification */
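
/*
 * Worked example (illustrative figures, not part of the original
 * source): with 512 hash chains (bufhash == 511), looking up logical
 * block 12 of the vnode at address 0xc0a81400 scans the chain
 *
 *	&bufhashtbl[(((long)0xc0a81400 >> 8) + 12) & 511]
 *
 * i.e. chain (0xc0a814 + 12) & 0x1ff == 32.  The vnode pointer is
 * shifted right by 8 because its low bits vary little between
 * pool-allocated vnodes and would otherwise cluster the chains.
 */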

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/*
 * Insq/Remq for the buffer free lists.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

static __inline struct buf *bio_doread __P((struct vnode *, daddr_t, int,
					    struct ucred *, int));
int count_lock_queue __P((void));

void
bremfree(bp)
	struct buf *bp;
{
	int s = splbio();

	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_freelist.tqe_next == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
	splx(s);
}
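
/*
 * Illustrative sketch (not part of the original source): the classic
 * TAILQ_REMOVE() only dereferences the queue head when the removed
 * element is the last one, since only then must tqh_last be reset:
 *
 *	if (bp->b_freelist.tqe_next != NULL)
 *		bp->b_freelist.tqe_next->b_freelist.tqe_prev =
 *		    bp->b_freelist.tqe_prev;
 *	else
 *		dp->tqh_last = bp->b_freelist.tqe_prev;
 *	*bp->b_freelist.tqe_prev = bp->b_freelist.tqe_next;
 *
 * That is the implementation detail bremfree() relies on when it
 * passes a NULL head for non-tail removals.
 */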

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	struct buf *bp;
	struct bqueues *dp;
	int i;
	int base, residual;

	/*
	 * Initialize the buffer pool.  This pool is used for buffers
	 * which are strictly I/O control blocks, not buffer cache
	 * buffers.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", 0,
	    NULL, NULL, M_DEVBUF);

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		memset((char *)bp, 0, sizeof(*bp));
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		LIST_INIT(&bp->b_dep);
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * PAGE_SIZE;
		else
			bp->b_bufsize = base * PAGE_SIZE;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}
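
/*
 * Worked example (illustrative figures, not part of the original
 * source): with bufpages == 100 and nbuf == 32, base == 3 and
 * residual == 4, so the first 4 buffer headers start with 4 pages of
 * memory each and the remaining 28 start with 3 pages each:
 * 4*4 + 28*3 == 100, accounting for every page in the cache.
 */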

static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	struct buf *bp;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */

	bp = getblk(vp, blkno, size, 0, 0);

	/*
	 * If the buffer does not contain valid data, start a read.
	 * Note that if the buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * Delayed write buffers are found in the cache and have
	 * valid contents. Also, B_ERROR is not set, otherwise
	 * getblk() would not have returned them.
	 */
	if (ISSET(bp->b_flags, B_DONE|B_DELWRI))
		return (0);

	/*
	 * Otherwise, we had to start a read for it; wait until
	 * it's valid and return the result.
	 */
	return (biowait(bp));
}
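
/*
 * Example (illustrative sketch, not part of the original source): the
 * typical file system read path.  "vp", "lbn" and "bsize" are assumed
 * to come from the caller; NOCRED is used where no credential applies.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... copy out of bp->b_data ...
 *	brelse(bp);
 */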

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/*
	 * Delayed write buffers are found in the cache and have
	 * valid contents. Also, B_ERROR is not set, otherwise
	 * getblk() would not have returned them.
	 */
	if (ISSET(bp->b_flags, B_DONE|B_DELWRI))
		return (0);

	/*
	 * Otherwise, we had to start a read for it; wait until
	 * it's valid and return the result.
	 */
	return (biowait(bp));
}
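
/*
 * Example (illustrative sketch, not part of the original source):
 * reading block "lbn" while prefetching the next two blocks of a
 * sequentially-read file.  Only the first block is waited for.
 *
 *	daddr_t ra[2];
 *	int rasize[2];
 *	struct buf *bp;
 *
 *	ra[0] = lbn + 1;	rasize[0] = bsize;
 *	ra[1] = lbn + 2;	rasize[1] = bsize;
 *	error = breadn(vp, lbn, bsize, ra, rasize, 2, NOCRED, &bp);
 */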

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56).
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed, s;
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	struct vnode *vp;
	struct mount *mp;

	vp = bp->b_vp;
	if (vp != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	s = splbio();

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	bp->b_vp->v_numoutput++;
	splx(s);

	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}
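
/*
 * Example (illustrative sketch, not part of the original source):
 * modifying a metadata block and writing it back synchronously.
 * Note that bwrite() consumes the buffer in either case: it
 * brelse()s after biowait() when synchronous, and biodone() will
 * release it when asynchronous.
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error == 0) {
 *		... modify bp->b_data ...
 *		error = bwrite(bp);
 *	} else
 *		brelse(bp);
 */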

int
vn_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	int s;

	/* If this is a tape block, write the block now. */
	/* XXX NOTE: the memory filesystem usurps major device */
	/* XXX       number 255, which is a bad idea.		*/
	if (bp->b_dev != NODEV &&
	    major(bp->b_dev) != 255 &&	/* XXX - MFS buffers! */
	    bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_NEEDCOMMIT|B_DONE);
	splx(s);

	brelse(bp);
}
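
/*
 * Example (illustrative sketch, not part of the original source): a
 * small write that only partially fills the block and is likely to
 * be extended soon, so the disk write is deferred via bdwrite()
 * rather than forced out immediately.
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error == 0) {
 *		memcpy((char *)bp->b_data + off, src, len);
 *		bdwrite(bp);
 *	} else
 *		brelse(bp);
 */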

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC);
	VOP_BWRITE(bp);
}

/*
 * Ordered block write; asynchronous, but I/O will occur in order queued.
 */
void
bowrite(bp)
	struct buf *bp;
{

	SET(bp->b_flags, B_ASYNC | B_ORDERED);
	VOP_BWRITE(bp);
}

/*
 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
 */
void
bdirty(bp)
	struct buf *bp;
{
	struct proc *p = (curproc != NULL ? curproc : &proc0);	/* XXX */
	int s;

	s = splbio();

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	splx(s);
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Block disk interrupts. */
	s = splbio();

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE))
			goto already_queued;
		else
			bremfree(bp);
	}

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If the buf is AGE but has dependencies, it must be put on
		 * the last bufqueue to be scanned, i.e. LRU.  This protects
		 * against the livelock where BQ_AGE only has buffers with
		 * dependencies, and we thus never get to the dependent
		 * buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE|B_ORDERED);
	SET(bp->b_flags, B_CACHE);

	/* Allow disk interrupts. */
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid; buffers marked
 * B_INVAL are never returned by this routine.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	bp = BUFHASH(vp, blkno)->lh_first;

	/* Search hash chain */
	for (; bp != NULL; bp = bp->b_hash.le_next) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}
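
/*
 * Example (illustrative sketch, not part of the original source):
 * checking the cache before issuing a read-ahead, as breadn() does.
 * incore() does not mark the buffer busy, so the answer is only a
 * hint; the buffer may be reclaimed as soon as other code runs at
 * splbio().
 *
 *	if (incore(vp, lbn + 1) == NULL)
 *		... start an asynchronous read of lbn + 1 ...
 */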

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset.  If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it.  Otherwise, return an empty block of the
 * correct size.  It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct buf *bp;
	int s, err;

start:
	bp = incore(vp, blkno);
	if (bp != NULL) {
		s = splbio();
		if (ISSET(bp->b_flags, B_BUSY)) {
			if (curproc == uvm.pagedaemon_proc) {
				splx(s);
				return NULL;
			}
			SET(bp->b_flags, B_WANTED);
			err = tsleep(bp, slpflag | (PRIBIO + 1), "getblk",
				     slptimeo);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) && bp->b_bcount < size)
			panic("getblk: block size invariant failed");
#endif
		SET(bp->b_flags, B_BUSY);
		bremfree(bp);
		splx(s);
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL)
			goto start;

		binshash(bp, BUFHASH(vp, blkno));
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		s = splbio();
		bgetvp(vp, bp);
		splx(s);
	}
	allocbuf(bp, size);
	return (bp);
}
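
/*
 * Example (illustrative sketch, not part of the original source):
 * allocating a brand-new block, as a file system does when extending
 * a file.  getblk() returns the buffer busy but with stale contents,
 * so the caller clears it instead of reading it from disk.
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	memset(bp->b_data, 0, bsize);
 *	... fill in the new data ...
 *	bdwrite(bp);
 */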

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;

	while ((bp = getnewbuf(0, 0)) == NULL)
		;
	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	allocbuf(bp, size);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf *nbp;
	vsize_t desired_size;
	int s;

	desired_size = round_page((vsize_t)size);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;

		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
			 bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif

		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		if ((nbp = bufqueues[BQ_EMPTY].tqh_first) == NULL) {
			/* No free buffer head */
			splx(s);
			goto out;
		}
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY);
		splx(s);

		/* move the excess pages to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;
		SET(nbp->b_flags, B_INVAL);

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}
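
/*
 * Worked example (illustrative figures, not part of the original
 * source): growing a 4096-byte buffer to 12288 bytes with
 * PAGE_SIZE == 4096.  desired_size is 12288, so the loop steals
 * pages from victim buffers taken off the free lists; a victim
 * holding 16384 bytes gives up exactly
 * amt == min(16384, 12288 - 4096) == 8192 bytes, its own b_bufsize
 * drops to 8192, and it is brelse()'d back onto BQ_AGE marked
 * B_INVAL.
 */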

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	struct buf *bp;
	int s;

start:
	s = splbio();
	if ((bp = bufqueues[BQ_AGE].tqh_first) != NULL ||
	    (bp = bufqueues[BQ_LRU].tqh_first) != NULL) {
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf", slptimeo);
		splx(s);
		return (NULL);
	}

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		splx(s);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		splx(s);
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		bawrite(bp);
		return (NULL);
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);
	splx(s);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	bremhash(bp);
	return (bp);
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s;

	s = splbio();
	while (!ISSET(bp->b_flags, B_DONE))
		tsleep(bp, PRIBIO + 1, "biowait", 0);
	splx(s);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		return (EINTR);
	} else if (ISSET(bp->b_flags, B_ERROR))
		return (bp->b_error ? bp->b_error : EIO);
	else
		return (0);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. by the pageout
 * daemon, do so.  Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., say on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (For the swap pager, that puts swap buffers on the free lists (!!!);
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	int s = splbio();

	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* a write completed */
		vwakeup(bp);

	if (ISSET(bp->b_flags, B_CALL)) {	/* if necessary, call out */
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC))	/* if async, release */
			brelse(bp);
		else {				/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
		}
	}

	splx(s);
}
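
/*
 * Example (illustrative sketch, not part of the original source): the
 * bottom half of a disk driver completing a transfer from interrupt
 * context.  biodone() then either wakes a biowait()er or, for B_ASYNC
 * buffers, brelse()s the buffer itself.
 *
 *	void
 *	xxdone(struct buf *bp, int error)
 *	{
 *		if (error) {
 *			bp->b_error = error;
 *			bp->b_flags |= B_ERROR;
 *		}
 *		bp->b_resid = 0;
 *		biodone(bp);
 *	}
 */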

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	struct buf *bp;
	int n = 0;

	for (bp = bufqueues[BQ_LOCKED].tqh_first; bp;
	    bp = bp->b_freelist.tqe_next)
		n++;
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		s = splbio();
		for (bp = dp->tqh_first; bp; bp = bp->b_freelist.tqe_next) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */
    992