/*	$NetBSD: vfs_bio.c,v 1.92 2003/04/09 12:55:51 yamt Exp $	*/

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include "opt_softdep.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.92 2003/04/09 12:55:51 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
#ifndef SOFTDEP
struct bio_ops bioops;	/* I/O operation notification */
#endif

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer queue lock.
 * Take this lock first if also taking some buffer's b_interlock.
 */
struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;

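/*
 * A sketch of the resulting lock/unlock order, as used by brelse()
 * below (splbio() blocks disk interrupts for the duration):
 *
 *	s = splbio();
 *	simple_lock(&bqueue_slock);
 *	simple_lock(&bp->b_interlock);
 *	...
 *	simple_unlock(&bp->b_interlock);
 *	simple_unlock(&bqueue_slock);
 *	splx(s);
 */
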
/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/*
 * bread()/breadn() helper.
 */
static __inline struct buf *bio_doread(struct vnode *, daddr_t, int,
					struct ucred *, int);
int count_lock_queue(void);

/*
 * Insq/Remq for the buffer free lists.
 * Call with buffer queue locked.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (TAILQ_NEXT(bp, b_freelist) == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	struct buf *bp;
	struct bqueues *dp;
	u_int i, base, residual;

	/*
	 * Initialize the buffer pool.  This pool is used for buffers
	 * which are strictly I/O control blocks, not buffer cache
	 * buffers.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		memset((char *)bp, 0, sizeof(*bp));
		BUF_INIT(bp);
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * PAGE_SIZE;
		else
			bp->b_bufsize = base * PAGE_SIZE;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}

static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	struct buf *bp;
	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	bp = getblk(vp, blkno, size, 0, 0);

#ifdef DIAGNOSTIC
	if (bp == NULL) {
		panic("bio_doread: no such buf");
	}
#endif

	/*
	 * If buffer does not have data valid, start a read.
	 * Note that if buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}

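/*
 * Example usage (a sketch; vp, lbn and bsize stand in for a vnode,
 * a logical block number and the file system block size):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	... inspect bp->b_data ...
 *	brelse(bp);
 */
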
/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Otherwise, we had to start a read for it; wait until it's valid. */
	return (biowait(bp));
}

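/*
 * Example (a sketch; reads lbn synchronously and hints one block of
 * read-ahead, which mirrors what breada() does below):
 *
 *	daddr_t ra = lbn + 1;
 *	int rasize = bsize;
 *
 *	error = breadn(vp, lbn, bsize, &ra, &rasize, 1, NOCRED, &bp);
 */
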
/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed, s;
	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct vnode *vp;
	struct mount *mp;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	vp = bp->b_vp;
	if (vp != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	s = splbio();
	simple_lock(&bp->b_interlock);

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	V_INCR_NUMOUTPUT(bp->b_vp);
	simple_unlock(&bp->b_interlock);
	splx(s);

	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	const struct bdevsw *bdev;
	int s;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	/* If this is a tape block, write the block now. */
	bdev = bdevsw_lookup(bp->b_dev);
	if (bdev != NULL && bdev->d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();
	simple_lock(&bp->b_interlock);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_DONE);
	simple_unlock(&bp->b_interlock);
	splx(s);

	brelse(bp);
}

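/*
 * Example (a sketch of the read-modify-write pattern bdwrite() is
 * meant for; off, src and len are hypothetical):
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	memcpy((char *)bp->b_data + off, src, len);
 *	bdwrite(bp);
 *
 * The data reaches the disk later, when the buffer is reclaimed
 * or flushed.
 */
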
/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{
	int s;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	s = splbio();
	simple_lock(&bp->b_interlock);
	SET(bp->b_flags, B_ASYNC);
	simple_unlock(&bp->b_interlock);
	splx(s);
	VOP_BWRITE(bp);
}

/*
 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
 * Call at splbio() and with the buffer interlock locked.
 * Note: called only from biodone() through ffs softdep's bioops.io_complete()
 */
void
bdirty(bp)
	struct buf *bp;
{
	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	KASSERT(ISSET(bp->b_flags, B_BUSY));
	LOCK_ASSERT(simple_lock_held(&bp->b_interlock));

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	/* Block disk interrupts. */
	s = splbio();
	simple_lock(&bqueue_slock);
	simple_lock(&bp->b_interlock);

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE))
			goto already_queued;
		else
			bremfree(bp);
	}

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If buf is AGE, but has dependencies, must put it on last
		 * bufqueue to be scanned, ie LRU. This protects against the
		 * livelock where BQ_AGE only has buffers with dependencies,
		 * and we thus never get to the dependent buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
	SET(bp->b_flags, B_CACHE);

	/* Allow disk interrupts. */
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid; buffers marked
 * invalid are never returned.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	/* Search hash chain */
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct buf *bp;
	int s, err;

start:
	s = splbio();
	simple_lock(&bqueue_slock);
	bp = incore(vp, blkno);
	if (bp != NULL) {
		simple_lock(&bp->b_interlock);
		if (ISSET(bp->b_flags, B_BUSY)) {
			simple_unlock(&bqueue_slock);
			if (curproc == uvm.pagedaemon_proc) {
				simple_unlock(&bp->b_interlock);
				splx(s);
				return NULL;
			}
			SET(bp->b_flags, B_WANTED);
			err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
					"getblk", slptimeo, &bp->b_interlock);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
		    bp->b_bcount < size && vp->v_type != VBLK)
			panic("getblk: block size invariant failed");
#endif
		SET(bp->b_flags, B_BUSY);
		bremfree(bp);
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL) {
			simple_unlock(&bqueue_slock);
			splx(s);
			goto start;
		}

		binshash(bp, BUFHASH(vp, blkno));
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		bgetvp(vp, bp);
	}
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
	allocbuf(bp, size);
	return (bp);
}

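/*
 * Example (a sketch of overwriting a whole block without reading it
 * first; src is hypothetical, and filling the buffer is the caller's
 * job, per the note above):
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	memcpy(bp->b_data, src, bsize);
 *	error = bwrite(bp);
 */
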
/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	while ((bp = getnewbuf(0, 0)) == 0)
		;

	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	simple_unlock(&bqueue_slock);
	simple_unlock(&bp->b_interlock);
	splx(s);
	allocbuf(bp, size);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf *nbp;
	vsize_t desired_size;
	int s;

	desired_size = round_page((vsize_t)size);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		s = splbio();
		simple_lock(&bqueue_slock);
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;

		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		simple_unlock(&nbp->b_interlock);
		simple_unlock(&bqueue_slock);
		splx(s);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
			 bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif
		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		simple_lock(&bqueue_slock);
		if ((nbp = TAILQ_FIRST(&bufqueues[BQ_EMPTY])) == NULL) {
			/* No free buffer head */
			simple_unlock(&bqueue_slock);
			splx(s);
			goto out;
		}
		/* No need to lock nbp since it came from the empty queue */
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY | B_INVAL);
		simple_unlock(&bqueue_slock);
		splx(s);

		/* move the page to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Called with buffer queues locked.
 * Return buffer locked.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	struct buf *bp;

start:
	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE])) != NULL ||
	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU])) != NULL) {
		simple_lock(&bp->b_interlock);
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		ltsleep(&needbuffer, slpflag|(PRIBIO+1),
			"getnewbuf", slptimeo, &bqueue_slock);
		return (NULL);
	}

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		simple_unlock(&bqueue_slock);
		bawrite(bp);
		simple_lock(&bqueue_slock);
		return (NULL);
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	bremhash(bp);
	return (bp);
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s, error;

	s = splbio();
	simple_lock(&bp->b_interlock);
	while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
		ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		error = EINTR;
	} else if (ISSET(bp->b_flags, B_ERROR))
		error = bp->b_error ? bp->b_error : EIO;
	else
		error = 0;

	simple_unlock(&bp->b_interlock);
	splx(s);
	return (error);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	int s = splbio();

	simple_lock(&bp->b_interlock);
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* wake up reader */
		vwakeup(bp);

	/*
	 * If necessary, call out.  Unlock the buffer before calling
	 * iodone() as the buffer isn't valid any more when it returns.
	 */
	if (ISSET(bp->b_flags, B_CALL)) {
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		simple_unlock(&bp->b_interlock);
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release */
			simple_unlock(&bp->b_interlock);
			brelse(bp);
		} else {			/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
			simple_unlock(&bp->b_interlock);
		}
	}

	splx(s);
}

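/*
 * Example (a sketch of the B_CALL path above; mydone is a hypothetical
 * completion handler, invoked from biodone() at splbio()):
 *
 *	bp->b_iodone = mydone;
 *	SET(bp->b_flags, B_CALL | B_ASYNC | B_READ);
 *	VOP_STRATEGY(bp);
 *
 * The handler is then responsible for releasing the buffer itself.
 */
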
/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	struct buf *bp;
	int n = 0;

	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist)
		n++;
	simple_unlock(&bqueue_slock);
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		s = splbio();
		TAILQ_FOREACH(bp, dp, b_freelist) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */