/*	$NetBSD: vfs_bio.c,v 1.106 2004/01/09 06:26:15 tls Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison-Wesley, 1989)
 */

#include "opt_bufcache.h"
#include "opt_softdep.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.106 2004/01/09 06:26:15 tls Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/sysctl.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

#ifndef	BUFPAGES
# define BUFPAGES 0
#endif

#ifdef BUFCACHE
# if (BUFCACHE < 5) || (BUFCACHE > 95)
#  error BUFCACHE is not between 5 and 95
# endif
#else
# define BUFCACHE 20
#endif

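/*
 * Example kernel configuration (an illustrative sketch; the values are
 * arbitrary).  Both options reach this file through opt_bufcache.h:
 *
 *	options BUFCACHE=25	# allow up to 25% of RAM for the cache
 *	options BUFPAGES=2000	# or hardwire an absolute page count
 */
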
u_int	nbuf;			/* XXX - for softdep_lockedbufs */
u_int	bufpages = BUFPAGES;	/* optional hardwired count */
u_int	bufcache = BUFCACHE;	/* max % of RAM to use for buffer cache */


/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
#ifndef SOFTDEP
struct bio_ops bioops;	/* I/O operation notification */
#endif

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		3		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer queue lock.
 * Take this lock first if also taking some buffer's b_interlock.
 */
struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/* XXX - somewhat gross.. */
#if MAXBSIZE == 0x2000
#define NMEMPOOLS 4
#elif MAXBSIZE == 0x4000
#define NMEMPOOLS 5
#elif MAXBSIZE == 0x8000
#define NMEMPOOLS 6
#else
#define NMEMPOOLS 7
#endif

#define MEMPOOL_INDEX_OFFSET 10		/* smallest pool is 1k */
#if (1 << (NMEMPOOLS + MEMPOOL_INDEX_OFFSET - 1)) != MAXBSIZE
#error update vfs_bio buffer memory parameters
#endif

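/*
 * Worked example (illustrative): pool i holds buffers of
 * 1 << (i + MEMPOOL_INDEX_OFFSET) bytes, so with MAXBSIZE == 0x8000 the
 * NMEMPOOLS == 6 pools hold 1k, 2k, 4k, 8k, 16k and 32k buffers, and the
 * largest pool size 1 << (6 + 10 - 1) == 0x8000 matches MAXBSIZE exactly,
 * as the check above demands.
 */
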
/* Buffer memory pools */
static struct pool bmempools[NMEMPOOLS];

struct vm_map *buf_map;

/*
 * Buffer memory pool allocator.
 */
static void *
bufpool_page_alloc(struct pool *pp, int flags)
{
	return (void *)uvm_km_kmemalloc1(buf_map,
					uvm.kernel_object, MAXBSIZE, MAXBSIZE,
					UVM_UNKNOWN_OFFSET,
					(flags & PR_WAITOK) ? 0
							    : UVM_KMF_NOWAIT);
}

static void
bufpool_page_free(struct pool *pp, void *v)
{
	uvm_km_free(buf_map, (vaddr_t)v, MAXBSIZE);
}

static struct pool_allocator bufmempool_allocator = {
	bufpool_page_alloc, bufpool_page_free, MAXBSIZE,
};

/* Buffer memory management variables */
u_long bufmem_valimit;
u_long bufmem_hiwater;
u_long bufmem_lowater;
u_long bufmem;

/*
 * MD code can call this to set a hard limit on the amount
 * of virtual memory used by the buffer cache.
 */
int
buf_setvalimit(vsize_t sz)
{

	/* We need to accommodate at least NMEMPOOLS of MAXBSIZE each */
	if (sz < NMEMPOOLS * MAXBSIZE)
		return EINVAL;

	bufmem_valimit = sz;
	return 0;
}

static int buf_trim(void);

/*
 * bread()/breadn() helper.
 */
static __inline struct buf *bio_doread(struct vnode *, daddr_t, int,
					struct ucred *, int);
int count_lock_queue(void);

/*
 * Insq/Remq for the buffer free lists.
 * Call with buffer queue locked.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

#ifdef DEBUG
int debug_verify_freelist = 0;
static int checkfreelist(struct buf *bp, struct bqueues *dp)
{
	struct buf *b;
	TAILQ_FOREACH(b, dp, b_freelist) {
		if (b == bp)
			return 1;
	}
	return 0;
}
#endif

void
bremfree(struct buf *bp)
{
	struct bqueues *dp = NULL;

	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	KDASSERT(!debug_verify_freelist ||
		checkfreelist(bp, &bufqueues[BQ_AGE]) ||
		checkfreelist(bp, &bufqueues[BQ_LRU]) ||
		checkfreelist(bp, &bufqueues[BQ_LOCKED]) );

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 *
	 * We break the TAILQ abstraction in order to efficiently remove a
	 * buffer from its freelist without having to know exactly which
	 * freelist it is on.
	 */
	if (TAILQ_NEXT(bp, b_freelist) == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}

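/*
 * Illustrative sketch (not compiled) of the TAILQ internals relied on
 * above: for a queue whose last element is "bp", the head's tqh_last
 * points at bp's own tqe_next field, so comparing dp->tqh_last against
 * &bp->b_freelist.tqe_next identifies the queue that bp terminates:
 *
 *	struct bqueues *dp = &bufqueues[BQ_LRU];
 *	struct buf *bp = TAILQ_LAST(dp, bqueues);
 *	KASSERT(dp->tqh_last == &bp->b_freelist.tqe_next);
 */
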
u_long
buf_memcalc(void)
{
	u_long n;

	/*
	 * Determine the upper bound of memory to use for buffers.
	 *
	 *	- If bufpages is specified, use that as the number
	 *	  of pages.
	 *
	 *	- Otherwise, use bufcache as the percentage of
	 *	  physical memory.
	 */
	if (bufpages != 0) {
		n = bufpages;
	} else {
		if (bufcache < 5) {
			printf("forcing bufcache %d -> 5\n", bufcache);
			bufcache = 5;
		}
		if (bufcache > 95) {
			printf("forcing bufcache %d -> 95\n", bufcache);
			bufcache = 95;
		}
		n = physmem / 100 * bufcache;
	}

	n <<= PAGE_SHIFT;
	if (bufmem_valimit != 0 && n > bufmem_valimit)
		n = bufmem_valimit;

	return (n);
}

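/*
 * Worked example (illustrative, assuming 4KB pages): with physmem ==
 * 32768 pages (128MB) and the default bufcache == 20, the integer
 * arithmetic gives n = 32768 / 100 * 20 == 6540 pages, which the
 * PAGE_SHIFT above turns into roughly 25.5MB, subject to any
 * bufmem_valimit clamp.
 */
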
/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit(void)
{
	struct bqueues *dp;
	int smallmem;
	u_int i;

	/*
	 * Initialize buffer cache memory parameters.
	 */
	bufmem = 0;
	bufmem_hiwater = buf_memcalc();
	/* lowater is approx. 2% of memory (with bufcache=30) */
	bufmem_lowater = (bufmem_hiwater >> 4);
	if (bufmem_lowater < 64 * 1024)
		/* Ensure a reasonable minimum value */
		bufmem_lowater = 64 * 1024;

	if (bufmem_valimit != 0) {
		vaddr_t minaddr = 0, maxaddr;
		buf_map = uvm_km_suballoc(kernel_map, &minaddr, &maxaddr,
					  bufmem_valimit, VM_MAP_PAGEABLE,
					  FALSE, 0);
		if (buf_map == NULL)
			panic("bufinit: cannot allocate submap");
	} else
		buf_map = kernel_map;

	/*
	 * Initialize the buffer pools.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);

	/* On "small" machines use small pool page sizes where possible */
	smallmem = (physmem < atop(16*1024*1024));

	for (i = 0; i < NMEMPOOLS; i++) {
		struct pool_allocator *pa;
		struct pool *pp = &bmempools[i];
		u_int size = 1 << (i + MEMPOOL_INDEX_OFFSET);
		char *name = malloc(8, M_TEMP, M_WAITOK);
		snprintf(name, 8, "buf%dk", 1 << i);
		pa = (size <= PAGE_SIZE && smallmem)
			? &pool_allocator_nointr
			: &bufmempool_allocator;
		pool_init(pp, size, 0, 0, 0, name, pa);
		pool_setlowat(pp, 1);
	}

	/* Initialize the buffer queues */
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);

	/*
	 * Estimate hash table size based on the amount of memory we
	 * intend to use for the buffer cache. The average buffer
	 * size is dependent on our clients (i.e. filesystems).
	 *
	 * For now, use an empirical 3K per buffer.
	 */
	nbuf = (bufmem_hiwater / 1024) / 3;
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
}

static int
buf_lotsfree(void)
{
	return (bufmem < bufmem_lowater ||
		(bufmem < bufmem_hiwater && uvmexp.free > 2*uvmexp.freetarg));
}

/*
 * Return estimate of # of buffers we think need to be
 * released to help resolve low memory conditions.
 */
static int
buf_canrelease(void)
{
	int n;

	if (bufmem < bufmem_lowater)
		return 0;

	n = uvmexp.freetarg - uvmexp.free;
	if (n < 0)
		n = 0;
	return 2*n;
}

/*
 * Buffer memory allocation helper functions
 */
static __inline u_long
buf_mempoolidx(u_long size)
{
	u_int n = 0;

	size -= 1;
	size >>= MEMPOOL_INDEX_OFFSET;
	while (size) {
		size >>= 1;
		n += 1;
	}
	if (n >= NMEMPOOLS)
		panic("buf mem pool index %d", n);
	return n;
}

static __inline u_long
buf_roundsize(u_long size)
{
	/* Round up to nearest power of 2 */
	return (1 << (buf_mempoolidx(size) + MEMPOOL_INDEX_OFFSET));
}

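/*
 * Worked example (illustrative): for size = 3000, buf_mempoolidx()
 * computes (3000 - 1) >> 10 == 2, then needs two right shifts to reach
 * zero, giving index 2; buf_roundsize() therefore returns
 * 1 << (2 + 10) == 4096, i.e. the 4k pool.
 */
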
static __inline caddr_t
buf_malloc(size_t size)
{
	u_int n = buf_mempoolidx(size);
	caddr_t addr;
	int s;

	while (1) {
		addr = pool_get(&bmempools[n], PR_NOWAIT);
		if (addr != NULL)
			break;

		/* No memory, see if we can free some. If so, try again */
		if (buf_drain(1) > 0)
			continue;

		/* Wait for buffers to arrive on the LRU queue */
		s = splbio();
		simple_lock(&bqueue_slock);
		needbuffer = 1;
		ltsleep(&needbuffer, PNORELOCK | (PRIBIO+1),
			"buf_malloc", 0, &bqueue_slock);
		splx(s);
	}

	return addr;
}

static void
buf_mrelease(caddr_t addr, size_t size)
{

	pool_put(&bmempools[buf_mempoolidx(size)], addr);
	pool_reclaim(&bmempools[buf_mempoolidx(size)]);
}


static __inline struct buf *
bio_doread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
    int async)
{
	struct buf *bp;
	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	bp = getblk(vp, blkno, size, 0, 0);

#ifdef DIAGNOSTIC
	if (bp == NULL) {
		panic("bio_doread: no such buf");
	}
#endif

	/*
	 * If the buffer's data is not valid, start a read.
	 * Note that if the buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(struct vnode *vp, daddr_t blkno, int size, struct ucred *cred,
    struct buf **bpp)
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}

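/*
 * Example usage (an illustrative sketch only; "vp", "lbn" and "bsize"
 * are assumed to be supplied by the caller):
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...examine bp->b_data...
 *	brelse(bp);
 */
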
/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(struct vnode *vp, daddr_t blkno, int size, daddr_t *rablks,
    int *rasizes, int nrablks, struct ucred *cred, struct buf **bpp)
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Otherwise, we had to start a read for it; wait until it's valid. */
	return (biowait(bp));
}

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(struct vnode *vp, daddr_t blkno, int size, daddr_t rablkno,
    int rabsize, struct ucred *cred, struct buf **bpp)
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(struct buf *bp)
{
	int rv, sync, wasdelayed, s;
	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct vnode *vp;
	struct mount *mp;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	vp = bp->b_vp;
	if (vp != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	s = splbio();
	simple_lock(&bp->b_interlock);

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	V_INCR_NUMOUTPUT(bp->b_vp);
	simple_unlock(&bp->b_interlock);
	splx(s);

	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(void *v)
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(struct buf *bp)
{
	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	const struct bdevsw *bdev;
	int s;

	/* If this is a tape block, write the block now. */
	bdev = bdevsw_lookup(bp->b_dev);
	if (bdev != NULL && bdev->d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_DONE);
	simple_unlock(&bp->b_interlock);
	splx(s);

	brelse(bp);
}

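/*
 * Example usage contrasting the write paths (an illustrative sketch
 * only; "bp" is assumed to be a B_BUSY buffer obtained from bread() or
 * getblk() and already modified by the caller):
 *
 *	error = bwrite(bp);	(synchronous: start I/O, wait, release)
 *	bawrite(bp);		(asynchronous: start I/O, release on I/O done)
 *	bdwrite(bp);		(delayed: mark B_DELWRI, release, write later)
 */
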
/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(struct buf *bp)
{
	int s;

	s = splbio();
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	SET(bp->b_flags, B_ASYNC);
	simple_unlock(&bp->b_interlock);
	splx(s);
	VOP_BWRITE(bp);
}

/*
 * Same as first half of bdwrite, mark buffer dirty, but do not release it.
 * Call at splbio() and with the buffer interlock locked.
 * Note: called only from biodone() through ffs softdep's bioops.io_complete()
 */
void
bdirty(struct buf *bp)
{
	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	LOCK_ASSERT(simple_lock_held(&bp->b_interlock));
	KASSERT(ISSET(bp->b_flags, B_BUSY));

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(struct buf *bp)
{
	struct bqueues *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();
	simple_lock(&bqueue_slock);
	simple_lock(&bp->b_interlock);

	KASSERT(ISSET(bp->b_flags, B_BUSY));
	KASSERT(!ISSET(bp->b_flags, B_CALL));

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE)) {
			KDASSERT(!debug_verify_freelist ||
				checkfreelist(bp, &bufqueues[BQ_LRU]));
			goto already_queued;
		} else {
			bremfree(bp);
		}
	}

	KDASSERT(!debug_verify_freelist ||
		!checkfreelist(bp, &bufqueues[BQ_AGE]));
	KDASSERT(!debug_verify_freelist ||
		!checkfreelist(bp, &bufqueues[BQ_LRU]));
	KDASSERT(!debug_verify_freelist ||
		!checkfreelist(bp, &bufqueues[BQ_LOCKED]));

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			goto already_queued;
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If buf is AGE, but has dependencies, must put it on last
		 * bufqueue to be scanned, ie LRU. This protects against the
		 * livelock where BQ_AGE only has buffers with dependencies,
		 * and we thus never get to the dependent buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
	SET(bp->b_flags, B_CACHE);

	/* Allow disk interrupts. */
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	if (bp->b_bufsize <= 0) {
#ifdef DEBUG
		memset((char *)bp, 0, sizeof(*bp));
#endif
		pool_put(&bufpool, bp);
	}
	splx(s);
}

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(struct vnode *vp, daddr_t blkno)
{
	struct buf *bp;

	/* Search hash chain */
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that the
 * cached blocks are of the correct size.
 */
struct buf *
getblk(struct vnode *vp, daddr_t blkno, int size, int slpflag, int slptimeo)
{
	struct buf *bp;
	int s, err;
	int preserve;

start:
	s = splbio();
	simple_lock(&bqueue_slock);
	bp = incore(vp, blkno);
	if (bp != NULL) {
		simple_lock(&bp->b_interlock);
		if (ISSET(bp->b_flags, B_BUSY)) {
			simple_unlock(&bqueue_slock);
			if (curproc == uvm.pagedaemon_proc) {
				simple_unlock(&bp->b_interlock);
				splx(s);
				return NULL;
			}
			SET(bp->b_flags, B_WANTED);
			err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
					"getblk", slptimeo, &bp->b_interlock);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
		    bp->b_bcount < size && vp->v_type != VBLK)
			panic("getblk: block size invariant failed");
#endif
		SET(bp->b_flags, B_BUSY);
		bremfree(bp);
		preserve = 1;
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo, 0)) == NULL) {
			simple_unlock(&bqueue_slock);
			splx(s);
			goto start;
		}

		binshash(bp, BUFHASH(vp, blkno));
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		bgetvp(vp, bp);
		preserve = 0;
	}
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
	/*
	 * LFS can't track total size of B_LOCKED buffer (locked_queue_bytes)
	 * if we re-size buffers here.
	 */
	if (ISSET(bp->b_flags, B_LOCKED)) {
		KASSERT(bp->b_bufsize >= size);
	} else {
		allocbuf(bp, size, preserve);
	}
	return (bp);
}

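/*
 * Example usage (an illustrative sketch only; "vp", "lbn" and "bsize"
 * are assumed): allocating and dirtying a new metadata block without
 * first reading it from disk:
 *
 *	struct buf *bp;
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	clrbuf(bp);			(zero the new block's contents)
 *	...fill in bp->b_data...
 *	bdwrite(bp);
 */
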
/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(int size)
{
	struct buf *bp;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	while ((bp = getnewbuf(0, 0, 0)) == 0)
		;

	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	simple_unlock(&bqueue_slock);
	simple_unlock(&bp->b_interlock);
	splx(s);
	allocbuf(bp, size, 0);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(struct buf *bp, int size, int preserve)
{
	vsize_t oldsize, desired_size;
	caddr_t addr;
	int s, delta;

	desired_size = buf_roundsize(size);
	if (desired_size > MAXBSIZE)
		printf("allocbuf: buffer larger than MAXBSIZE requested\n");

	bp->b_bcount = size;

	oldsize = bp->b_bufsize;
	if (oldsize == desired_size)
		return;

	/*
	 * If we want a buffer of a different size, re-allocate the
	 * buffer's memory; copy old content only if needed.
	 */
	addr = buf_malloc(desired_size);
	if (preserve)
		memcpy(addr, bp->b_data, MIN(oldsize,desired_size));
	if (bp->b_data != NULL)
		buf_mrelease(bp->b_data, oldsize);
	bp->b_data = addr;
	bp->b_bufsize = desired_size;

	/*
	 * Update overall buffer memory counter (protected by bqueue_slock)
	 */
	delta = (long)desired_size - (long)oldsize;

	s = splbio();
	simple_lock(&bqueue_slock);
	if ((bufmem += delta) > bufmem_hiwater) {
		/*
		 * Need to trim overall memory usage.
		 */
		while (buf_canrelease()) {
			if (buf_trim() == 0)
				break;
		}
	}

	simple_unlock(&bqueue_slock);
	splx(s);
}

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Called at splbio and with buffer queues locked.
 * Return buffer locked.
 */
struct buf *
getnewbuf(int slpflag, int slptimeo, int from_bufq)
{
	struct buf *bp;

start:
	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	/*
	 * Get a new buffer from the pool; but use NOWAIT because
	 * we have the buffer queues locked.
	 */
	if (buf_lotsfree() && !from_bufq &&
	    (bp = pool_get(&bufpool, PR_NOWAIT)) != NULL) {
		memset((char *)bp, 0, sizeof(*bp));
		BUF_INIT(bp);
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_flags = B_BUSY;
		simple_lock(&bp->b_interlock);
		return (bp);
	}

	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE])) != NULL ||
	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU])) != NULL) {
		simple_lock(&bp->b_interlock);
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		ltsleep(&needbuffer, slpflag|(PRIBIO+1),
			"getnewbuf", slptimeo, &bqueue_slock);
		return (NULL);
	}

#ifdef DIAGNOSTIC
	if (bp->b_bufsize <= 0)
		panic("buffer %p: on queue but empty", bp);
#endif

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		simple_unlock(&bqueue_slock);
		bawrite(bp);
		simple_lock(&bqueue_slock);
		return (NULL);
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	bremhash(bp);
	return (bp);
}

/*
 * Attempt to free an aged buffer off the queues.
 * Called at splbio and with queue lock held.
 * Returns the amount of buffer memory freed.
 */
int
buf_trim(void)
{
	struct buf *bp;
	long size = 0;
	int wanted;

	/* Instruct getnewbuf() to get buffers off the queues */
	if ((bp = getnewbuf(PCATCH, 1, 1)) == NULL)
		return 0;

	wanted = ISSET(bp->b_flags, B_WANTED);
	simple_unlock(&bp->b_interlock);
	if (wanted) {
		printf("buf_trim: got WANTED buffer\n");
		SET(bp->b_flags, B_INVAL);
		binshash(bp, &invalhash);
		simple_unlock(&bqueue_slock);
		goto out;
	}
	size = bp->b_bufsize;
	bufmem -= size;
	simple_unlock(&bqueue_slock);
	if (size > 0) {
		buf_mrelease(bp->b_data, size);
		bp->b_bcount = bp->b_bufsize = 0;
	}

out:
	/* brelse() will return the buffer to the global buffer pool */
	brelse(bp);
	simple_lock(&bqueue_slock);
	return size;
}

int
buf_drain(int n)
{
	int s, size = 0;

	/* If not asked for a specific amount, make our own estimate */
	if (n == 0)
		n = buf_canrelease();

	s = splbio();
	simple_lock(&bqueue_slock);
	while (n-- > 0 && bufmem > bufmem_lowater)
		size += buf_trim();
	simple_unlock(&bqueue_slock);
	splx(s);
	return size;
}

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(struct buf *bp)
{
	int s, error;

	s = splbio();
	simple_lock(&bp->b_interlock);
	while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
		ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		error = EINTR;
	} else if (ISSET(bp->b_flags, B_ERROR))
		error = bp->b_error ? bp->b_error : EIO;
	else
		error = 0;

	simple_unlock(&bp->b_interlock);
	splx(s);
	return (error);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(struct buf *bp)
{
	int s = splbio();

	simple_lock(&bp->b_interlock);
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* wake up reader */
		vwakeup(bp);

	/*
	 * If necessary, call out.  Unlock the buffer before calling
	 * b_iodone() as the buffer isn't valid any more when it returns.
	 */
	if (ISSET(bp->b_flags, B_CALL)) {
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		simple_unlock(&bp->b_interlock);
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release */
			simple_unlock(&bp->b_interlock);
			brelse(bp);
		} else {			/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
			simple_unlock(&bp->b_interlock);
		}
	}

	splx(s);
}

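/*
 * Illustrative sketch of the B_CALL path above: a caller hands the
 * buffer a completion routine instead of sleeping in biowait().
 * "mydone" is a hypothetical function, not part of this file:
 *
 *	static void
 *	mydone(struct buf *bp)
 *	{
 *		...inspect bp->b_error / bp->b_resid, then dispose of bp...
 *	}
 *
 *	bp->b_iodone = mydone;
 *	SET(bp->b_flags, B_CALL | B_ASYNC);
 *	VOP_STRATEGY(bp);
 */
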
/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue(void)
{
	struct buf *bp;
	int n = 0;

	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist)
		n++;
	simple_unlock(&bqueue_slock);
	return (n);
}

/*
 * Wait for all buffers to complete I/O
 * Return the number of "stuck" buffers.
 */
int
buf_syncwait(void)
{
	struct buf *bp;
	int iter, nbusy, nbusy_prev = 0, dcount, s, ihash;

	dcount = 10000;
	for (iter = 0; iter < 20;) {
		s = splbio();
		simple_lock(&bqueue_slock);
		nbusy = 0;
		for (ihash = 0; ihash < bufhash+1; ihash++) {
		    LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				nbusy++;
			/*
			 * With soft updates, some buffers that are
			 * written will be remarked as dirty until other
			 * buffers are written.
			 */
			if (bp->b_vp && bp->b_vp->v_mount
			    && (bp->b_vp->v_mount->mnt_flag & MNT_SOFTDEP)
			    && (bp->b_flags & B_DELWRI)) {
				simple_lock(&bp->b_interlock);
				bremfree(bp);
				bp->b_flags |= B_BUSY;
				nbusy++;
				simple_unlock(&bp->b_interlock);
				simple_unlock(&bqueue_slock);
				bawrite(bp);
				if (dcount-- <= 0) {
					printf("softdep ");
					goto fail;
				}
				simple_lock(&bqueue_slock);
			}
		    }
		}

		simple_unlock(&bqueue_slock);
		splx(s);

		if (nbusy == 0)
			break;
		if (nbusy_prev == 0)
			nbusy_prev = nbusy;
		printf("%d ", nbusy);
		tsleep(&nbusy, PRIBIO, "bflush",
		    (iter == 0) ? 1 : hz / 25 * iter);
		if (nbusy >= nbusy_prev) /* we didn't flush anything */
			iter++;
		else
			nbusy_prev = nbusy;
	}

	if (nbusy) {
fail:;
#if defined(DEBUG) || defined(DEBUG_HALT_BUSY)
		printf("giving up\nPrinting vnodes for busy buffers\n");
		for (ihash = 0; ihash < bufhash+1; ihash++) {
		    LIST_FOREACH(bp, &bufhashtbl[ihash], b_hash) {
			if ((bp->b_flags & (B_BUSY|B_INVAL|B_READ)) == B_BUSY)
				vprint(NULL, bp->b_vp);
		    }
		}
#endif
	}

	return nbusy;
}

#define KERN_BUFSLOP 20
static int
sysctl_dobuf(SYSCTLFN_ARGS)
{
	struct buf *bp;
	char *dp;
	u_int i, elem_size;
	size_t len, buflen, needed;
	int error, s;

	dp = oldp;
	len = buflen = oldp != NULL ? *oldlenp : 0;
	error = 0;
	needed = 0;
	elem_size = sizeof(struct buf);

	s = splbio();
	simple_lock(&bqueue_slock);
	for (i = 0; i < BQUEUES; i++) {
		TAILQ_FOREACH(bp, &bufqueues[i], b_freelist) {
			if (len >= elem_size) {
				error = copyout(bp, dp, elem_size);
				if (error)
					goto cleanup;
				dp += elem_size;
				len -= elem_size;
			}
			needed += elem_size;
		}
	}
cleanup:
	simple_unlock(&bqueue_slock);
	splx(s);

	if (oldp != NULL) {
		*oldlenp = (char *)dp - (char *)oldp;
		if (needed > *oldlenp)
			error = ENOMEM;
	} else {
		needed += KERN_BUFSLOP;
		*oldlenp = needed;
	}

	return (error);
}

static int sysctlnum_bufcache, sysctlnum_bufmemhiwater, sysctlnum_bufmemlowater;

static int
sysctl_bufvm_update(SYSCTLFN_ARGS)
{
	int t, error;
	struct sysctlnode node;

	node = *rnode;
	node.sysctl_data = &t;
	t = *(int*)rnode->sysctl_data;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (rnode->sysctl_num == sysctlnum_bufcache) {
		if (t < 0 || t > 100)
			return (EINVAL);
		bufcache = t;
		bufmem_hiwater = buf_memcalc();
		bufmem_lowater = (bufmem_hiwater >> 4);
	} else if (rnode->sysctl_num == sysctlnum_bufmemlowater) {
		bufmem_lowater = t;
	} else if (rnode->sysctl_num == sysctlnum_bufmemhiwater) {
		bufmem_hiwater = t;
	} else
		return (EINVAL);

	/* Drain until below new high water mark */
	while ((t = bufmem - bufmem_hiwater) >= 0) {
		if (buf_drain(t / (2*1024)) <= 0)
			break;
	}

	return 0;
}

SYSCTL_SETUP(sysctl_kern_buf_setup, "sysctl kern.buf subtree setup")
{

	sysctl_createv(SYSCTL_PERMANENT,
		       CTLTYPE_NODE, "kern", NULL,
		       NULL, 0, NULL, 0,
		       CTL_KERN, CTL_EOL);
	sysctl_createv(SYSCTL_PERMANENT,
		       CTLTYPE_NODE, "buf", NULL,
		       sysctl_dobuf, 0, NULL, 0,
		       CTL_KERN, KERN_BUF, CTL_EOL);
}

SYSCTL_SETUP(sysctl_vm_buf_setup, "sysctl vm.buf* subtree setup")
{
	struct sysctlnode *rnode;

	sysctl_createv(SYSCTL_PERMANENT,
		       CTLTYPE_NODE, "vm", NULL,
		       NULL, 0, NULL, 0,
		       CTL_VM, CTL_EOL);

	rnode = NULL;
	if (sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
			   CTLTYPE_INT, "bufcache", &rnode,
			   sysctl_bufvm_update, 0, &bufcache, 0,
			   CTL_VM, CTL_CREATE, CTL_EOL) == 0)
		sysctlnum_bufcache = rnode->sysctl_num;

	rnode = NULL;
	if (sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
			   CTLTYPE_INT, "bufmem_lowater", &rnode,
			   sysctl_bufvm_update, 0, &bufmem_lowater, 0,
			   CTL_VM, CTL_CREATE, CTL_EOL) == 0)
		sysctlnum_bufmemlowater = rnode->sysctl_num;

	rnode = NULL;
	if (sysctl_createv(SYSCTL_PERMANENT|SYSCTL_READWRITE,
			   CTLTYPE_INT, "bufmem_hiwater", &rnode,
			   sysctl_bufvm_update, 0, &bufmem_hiwater, 0,
			   CTL_VM, CTL_CREATE, CTL_EOL) == 0)
		sysctlnum_bufmemhiwater = rnode->sysctl_num;
}

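/*
 * Example usage (illustrative) of the knobs created above, from
 * userland:
 *
 *	sysctl -w vm.bufcache=25
 *	sysctl -w vm.bufmem_hiwater=33554432
 *
 * Setting vm.bufcache recomputes both watermarks via buf_memcalc();
 * either vm.bufmem_* knob may also be set directly.  In all cases the
 * cache is then drained down toward the new high water mark.
 */
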
#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats(void)
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		s = splbio();
		TAILQ_FOREACH(bp, dp, b_freelist) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */