/*	$NetBSD: vfs_bio.c,v 1.94 2003/09/07 11:57:43 yamt Exp $	*/

/*-
 * Copyright (c) 1982, 1986, 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * (c) UNIX System Laboratories, Inc.
 * All or some portions of this file are derived from material licensed
 * to the University of California by American Telephone and Telegraph
 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
 * the permission of UNIX System Laboratories, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*-
 * Copyright (c) 1994 Christopher G. Demetriou
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vfs_bio.c	8.6 (Berkeley) 1/11/94
 */

/*
 * Some references:
 *	Bach: The Design of the UNIX Operating System (Prentice Hall, 1986)
 *	Leffler, et al.: The Design and Implementation of the 4.3BSD
 *		UNIX Operating System (Addison Wesley, 1989)
 */

#include "opt_softdep.h"

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: vfs_bio.c,v 1.94 2003/09/07 11:57:43 yamt Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/buf.h>
#include <sys/vnode.h>
#include <sys/mount.h>
#include <sys/malloc.h>
#include <sys/resourcevar.h>
#include <sys/conf.h>

#include <uvm/uvm.h>

#include <miscfs/specfs/specdev.h>

/* Macros to clear/set/test flags. */
#define	SET(t, f)	(t) |= (f)
#define	CLR(t, f)	(t) &= ~(f)
#define	ISSET(t, f)	((t) & (f))
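
/*
 * Example: SET(bp->b_flags, B_READ | B_ASYNC) marks a buffer for an
 * asynchronous read, ISSET(bp->b_flags, B_DONE) tests whether its I/O
 * has completed, and CLR(bp->b_flags, B_WANTED) clears a flag again.
 */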

/*
 * Definitions for the buffer hash lists.
 */
#define	BUFHASH(dvp, lbn)	\
	(&bufhashtbl[(((long)(dvp) >> 8) + (int)(lbn)) & bufhash])
LIST_HEAD(bufhashhdr, buf) *bufhashtbl, invalhash;
u_long	bufhash;
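
/*
 * Note that bufhash is a power-of-two-minus-one mask computed by
 * hashinit() in bufinit() below, so the `& bufhash' in BUFHASH()
 * selects a hash bucket from the vnode pointer and logical block
 * number.
 */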
#ifndef SOFTDEP
struct bio_ops bioops;	/* I/O operation notification */
#endif

/*
 * Insq/Remq for the buffer hash lists.
 */
#define	binshash(bp, dp)	LIST_INSERT_HEAD(dp, bp, b_hash)
#define	bremhash(bp)		LIST_REMOVE(bp, b_hash)

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];
int needbuffer;

/*
 * Buffer queue lock.
 * Take this lock first if also taking some buffer's b_interlock.
 */
struct simplelock bqueue_slock = SIMPLELOCK_INITIALIZER;
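
/*
 * A sketch of the resulting locking pattern, as used by brelse() and
 * getblk() below:
 *
 *	s = splbio();
 *	simple_lock(&bqueue_slock);
 *	simple_lock(&bp->b_interlock);
 *	... manipulate the free lists and bp->b_flags ...
 *	simple_unlock(&bp->b_interlock);
 *	simple_unlock(&bqueue_slock);
 *	splx(s);
 */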

/*
 * Buffer pool for I/O buffers.
 */
struct pool bufpool;

/*
 * bread()/breadn() helper.
 */
static __inline struct buf *bio_doread(struct vnode *, daddr_t, int,
					struct ucred *, int);
int count_lock_queue(void);

/*
 * Insq/Remq for the buffer free lists.
 * Call with buffer queue locked.
 */
#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)

void
bremfree(bp)
	struct buf *bp;
{
	struct bqueues *dp = NULL;

	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (TAILQ_NEXT(bp, b_freelist) == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			panic("bremfree: lost tail");
	}
	TAILQ_REMOVE(dp, bp, b_freelist);
}

/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit()
{
	struct buf *bp;
	struct bqueues *dp;
	u_int i, base, residual;

	/*
	 * Initialize the buffer pool.  This pool is used for buffers
	 * which are strictly I/O control blocks, not buffer cache
	 * buffers.
	 */
	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);

	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, HASH_LIST, M_CACHE, M_WAITOK, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
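
	/*
	 * Worked example: with bufpages = 250 and nbuf = 100, base = 2
	 * and residual = 50, so the first 50 buffers below get three
	 * pages of buffer memory each and the remaining 50 get two.
	 */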
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		memset((char *)bp, 0, sizeof(*bp));
		BUF_INIT(bp);
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		if (i < residual)
			bp->b_bufsize = (base + 1) * PAGE_SIZE;
		else
			bp->b_bufsize = base * PAGE_SIZE;
		bp->b_flags = B_INVAL;
		dp = bp->b_bufsize ? &bufqueues[BQ_AGE] : &bufqueues[BQ_EMPTY];
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}
}

static __inline struct buf *
bio_doread(vp, blkno, size, cred, async)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	int async;
{
	struct buf *bp;
	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	bp = getblk(vp, blkno, size, 0, 0);

#ifdef DIAGNOSTIC
	if (bp == NULL) {
		panic("bio_doread: no such buf");
	}
#endif

	/*
	 * If buffer does not have data valid, start a read.
	 * Note that if buffer is B_INVAL, getblk() won't return it.
	 * Therefore, it's valid if its I/O has completed or been delayed.
	 */
	if (!ISSET(bp->b_flags, (B_DONE | B_DELWRI))) {
		/* Start I/O for the buffer. */
		SET(bp->b_flags, B_READ | async);
		VOP_STRATEGY(bp);

		/* Pay for the read. */
		p->p_stats->p_ru.ru_inblock++;
	} else if (async) {
		brelse(bp);
	}

	return (bp);
}

/*
 * Read a disk block.
 * This algorithm is described in Bach (p.54).
 */
int
bread(vp, blkno, size, cred, bpp)
	struct vnode *vp;
	daddr_t blkno;
	int size;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;

	/* Get buffer for block. */
	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/* Wait for the read to complete, and return result. */
	return (biowait(bp));
}
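
/*
 * Example (sketch, illustrative only): the typical calling pattern
 * for bread(); `lbn' and `bsize' stand for a caller's logical block
 * number and block size.
 *
 *	struct buf *bp;
 *	int error;
 *
 *	error = bread(vp, lbn, bsize, NOCRED, &bp);
 *	if (error) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...examine bp->b_data...
 *	brelse(bp);
 */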

/*
 * Read-ahead multiple disk blocks. The first is sync, the rest async.
 * Trivial modification to the breada algorithm presented in Bach (p.55).
 */
int
breadn(vp, blkno, size, rablks, rasizes, nrablks, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablks[]; int rasizes[];
	int nrablks;
	struct ucred *cred;
	struct buf **bpp;
{
	struct buf *bp;
	int i;

	bp = *bpp = bio_doread(vp, blkno, size, cred, 0);

	/*
	 * For each of the read-ahead blocks, start a read, if necessary.
	 */
	for (i = 0; i < nrablks; i++) {
		/* If it's in the cache, just go on to next one. */
		if (incore(vp, rablks[i]))
			continue;

		/* Get a buffer for the read-ahead block */
		(void) bio_doread(vp, rablks[i], rasizes[i], cred, B_ASYNC);
	}

	/* Wait for the synchronous read of the first block to complete. */
	return (biowait(bp));
}
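
/*
 * Example (sketch, illustrative only): read block `lbn' synchronously
 * while starting asynchronous read-ahead of the following two blocks.
 *
 *	struct buf *bp;
 *	daddr_t ra[2] = { lbn + 1, lbn + 2 };
 *	int rasz[2] = { bsize, bsize };
 *	int error;
 *
 *	error = breadn(vp, lbn, bsize, ra, rasz, 2, NOCRED, &bp);
 */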

/*
 * Read with single-block read-ahead.  Defined in Bach (p.55), but
 * implemented as a call to breadn().
 * XXX for compatibility with old file systems.
 */
int
breada(vp, blkno, size, rablkno, rabsize, cred, bpp)
	struct vnode *vp;
	daddr_t blkno; int size;
	daddr_t rablkno; int rabsize;
	struct ucred *cred;
	struct buf **bpp;
{

	return (breadn(vp, blkno, size, &rablkno, &rabsize, 1, cred, bpp));
}

/*
 * Block write.  Described in Bach (p.56)
 */
int
bwrite(bp)
	struct buf *bp;
{
	int rv, sync, wasdelayed, s;
	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	struct vnode *vp;
	struct mount *mp;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	vp = bp->b_vp;
	if (vp != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
	} else {
		mp = NULL;
	}

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	sync = !ISSET(bp->b_flags, B_ASYNC);
	if (sync && mp != NULL && ISSET(mp->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if (mp != NULL) {
		if (sync)
			mp->mnt_stat.f_syncwrites++;
		else
			mp->mnt_stat.f_asyncwrites++;
	}

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);

	s = splbio();
	simple_lock(&bp->b_interlock);

	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	/*
	 * Pay for the I/O operation and make sure the buf is on the correct
	 * vnode queue.
	 */
	if (wasdelayed)
		reassignbuf(bp, bp->b_vp);
	else
		p->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	V_INCR_NUMOUTPUT(bp->b_vp);
	simple_unlock(&bp->b_interlock);
	splx(s);

	VOP_STRATEGY(bp);

	if (sync) {
		/* If I/O was synchronous, wait for it to complete. */
		rv = biowait(bp);

		/* Release the buffer. */
		brelse(bp);

		return (rv);
	} else {
		return (0);
	}
}

int
vn_bwrite(v)
	void *v;
{
	struct vop_bwrite_args *ap = v;

	return (bwrite(ap->a_bp));
}

/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(bp)
	struct buf *bp;
{
	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;
	const struct bdevsw *bdev;
	int s;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	/* If this is a tape block, write the block now. */
	bdev = bdevsw_lookup(bp->b_dev);
	if (bdev != NULL && bdev->d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list.
	 */
	s = splbio();
	simple_lock(&bp->b_interlock);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_DONE);
	simple_unlock(&bp->b_interlock);
	splx(s);

	brelse(bp);
}
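
/*
 * Example (sketch, illustrative only): a small update to a block that
 * is likely to be modified again soon, using the delayed write path.
 *
 *	if ((error = bread(vp, lbn, bsize, NOCRED, &bp)) != 0) {
 *		brelse(bp);
 *		return (error);
 *	}
 *	...modify a few bytes of bp->b_data...
 *	bdwrite(bp);		(marks B_DELWRI and releases the buffer)
 */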

/*
 * Asynchronous block write; just an asynchronous bwrite().
 */
void
bawrite(bp)
	struct buf *bp;
{
	int s;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	s = splbio();
	simple_lock(&bp->b_interlock);
	SET(bp->b_flags, B_ASYNC);
	simple_unlock(&bp->b_interlock);
	splx(s);
	VOP_BWRITE(bp);
}

/*
 * Same as the first half of bdwrite(): mark the buffer dirty, but do
 * not release it.  Call at splbio() and with the buffer interlock held.
 * Note: called only from biodone() through ffs softdep's bioops.io_complete().
 */
void
bdirty(bp)
	struct buf *bp;
{
	struct lwp *l  = (curlwp != NULL ? curlwp : &lwp0);	/* XXX */
	struct proc *p = l->l_proc;

	KASSERT(ISSET(bp->b_flags, B_BUSY));
	LOCK_ASSERT(simple_lock_held(&bp->b_interlock));

	CLR(bp->b_flags, B_AGE);

	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		p->p_stats->p_ru.ru_oublock++;
		reassignbuf(bp, bp->b_vp);
	}
}

/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(bp)
	struct buf *bp;
{
	struct bqueues *bufq;
	int s;

	KASSERT(ISSET(bp->b_flags, B_BUSY));

	/* Block disk interrupts. */
	s = splbio();
	simple_lock(&bqueue_slock);
	simple_lock(&bp->b_interlock);

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED|B_AGE);
		wakeup(bp);
	}

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer that was just flushed to
		 * disk.  It is still on the LRU queue.  If it's become
		 * invalid, then we need to move it to a different queue;
		 * otherwise leave it in its current position.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		if (!ISSET(bp->b_flags, B_ERROR|B_INVAL|B_LOCKED|B_AGE))
			goto already_queued;
		else
			bremfree(bp);
	}

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
			(*bioops.io_deallocate)(bp);
		CLR(bp->b_flags, B_DONE|B_DELWRI);
		if (bp->b_vp) {
			reassignbuf(bp, bp->b_vp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0)
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
		else
			/* invalid data */
			bufq = &bufqueues[BQ_AGE];
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 * If buf is AGE, but has dependencies, must put it on last
		 * bufqueue to be scanned, ie LRU. This protects against the
		 * livelock where BQ_AGE only has buffers with dependencies,
		 * and we thus never get to the dependent buffers in BQ_LRU.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else if (!ISSET(bp->b_flags, B_AGE))
			/* valid data */
			bufq = &bufqueues[BQ_LRU];
		else {
			/* stale but valid data */
			int has_deps;

			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    bioops.io_countdeps)
				has_deps = (*bioops.io_countdeps)(bp, 0);
			else
				has_deps = 0;
			bufq = has_deps ? &bufqueues[BQ_LRU] :
			    &bufqueues[BQ_AGE];
		}
		binstailfree(bp, bufq);
	}

already_queued:
	/* Unlock the buffer. */
	CLR(bp->b_flags, B_AGE|B_ASYNC|B_BUSY|B_NOCACHE);
	SET(bp->b_flags, B_CACHE);

	/* Allow disk interrupts. */
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
}
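
/*
 * Summary of the queue selection in brelse() above:
 *
 *	invalid, no memory		-> head of BQ_EMPTY
 *	invalid, has memory		-> head of BQ_AGE
 *	valid and B_LOCKED		-> tail of BQ_LOCKED
 *	valid, not B_AGE		-> tail of BQ_LRU
 *	valid, B_AGE, no dependencies	-> tail of BQ_AGE
 *	valid, B_AGE, dependencies	-> tail of BQ_LRU
 */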

/*
 * Determine if a block is in the cache.
 * Just look on what would be its hash chain.  If it's there, return
 * a pointer to it, unless it's marked invalid.  If it's marked invalid,
 * we normally don't return the buffer, unless the caller explicitly
 * wants us to.
 */
struct buf *
incore(vp, blkno)
	struct vnode *vp;
	daddr_t blkno;
{
	struct buf *bp;

	/* Search hash chain */
	LIST_FOREACH(bp, BUFHASH(vp, blkno), b_hash) {
		if (bp->b_lblkno == blkno && bp->b_vp == vp &&
		    !ISSET(bp->b_flags, B_INVAL))
			return (bp);
	}

	return (NULL);
}

/*
 * Get a block of requested size that is associated with
 * a given vnode and block offset. If it is found in the
 * block cache, mark it as having been found, make it busy
 * and return it. Otherwise, return an empty block of the
 * correct size. It is up to the caller to ensure that
 * cached blocks are of the correct size.
 */
struct buf *
getblk(vp, blkno, size, slpflag, slptimeo)
	struct vnode *vp;
	daddr_t blkno;
	int size, slpflag, slptimeo;
{
	struct buf *bp;
	int s, err;

start:
	s = splbio();
	simple_lock(&bqueue_slock);
	bp = incore(vp, blkno);
	if (bp != NULL) {
		simple_lock(&bp->b_interlock);
		if (ISSET(bp->b_flags, B_BUSY)) {
			simple_unlock(&bqueue_slock);
			if (curproc == uvm.pagedaemon_proc) {
				simple_unlock(&bp->b_interlock);
				splx(s);
				return NULL;
			}
			SET(bp->b_flags, B_WANTED);
			err = ltsleep(bp, slpflag | (PRIBIO + 1) | PNORELOCK,
					"getblk", slptimeo, &bp->b_interlock);
			splx(s);
			if (err)
				return (NULL);
			goto start;
		}
#ifdef DIAGNOSTIC
		if (ISSET(bp->b_flags, B_DONE|B_DELWRI) &&
		    bp->b_bcount < size && vp->v_type != VBLK)
			panic("getblk: block size invariant failed");
#endif
		SET(bp->b_flags, B_BUSY);
		bremfree(bp);
	} else {
		if ((bp = getnewbuf(slpflag, slptimeo)) == NULL) {
			simple_unlock(&bqueue_slock);
			splx(s);
			goto start;
		}

		binshash(bp, BUFHASH(vp, blkno));
		bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = blkno;
		bgetvp(vp, bp);
	}
	simple_unlock(&bp->b_interlock);
	simple_unlock(&bqueue_slock);
	splx(s);
	allocbuf(bp, size);
	return (bp);
}
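
/*
 * Example (sketch, illustrative only): obtaining a buffer for a block
 * that will be completely overwritten, so that no read is needed.
 *
 *	bp = getblk(vp, lbn, bsize, 0, 0);
 *	memset(bp->b_data, 0, bsize);
 *	...fill in the new contents...
 *	bwrite(bp);
 */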

/*
 * Get an empty, disassociated buffer of given size.
 */
struct buf *
geteblk(size)
	int size;
{
	struct buf *bp;
	int s;

	s = splbio();
	simple_lock(&bqueue_slock);
	while ((bp = getnewbuf(0, 0)) == 0)
		;

	SET(bp->b_flags, B_INVAL);
	binshash(bp, &invalhash);
	simple_unlock(&bqueue_slock);
	simple_unlock(&bp->b_interlock);
	splx(s);
	allocbuf(bp, size);
	return (bp);
}

/*
 * Expand or contract the actual memory allocated to a buffer.
 *
 * If the buffer shrinks, data is lost, so it's up to the
 * caller to have written it out *first*; this routine will not
 * start a write.  If the buffer grows, it's the caller's
 * responsibility to fill out the buffer's additional contents.
 */
void
allocbuf(bp, size)
	struct buf *bp;
	int size;
{
	struct buf *nbp;
	vsize_t desired_size;
	int s;

	desired_size = round_page((vsize_t)size);
	if (desired_size > MAXBSIZE)
		panic("allocbuf: buffer larger than MAXBSIZE requested");

	if (bp->b_bufsize == desired_size)
		goto out;

	/*
	 * If the buffer is smaller than the desired size, we need to snarf
	 * it from other buffers.  Get buffers (via getnewbuf()), and
	 * steal their pages.
	 */
	while (bp->b_bufsize < desired_size) {
		int amt;

		/* find a buffer */
		s = splbio();
		simple_lock(&bqueue_slock);
		while ((nbp = getnewbuf(0, 0)) == NULL)
			;

		SET(nbp->b_flags, B_INVAL);
		binshash(nbp, &invalhash);

		simple_unlock(&nbp->b_interlock);
		simple_unlock(&bqueue_slock);
		splx(s);

		/* and steal its pages, up to the amount we need */
		amt = min(nbp->b_bufsize, (desired_size - bp->b_bufsize));
		pagemove((nbp->b_data + nbp->b_bufsize - amt),
			 bp->b_data + bp->b_bufsize, amt);
		bp->b_bufsize += amt;
		nbp->b_bufsize -= amt;

		/* reduce transfer count if we stole some data */
		if (nbp->b_bcount > nbp->b_bufsize)
			nbp->b_bcount = nbp->b_bufsize;

#ifdef DIAGNOSTIC
		if (nbp->b_bufsize < 0)
			panic("allocbuf: negative bufsize");
#endif
		brelse(nbp);
	}

	/*
	 * If we want a buffer smaller than the current size,
	 * shrink this buffer.  Grab a buf head from the EMPTY queue,
	 * move a page onto it, and put it on front of the AGE queue.
	 * If there are no free buffer headers, leave the buffer alone.
	 */
	if (bp->b_bufsize > desired_size) {
		s = splbio();
		simple_lock(&bqueue_slock);
		if ((nbp = TAILQ_FIRST(&bufqueues[BQ_EMPTY])) == NULL) {
			/* No free buffer head */
			simple_unlock(&bqueue_slock);
			splx(s);
			goto out;
		}
		/* No need to lock nbp since it came from the empty queue */
		bremfree(nbp);
		SET(nbp->b_flags, B_BUSY | B_INVAL);
		simple_unlock(&bqueue_slock);
		splx(s);

		/* move the page to it and note this change */
		pagemove(bp->b_data + desired_size,
		    nbp->b_data, bp->b_bufsize - desired_size);
		nbp->b_bufsize = bp->b_bufsize - desired_size;
		bp->b_bufsize = desired_size;
		nbp->b_bcount = 0;

		/* release the newly-filled buffer and leave */
		brelse(nbp);
	}

out:
	bp->b_bcount = size;
}
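
/*
 * Worked example: growing a 4096-byte buffer to 8192 bytes steals
 * 4096 bytes of buffer memory from one or more victim buffers via
 * pagemove(); shrinking it back moves the excess onto a BQ_EMPTY
 * header, which then carries the memory to the front of BQ_AGE.
 */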

/*
 * Find a buffer which is available for use.
 * Select something from a free list.
 * Preference is to AGE list, then LRU list.
 *
 * Called with buffer queues locked.
 * Return buffer locked.
 */
struct buf *
getnewbuf(slpflag, slptimeo)
	int slpflag, slptimeo;
{
	struct buf *bp;

start:
	LOCK_ASSERT(simple_lock_held(&bqueue_slock));

	if ((bp = TAILQ_FIRST(&bufqueues[BQ_AGE])) != NULL ||
	    (bp = TAILQ_FIRST(&bufqueues[BQ_LRU])) != NULL) {
		simple_lock(&bp->b_interlock);
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		needbuffer = 1;
		ltsleep(&needbuffer, slpflag|(PRIBIO+1),
			"getnewbuf", slptimeo, &bqueue_slock);
		return (NULL);
	}

	if (ISSET(bp->b_flags, B_VFLUSH)) {
		/*
		 * This is a delayed write buffer being flushed to disk.  Make
		 * sure it gets aged out of the queue when it's finished, and
		 * leave it off the LRU queue.
		 */
		CLR(bp->b_flags, B_VFLUSH);
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		goto start;
	}

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

	/*
	 * If buffer was a delayed write, start it and return NULL
	 * (since we might sleep while starting the write).
	 */
	if (ISSET(bp->b_flags, B_DELWRI)) {
		/*
		 * This buffer has gone through the LRU, so make sure it gets
		 * reused ASAP.
		 */
		SET(bp->b_flags, B_AGE);
		simple_unlock(&bp->b_interlock);
		simple_unlock(&bqueue_slock);
		bawrite(bp);
		simple_lock(&bqueue_slock);
		return (NULL);
	}

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_deallocate)
		(*bioops.io_deallocate)(bp);

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = bp->b_rawblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;

	bremhash(bp);
	return (bp);
}
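
/*
 * Note that callers must loop until getnewbuf() returns non-NULL: a
 * NULL return means we slept waiting for a buffer or issued a delayed
 * write, and the queues may have changed in the meantime; see getblk(),
 * geteblk() and allocbuf() above.
 */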

/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(bp)
	struct buf *bp;
{
	int s, error;

	s = splbio();
	simple_lock(&bp->b_interlock);
	while (!ISSET(bp->b_flags, B_DONE | B_DELWRI))
		ltsleep(bp, PRIBIO + 1, "biowait", 0, &bp->b_interlock);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		error = EINTR;
	} else if (ISSET(bp->b_flags, B_ERROR))
		error = bp->b_error ? bp->b_error : EIO;
	else
		error = 0;

	simple_unlock(&bp->b_interlock);
	splx(s);
	return (error);
}

/*
 * Mark I/O complete on a buffer.
 *
 * If a callback has been requested, e.g. the pageout
 * daemon, do so. Otherwise, awaken waiting processes.
 *
 * [ Leffler, et al., says on p.247:
 *	"This routine wakes up the blocked process, frees the buffer
 *	for an asynchronous write, or, for a request by the pagedaemon
 *	process, invokes a procedure specified in the buffer structure" ]
 *
 * In real life, the pagedaemon (or other system processes) wants
 * to do async stuff too, and doesn't want the buffer brelse()'d.
 * (for swap pager, that puts swap buffers on the free lists (!!!),
 * for the vn device, that puts malloc'd buffers on the free lists!)
 */
void
biodone(bp)
	struct buf *bp;
{
	int s = splbio();

	simple_lock(&bp->b_interlock);
	if (ISSET(bp->b_flags, B_DONE))
		panic("biodone already");
	SET(bp->b_flags, B_DONE);		/* note that it's done */

	if (LIST_FIRST(&bp->b_dep) != NULL && bioops.io_complete)
		(*bioops.io_complete)(bp);

	if (!ISSET(bp->b_flags, B_READ))	/* wake up reader */
		vwakeup(bp);

	/*
	 * If necessary, call out.  Unlock the buffer before calling the
	 * b_iodone callback, as the buffer may no longer be valid when
	 * it returns.
	 */
	if (ISSET(bp->b_flags, B_CALL)) {
		CLR(bp->b_flags, B_CALL);	/* but note callout done */
		simple_unlock(&bp->b_interlock);
		(*bp->b_iodone)(bp);
	} else {
		if (ISSET(bp->b_flags, B_ASYNC)) {	/* if async, release */
			simple_unlock(&bp->b_interlock);
			brelse(bp);
		} else {			/* or just wakeup the buffer */
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
			simple_unlock(&bp->b_interlock);
		}
	}

	splx(s);
}
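
/*
 * Example (sketch, illustrative only): an asynchronous consumer
 * arranging for a completion callback from biodone() instead of
 * sleeping in biowait().  The handler name is hypothetical.
 *
 *	bp->b_iodone = example_iodone;
 *	SET(bp->b_flags, B_CALL);
 *	VOP_STRATEGY(bp);
 *
 * biodone() will then clear B_CALL and invoke example_iodone(bp),
 * typically from the disk interrupt, once the transfer completes.
 */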

/*
 * Return a count of buffers on the "locked" queue.
 */
int
count_lock_queue()
{
	struct buf *bp;
	int n = 0;

	simple_lock(&bqueue_slock);
	TAILQ_FOREACH(bp, &bufqueues[BQ_LOCKED], b_freelist)
		n++;
	simple_unlock(&bqueue_slock);
	return (n);
}

#ifdef DEBUG
/*
 * Print out statistics on the current allocation of the buffer pool.
 * Can be enabled to print out on every ``sync'' by setting "syncprt"
 * in vfs_syscalls.c using sysctl.
 */
void
vfs_bufstats()
{
	int s, i, j, count;
	struct buf *bp;
	struct bqueues *dp;
	int counts[(MAXBSIZE / PAGE_SIZE) + 1];
	static char *bname[BQUEUES] = { "LOCKED", "LRU", "AGE", "EMPTY" };

	for (dp = bufqueues, i = 0; dp < &bufqueues[BQUEUES]; dp++, i++) {
		count = 0;
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			counts[j] = 0;
		s = splbio();
		TAILQ_FOREACH(bp, dp, b_freelist) {
			counts[bp->b_bufsize/PAGE_SIZE]++;
			count++;
		}
		splx(s);
		printf("%s: total-%d", bname[i], count);
		for (j = 0; j <= MAXBSIZE/PAGE_SIZE; j++)
			if (counts[j] != 0)
				printf(", %d-%d", j * PAGE_SIZE, counts[j]);
		printf("\n");
	}
}
#endif /* DEBUG */