/* $NetBSD: bufcache.c,v 1.5 2005/04/06 02:38:17 perseant Exp $ */
/*-
 * Copyright (c) 2003 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Konrad E. Schroder <perseant@hhhh.org>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the NetBSD
 *	Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/buf.h>
#include <sys/queue.h>
#include <sys/mount.h>

#include <assert.h>
#include <err.h>
#include <inttypes.h>	/* PRId64, used by dump_free_lists() */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "bufcache.h"
#include "vnode.h"

/*
 * Definitions for the buffer free lists.
 */
#define	BQUEUES		3	/* number of free buffer queues */

#define	BQ_LOCKED	0	/* super-blocks &c */
#define	BQ_LRU		1	/* lru, useful buffers */
#define	BQ_AGE		2	/* rubbish */

TAILQ_HEAD(bqueues, ubuf) bufqueues[BQUEUES];

#define HASH_MAX 101

struct bufhash_struct bufhash[HASH_MAX];

int maxbufs = BUF_CACHE_SIZE;
int nbufs = 0;
int cachehits = 0;
int cachemisses = 0;
int hashmax = 0;
off_t locked_queue_bytes = 0;
int locked_queue_count = 0;

/* Simple buffer hash function */
static int
vl_hash(struct uvnode * vp, daddr_t lbn)
{
	/* Do the arithmetic unsigned so the bucket index cannot go negative. */
	return (int)(((unsigned long) vp + (unsigned long) lbn) % HASH_MAX);
}

/* Initialize buffer cache */
void
bufinit(void)
{
	int i;

	for (i = 0; i < BQUEUES; i++) {
		TAILQ_INIT(&bufqueues[i]);
	}
	for (i = 0; i < HASH_MAX; i++)
		LIST_INIT(&bufhash[i]);
}

/* Print statistics of buffer cache usage */
void
bufstats(void)
{
	/* Avoid a division by zero if the cache was never used. */
	if (cachehits + cachemisses == 0)
		return;
	printf("buffer cache: %d hits %d misses (%2.2f%%); hash depth %d\n",
	    cachehits, cachemisses,
	    (cachehits * 100.0) / (cachehits + cachemisses),
	    hashmax);
}
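
/*
 * Illustrative only (not compiled): a minimal sketch of how a caller is
 * expected to drive this cache -- bufinit() once before any buffer is
 * requested, bufstats() when the program is done.  The fs_vp variable is
 * a placeholder for a vnode obtained elsewhere, not part of this file.
 */
#if 0
	struct uvnode *fs_vp;	/* placeholder: supplied by the vnode layer */

	bufinit();
	/* ... fsck passes call bread()/getblk()/bwrite()/brelse() on fs_vp ... */
	bufstats();
#endif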

/*
 * Remove a buffer from the cache.
 * Caller must remove the buffer from its free list.
 */
void
buf_destroy(struct ubuf * bp)
{
	bp->b_flags |= B_NEEDCOMMIT;
	LIST_REMOVE(bp, b_vnbufs);
	LIST_REMOVE(bp, b_hash);
	free(bp->b_data);
	free(bp);
	--nbufs;
}

/* Remove a buffer from its free list. */
void
bremfree(struct ubuf * bp)
{
	struct bqueues *dp = NULL;

	/*
	 * We only calculate the head of the freelist when removing
	 * the last element of the list as that is the only time that
	 * it is needed (e.g. to reset the tail pointer).
	 *
	 * NB: This makes an assumption about how tailq's are implemented.
	 */
	if (bp->b_flags & B_LOCKED) {
		locked_queue_bytes -= bp->b_bcount;
		--locked_queue_count;
	}
	if (TAILQ_NEXT(bp, b_freelist) == NULL) {
		for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
			if (dp->tqh_last == &bp->b_freelist.tqe_next)
				break;
		if (dp == &bufqueues[BQUEUES])
			errx(1, "bremfree: lost tail");
	}
	++bp->b_vp->v_usecount;
	TAILQ_REMOVE(dp, bp, b_freelist);
}

/* Return a buffer if it is in the cache, otherwise return NULL. */
struct ubuf *
incore(struct uvnode * vp, int lbn)
{
	struct ubuf *bp;
	int hash, depth;

	hash = vl_hash(vp, lbn);
	/* XXX use a real hash instead. */
	depth = 0;
	LIST_FOREACH(bp, &bufhash[hash], b_hash) {
		if (++depth > hashmax)
			hashmax = depth;
		if (bp->b_vp == vp && bp->b_lblkno == lbn) {
			return bp;
		}
	}
	return NULL;
}

/*
 * Return a buffer of the given size, lbn and uvnode.
 * If none is in core, make a new one.
 */
struct ubuf *
getblk(struct uvnode * vp, daddr_t lbn, int size)
{
	struct ubuf *bp;
#ifdef DEBUG
	static int warned;
#endif

	/*
	 * First check the buffer cache lists.
	 * We might sometimes need to resize a buffer.  If we are growing
	 * the buffer, its contents are invalid; but shrinking is okay.
	 */
	if ((bp = incore(vp, lbn)) != NULL) {
		assert(!(bp->b_flags & B_NEEDCOMMIT));
		assert(!(bp->b_flags & B_BUSY));
		bp->b_flags |= B_BUSY;
		bremfree(bp);
		if (bp->b_bcount == size)
			return bp;
		else if (bp->b_bcount > size) {
			void *ndata;

			assert(!(bp->b_flags & B_DELWRI));
			/* Shrink in place; keep the old block if realloc fails. */
			if ((ndata = realloc(bp->b_data, size)) != NULL)
				bp->b_data = ndata;
			bp->b_bcount = size;
			return bp;
		}

		buf_destroy(bp);
		bp = NULL;
	}

	/*
	 * Not on the list.
	 * Get a new block of the appropriate size and use that.
	 * If not enough space, free blocks from the AGE and LRU lists
	 * to make room.
	 */
	while (nbufs >= maxbufs + locked_queue_count) {
		bp = TAILQ_FIRST(&bufqueues[BQ_AGE]);
		if (bp)
			TAILQ_REMOVE(&bufqueues[BQ_AGE], bp, b_freelist);
		if (bp == NULL) {
			bp = TAILQ_FIRST(&bufqueues[BQ_LRU]);
			if (bp)
				TAILQ_REMOVE(&bufqueues[BQ_LRU], bp,
				    b_freelist);
		}
		if (bp) {
			if (bp->b_flags & B_DELWRI)
				VOP_STRATEGY(bp);
			buf_destroy(bp);
		} else {
#ifdef DEBUG
			if (!warned) {
				warnx("allocating more than %d buffers",
					maxbufs);
				++warned;
			}
#endif
			/* Nothing left to evict; stop rather than spin forever. */
			break;
		}
	}
	++nbufs;
	bp = (struct ubuf *) malloc(sizeof(*bp));
	if (bp == NULL)
		err(1, "getblk: malloc");
	memset(bp, 0, sizeof(*bp));
	bp->b_data = malloc(size);
	if (bp->b_data == NULL)
		err(1, "getblk: malloc");
	memset(bp->b_data, 0, size);

	bp->b_vp = vp;
	bp->b_blkno = bp->b_lblkno = lbn;
	bp->b_bcount = size;
	LIST_INSERT_HEAD(&bufhash[vl_hash(vp, lbn)], bp, b_hash);
	LIST_INSERT_HEAD(&vp->v_cleanblkhd, bp, b_vnbufs);
	bp->b_flags = B_BUSY;

	return bp;
}

/* Write a buffer to disk according to its strategy routine. */
void
bwrite(struct ubuf * bp)
{
	bp->b_flags &= ~(B_READ | B_DONE | B_DELWRI | B_LOCKED);
	VOP_STRATEGY(bp);
	bp->b_flags |= B_DONE;
	reassignbuf(bp, bp->b_vp);
	brelse(bp);
}
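
/*
 * Illustrative only (not compiled): a minimal sketch of the write path,
 * assuming the caller regenerates the whole block, so getblk() (which does
 * no read) is enough before bwrite().  vp, lbn, size and newdata are
 * placeholders supplied by the caller, not names defined in this file.
 */
#if 0
	struct ubuf *bp;

	bp = getblk(vp, lbn, size);		/* returned B_BUSY; zero-filled if newly allocated */
	memcpy(bp->b_data, newdata, size);	/* overwrite the block contents */
	bwrite(bp);				/* VOP_STRATEGY, reassignbuf, then brelse */
#endif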

/* Put a buffer back on its free list, clear B_BUSY. */
void
brelse(struct ubuf * bp)
{
	int age;

	assert(!(bp->b_flags & B_NEEDCOMMIT));
	assert(bp->b_flags & B_BUSY);

	age = bp->b_flags & B_AGE;
	bp->b_flags &= ~(B_BUSY | B_AGE);
	if (bp->b_flags & B_INVAL) {
		buf_destroy(bp);
		return;
	}
	if (bp->b_flags & B_LOCKED) {
		locked_queue_bytes += bp->b_bcount;
		++locked_queue_count;
		TAILQ_INSERT_TAIL(&bufqueues[BQ_LOCKED], bp, b_freelist);
	} else if (age) {
		TAILQ_INSERT_TAIL(&bufqueues[BQ_AGE], bp, b_freelist);
	} else {
		TAILQ_INSERT_TAIL(&bufqueues[BQ_LRU], bp, b_freelist);
	}
	--bp->b_vp->v_usecount;
}

/* Read the given block from disk, return it B_BUSY. */
int
bread(struct uvnode * vp, daddr_t lbn, int size, struct ucred * unused,
    struct ubuf ** bpp)
{
	struct ubuf *bp;
	daddr_t daddr;
	int error, count;

	bp = getblk(vp, lbn, size);
	*bpp = bp;
	if (bp->b_flags & (B_DELWRI | B_DONE)) {
		++cachehits;
		return 0;
	}
	++cachemisses;

	/*
	 * Not found.  Need to find that block's location on disk,
	 * and load it in.
	 */
	daddr = -1;
	error = VOP_BMAP(vp, lbn, &daddr);
	bp->b_blkno = daddr;
	if (daddr >= 0) {
		count = pread(vp->v_fd, bp->b_data, bp->b_bcount,
				dbtob((off_t) daddr));
		if (count == bp->b_bcount) {
			bp->b_flags |= B_DONE;
			return 0;
		}
		return -1;
	}
	memset(bp->b_data, 0, bp->b_bcount);
	return 0;
}
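
/*
 * Illustrative only (not compiled): a minimal sketch of the read path.
 * bread() hands back a B_BUSY buffer; the caller must release it with
 * brelse() (or write it back with bwrite()) when done.  vp, lbn and size
 * are placeholders supplied by the caller.
 */
#if 0
	struct ubuf *bp;

	if (bread(vp, lbn, size, NULL, &bp) != 0)
		errx(1, "read of lbn %jd failed", (intmax_t)lbn);
	/* ... inspect or copy bp->b_data ... */
	brelse(bp);
#endif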

/* Move a buffer between dirty and clean block lists. */
void
reassignbuf(struct ubuf * bp, struct uvnode * vp)
{
	LIST_REMOVE(bp, b_vnbufs);
	if (bp->b_flags & B_DELWRI) {
		LIST_INSERT_HEAD(&vp->v_dirtyblkhd, bp, b_vnbufs);
	} else {
		LIST_INSERT_HEAD(&vp->v_cleanblkhd, bp, b_vnbufs);
	}
}

#ifdef DEBUG
void
dump_free_lists(void)
{
	struct ubuf *bp;
	int i;

	/* Walk all of the free lists, not only BQ_LOCKED (queue 0). */
	for (i = 0; i < BQUEUES; i++) {
		printf("==> free list %d:\n", i);
		TAILQ_FOREACH(bp, &bufqueues[i], b_freelist) {
			printf("vp %p lbn %" PRId64 " flags %lx\n",
				bp->b_vp, bp->b_lblkno, bp->b_flags);
		}
	}
}
#endif