/*	$NetBSD: bufcache.c,v 1.8 2000/07/05 11:03:20 ad Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Simon Burge.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifndef lint
__RCSID("$NetBSD: bufcache.c,v 1.8 2000/07/05 11:03:20 ad Exp $");
#endif /* not lint */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/queue.h>
#include <sys/time.h>
#include <sys/vnode.h>

#include <err.h>
#include <kvm.h>
#include <nlist.h>
#include <paths.h>
#include <stdlib.h>
#include <unistd.h>

#include "systat.h"
#include "extern.h"

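/*
 * This module implements systat's "bufcache" display.  It walks the
 * kernel's buffer-cache headers and free-list queues through libkvm,
 * charges each buffer that has a vnode attached to the file system that
 * vnode belongs to, and shows a per-mount summary of buffer and memory
 * usage.  The KREAD()/NREAD() macros used below come from systat's own
 * headers and, as used here, wrap kvm reads of a kernel address and of
 * a namelist symbol respectively.
 */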

/*
 * Definitions for the buffer free lists (from sys/kern/vfs_bio.c).
 */
#define	BQUEUES		4		/* number of free buffer queues */

#define	BQ_LOCKED	0		/* super-blocks &c */
#define	BQ_LRU		1		/* lru, useful buffers */
#define	BQ_AGE		2		/* rubbish */
#define	BQ_EMPTY	3		/* buffer headers with no memory */

#define VCACHE_SIZE	50

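/*
 * Small cache of vnodes copied in from the kernel, keyed by their kernel
 * address (vc_addr), so that buffers sharing a vnode do not each force a
 * separate kvm read.  vc_age drives the replacement decision when the
 * cache fills up.
 */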
struct vcache {
	int vc_age;
	struct vnode *vc_addr;
	struct vnode vc_node;
};

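/*
 * Per-mount accumulator built while scanning the buffer headers:
 * ml_count is the number of buffers charged to the mount, ml_size the
 * kilobytes of buffer memory allocated to them and ml_valid the
 * kilobytes of that memory holding valid data.  showbufcache() prints
 * an entry with a NULL ml_addr under the name "NULL".
 */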
struct ml_entry {
	int ml_count;
	long ml_size;
	long ml_valid;
	struct mount *ml_addr;
	struct mount ml_mount;
	LIST_ENTRY(ml_entry) ml_entries;
};

static struct nlist namelist[] = {
#define	X_NBUF		0
	{ "_nbuf" },
#define	X_BUF		1
	{ "_buf" },
#define	X_BUFQUEUES	2
	{ "_bufqueues" },
#define	X_BUFPAGES	3
	{ "_bufpages" },
	{ "" },
};

static struct vcache vcache[VCACHE_SIZE];
static LIST_HEAD(mount_list, ml_entry) mount_list;

static int nbuf, bufpages, bufkb;
static void *bufaddr;
static struct buf *buf = NULL;
static TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];

static void	vc_init(void);
static void	ml_init(void);
static struct	vnode *vc_lookup(struct vnode *);
static struct	mount *ml_lookup(struct mount *, int, int);

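/*
 * Window management.  The display is drawn in a sub-window that starts
 * at screen row 5 (presumably below the area systat reserves for its
 * global header) and leaves the bottom line of the screen free.
 */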
WINDOW *
openbufcache(void)
{

	return (subwin(stdscr, LINES-5-1, 0, 5, 0));
}

void
closebufcache(WINDOW *w)
{

	if (w == NULL)
		return;
	wclear(w);
	wrefresh(w);
	delwin(w);
	ml_init();		/* Clear out mount list */
}

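/*
 * Draw the static parts of the display: a summary line giving the total
 * number of buffers and the kilobytes of memory they use, and the
 * column headings for the per-file-system table.
 */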
void
labelbufcache(void)
{
	mvwprintw(wnd, 0, 0, "There are %d buffers using %d kBytes of memory.",
	    nbuf, bufkb);
	wclrtoeol(wnd);
	wmove(wnd, 1, 0);
	wclrtoeol(wnd);
	mvwaddstr(wnd, 2, 0,
"File System          Bufs used   %   kB in use   %  Bufsize kB   %  Util %");
	wclrtoeol(wnd);
}

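/*
 * Draw one line per mount point plus a "Total:" line.  The percentage
 * columns are relative to the whole buffer cache: buffers used against
 * nbuf, kB in use and buffer size against the total buffer memory
 * (bufkb), and "Util %" is valid data as a share of the space allocated
 * to that file system's buffers.
 */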
void
showbufcache(void)
{
	int tbuf, i, lastrow;
	long tvalid, tsize;
	struct ml_entry *ml;

	tbuf = tvalid = tsize = 0;
	lastrow = 3;	/* Leave room for header. */
	for (i = lastrow, ml = LIST_FIRST(&mount_list); ml != NULL;
	    i++, ml = LIST_NEXT(ml, ml_entries)) {

		/* Display in window if enough room. */
		if (i < getmaxy(wnd) - 2) {
			mvwprintw(wnd, i, 0, "%-20.20s", ml->ml_addr == NULL ?
			    "NULL" : ml->ml_mount.mnt_stat.f_mntonname);
			/*
			 * Sizes are rounded down to kilobytes and so may
			 * be zero; guard the utilisation divides.
			 */
			wprintw(wnd,
			    "    %6d %3d    %8ld %3ld    %8ld %3ld     %3ld",
			    ml->ml_count, (100 * ml->ml_count) / nbuf,
			    ml->ml_valid, (100 * ml->ml_valid) / bufkb,
			    ml->ml_size, (100 * ml->ml_size) / bufkb,
			    ml->ml_size != 0 ?
			    (100 * ml->ml_valid) / ml->ml_size : 0L);
			wclrtoeol(wnd);
			lastrow = i;
		}

		/* Update statistics. */
		tbuf += ml->ml_count;
		tvalid += ml->ml_valid;
		tsize += ml->ml_size;
	}

	wclrtobot(wnd);
	mvwprintw(wnd, lastrow + 2, 0,
	    "%-20s    %6d %3d    %8ld %3ld    %8ld %3ld     %3ld",
	    "Total:", tbuf, (100 * tbuf) / nbuf,
	    tvalid, (100 * tvalid) / bufkb, tsize, (100 * tsize) / bufkb,
	    tsize != 0 ? (100 * tvalid) / tsize : 0L);
}

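/*
 * Look up the kernel symbols (only once) and allocate a local array big
 * enough to hold a copy of every buffer header; fetchbufcache() re-reads
 * the headers into it on every update.
 */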
int
initbufcache(void)
{
	if (namelist[X_NBUF].n_type == 0) {
		if (kvm_nlist(kd, namelist)) {
			nlisterr(namelist);
			return(0);
		}
		if (namelist[X_NBUF].n_type == 0) {
			error("namelist on %s failed", _PATH_UNIX);
			return(0);
		}
	}

	NREAD(X_NBUF, &nbuf, sizeof(nbuf));
	NREAD(X_BUFPAGES, &bufpages, sizeof(bufpages));
	bufkb = bufpages * sysconf(_SC_PAGESIZE) / 1024;

	if ((buf = malloc(nbuf * sizeof(struct buf))) == NULL) {
		error("malloc failed");
		die(0);
	}
	NREAD(X_BUF, &bufaddr, sizeof(bufaddr));

	return(1);
}

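/*
 * Take a fresh snapshot of the buffer free-list queues and all buffer
 * headers, then walk the queues and charge each buffer that has a vnode
 * to the file system it belongs to.
 */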
void
fetchbufcache(void)
{
	int i, count;
	struct buf *bp;
	struct vnode *vn;
	struct ml_entry *ml;

	/* Re-read bufqueues lists and buffer cache headers */
	NREAD(X_BUFQUEUES, bufqueues, sizeof(bufqueues));
	KREAD(bufaddr, buf, sizeof(struct buf) * nbuf);

	/* Initialise vnode cache and mount list. */
	vc_init();
	ml_init();
	for (i = 0; i < BQUEUES; i++) {
		for (bp = bufqueues[i].tqh_first; bp != NULL;
		    bp = bp->b_freelist.tqe_next) {
			/*
			 * The free-list links are kernel addresses;
			 * translate bp into our local copy of the buffer
			 * headers before dereferencing it.  The loop
			 * increment then picks the next kernel address
			 * out of that local copy.
			 */
			bp = (struct buf *)((u_long)bp + (u_long)buf -
			    (u_long)bufaddr);

			if (bp->b_vp != NULL) {
				vn = vc_lookup(bp->b_vp);
				if (vn == NULL)
					errx(1, "vc_lookup returns NULL!");
				if (vn->v_mount != NULL)
					(void)ml_lookup(vn->v_mount,
					    bp->b_bufsize, bp->b_bcount);
			}
		}
	}

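	/*
	 * Sort the mount list by descending buffer count: repeatedly
	 * move any entry with a larger count than its predecessor to
	 * the head of the list until a full pass makes no changes.
	 */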
	/* simple sort - there's not that many entries */
	do {
		if ((ml = LIST_FIRST(&mount_list)) == NULL ||
		    LIST_NEXT(ml, ml_entries) == NULL)
			break;

		count = 0;
		for (ml = LIST_FIRST(&mount_list); ml != NULL;
		    ml = LIST_NEXT(ml, ml_entries)) {
			if (LIST_NEXT(ml, ml_entries) == NULL)
				break;
			if (ml->ml_count < LIST_NEXT(ml, ml_entries)->ml_count) {
				ml = LIST_NEXT(ml, ml_entries);
				LIST_REMOVE(ml, ml_entries);
				LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
				count++;
			}
		}
	} while (count != 0);
}

static void
vc_init(void)
{
	int i;

	/* vc_addr == NULL for unused cache entry. */
	for (i = 0; i < VCACHE_SIZE; i++)
		vcache[i].vc_addr = NULL;
}

static void
ml_init(void)
{
	struct ml_entry *ml;

	/* Throw out the current mount list and start again. */
	while ((ml = LIST_FIRST(&mount_list)) != NULL) {
		LIST_REMOVE(ml, ml_entries);
		free(ml);
	}
}

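/*
 * Map a kernel vnode address to a local copy of that vnode.  A hit in
 * the small vcache array resets the entry's age; on a miss the vnode is
 * read from the kernel into a free slot, or into the slot that has gone
 * the longest without a hit once the cache is full.
 */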
static struct vnode *
vc_lookup(struct vnode *vaddr)
{
	struct vnode *ret;
	int i, oldest, match;

	ret = NULL;
	oldest = match = 0;
	for (i = 0; i < VCACHE_SIZE; i++) {
		vcache[i].vc_age++;
		if (vcache[i].vc_addr == NULL)
			break;
		if (vcache[i].vc_age > vcache[oldest].vc_age)
			oldest = i;
		if (vcache[i].vc_addr == vaddr) {
			vcache[i].vc_age = 0;
			match = i;
			ret = &vcache[i].vc_node;
		}
	}

	/* Found an entry in the cache? */
	if (ret != NULL)
		return(ret);

	/* Went past the end of the cache?  Recycle the oldest entry. */
	if (i >= VCACHE_SIZE)
		i = oldest;

	/* Read in new vnode and reset age counter. */
	KREAD(vaddr, &vcache[i].vc_node, sizeof(struct vnode));
	vcache[i].vc_addr = vaddr;
	vcache[i].vc_age = 0;

	return(&vcache[i].vc_node);
}

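/*
 * Find (or create) the mount-list entry for a mount address and add one
 * buffer's size and valid-byte counts (converted to kilobytes) to it.
 * New entries have the struct mount copied in from the kernel; a NULL
 * mount address is accepted but never read.
 */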
static struct mount *
ml_lookup(struct mount *maddr, int size, int valid)
{
	struct ml_entry *ml;

	for (ml = LIST_FIRST(&mount_list); ml != NULL;
	    ml = LIST_NEXT(ml, ml_entries))
		if (ml->ml_addr == maddr) {
			ml->ml_count++;
			ml->ml_size += size / 1024;
			ml->ml_valid += valid / 1024;
			if (ml->ml_addr == NULL)
				return(NULL);
			else
				return(&ml->ml_mount);
		}

	if ((ml = malloc(sizeof(struct ml_entry))) == NULL) {
		error("out of memory");
		die(0);
	}
	LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
	ml->ml_count = 1;
	ml->ml_size = size / 1024;
	ml->ml_valid = valid / 1024;
	ml->ml_addr = maddr;
	if (maddr == NULL)
		return(NULL);

	KREAD(maddr, &ml->ml_mount, sizeof(struct mount));
	return(&ml->ml_mount);
}