/*	$NetBSD: bufcache.c,v 1.17 2004/05/11 21:56:20 martin Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Simon Burge.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifndef lint
__RCSID("$NetBSD: bufcache.c,v 1.17 2004/05/11 21:56:20 martin Exp $");
#endif /* not lint */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <uvm/uvm_extern.h>

#include <err.h>
#include <errno.h>
#include <kvm.h>
#include <limits.h>		/* for INT_MAX */
#include <math.h>
#include <nlist.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <miscfs/specfs/specdev.h>

#include "systat.h"
#include "extern.h"

#define VCACHE_SIZE	50

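/*
 * Cache of vnodes copied in from the kernel with kvm, so that a vnode
 * referenced by several buffers is only read once per update.
 */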
struct vcache {
	int vc_age;
	struct vnode *vc_addr;
	struct vnode vc_node;
};

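/*
 * Per-mount totals: number of buffers, total buffer size and total
 * valid bytes, along with a copy of the kernel's struct mount.
 */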
struct ml_entry {
	u_int ml_count;
	u_long ml_size;
	u_long ml_valid;
	struct mount *ml_addr;
	LIST_ENTRY(ml_entry) ml_entries;
	struct mount ml_mount;
};

static struct nlist namelist[] = {
#define	X_BUFMEM	0
	{ "_bufmem" },
	{ "" },
};

static struct vcache vcache[VCACHE_SIZE];
static LIST_HEAD(mount_list, ml_entry) mount_list;

static u_long bufmem;
static u_int nbuf, pgwidth, kbwidth;
static struct uvmexp_sysctl uvmexp;

static void	vc_init(void);
static void	ml_init(void);
static struct vnode	*vc_lookup(struct vnode *);
static struct mount	*ml_lookup(struct mount *, int, int);
static void	fetchuvmexp(void);


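/* Create the curses subwindow for the buffer cache display. */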
WINDOW *
openbufcache(void)
{

	return (subwin(stdscr, -1, 0, 5, 0));
}

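/* Close the window and throw away the accumulated mount list. */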
void
closebufcache(WINDOW *w)
{

	if (w == NULL)
		return;
	wclear(w);
	wrefresh(w);
	delwin(w);
	ml_init();		/* Clear out mount list */
}

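/* Clear the summary rows and draw the per-file-system column headings. */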
void
labelbufcache(void)
{
	wmove(wnd, 1, 0);
	wclrtoeol(wnd);
	wmove(wnd, 2, 0);
	wclrtoeol(wnd);
	wmove(wnd, 3, 0);
	wclrtoeol(wnd);
	mvwaddstr(wnd, 4, 0,
"File System          Bufs used   %   kB in use   %  Bufsize kB   %  Util %");
	wclrtoeol(wnd);
}

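/*
 * Redraw the display: overall metadata/file/exec memory use, followed
 * by one line of buffer statistics per mounted file system and a total.
 */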
void
showbufcache(void)
{
	int tbuf, i, lastrow;
	double tvalid, tsize;
	struct ml_entry *ml;

	NREAD(X_BUFMEM, &bufmem, sizeof(bufmem));

	mvwprintw(wnd, 0, 0,
	    "There are %*d metadata buffers using           %*ld kBytes of memory.",
	    pgwidth, nbuf, kbwidth, bufmem/1024);
	wclrtoeol(wnd);
	mvwprintw(wnd, 1, 0,
	    "There are %*llu pages for cached file data using %*llu kBytes of memory.",
	    pgwidth, (long long)uvmexp.filepages,
	    kbwidth, (long long) uvmexp.filepages * getpagesize() / 1024);
	wclrtoeol(wnd);
	mvwprintw(wnd, 2, 0,
	    "There are %*llu pages for executables using      %*llu kBytes of memory.",
	    pgwidth, (long long)uvmexp.execpages,
	    kbwidth, (long long) uvmexp.execpages * getpagesize() / 1024);
	wclrtoeol(wnd);

	if (nbuf == 0 || bufmem == 0) {
		wclrtobot(wnd);
		return;
	}

	tbuf = 0;
	tvalid = tsize = 0;
	lastrow = 5;	/* Leave room for header. */
	for (i = lastrow, ml = LIST_FIRST(&mount_list); ml != NULL;
	    i++, ml = LIST_NEXT(ml, ml_entries)) {

		int c = ml->ml_count;
		double v = ml->ml_valid;
		double s = ml->ml_size;

		/* Display in window if enough room. */
		if (i < getmaxy(wnd) - 2) {
			mvwprintw(wnd, i, 0, "%-20.20s", ml->ml_addr == NULL ?
			    "NULL" : ml->ml_mount.mnt_stat.f_mntonname);
			wprintw(wnd,
			    "    %6d %3d    %8ld %3.0f    %8ld %3.0f     %3.0f",
			    c, (100 * c) / nbuf,
			    (long)(v/1024), 100 * v / bufmem,
			    (long)(s/1024), 100 * s / bufmem,
			    s != 0 ? (100 * v / s) : 0);
			wclrtoeol(wnd);
			lastrow = i;
		}

		/* Update statistics. */
		tbuf += c;
		tvalid += v;
		tsize += s;
	}

	wclrtobot(wnd);
	mvwprintw(wnd, lastrow + 2, 0,
	    "%-20s    %6d %3d    %8ld %3.0f    %8ld %3.0f     %3.0f",
	    "Total:", tbuf, (100 * tbuf) / nbuf,
	    (long)(tvalid/1024), 100 * tvalid / bufmem,
	    (long)(tsize/1024), 100 * tsize / bufmem,
	    tsize != 0 ? ((100 * tvalid) / tsize) : 0);
}

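/*
 * One-time setup: resolve the kernel symbol for bufmem and size the
 * numeric columns from the total number of physical pages.
 */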
int
initbufcache(void)
{
	if (namelist[0].n_type == 0) {
		if (kvm_nlist(kd, namelist)) {
			nlisterr(namelist);
			return(0);
		}
	}

	fetchuvmexp();
	pgwidth = (int)(floor(log10((double)uvmexp.npages)) + 1);
	kbwidth = (int)(floor(log10(uvmexp.npages * getpagesize() / 1024.0)) + 1);

	return(1);
}

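/* Refresh the UVM statistics (file and executable page counts) via sysctl. */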
static void
fetchuvmexp(void)
{
	int mib[2];
	size_t size;

	/* Re-read pages used for vnodes & executables */
	size = sizeof(uvmexp);
	mib[0] = CTL_VM;
	mib[1] = VM_UVMEXP2;
	if (sysctl(mib, 2, &uvmexp, &size, NULL, 0) < 0) {
		error("can't get uvmexp: %s\n", strerror(errno));
		memset(&uvmexp, 0, sizeof(uvmexp));
	}
}

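/*
 * Fetch all metadata buffers from the kernel and accumulate their
 * sizes per mount point, then sort the mount list by buffer count.
 */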
void
fetchbufcache(void)
{
	int count;
	struct buf_sysctl *bp, *buffers;
	struct vnode *vn;
	struct ml_entry *ml;
	int mib[6];
	size_t size;
	int extraslop = 0;

	/* Re-read pages used for vnodes & executables */
	fetchuvmexp();

	/* Initialise vnode cache and mount list. */
	vc_init();
	ml_init();

	/* Get metadata buffers */
	size = 0;
	buffers = NULL;
	mib[0] = CTL_KERN;
	mib[1] = KERN_BUF;
	mib[2] = KERN_BUF_ALL;
	mib[3] = KERN_BUF_ALL;
	mib[4] = (int)sizeof(struct buf_sysctl);
	mib[5] = INT_MAX; /* we want them all */
again:
	if (sysctl(mib, 6, NULL, &size, NULL, 0) < 0) {
		error("can't get buffers size: %s\n", strerror(errno));
		return;
	}
	if (size == 0)
		return;

	size += extraslop * sizeof(struct buf_sysctl);
	buffers = malloc(size);
	if (buffers == NULL) {
		error("can't allocate buffers: %s\n", strerror(errno));
		return;
	}
	if (sysctl(mib, 6, buffers, &size, NULL, 0) < 0) {
		free(buffers);
		if (extraslop == 0) {
			extraslop = 100;
			goto again;
		}
		error("can't get buffers: %s\n", strerror(errno));
		return;
	}

	nbuf = size / sizeof(struct buf_sysctl);
	for (bp = buffers; bp < buffers + nbuf; bp++) {
		if (UINT64TOPTR(bp->b_vp) != NULL) {
			struct mount *mp;
			vn = vc_lookup(UINT64TOPTR(bp->b_vp));
			if (vn == NULL)
				break;

			mp = vn->v_mount;
			/*
			 * References to mounted-on vnodes should be
			 * counted towards the mounted filesystem.
			 */
			if (vn->v_type == VBLK && vn->v_specinfo != NULL) {
				struct specinfo sp;
				if (!KREAD(vn->v_specinfo, &sp, sizeof(sp)))
					continue;
				if (sp.si_mountpoint)
					mp = sp.si_mountpoint;
			}
			if (mp != NULL)
				(void)ml_lookup(mp, bp->b_bufsize,
				    bp->b_bcount);
		}
	}

	/* simple sort - there's not that many entries */
	do {
		if ((ml = LIST_FIRST(&mount_list)) == NULL ||
		    LIST_NEXT(ml, ml_entries) == NULL)
			break;

		count = 0;
		for (ml = LIST_FIRST(&mount_list); ml != NULL;
		    ml = LIST_NEXT(ml, ml_entries)) {
			if (LIST_NEXT(ml, ml_entries) == NULL)
				break;
			if (ml->ml_count < LIST_NEXT(ml, ml_entries)->ml_count) {
				ml = LIST_NEXT(ml, ml_entries);
				LIST_REMOVE(ml, ml_entries);
				LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
				count++;
			}
		}
	} while (count != 0);

	free(buffers);
}

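/* Invalidate the vnode cache before a new round of lookups. */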
static void
vc_init(void)
{
	int i;

	/* vc_addr == NULL for unused cache entry. */
	for (i = 0; i < VCACHE_SIZE; i++)
		vcache[i].vc_addr = NULL;
}

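/* Free the per-mount statistics accumulated on the previous update. */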
static void
ml_init(void)
{
	struct ml_entry *ml;

	/* Throw out the current mount list and start again. */
	while ((ml = LIST_FIRST(&mount_list)) != NULL) {
		LIST_REMOVE(ml, ml_entries);
		free(ml);
	}
}


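/*
 * Return a copy of the vnode at kernel address vaddr, reading it with
 * kvm on a cache miss and recycling a stale slot when the cache is
 * full.  Returns NULL if the vnode cannot be read.
 */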
static struct vnode *
vc_lookup(struct vnode *vaddr)
{
	struct vnode *ret;
	size_t i, oldest;

	ret = NULL;
	oldest = 0;
	for (i = 0; i < VCACHE_SIZE; i++) {
		if (vcache[i].vc_addr == NULL)
			break;
		vcache[i].vc_age++;
		if (vcache[i].vc_age > vcache[oldest].vc_age)
			oldest = i;
		if (vcache[i].vc_addr == vaddr) {
			vcache[i].vc_age = 0;
			ret = &vcache[i].vc_node;
		}
	}

	/* Did we find an entry in the cache? */
	if (ret != NULL)
		return(ret);

	/* Did we run off the end of the cache?  Recycle the oldest slot. */
	if (i >= VCACHE_SIZE)
		i = oldest;

	/* Read in new vnode and reset age counter. */
	if (KREAD(vaddr, &vcache[i].vc_node, sizeof(struct vnode)) == 0)
		return NULL;
	vcache[i].vc_addr = vaddr;
	vcache[i].vc_age = 0;

	return(&vcache[i].vc_node);
}

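/*
 * Find (or create) the mount list entry for the mount at kernel
 * address maddr and charge this buffer's size and valid byte counts
 * to it.  Returns the cached copy of the mount, or NULL for buffers
 * not associated with a mount.
 */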
static struct mount *
ml_lookup(struct mount *maddr, int size, int valid)
{
	struct ml_entry *ml;

	for (ml = LIST_FIRST(&mount_list); ml != NULL;
	    ml = LIST_NEXT(ml, ml_entries))
		if (ml->ml_addr == maddr) {
			ml->ml_count++;
			ml->ml_size += size;
			ml->ml_valid += valid;
			if (ml->ml_addr == NULL)
				return(NULL);
			else
				return(&ml->ml_mount);
		}

	if ((ml = malloc(sizeof(struct ml_entry))) == NULL) {
		error("out of memory");
		die(0);
	}
	LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
	ml->ml_count = 1;
	ml->ml_size = size;
	ml->ml_valid = valid;
	ml->ml_addr = maddr;
	if (maddr == NULL)
		return(NULL);

	KREAD(maddr, &ml->ml_mount, sizeof(struct mount));
	return(&ml->ml_mount);
}