/*	$NetBSD: bufcache.c,v 1.16 2004/02/19 03:56:30 atatat Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Simon Burge.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#ifndef lint
__RCSID("$NetBSD: bufcache.c,v 1.16 2004/02/19 03:56:30 atatat Exp $");
#endif /* not lint */

#include <sys/param.h>
#include <sys/buf.h>
#include <sys/mount.h>
#include <sys/sysctl.h>
#include <sys/vnode.h>

#include <uvm/uvm_extern.h>

#include <err.h>
#include <errno.h>
#include <kvm.h>
#include <math.h>
#include <nlist.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <miscfs/specfs/specdev.h>

#include "systat.h"
#include "extern.h"

#define VCACHE_SIZE	50

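/*
 * A small cache of vnodes copied out of the kernel with kvm(3).
 * vc_addr is the kernel address of the cached vnode (NULL marks an
 * unused slot) and vc_age grows with every lookup that does not hit
 * the entry, so the least recently used slot can be recycled.
 */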
struct vcache {
	int vc_age;
	struct vnode *vc_addr;
	struct vnode vc_node;
};

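/*
 * Per-mount accumulator built while walking the buffer list: the number
 * of buffers charged to the mount, their total allocated size
 * (b_bufsize) and valid data (b_bcount), plus a copy of the kernel's
 * struct mount read with kvm(3).
 */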
struct ml_entry {
	u_int ml_count;
	u_long ml_size;
	u_long ml_valid;
	struct mount *ml_addr;
	LIST_ENTRY(ml_entry) ml_entries;
	struct mount ml_mount;
};

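/*
 * Kernel symbol resolved with kvm_nlist(3): bufmem is the number of
 * bytes currently allocated to metadata buffers.
 */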
static struct nlist namelist[] = {
#define	X_BUFMEM	0
	{ "_bufmem" },
	{ "" },
};

static struct vcache vcache[VCACHE_SIZE];
static LIST_HEAD(mount_list, ml_entry) mount_list;

static u_int nbuf, bufmem, pgwidth, kbwidth;
static struct uvmexp_sysctl uvmexp;

static void	vc_init(void);
static void	ml_init(void);
static struct	vnode *vc_lookup(struct vnode *);
static struct	mount *ml_lookup(struct mount *, int, int);
static void	fetchuvmexp(void);


WINDOW *
openbufcache(void)
{

	return (subwin(stdscr, -1, 0, 5, 0));
}

void
closebufcache(WINDOW *w)
{

	if (w == NULL)
		return;
	wclear(w);
	wrefresh(w);
	delwin(w);
	ml_init();		/* Clear out mount list */
}

void
labelbufcache(void)
{
	wmove(wnd, 1, 0);
	wclrtoeol(wnd);
	wmove(wnd, 2, 0);
	wclrtoeol(wnd);
	wmove(wnd, 3, 0);
	wclrtoeol(wnd);
	mvwaddstr(wnd, 4, 0,
"File System          Bufs used   %   kB in use   %  Bufsize kB   %  Util %");
	wclrtoeol(wnd);
}

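/*
 * Redraw the screen: three summary lines (metadata buffers, cached file
 * data pages, executable pages), then one row per mount point giving
 * the buffer count, valid data and allocated buffer space, each with
 * its share of the totals, and a utilization column (valid/allocated).
 */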
void
showbufcache(void)
{
	int tbuf, i, lastrow;
	double tvalid, tsize;
	struct ml_entry *ml;

	NREAD(X_BUFMEM, &bufmem, sizeof(bufmem));

	mvwprintw(wnd, 0, 0,
	    "There are %*d metadata buffers using           %*d kBytes of memory.",
	    pgwidth, nbuf, kbwidth, bufmem/1024);
	wclrtoeol(wnd);
	mvwprintw(wnd, 1, 0,
	    "There are %*llu pages for cached file data using %*llu kBytes of memory.",
	    pgwidth, (long long)uvmexp.filepages,
	    kbwidth, (long long) uvmexp.filepages * getpagesize() / 1024);
	wclrtoeol(wnd);
	mvwprintw(wnd, 2, 0,
	    "There are %*llu pages for executables using      %*llu kBytes of memory.",
	    pgwidth, (long long)uvmexp.execpages,
	    kbwidth, (long long) uvmexp.execpages * getpagesize() / 1024);
	wclrtoeol(wnd);

	if (nbuf == 0 || bufmem == 0) {
		wclrtobot(wnd);
		return;
	}

	tbuf = 0;
	tvalid = tsize = 0;
	lastrow = 5;	/* Leave room for header. */
	for (i = lastrow, ml = LIST_FIRST(&mount_list); ml != NULL;
	    i++, ml = LIST_NEXT(ml, ml_entries)) {

		int c = ml->ml_count;
		double v = ml->ml_valid;
		double s = ml->ml_size;

		/* Display in window if enough room. */
		if (i < getmaxy(wnd) - 2) {
			mvwprintw(wnd, i, 0, "%-20.20s", ml->ml_addr == NULL ?
			    "NULL" : ml->ml_mount.mnt_stat.f_mntonname);
			wprintw(wnd,
			    "    %6d %3d    %8ld %3.0f    %8ld %3.0f     %3.0f",
			    c, (100 * c) / nbuf,
			    (long)(v/1024), 100 * v / bufmem,
			    (long)(s/1024), 100 * s / bufmem,
			    100 * v / s);
			wclrtoeol(wnd);
			lastrow = i;
		}

		/* Update statistics. */
		tbuf += c;
		tvalid += v;
		tsize += s;
	}

	wclrtobot(wnd);
	mvwprintw(wnd, lastrow + 2, 0,
	    "%-20s    %6d %3d    %8ld %3.0f    %8ld %3.0f     %3.0f",
	    "Total:", tbuf, (100 * tbuf) / nbuf,
	    (long)(tvalid/1024), 100 * tvalid / bufmem,
	    (long)(tsize/1024), 100 * tsize / bufmem,
	    tsize != 0 ? ((100 * tvalid) / tsize) : 0);
}

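/*
 * One-time setup: resolve the bufmem symbol and derive the column
 * widths (in digits) for the page and kByte figures from the machine's
 * total page count, so the summary lines line up.
 */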
int
initbufcache(void)
{
	if (namelist[0].n_type == 0) {
		if (kvm_nlist(kd, namelist)) {
			nlisterr(namelist);
			return(0);
		}
	}

	fetchuvmexp();
	pgwidth = (int)(floor(log10((double)uvmexp.npages)) + 1);
	kbwidth = (int)(floor(log10(uvmexp.npages * getpagesize() / 1024.0)) + 1);

	return(1);
}

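/*
 * Refresh the UVM statistics (file data and executable page counts)
 * via the CTL_VM / VM_UVMEXP2 sysctl; on failure the local copy is
 * zeroed rather than left stale.
 */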
static void
fetchuvmexp(void)
{
	int mib[2];
	size_t size;

	/* Re-read pages used for vnodes & executables */
	size = sizeof(uvmexp);
	mib[0] = CTL_VM;
	mib[1] = VM_UVMEXP2;
	if (sysctl(mib, 2, &uvmexp, &size, NULL, 0) < 0) {
		error("can't get uvmexp: %s\n", strerror(errno));
		memset(&uvmexp, 0, sizeof(uvmexp));
	}
}

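/*
 * Rebuild the per-mount statistics: fetch every buffer header through
 * the CTL_KERN / KERN_BUF sysctl, charge each buffer to the mount point
 * of its vnode, and keep the mount list roughly sorted by buffer count
 * so the busiest file systems appear first.
 */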
void
fetchbufcache(void)
{
	int count;
	struct buf_sysctl *bp, *buffers;
	struct vnode *vn;
	struct mount *mt;
	struct ml_entry *ml;
	int mib[6];
	size_t size;
	int extraslop = 0;

	/* Re-read pages used for vnodes & executables */
	fetchuvmexp();

	/* Initialise vnode cache and mount list. */
	vc_init();
	ml_init();

	/* Get metadata buffers */
	size = 0;
	buffers = NULL;
	mib[0] = CTL_KERN;
	mib[1] = KERN_BUF;
	mib[2] = KERN_BUF_ALL;
	mib[3] = KERN_BUF_ALL;
	mib[4] = (int)sizeof(struct buf_sysctl);
	mib[5] = INT_MAX; /* we want them all */
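	/*
	 * Ask the kernel how much space the buffer list needs, allocate
	 * that much (plus any slop from a failed attempt) and fetch it.
	 * If the buffer list has grown in the meantime the second call
	 * fails, and it is retried once with room for 100 extra buffers.
	 */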
again:
	if (sysctl(mib, 6, NULL, &size, NULL, 0) < 0) {
		error("can't get buffers size: %s\n", strerror(errno));
		return;
	}
	if (size == 0)
		return;

	size += extraslop * sizeof(struct buf_sysctl);
	buffers = malloc(size);
	if (buffers == NULL) {
		error("can't allocate buffers: %s\n", strerror(errno));
		return;
	}
	if (sysctl(mib, 6, buffers, &size, NULL, 0) < 0) {
		free(buffers);
		if (extraslop == 0) {
			extraslop = 100;
			goto again;
		}
		error("can't get buffers: %s\n", strerror(errno));
		return;
	}

	nbuf = size / sizeof(struct buf_sysctl);
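	/*
	 * Charge each buffer to the mount point of the vnode it is
	 * attached to; buffers on a mounted-on block device vnode are
	 * charged to the mounted file system instead.
	 */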
	for (bp = buffers; bp < buffers + nbuf; bp++) {
		if (UINT64TOPTR(bp->b_vp) != NULL) {
			struct mount *mp;
			vn = vc_lookup(UINT64TOPTR(bp->b_vp));
			if (vn == NULL)
				break;

			mp = vn->v_mount;
			/*
			 * References to mounted-on vnodes should be
			 * counted towards the mounted filesystem.
			 */
			if (vn->v_type == VBLK && vn->v_specinfo != NULL) {
				struct specinfo sp;
				if (!KREAD(vn->v_specinfo, &sp, sizeof(sp)))
					continue;
				if (sp.si_mountpoint)
					mp = sp.si_mountpoint;
			}
			if (mp != NULL)
				mt = ml_lookup(mp,
				    bp->b_bufsize,
				    bp->b_bcount);
		}
	}

	/* simple sort - there's not that many entries */
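	/*
	 * Any entry with a higher count than its predecessor is moved to
	 * the head of the list; passes repeat until nothing moves, which
	 * leaves the list roughly ordered by descending buffer count.
	 */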
	do {
		if ((ml = LIST_FIRST(&mount_list)) == NULL ||
		    LIST_NEXT(ml, ml_entries) == NULL)
			break;

		count = 0;
		for (ml = LIST_FIRST(&mount_list); ml != NULL;
		    ml = LIST_NEXT(ml, ml_entries)) {
			if (LIST_NEXT(ml, ml_entries) == NULL)
				break;
			if (ml->ml_count < LIST_NEXT(ml, ml_entries)->ml_count) {
				ml = LIST_NEXT(ml, ml_entries);
				LIST_REMOVE(ml, ml_entries);
				LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
				count++;
			}
		}
	} while (count != 0);

	free(buffers);
}

static void
vc_init(void)
{
	int i;

	/* vc_addr == NULL for unused cache entry. */
	for (i = 0; i < VCACHE_SIZE; i++)
		vcache[i].vc_addr = NULL;
}

static void
ml_init(void)
{
	struct ml_entry *ml;

	/* Throw out the current mount list and start again. */
	while ((ml = LIST_FIRST(&mount_list)) != NULL) {
		LIST_REMOVE(ml, ml_entries);
		free(ml);
	}
}


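/*
 * Return a cached copy of the vnode at kernel address vaddr, reading it
 * with kvm(3) on a miss.  Each lookup ages every populated slot, a hit
 * resets the matching slot's age, and a miss in a full cache recycles
 * the slot with the highest age.
 */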
static struct vnode *
vc_lookup(struct vnode *vaddr)
{
	struct vnode *ret;
	size_t i, oldest;

	ret = NULL;
	oldest = 0;
	for (i = 0; i < VCACHE_SIZE; i++) {
		vcache[i].vc_age++;
		if (vcache[i].vc_addr == NULL)
			break;
		if (vcache[i].vc_age > vcache[oldest].vc_age)
			oldest = i;
		if (vcache[i].vc_addr == vaddr) {
			vcache[i].vc_age = 0;
			ret = &vcache[i].vc_node;
		}
	}

	/* Found an entry in the cache? */
	if (ret != NULL)
		return(ret);

	/* Ran off the end of the cache? */
	if (i >= VCACHE_SIZE)
		i = oldest;

	/* Read in new vnode and reset age counter. */
	if (KREAD(vaddr, &vcache[i].vc_node, sizeof(struct vnode)) == 0)
		return NULL;
	vcache[i].vc_addr = vaddr;
	vcache[i].vc_age = 0;

	return(&vcache[i].vc_node);
}

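/*
 * Account one buffer (size bytes allocated, valid bytes of data) to the
 * mount at kernel address maddr, creating a new list entry and copying
 * the kernel's struct mount on first sight.  A NULL maddr gets its own
 * bucket, but no struct mount is returned for it.
 */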
static struct mount *
ml_lookup(struct mount *maddr, int size, int valid)
{
	struct ml_entry *ml;

	for (ml = LIST_FIRST(&mount_list); ml != NULL;
	    ml = LIST_NEXT(ml, ml_entries))
		if (ml->ml_addr == maddr) {
			ml->ml_count++;
			ml->ml_size += size;
			ml->ml_valid += valid;
			if (ml->ml_addr == NULL)
				return(NULL);
			else
				return(&ml->ml_mount);
		}

	if ((ml = malloc(sizeof(struct ml_entry))) == NULL) {
		error("out of memory");
		die(0);
	}
	LIST_INSERT_HEAD(&mount_list, ml, ml_entries);
	ml->ml_count = 1;
	ml->ml_size = size;
	ml->ml_valid = valid;
	ml->ml_addr = maddr;
	if (maddr == NULL)
		return(NULL);

	KREAD(maddr, &ml->ml_mount, sizeof(struct mount));
	return(&ml->ml_mount);
}
    430